cm.c
/*
 * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
        .name   = "cm",
        .add    = cm_add_one,
        .remove = cm_remove_one
};

static struct ib_cm {
        spinlock_t lock;
        struct list_head device_list;
        rwlock_t device_lock;
        struct rb_root listen_service_table;
        u64 listen_service_id;
        /* struct rb_root peer_service_table; todo: fix peer to peer */
        struct rb_root remote_qp_table;
        struct rb_root remote_id_table;
        struct rb_root remote_sidr_table;
        struct idr local_id_table;
        struct workqueue_struct *wq;
} cm;
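
/*
 * Global CM state: connection identifiers are allocated from
 * local_id_table, listens are tracked per device in
 * listen_service_table, and the remote id/qp/sidr trees allow
 * incoming messages to be matched against existing, duplicate,
 * or stale connections.
 */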
struct cm_port {
        struct cm_device *cm_dev;
        struct ib_mad_agent *mad_agent;
        u8 port_num;
};

struct cm_device {
        struct list_head list;
        struct ib_device *device;
        __be64 ca_guid;
        struct cm_port port[0];
};

struct cm_av {
        struct cm_port *port;
        union ib_gid dgid;
        struct ib_ah_attr ah_attr;
        u16 pkey_index;
        u8 packet_life_time;
};

struct cm_work {
        struct work_struct work;
        struct list_head list;
        struct cm_port *port;
        struct ib_mad_recv_wc *mad_recv_wc;     /* Received MADs */
        __be32 local_id;                        /* Established / timewait */
        __be32 remote_id;
        struct ib_cm_event cm_event;
        struct ib_sa_path_rec path[0];
};

struct cm_timewait_info {
        struct cm_work work;                    /* Must be first. */
        struct rb_node remote_qp_node;
        struct rb_node remote_id_node;
        __be64 remote_ca_guid;
        __be32 remote_qpn;
        u8 inserted_remote_qp;
        u8 inserted_remote_id;
};

struct cm_id_private {
        struct ib_cm_id id;

        struct rb_node service_node;
        struct rb_node sidr_id_node;
        spinlock_t lock;
        wait_queue_head_t wait;
        atomic_t refcount;

        struct ib_mad_send_buf *msg;
        struct cm_timewait_info *timewait_info;
        /* todo: use alternate port on send failure */
        struct cm_av av;
        struct cm_av alt_av;

        void *private_data;
        __be64 tid;
        __be32 local_qpn;
        __be32 remote_qpn;
        enum ib_qp_type qp_type;
        __be32 sq_psn;
        __be32 rq_psn;
        int timeout_ms;
        enum ib_mtu path_mtu;
        u8 private_data_len;
        u8 max_cm_retries;
        u8 peer_to_peer;
        u8 responder_resources;
        u8 initiator_depth;
        u8 local_ack_timeout;
        u8 retry_count;
        u8 rnr_retry_count;
        u8 service_timeout;

        struct list_head work_list;
        atomic_t work_count;
};

static void cm_work_handler(void *data);
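
/*
 * Each outstanding MAD and queued work item holds a reference on the
 * cm_id_private.  ib_destroy_cm_id() drops the creation reference and
 * then sleeps on the wait queue until cm_deref_id() releases the last
 * reference and issues the wake-up.
 */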
static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
        if (atomic_dec_and_test(&cm_id_priv->refcount))
                wake_up(&cm_id_priv->wait);
}

static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
                        struct ib_mad_send_buf **msg)
{
        struct ib_mad_agent *mad_agent;
        struct ib_mad_send_buf *m;
        struct ib_ah *ah;

        mad_agent = cm_id_priv->av.port->mad_agent;
        ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
        if (IS_ERR(ah))
                return PTR_ERR(ah);

        m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
                               cm_id_priv->av.pkey_index,
                               0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                               GFP_ATOMIC);
        if (IS_ERR(m)) {
                ib_destroy_ah(ah);
                return PTR_ERR(m);
        }

        /* Timeout set by caller if response is expected. */
        m->ah = ah;
        m->retries = cm_id_priv->max_cm_retries;

        atomic_inc(&cm_id_priv->refcount);
        m->context[0] = cm_id_priv;
        *msg = m;
        return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
                                 struct ib_mad_recv_wc *mad_recv_wc,
                                 struct ib_mad_send_buf **msg)
{
        struct ib_mad_send_buf *m;
        struct ib_ah *ah;

        ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
                                  mad_recv_wc->recv_buf.grh, port->port_num);
        if (IS_ERR(ah))
                return PTR_ERR(ah);

        m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
                               0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                               GFP_ATOMIC);
        if (IS_ERR(m)) {
                ib_destroy_ah(ah);
                return PTR_ERR(m);
        }
        m->ah = ah;
        *msg = m;
        return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
        ib_destroy_ah(msg->ah);
        if (msg->context[0])
                cm_deref_id(msg->context[0]);
        ib_free_send_mad(msg);
}

static void * cm_copy_private_data(const void *private_data,
                                   u8 private_data_len)
{
        void *data;

        if (!private_data || !private_data_len)
                return NULL;

        data = kmalloc(private_data_len, GFP_KERNEL);
        if (!data)
                return ERR_PTR(-ENOMEM);

        memcpy(data, private_data, private_data_len);
        return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
                                void *private_data, u8 private_data_len)
{
        if (cm_id_priv->private_data && cm_id_priv->private_data_len)
                kfree(cm_id_priv->private_data);

        cm_id_priv->private_data = private_data;
        cm_id_priv->private_data_len = private_data_len;
}
static void cm_set_ah_attr(struct ib_ah_attr *ah_attr, u8 port_num,
                           u16 dlid, u8 sl, u16 src_path_bits)
{
        /* Zero the struct, not the pointer (was "sizeof ah_attr"). */
        memset(ah_attr, 0, sizeof *ah_attr);
        ah_attr->dlid = dlid;
        ah_attr->sl = sl;
        ah_attr->src_path_bits = src_path_bits;
        ah_attr->port_num = port_num;
}

static void cm_init_av_for_response(struct cm_port *port,
                                    struct ib_wc *wc, struct cm_av *av)
{
        av->port = port;
        av->pkey_index = wc->pkey_index;
        cm_set_ah_attr(&av->ah_attr, port->port_num, wc->slid,
                       wc->sl, wc->dlid_path_bits);
}
static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
        struct cm_device *cm_dev;
        struct cm_port *port = NULL;
        unsigned long flags;
        int ret;
        u8 p;

        read_lock_irqsave(&cm.device_lock, flags);
        list_for_each_entry(cm_dev, &cm.device_list, list) {
                if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
                                        &p, NULL)) {
                        port = &cm_dev->port[p-1];
                        break;
                }
        }
        read_unlock_irqrestore(&cm.device_lock, flags);

        if (!port)
                return -EINVAL;

        ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
                                  be16_to_cpu(path->pkey), &av->pkey_index);
        if (ret)
                return ret;

        av->port = port;
        cm_set_ah_attr(&av->ah_attr, av->port->port_num,
                       be16_to_cpu(path->dlid), path->sl,
                       be16_to_cpu(path->slid) & 0x7F);
        av->packet_life_time = path->packet_life_time;
        return 0;
}
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
        unsigned long flags;
        int ret;

        do {
                spin_lock_irqsave(&cm.lock, flags);
                ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 1,
                                        (__force int *) &cm_id_priv->id.local_id);
                spin_unlock_irqrestore(&cm.lock, flags);
        } while ((ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL));

        return ret;
}

static void cm_free_id(__be32 local_id)
{
        unsigned long flags;

        spin_lock_irqsave(&cm.lock, flags);
        idr_remove(&cm.local_id_table, (__force int) local_id);
        spin_unlock_irqrestore(&cm.lock, flags);
}
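
/*
 * Look up a cm_id by local ID and verify the remote ID, taking a
 * reference on success.  The caller must hold cm.lock; use
 * cm_acquire_id() for a locked lookup.
 */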
static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
        struct cm_id_private *cm_id_priv;

        cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
        if (cm_id_priv) {
                if (cm_id_priv->id.remote_id == remote_id)
                        atomic_inc(&cm_id_priv->refcount);
                else
                        cm_id_priv = NULL;
        }
        return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
        struct cm_id_private *cm_id_priv;
        unsigned long flags;

        spin_lock_irqsave(&cm.lock, flags);
        cm_id_priv = cm_get_id(local_id, remote_id);
        spin_unlock_irqrestore(&cm.lock, flags);

        return cm_id_priv;
}
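
/*
 * Insert a listening cm_id into the service tree, which is keyed on
 * device and service ID.  Returns NULL on success, or the existing
 * entry if the masked service ID overlaps a registration already
 * present on the same device.  The caller must hold cm.lock.
 */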
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
        struct rb_node **link = &cm.listen_service_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        __be64 service_id = cm_id_priv->id.service_id;
        __be64 service_mask = cm_id_priv->id.service_mask;

        while (*link) {
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          service_node);
                if ((cur_cm_id_priv->id.service_mask & service_id) ==
                    (service_mask & cur_cm_id_priv->id.service_id) &&
                    (cm_id_priv->id.device == cur_cm_id_priv->id.device))
                        return cur_cm_id_priv;

                if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
                        link = &(*link)->rb_left;
                else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
                        link = &(*link)->rb_right;
                else if (service_id < cur_cm_id_priv->id.service_id)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }
        rb_link_node(&cm_id_priv->service_node, parent, link);
        rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
        return NULL;
}

static struct cm_id_private * cm_find_listen(struct ib_device *device,
                                             __be64 service_id)
{
        struct rb_node *node = cm.listen_service_table.rb_node;
        struct cm_id_private *cm_id_priv;

        while (node) {
                cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
                if ((cm_id_priv->id.service_mask & service_id) ==
                     cm_id_priv->id.service_id &&
                    (cm_id_priv->id.device == device))
                        return cm_id_priv;

                if (device < cm_id_priv->id.device)
                        node = node->rb_left;
                else if (device > cm_id_priv->id.device)
                        node = node->rb_right;
                else if (service_id < cm_id_priv->id.service_id)
                        node = node->rb_left;
                else
                        node = node->rb_right;
        }
        return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
                                                     *timewait_info)
{
        struct rb_node **link = &cm.remote_id_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_timewait_info *cur_timewait_info;
        __be64 remote_ca_guid = timewait_info->remote_ca_guid;
        __be32 remote_id = timewait_info->work.remote_id;

        while (*link) {
                parent = *link;
                cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
                                             remote_id_node);
                if (remote_id < cur_timewait_info->work.remote_id)
                        link = &(*link)->rb_left;
                else if (remote_id > cur_timewait_info->work.remote_id)
                        link = &(*link)->rb_right;
                else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_left;
                else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_right;
                else
                        return cur_timewait_info;
        }
        timewait_info->inserted_remote_id = 1;
        rb_link_node(&timewait_info->remote_id_node, parent, link);
        rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
        return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
                                                   __be32 remote_id)
{
        struct rb_node *node = cm.remote_id_table.rb_node;
        struct cm_timewait_info *timewait_info;

        while (node) {
                timewait_info = rb_entry(node, struct cm_timewait_info,
                                         remote_id_node);
                if (remote_id < timewait_info->work.remote_id)
                        node = node->rb_left;
                else if (remote_id > timewait_info->work.remote_id)
                        node = node->rb_right;
                else if (remote_ca_guid < timewait_info->remote_ca_guid)
                        node = node->rb_left;
                else if (remote_ca_guid > timewait_info->remote_ca_guid)
                        node = node->rb_right;
                else
                        return timewait_info;
        }
        return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
                                                      *timewait_info)
{
        struct rb_node **link = &cm.remote_qp_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_timewait_info *cur_timewait_info;
        __be64 remote_ca_guid = timewait_info->remote_ca_guid;
        __be32 remote_qpn = timewait_info->remote_qpn;

        while (*link) {
                parent = *link;
                cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
                                             remote_qp_node);
                if (remote_qpn < cur_timewait_info->remote_qpn)
                        link = &(*link)->rb_left;
                else if (remote_qpn > cur_timewait_info->remote_qpn)
                        link = &(*link)->rb_right;
                else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_left;
                else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_right;
                else
                        return cur_timewait_info;
        }
        timewait_info->inserted_remote_qp = 1;
        rb_link_node(&timewait_info->remote_qp_node, parent, link);
        rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
        return NULL;
}

static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
                                                    *cm_id_priv)
{
        struct rb_node **link = &cm.remote_sidr_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        union ib_gid *port_gid = &cm_id_priv->av.dgid;
        __be32 remote_id = cm_id_priv->id.remote_id;

        while (*link) {
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          sidr_id_node);
                if (remote_id < cur_cm_id_priv->id.remote_id)
                        link = &(*link)->rb_left;
                else if (remote_id > cur_cm_id_priv->id.remote_id)
                        link = &(*link)->rb_right;
                else {
                        int cmp;
                        cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
                                     sizeof *port_gid);
                        if (cmp < 0)
                                link = &(*link)->rb_left;
                        else if (cmp > 0)
                                link = &(*link)->rb_right;
                        else
                                return cur_cm_id_priv;
                }
        }
        rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
        rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
        return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
                               enum ib_cm_sidr_status status)
{
        struct ib_cm_sidr_rep_param param;

        memset(&param, 0, sizeof param);
        param.status = status;
        ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}
struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
                                 ib_cm_handler cm_handler,
                                 void *context)
{
        struct cm_id_private *cm_id_priv;
        int ret;

        cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
        if (!cm_id_priv)
                return ERR_PTR(-ENOMEM);

        cm_id_priv->id.state = IB_CM_IDLE;
        cm_id_priv->id.device = device;
        cm_id_priv->id.cm_handler = cm_handler;
        cm_id_priv->id.context = context;
        cm_id_priv->id.remote_cm_qpn = 1;
        ret = cm_alloc_id(cm_id_priv);
        if (ret)
                goto error;

        spin_lock_init(&cm_id_priv->lock);
        init_waitqueue_head(&cm_id_priv->wait);
        INIT_LIST_HEAD(&cm_id_priv->work_list);
        atomic_set(&cm_id_priv->work_count, -1);
        atomic_set(&cm_id_priv->refcount, 1);
        return &cm_id_priv->id;

error:
        kfree(cm_id_priv);
        /* Propagate the cm_alloc_id() error rather than assuming -ENOMEM. */
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_create_cm_id);
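
/*
 * Usage sketch (illustrative only; my_cm_handler, my_device and
 * my_context are hypothetical caller-side names):
 *
 *      static int my_cm_handler(struct ib_cm_id *cm_id,
 *                               struct ib_cm_event *event)
 *      {
 *              return 0;       // returning non-zero destroys the cm_id
 *      }
 *
 *      cm_id = ib_create_cm_id(my_device, my_cm_handler, my_context);
 *      if (IS_ERR(cm_id))
 *              return PTR_ERR(cm_id);
 */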
static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
        struct cm_work *work;

        if (list_empty(&cm_id_priv->work_list))
                return NULL;

        work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
        list_del(&work->list);
        return work;
}

static void cm_free_work(struct cm_work *work)
{
        if (work->mad_recv_wc)
                ib_free_recv_mad(work->mad_recv_wc);
        kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
        /* approximate conversion to ms from 4.096us x 2^iba_time */
        return 1 << max(iba_time - 8, 0);
}
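
/*
 * Example: an IBA time of 14 represents 4.096us * 2^14 ~= 67ms;
 * cm_convert_to_ms(14) returns 1 << 6 = 64ms, a close approximation.
 */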
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
        unsigned long flags;

        if (!timewait_info->inserted_remote_id &&
            !timewait_info->inserted_remote_qp)
                return;

        spin_lock_irqsave(&cm.lock, flags);
        if (timewait_info->inserted_remote_id) {
                rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
                timewait_info->inserted_remote_id = 0;
        }

        if (timewait_info->inserted_remote_qp) {
                rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
                timewait_info->inserted_remote_qp = 0;
        }
        spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
        struct cm_timewait_info *timewait_info;

        timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
        if (!timewait_info)
                return ERR_PTR(-ENOMEM);

        timewait_info->work.local_id = local_id;
        INIT_WORK(&timewait_info->work.work, cm_work_handler,
                  &timewait_info->work);
        timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
        return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
        int wait_time;

        /*
         * The cm_id could be destroyed by the user before we exit timewait.
         * To protect against this, we search for the cm_id after exiting
         * timewait before notifying the user that we've exited timewait.
         */
        cm_id_priv->id.state = IB_CM_TIMEWAIT;
        wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
        queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
                           msecs_to_jiffies(wait_time));
        cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
        cm_id_priv->id.state = IB_CM_IDLE;
        if (cm_id_priv->timewait_info) {
                cm_cleanup_timewait(cm_id_priv->timewait_info);
                kfree(cm_id_priv->timewait_info);
                cm_id_priv->timewait_info = NULL;
        }
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
        struct cm_id_private *cm_id_priv;
        struct cm_work *work;
        unsigned long flags;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id->state) {
        case IB_CM_LISTEN:
                cm_id->state = IB_CM_IDLE;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                spin_lock_irqsave(&cm.lock, flags);
                rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
                spin_unlock_irqrestore(&cm.lock, flags);
                break;
        case IB_CM_SIDR_REQ_SENT:
                cm_id->state = IB_CM_IDLE;
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                break;
        case IB_CM_SIDR_REQ_RCVD:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
                break;
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                /* Fall through */
        case IB_CM_REQ_RCVD:
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
                               &cm_id_priv->av.port->cm_dev->ca_guid,
                               sizeof cm_id_priv->av.port->cm_dev->ca_guid,
                               NULL, 0);
                break;
        case IB_CM_ESTABLISHED:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_dreq(cm_id, NULL, 0);
                goto retest;
        case IB_CM_DREQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                cm_enter_timewait(cm_id_priv);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                break;
        case IB_CM_DREQ_RCVD:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_drep(cm_id, NULL, 0);
                break;
        default:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                break;
        }

        cm_free_id(cm_id->local_id);
        atomic_dec(&cm_id_priv->refcount);
        wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount));
        while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
                cm_free_work(work);
        if (cm_id_priv->private_data && cm_id_priv->private_data_len)
                kfree(cm_id_priv->private_data);
        kfree(cm_id_priv);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
int ib_cm_listen(struct ib_cm_id *cm_id,
                 __be64 service_id,
                 __be64 service_mask)
{
        struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
        unsigned long flags;
        int ret = 0;

        service_mask = service_mask ? service_mask :
                       __constant_cpu_to_be64(~0ULL);
        service_id &= service_mask;
        if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
            (service_id != IB_CM_ASSIGN_SERVICE_ID))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        BUG_ON(cm_id->state != IB_CM_IDLE);

        cm_id->state = IB_CM_LISTEN;

        spin_lock_irqsave(&cm.lock, flags);
        if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
                cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
                cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
        } else {
                cm_id->service_id = service_id;
                cm_id->service_mask = service_mask;
        }
        cur_cm_id_priv = cm_insert_listen(cm_id_priv);
        spin_unlock_irqrestore(&cm.lock, flags);

        if (cur_cm_id_priv) {
                cm_id->state = IB_CM_IDLE;
                ret = -EBUSY;
        }
        return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
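
/*
 * Usage sketch (illustrative; the service ID value is hypothetical):
 *
 *      ret = ib_cm_listen(cm_id, cpu_to_be64(0x1000ULL), 0);
 *
 * A zero service_mask is treated as an exact match (~0ULL), so this
 * listens for REQs addressed to service ID 0x1000 only.
 */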
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
                          enum cm_msg_sequence msg_seq)
{
        u64 hi_tid, low_tid;

        hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
        low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
                         (msg_seq << 30));
        return cpu_to_be64(hi_tid | low_tid);
}
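
/*
 * The transaction ID places the MAD agent's hi_tid in the upper 32
 * bits; the lower 32 bits hold the local communication ID, with the
 * message sequence type encoded in bits 30-31.
 */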
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
                              __be16 attr_id, __be64 tid)
{
        hdr->base_version = IB_MGMT_BASE_VERSION;
        hdr->mgmt_class = IB_MGMT_CLASS_CM;
        hdr->class_version = IB_CM_CLASS_VERSION;
        hdr->method = IB_MGMT_METHOD_SEND;
        hdr->attr_id = attr_id;
        hdr->tid = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
                          struct cm_id_private *cm_id_priv,
                          struct ib_cm_req_param *param)
{
        cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
                          cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

        req_msg->local_comm_id = cm_id_priv->id.local_id;
        req_msg->service_id = param->service_id;
        req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
        cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
        cm_req_set_resp_res(req_msg, param->responder_resources);
        cm_req_set_init_depth(req_msg, param->initiator_depth);
        cm_req_set_remote_resp_timeout(req_msg,
                                       param->remote_cm_response_timeout);
        cm_req_set_qp_type(req_msg, param->qp_type);
        cm_req_set_flow_ctrl(req_msg, param->flow_control);
        cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
        cm_req_set_local_resp_timeout(req_msg,
                                      param->local_cm_response_timeout);
        cm_req_set_retry_count(req_msg, param->retry_count);
        req_msg->pkey = param->primary_path->pkey;
        cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
        cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
        cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
        cm_req_set_srq(req_msg, param->srq);

        req_msg->primary_local_lid = param->primary_path->slid;
        req_msg->primary_remote_lid = param->primary_path->dlid;
        req_msg->primary_local_gid = param->primary_path->sgid;
        req_msg->primary_remote_gid = param->primary_path->dgid;
        cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
        cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
        req_msg->primary_traffic_class = param->primary_path->traffic_class;
        req_msg->primary_hop_limit = param->primary_path->hop_limit;
        cm_req_set_primary_sl(req_msg, param->primary_path->sl);
        cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
        cm_req_set_primary_local_ack_timeout(req_msg,
                min(31, param->primary_path->packet_life_time + 1));

        if (param->alternate_path) {
                req_msg->alt_local_lid = param->alternate_path->slid;
                req_msg->alt_remote_lid = param->alternate_path->dlid;
                req_msg->alt_local_gid = param->alternate_path->sgid;
                req_msg->alt_remote_gid = param->alternate_path->dgid;
                cm_req_set_alt_flow_label(req_msg,
                                          param->alternate_path->flow_label);
                cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
                req_msg->alt_traffic_class = param->alternate_path->traffic_class;
                req_msg->alt_hop_limit = param->alternate_path->hop_limit;
                cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
                cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
                cm_req_set_alt_local_ack_timeout(req_msg,
                        min(31, param->alternate_path->packet_life_time + 1));
        }

        if (param->private_data && param->private_data_len)
                memcpy(req_msg->private_data, param->private_data,
                       param->private_data_len);
}

static inline int cm_validate_req_param(struct ib_cm_req_param *param)
{
        /* peer-to-peer not supported */
        if (param->peer_to_peer)
                return -EINVAL;

        if (!param->primary_path)
                return -EINVAL;

        if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
                return -EINVAL;

        if (param->private_data &&
            param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
                return -EINVAL;

        if (param->alternate_path &&
            (param->alternate_path->pkey != param->primary_path->pkey ||
             param->alternate_path->mtu != param->primary_path->mtu))
                return -EINVAL;

        return 0;
}
int ib_send_cm_req(struct ib_cm_id *cm_id,
                   struct ib_cm_req_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct cm_req_msg *req_msg;
        unsigned long flags;
        int ret;

        ret = cm_validate_req_param(param);
        if (ret)
                return ret;

        /* Verify that we're not in timewait. */
        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_IDLE) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = -EINVAL;
                goto out;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
                                                            id.local_id);
        if (IS_ERR(cm_id_priv->timewait_info)) {
                /* Without this assignment, ret would still be 0 here. */
                ret = PTR_ERR(cm_id_priv->timewait_info);
                goto out;
        }

        ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
        if (ret)
                goto error1;
        if (param->alternate_path) {
                ret = cm_init_av_by_path(param->alternate_path,
                                         &cm_id_priv->alt_av);
                if (ret)
                        goto error1;
        }
        cm_id->service_id = param->service_id;
        cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
        cm_id_priv->timeout_ms = cm_convert_to_ms(
                                    param->primary_path->packet_life_time) * 2 +
                                 cm_convert_to_ms(
                                    param->remote_cm_response_timeout);
        cm_id_priv->max_cm_retries = param->max_cm_retries;
        cm_id_priv->initiator_depth = param->initiator_depth;
        cm_id_priv->responder_resources = param->responder_resources;
        cm_id_priv->retry_count = param->retry_count;
        cm_id_priv->path_mtu = param->primary_path->mtu;
        cm_id_priv->qp_type = param->qp_type;

        ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
        if (ret)
                goto error1;

        req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
        cm_format_req(req_msg, cm_id_priv, param);
        cm_id_priv->tid = req_msg->hdr.tid;
        cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
        cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

        cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
        cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
        cm_id_priv->local_ack_timeout =
                                cm_req_get_primary_local_ack_timeout(req_msg);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        ret = ib_post_send_mad(cm_id_priv->msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto error2;
        }
        BUG_ON(cm_id->state != IB_CM_IDLE);
        cm_id->state = IB_CM_REQ_SENT;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return 0;

error2: cm_free_msg(cm_id_priv->msg);
error1: kfree(cm_id_priv->timewait_info);
out:    return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
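
/*
 * Usage sketch (illustrative; path, qp and the parameter values are
 * hypothetical and would come from an SA path record query and the
 * caller's QP):
 *
 *      struct ib_cm_req_param param;
 *
 *      memset(&param, 0, sizeof param);
 *      param.primary_path = &path;
 *      param.service_id = cpu_to_be64(0x1000ULL);
 *      param.qp_num = qp->qp_num;
 *      param.qp_type = qp->qp_type;    // IB_QPT_RC or IB_QPT_UC
 *      param.responder_resources = 4;
 *      param.initiator_depth = 4;
 *      param.retry_count = 7;
 *      param.rnr_retry_count = 7;
 *      param.max_cm_retries = 15;
 *      param.remote_cm_response_timeout = 20;
 *      param.local_cm_response_timeout = 20;
 *      ret = ib_send_cm_req(cm_id, &param);
 */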
static int cm_issue_rej(struct cm_port *port,
                        struct ib_mad_recv_wc *mad_recv_wc,
                        enum ib_cm_rej_reason reason,
                        enum cm_msg_response msg_rejected,
                        void *ari, u8 ari_length)
{
        struct ib_mad_send_buf *msg = NULL;
        struct cm_rej_msg *rej_msg, *rcv_msg;
        int ret;

        ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
        if (ret)
                return ret;

        /* We just need common CM header information. Cast to any message. */
        rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
        rej_msg = (struct cm_rej_msg *) msg->mad;

        cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
        rej_msg->remote_comm_id = rcv_msg->local_comm_id;
        rej_msg->local_comm_id = rcv_msg->remote_comm_id;
        cm_rej_set_msg_rejected(rej_msg, msg_rejected);
        rej_msg->reason = cpu_to_be16(reason);

        if (ari && ari_length) {
                cm_rej_set_reject_info_len(rej_msg, ari_length);
                memcpy(rej_msg->ari, ari, ari_length);
        }

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                cm_free_msg(msg);

        return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
                                    __be32 local_qpn, __be32 remote_qpn)
{
        return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
                ((local_ca_guid == remote_ca_guid) &&
                 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}

static inline void cm_format_paths_from_req(struct cm_req_msg *req_msg,
                                            struct ib_sa_path_rec *primary_path,
                                            struct ib_sa_path_rec *alt_path)
{
        memset(primary_path, 0, sizeof *primary_path);
        primary_path->dgid = req_msg->primary_local_gid;
        primary_path->sgid = req_msg->primary_remote_gid;
        primary_path->dlid = req_msg->primary_local_lid;
        primary_path->slid = req_msg->primary_remote_lid;
        primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
        primary_path->hop_limit = req_msg->primary_hop_limit;
        primary_path->traffic_class = req_msg->primary_traffic_class;
        primary_path->reversible = 1;
        primary_path->pkey = req_msg->pkey;
        primary_path->sl = cm_req_get_primary_sl(req_msg);
        primary_path->mtu_selector = IB_SA_EQ;
        primary_path->mtu = cm_req_get_path_mtu(req_msg);
        primary_path->rate_selector = IB_SA_EQ;
        primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
        primary_path->packet_life_time_selector = IB_SA_EQ;
        primary_path->packet_life_time =
                cm_req_get_primary_local_ack_timeout(req_msg);
        primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

        if (req_msg->alt_local_lid) {
                memset(alt_path, 0, sizeof *alt_path);
                alt_path->dgid = req_msg->alt_local_gid;
                alt_path->sgid = req_msg->alt_remote_gid;
                alt_path->dlid = req_msg->alt_local_lid;
                alt_path->slid = req_msg->alt_remote_lid;
                alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
                alt_path->hop_limit = req_msg->alt_hop_limit;
                alt_path->traffic_class = req_msg->alt_traffic_class;
                alt_path->reversible = 1;
                alt_path->pkey = req_msg->pkey;
                alt_path->sl = cm_req_get_alt_sl(req_msg);
                alt_path->mtu_selector = IB_SA_EQ;
                alt_path->mtu = cm_req_get_path_mtu(req_msg);
                alt_path->rate_selector = IB_SA_EQ;
                alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
                alt_path->packet_life_time_selector = IB_SA_EQ;
                alt_path->packet_life_time =
                        cm_req_get_alt_local_ack_timeout(req_msg);
                alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
        }
}

static void cm_format_req_event(struct cm_work *work,
                                struct cm_id_private *cm_id_priv,
                                struct ib_cm_id *listen_id)
{
        struct cm_req_msg *req_msg;
        struct ib_cm_req_event_param *param;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.req_rcvd;
        param->listen_id = listen_id;
        param->port = cm_id_priv->av.port->port_num;
        param->primary_path = &work->path[0];
        if (req_msg->alt_local_lid)
                param->alternate_path = &work->path[1];
        else
                param->alternate_path = NULL;
        param->remote_ca_guid = req_msg->local_ca_guid;
        param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
        param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
        param->qp_type = cm_req_get_qp_type(req_msg);
        param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
        param->responder_resources = cm_req_get_init_depth(req_msg);
        param->initiator_depth = cm_req_get_resp_res(req_msg);
        param->local_cm_response_timeout =
                cm_req_get_remote_resp_timeout(req_msg);
        param->flow_control = cm_req_get_flow_ctrl(req_msg);
        param->remote_cm_response_timeout =
                cm_req_get_local_resp_timeout(req_msg);
        param->retry_count = cm_req_get_retry_count(req_msg);
        param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
        param->srq = cm_req_get_srq(req_msg);
        work->cm_event.private_data = &req_msg->private_data;
}

static void cm_process_work(struct cm_id_private *cm_id_priv,
                            struct cm_work *work)
{
        unsigned long flags;
        int ret;

        /* We will typically only have the current event to report. */
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
        cm_free_work(work);

        while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                work = cm_dequeue_work(cm_id_priv);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                BUG_ON(!work);
                ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
                                                &work->cm_event);
                cm_free_work(work);
        }
        cm_deref_id(cm_id_priv);
        if (ret)
                ib_destroy_cm_id(&cm_id_priv->id);
}
static void cm_format_mra(struct cm_mra_msg *mra_msg,
                          struct cm_id_private *cm_id_priv,
                          enum cm_msg_response msg_mraed, u8 service_timeout,
                          const void *private_data, u8 private_data_len)
{
        cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
        cm_mra_set_msg_mraed(mra_msg, msg_mraed);
        mra_msg->local_comm_id = cm_id_priv->id.local_id;
        mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_mra_set_service_timeout(mra_msg, service_timeout);

        if (private_data && private_data_len)
                memcpy(mra_msg->private_data, private_data, private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
                          struct cm_id_private *cm_id_priv,
                          enum ib_cm_rej_reason reason,
                          void *ari,
                          u8 ari_length,
                          const void *private_data,
                          u8 private_data_len)
{
        cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
        rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_RCVD:
                rej_msg->local_comm_id = 0;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
                break;
        case IB_CM_MRA_REQ_SENT:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
                break;
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
                break;
        default:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
                break;
        }

        rej_msg->reason = cpu_to_be16(reason);
        if (ari && ari_length) {
                cm_rej_set_reject_info_len(rej_msg, ari_length);
                memcpy(rej_msg->ari, ari, ari_length);
        }

        if (private_data && private_data_len)
                memcpy(rej_msg->private_data, private_data, private_data_len);
}

static void cm_dup_req_handler(struct cm_work *work,
                               struct cm_id_private *cm_id_priv)
{
        struct ib_mad_send_buf *msg = NULL;
        unsigned long flags;
        int ret;

        /* Quick state check to discard duplicate REQs. */
        if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
                return;

        ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
        if (ret)
                return;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_MRA_REQ_SENT:
                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
                break;
        case IB_CM_TIMEWAIT:
                cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
                              IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
                break;
        default:
                goto unlock;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                goto free;
        return;

unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:   cm_free_msg(msg);
}

static struct cm_id_private * cm_match_req(struct cm_work *work,
                                           struct cm_id_private *cm_id_priv)
{
        struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
        struct cm_timewait_info *timewait_info;
        struct cm_req_msg *req_msg;
        unsigned long flags;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

        /* Check for duplicate REQ and stale connections. */
        spin_lock_irqsave(&cm.lock, flags);
        timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
        if (!timewait_info)
                timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);

        if (timewait_info) {
                cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
                                           timewait_info->work.remote_id);
                spin_unlock_irqrestore(&cm.lock, flags);
                if (cur_cm_id_priv) {
                        cm_dup_req_handler(work, cur_cm_id_priv);
                        cm_deref_id(cur_cm_id_priv);
                } else
                        cm_issue_rej(work->port, work->mad_recv_wc,
                                     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
                                     NULL, 0);
                goto error;
        }

        /* Find matching listen request. */
        listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
                                           req_msg->service_id);
        if (!listen_cm_id_priv) {
                spin_unlock_irqrestore(&cm.lock, flags);
                cm_issue_rej(work->port, work->mad_recv_wc,
                             IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
                             NULL, 0);
                goto error;
        }
        atomic_inc(&listen_cm_id_priv->refcount);
        atomic_inc(&cm_id_priv->refcount);
        cm_id_priv->id.state = IB_CM_REQ_RCVD;
        atomic_inc(&cm_id_priv->work_count);
        spin_unlock_irqrestore(&cm.lock, flags);
        return listen_cm_id_priv;

error:  cm_cleanup_timewait(cm_id_priv->timewait_info);
        return NULL;
}
static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto error1;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		goto error2;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret)
		goto error3;
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret)
			goto error3;
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

error3:	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
error2:	kfree(cm_id_priv->timewait_info);
	cm_id_priv->timewait_info = NULL;
error1:	ib_destroy_cm_id(&cm_id_priv->id);
	return ret;
}

static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	rep_msg->initiator_depth = param->initiator_depth;
	cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	cm_rep_set_srq(rep_msg, param->srq);
	rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}
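
/*
 * Send a REP accepting a received REQ.  Legal only in REQ_RCVD or
 * MRA_REQ_SENT; on success the id moves to REP_SENT and the MAD is
 * retried until the peer's RTU cancels it.
 *
 * A minimal caller sketch (hypothetical consumer code, not from this
 * file; my_qp and my_psn are placeholders) from an IB_CM_REQ_RECEIVED
 * callback:
 *
 *	struct ib_cm_rep_param rep = {
 *		.qp_num			= my_qp->qp_num,
 *		.starting_psn		= my_psn,
 *		.responder_resources	= 1,
 *		.initiator_depth	= 1,
 *	};
 *	ret = ib_send_cm_rep(cm_id, &rep);
 */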
int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);

static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}
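
/*
 * Complete establishment from the active side.  Only valid in REP_RCVD or
 * MRA_REP_SENT; the private data is cached so a duplicate REP can be
 * re-answered with an identical RTU.
 */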
int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);

static void cm_format_rep_event(struct cm_work *work)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}
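
/*
 * A REP that does not match a connecting id is treated as a possible
 * retransmission: re-answer it according to our current state (RTU once
 * established, MRA if the REP had been MRAed) so the active side can make
 * forward progress.
 */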
static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}
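
/*
 * Active-side handling of a REP: record the remote connection parameters,
 * use the timewait tables to detect duplicate REPs and stale connections,
 * cancel the retrying REQ, and queue the event to the consumer.
 */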
static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		return -EINVAL;
	}

	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

	spin_lock_irqsave(&cm.lock, flags);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock_irqrestore(&cm.lock, flags);
		ret = -EINVAL;
		goto error;
	}
	/* Check for a stale connection. */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		spin_unlock_irqrestore(&cm.lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		goto error;
	}
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_format_rep_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}
	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
	cm_id_priv->initiator_depth = rep_msg->resp_resources;
	cm_id_priv->responder_resources = rep_msg->initiator_depth;
	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);

	/* todo: handle peer_to_peer */

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

error:	cm_cleanup_timewait(cm_id_priv->timewait_info);
	cm_deref_id(cm_id_priv);
	return ret;
}

static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	/* See comment in ib_cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	unsigned long flags;
	int ret;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &rtu_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
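
/*
 * Disconnect path.  A DREQ may only be sent from ESTABLISHED; if the
 * message cannot be allocated or posted the id still drops into timewait,
 * so the connection is torn down regardless.
 */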
static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

	if (private_data && private_data_len)
		memcpy(dreq_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_dreq(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		goto out;
	}

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_DREQ_SENT;
	cm_id_priv->msg = msg;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);

static void cm_format_drep(struct cm_drep_msg *drep_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
	drep_msg->local_comm_id = cm_id_priv->id.local_id;
	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(drep_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_drep(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		kfree(data);
		return -EINVAL;
	}

	cm_set_private_data(cm_id_priv, data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);
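
/*
 * Handle a received DREQ.  An id already in TIMEWAIT re-sends the DREP
 * directly, since the earlier one may have been lost; ids with a REP or
 * DREQ outstanding cancel that MAD before moving to DREQ_RCVD.
 */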
static int cm_dreq_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_dreq_msg *dreq_msg;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
				   dreq_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &dreq_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
		goto unlock;

	switch (cm_id_priv->id.state) {
	case IB_CM_REP_SENT:
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_ESTABLISHED:
	case IB_CM_MRA_REP_RCVD:
		break;
	case IB_CM_TIMEWAIT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
			       cm_id_priv->private_data,
			       cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}
	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
	cm_id_priv->tid = dreq_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;
	unsigned long flags;
	int ret;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
				   drep_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &drep_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
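
/*
 * Send a REJ.  Rejecting before our REP has gone out resets the id to
 * IDLE; rejecting after REP_SENT enters timewait instead, since the
 * remote QP may already have transitioned.
 */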
int ib_send_cm_rej(struct ib_cm_id *cm_id,
		   enum ib_cm_rej_reason reason,
		   void *ari,
		   u8 ari_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_enter_timewait(cm_id_priv);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (ret)
		goto out;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);

static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = rej_msg->ari;
	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
	param->reason = __be16_to_cpu(rej_msg->reason);
	work->cm_event.private_data = &rej_msg->private_data;
}

static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	__be32 remote_id;

	remote_id = rej_msg->local_comm_id;

	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
		spin_lock_irqsave(&cm.lock, flags);
		timewait_info = cm_find_remote_id(*((__be64 *) rej_msg->ari),
						  remote_id);
		if (!timewait_info) {
			spin_unlock_irqrestore(&cm.lock, flags);
			return NULL;
		}
		cm_id_priv = idr_find(&cm.local_id_table,
				      (__force int) timewait_info->work.local_id);
		if (cm_id_priv) {
			if (cm_id_priv->id.remote_id == remote_id)
				atomic_inc(&cm_id_priv->refcount);
			else
				cm_id_priv = NULL;
		}
		spin_unlock_irqrestore(&cm.lock, flags);
	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
	else
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

	return cm_id_priv;
}

static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;
	unsigned long flags;
	int ret;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_ESTABLISHED:
		cm_enter_timewait(cm_id_priv);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
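
/*
 * Send an MRA to stretch the peer's timeout for the message being
 * processed (REQ, REP, or LAP).  service_timeout and the private data are
 * cached so a duplicate of the MRAed message can be re-answered.
 */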
int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REQ_SENT;
		break;
	case IB_CM_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REP_SENT;
		break;
	case IB_CM_ESTABLISHED:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
		cm_id->lap_state = IB_CM_MRA_LAP_SENT;
		break;
	default:
		ret = -EINVAL;
		goto error1;
	}
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);

static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (cm_mra_get_msg_mraed(mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(mra_msg->remote_comm_id, 0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(mra_msg->remote_comm_id,
				     mra_msg->local_comm_id);
	default:
		return NULL;
	}
}
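
/*
 * Handle a received MRA: lengthen the retry timeout of our outstanding
 * MAD by the peer's service timeout plus the path's packet life time, and
 * move to the matching MRA_*_RCVD state.
 */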
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	unsigned long flags;
	int timeout, ret;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &mra_msg->private_data;
	work->cm_event.param.mra_rcvd.service_timeout =
					cm_mra_get_service_timeout(mra_msg);
	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.packet_life_time);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	default:
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_lap(struct cm_lap_msg *lap_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_sa_path_rec *alternate_path,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
	lap_msg->local_comm_id = cm_id_priv->id.local_id;
	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
	/* todo: need remote CM response timeout */
	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
	lap_msg->alt_local_lid = alternate_path->slid;
	lap_msg->alt_remote_lid = alternate_path->dlid;
	lap_msg->alt_local_gid = alternate_path->sgid;
	lap_msg->alt_remote_gid = alternate_path->dgid;
	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
	lap_msg->alt_hop_limit = alternate_path->hop_limit;
	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
	cm_lap_set_sl(lap_msg, alternate_path->sl);
	cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
	cm_lap_set_local_ack_timeout(lap_msg,
		min(31, alternate_path->packet_life_time + 1));

	if (private_data && private_data_len)
		memcpy(lap_msg->private_data, private_data, private_data_len);
}
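
/*
 * Load an alternate path.  Only valid on an established connection with
 * no other LAP exchange in progress; the LAP is retried until the peer
 * answers with an APR (or MRAs it).
 */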
int ib_send_cm_lap(struct ib_cm_id *cm_id,
		   struct ib_sa_path_rec *alternate_path,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    cm_id->lap_state != IB_CM_LAP_IDLE) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
		      alternate_path, private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_SENT;
	cm_id_priv->msg = msg;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);

static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	memset(path, 0, sizeof *path);
	path->dgid = lap_msg->alt_local_gid;
	path->sgid = lap_msg->alt_remote_gid;
	path->dlid = lap_msg->alt_local_lid;
	path->slid = lap_msg->alt_remote_lid;
	path->flow_label = cm_lap_get_flow_label(lap_msg);
	path->hop_limit = lap_msg->alt_hop_limit;
	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
	path->reversible = 1;
	/* pkey is same as in REQ */
	path->sl = cm_lap_get_sl(lap_msg);
	path->mtu_selector = IB_SA_EQ;
	/* mtu is same as in REQ */
	path->rate_selector = IB_SA_EQ;
	path->rate = cm_lap_get_packet_rate(lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
}
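
/*
 * Passive side of the alternate path exchange: decode the proposed path
 * from the LAP, re-send our MRA if we had already MRAed this LAP, and
 * otherwise deliver the event so the consumer can answer with
 * ib_send_cm_apr().
 */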
static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	unsigned long flags;
	int ret;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
				   lap_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(param->alternate_path, lap_msg);
	work->cm_event.private_data = &lap_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_apr(struct cm_apr_msg *apr_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_apr_status status,
			  void *info,
			  u8 info_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
	apr_msg->local_comm_id = cm_id_priv->id.local_id;
	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
	apr_msg->ap_status = (u8) status;

	if (info && info_length) {
		apr_msg->info_length = info_length;
		memcpy(apr_msg->info, info, info_length);
	}

	if (private_data && private_data_len)
		memcpy(apr_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_apr(struct ib_cm_id *cm_id,
		   enum ib_cm_apr_status status,
		   void *info,
		   u8 info_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
	    (info && info_length > IB_CM_APR_INFO_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
		      info, info_length, private_data, private_data_len);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_IDLE;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);

static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;
	unsigned long flags;
	int ret;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
				   apr_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
	work->cm_event.private_data = &apr_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_id_priv->msg = NULL;

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
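
/*
 * The timewait period for a connection has expired: drop the timewait
 * bookkeeping and, if the id still sits in TIMEWAIT for this connection,
 * return it to IDLE and deliver the timewait exit event.
 */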
static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	timewait_info = (struct cm_timewait_info *)work;
	cm_cleanup_timewait(timewait_info);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
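
/*
 * Service ID resolution (SIDR): a connectionless request/reply exchange
 * that resolves a service id on a remote node to the QPN and Q_Key needed
 * to reach that service.
 */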
static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
	sidr_req_msg->request_id = cm_id_priv->id.local_id;
	sidr_req_msg->pkey = cpu_to_be16(param->pkey);
	sidr_req_msg->service_id = param->service_id;

	if (param->private_data && param->private_data_len)
		memcpy(sidr_req_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE)
		ret = ib_post_send_mad(msg, NULL);
	else
		ret = -EINVAL;

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);

static void cm_format_sidr_req_event(struct cm_work *work,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
	param->listen_id = listen_id;
	param->port = work->port->port_num;
	work->cm_event.private_data = &sidr_req_msg->private_data;
}

static int cm_sidr_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;
	unsigned long flags;

	cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	wc = work->mad_recv_wc->wc;
	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				&cm_id_priv->av);
	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	cm_id_priv->tid = sidr_req_msg->hdr.tid;
	atomic_inc(&cm_id_priv->work_count);

	spin_lock_irqsave(&cm.lock, flags);
	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (cur_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out; /* Duplicate message. */
	}
	cur_cm_id_priv = cm_find_listen(cm_id->device,
					sidr_req_msg->service_id);
	if (!cur_cm_id_priv) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		/* todo: reply with no match */
		goto out; /* No match. */
	}
	atomic_inc(&cur_cm_id_priv->refcount);
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = cur_cm_id_priv->id.context;
	cm_id_priv->id.service_id = sidr_req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(cur_cm_id_priv);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}

static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			  cm_id_priv->tid);
	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
	sidr_rep_msg->status = param->status;
	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

	if (param->info && param->info_length)
		memcpy(sidr_rep_msg->info, param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		memcpy(sidr_rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}
	cm_id->state = IB_CM_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_lock_irqsave(&cm.lock, flags);
	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);

static void cm_format_sidr_rep_event(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = sidr_rep_msg->status;
	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
	param->info = &sidr_rep_msg->info;
	param->info_len = sidr_rep_msg->info_length;
	work->cm_event.private_data = &sidr_rep_msg->private_data;
}

static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_format_sidr_rep_event(work);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
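
/*
 * A send completed in error.  If the failed MAD is still the one this id
 * is waiting on, translate the failure into the corresponding *_ERROR
 * event and unwind the connection state; otherwise the send is stale and
 * is simply discarded.
 */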
static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	unsigned long flags;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_free_msg(msg);
}

static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}
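
/*
 * Workqueue entry point: dispatch a queued work item to the handler for
 * its event.  A handler returns non-zero when it did not consume the work
 * item, in which case it is freed here.
 */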
static void cm_work_handler(void *data)
{
	struct cm_work *work = data;
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}

int ib_cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item.  To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_WORK(&work->work, cm_work_handler, work);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;
	queue_work(cm.wq, &work->work);
out:
	return ret;
}
EXPORT_SYMBOL(ib_cm_establish);
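
/*
 * MAD-layer receive callback: map the attribute id to a CM event, size the
 * work item for any path records the handler will decode, and queue it to
 * the CM workqueue.
 */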
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_work *work;
	enum ib_cm_event_type event;
	int paths = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
						    alt_local_lid != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_WORK(&work->work, cm_work_handler, work);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = (struct cm_port *)mad_agent->context;
	queue_work(cm.wq, &work->work);
}
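
/*
 * cm_init_qp_init_attr - attributes for the RESET -> INIT transition:
 * pkey index and port number from the primary address vector, plus
 * local/remote write access (remote read only if the peer was granted
 * responder resources).  Valid at any point in the REQ/REP exchange
 * and while established.
 */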
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
					   IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
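
/*
 * cm_init_qp_rtr_attr - attributes for the INIT -> RTR transition:
 * address vector, path MTU, destination QPN, and starting receive PSN
 * from the negotiated connection.  RC QPs additionally get their
 * inbound RDMA read/atomic depth and RNR timer, and an alternate path
 * is loaded when one was supplied (non-zero alternate DLID).
 */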
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
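
/*
 * cm_init_qp_rts_attr - attributes for the RTR -> RTS transition: the
 * starting send PSN, plus (for RC) the local ack timeout, retry
 * counts, and outbound RDMA read/atomic depth.  When an alternate
 * path is present, path migration is rearmed so the QP can fail over.
 */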
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
					 IB_QP_RNR_RETRY |
					 IB_QP_MAX_QP_RD_ATOMIC;
			qp_attr->timeout = cm_id_priv->local_ack_timeout;
			qp_attr->retry_cnt = cm_id_priv->retry_count;
			qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
			qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
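
/*
 * ib_cm_init_qp_attr - return the QP attributes and mask a user must
 * apply to move its QP through INIT, RTR, and RTS in step with the
 * connection protocol.  The caller selects the target state in
 * qp_attr->qp_state; only those three transitions are supported.
 */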
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
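
/*
 * cm_get_ca_guid - query the device for its node GUID, cached as the
 * device's CA GUID.  Returns 0 if the allocation or the query fails;
 * cm_add_one() treats a zero GUID as fatal for the device.
 */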
static __be64 cm_get_ca_guid(struct ib_device *device)
{
	struct ib_device_attr *device_attr;
	__be64 guid;
	int ret;

	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
	if (!device_attr)
		return 0;

	ret = ib_query_device(device, device_attr);
	guid = ret ? 0 : device_attr->node_guid;
	kfree(device_attr);
	return guid;
}
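
/*
 * cm_add_one - per-device setup for the CM client.
 *
 * Registers a GSI MAD agent for the CM management class on every
 * physical port, advertises CM support in each port's capability
 * mask, and links the new cm_device into the global device list.  On
 * failure the loop is unwound: error3 drops the agent for the port
 * whose ib_modify_port() failed, then the while (--i) loop clears the
 * capability bit and unregisters the agent on every earlier port
 * before the cm_device is freed.
 */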
static void cm_add_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	u8 i;

	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
			 device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->device = device;
	cm_dev->ca_guid = cm_get_ca_guid(device);
	if (!cm_dev->ca_guid)
		goto error1;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		port->cm_dev = cm_dev;
		port->port_num = i;
		port->mad_agent = ib_register_mad_agent(device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port);
		if (IS_ERR(port->mad_agent))
			goto error2;

		ret = ib_modify_port(device, i, 0, &port_modify);
		if (ret)
			goto error3;
	}
	ib_set_client_data(device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error3:
	ib_unregister_mad_agent(port->mad_agent);
error2:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
error1:
	kfree(cm_dev);
}
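
/*
 * cm_remove_one - per-device teardown: unlink the cm_device from the
 * global list, then clear the CM capability bit and unregister the
 * MAD agent on each port before freeing the device state.
 */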
static void cm_remove_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	cm_dev = ib_get_client_data(device, &cm_client);
	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}
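
/*
 * ib_cm_init - module init: reset the global cm state, initialize the
 * lookup tables (listen services, remote ids/QPNs, SIDR, and the IDR
 * backing local id allocation, with an initial idr_pre_get() to
 * preallocate memory for the first id), create the "ib_cm" workqueue
 * on which all events are processed, and register as an IB client so
 * cm_add_one()/cm_remove_one() run for each device.
 */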
static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	idr_pre_get(&cm.local_id_table, GFP_KERNEL);

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq)
		return -ENOMEM;

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error;

	return 0;
error:
	destroy_workqueue(cm.wq);
	return ret;
}
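
/*
 * ib_cm_cleanup - module exit: drain and destroy the workqueue, then
 * unregister the IB client (tearing down per-device state through
 * cm_remove_one()) and release the local id IDR.
 */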
static void __exit ib_cm_cleanup(void)
{
	flush_workqueue(cm.wq);
	destroy_workqueue(cm.wq);
	ib_unregister_client(&cm_client);
	idr_destroy(&cm.local_id_table);
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);