cm.c

/*
 * Copyright (c) 2004-2006 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 4311 2005-12-05 18:42:01Z sean.hefty $
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
} cm;

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u8 port_num;
};

struct cm_device {
	struct list_head list;
	struct ib_device *device;
	struct cm_port port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 packet_life_time;
};

struct cm_work {
	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};

struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	atomic_t refcount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;
	struct ib_cm_compare_data *compare_data;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;

	struct list_head work_list;
	atomic_t work_count;
};

static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}
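
/*
 * Allocate a MAD send buffer addressed through the cm_id's address vector.
 * The buffer takes a reference on the cm_id, which is dropped again in
 * cm_free_msg().
 */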
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;
	return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	m->ah = ah;
	*msg = m;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				    struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	ib_init_ah_from_wc(port->cm_dev->device, port->port_num, wc,
			   grh, &av->ah_attr);
}

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
					&p, NULL)) {
			port = &cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	ib_init_ah_from_path(cm_dev->device, port->port_num, path,
			     &av->ah_attr);
	av->packet_life_time = path->packet_life_time;
	return 0;
}
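
/*
 * Allocate a local communication ID from the idr.  The ID handed back to
 * users is XORed with a random operand so that valid IDs are harder for a
 * remote node to guess.
 */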
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int ret, id;
	static int next_id;

	do {
		spin_lock_irqsave(&cm.lock, flags);
		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
					next_id, &id);
		if (!ret)
			next_id = ((unsigned) id + 1) & MAX_ID_MASK;
		spin_unlock_irqrestore(&cm.lock, flags);
	} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );

	cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand);
	return ret;
}

static void cm_free_id(__be32 local_id)
{
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	idr_remove(&cm.local_id_table,
		   (__force int) (local_id ^ cm.random_id_operand));
	spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table,
			      (__force int) (local_id ^ cm.random_id_operand));
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irqrestore(&cm.lock, flags);

	return cm_id_priv;
}

static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
{
	int i;

	for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
		((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
					     ((unsigned long *) mask)[i];
}

static int cm_compare_data(struct ib_cm_compare_data *src_data,
			   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];
	u8 dst[IB_CM_COMPARE_SIZE];

	if (!src_data || !dst_data)
		return 0;

	cm_mask_copy(src, src_data->data, dst_data->mask);
	cm_mask_copy(dst, dst_data->data, src_data->mask);
	return memcmp(src, dst, IB_CM_COMPARE_SIZE);
}

static int cm_compare_private_data(u8 *private_data,
				   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];

	if (!dst_data)
		return 0;

	cm_mask_copy(src, private_data, dst_data->mask);
	return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
}
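
/*
 * Insert a listening cm_id into the listen service red-black tree, which is
 * ordered by device, service ID, and private data comparison.  Returns the
 * existing entry if an equivalent listen is already present.
 */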
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;
	int data_cmp;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		data_cmp = cm_compare_data(cm_id_priv->compare_data,
					   cur_cm_id_priv->compare_data);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
		    !data_cmp)
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (service_id < cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_left;
		else if (service_id > cur_cm_id_priv->id.service_id)
			link = &(*link)->rb_right;
		else if (data_cmp < 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}

static struct cm_id_private * cm_find_listen(struct ib_device *device,
					     __be64 service_id,
					     u8 *private_data)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;
	int data_cmp;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		data_cmp = cm_compare_private_data(private_data,
						   cm_id_priv->compare_data);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device) && !data_cmp)
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (service_id < cm_id_priv->id.service_id)
			node = node->rb_left;
		else if (service_id > cm_id_priv->id.service_id)
			node = node->rb_right;
		else if (data_cmp < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (remote_id < cur_timewait_info->work.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_timewait_info->work.remote_id)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (remote_id < timewait_info->work.remote_id)
			node = node->rb_left;
		else if (remote_id > timewait_info->work.remote_id)
			node = node->rb_right;
		else if (remote_ca_guid < timewait_info->remote_ca_guid)
			node = node->rb_left;
		else if (remote_ca_guid > timewait_info->remote_ca_guid)
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (remote_qpn < cur_timewait_info->remote_qpn)
			link = &(*link)->rb_left;
		else if (remote_qpn > cur_timewait_info->remote_qpn)
			link = &(*link)->rb_right;
		else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_left;
		else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}

static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (remote_id < cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_left;
		else if (remote_id > cur_cm_id_priv->id.remote_id)
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}
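
/*
 * Remove the timewait entry from the remote ID and remote QPN trees so that
 * a new connection from the same peer is no longer matched against it.
 */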
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.packet_life_time + 1);
	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
			   msecs_to_jiffies(wait_time));
	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		cm_id->state = IB_CM_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		spin_lock_irqsave(&cm.lock, flags);
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		break;
	case IB_CM_REQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->id.device->node_guid,
			       sizeof cm_id_priv->id.device->node_guid,
			       NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		} else {
			spin_unlock_irqrestore(&cm_id_priv->lock, flags);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		}
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		break;
	}

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	kfree(cm_id_priv->compare_data);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
		 struct ib_cm_compare_data *compare_data)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	unsigned long flags;
	int ret = 0;

	service_mask = service_mask ? service_mask :
		       __constant_cpu_to_be64(~0ULL);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	if (compare_data) {
		cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
						   GFP_KERNEL);
		if (!cm_id_priv->compare_data)
			return -ENOMEM;
		cm_mask_copy(cm_id_priv->compare_data->data,
			     compare_data->data, compare_data->mask);
		memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
		       IB_CM_COMPARE_SIZE);
	}

	cm_id->state = IB_CM_LISTEN;

	spin_lock_irqsave(&cm.lock, flags);
	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
	spin_unlock_irqrestore(&cm.lock, flags);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		kfree(cm_id_priv->compare_data);
		cm_id_priv->compare_data = NULL;
		ret = -EBUSY;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
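
/*
 * Build a MAD transaction ID from the MAD agent's hi_tid in the upper 32
 * bits and the local communication ID combined with the message sequence
 * in the lower 32 bits.
 */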
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid  = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
			 (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method	   = IB_MGMT_METHOD_SEND;
	hdr->attr_id	   = attr_id;
	hdr->tid	   = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_resp_res(req_msg, param->responder_resources);
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	cm_req_set_retry_count(req_msg, param->retry_count);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
	cm_req_set_srq(req_msg, param->srq);

	req_msg->primary_local_lid = param->primary_path->slid;
	req_msg->primary_remote_lid = param->primary_path->dlid;
	req_msg->primary_local_gid = param->primary_path->sgid;
	req_msg->primary_remote_gid = param->primary_path->dgid;
	cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
	req_msg->primary_traffic_class = param->primary_path->traffic_class;
	req_msg->primary_hop_limit = param->primary_path->hop_limit;
	cm_req_set_primary_sl(req_msg, param->primary_path->sl);
	cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
	cm_req_set_primary_local_ack_timeout(req_msg,
		min(31, param->primary_path->packet_life_time + 1));

	if (param->alternate_path) {
		req_msg->alt_local_lid = param->alternate_path->slid;
		req_msg->alt_remote_lid = param->alternate_path->dlid;
		req_msg->alt_local_gid = param->alternate_path->sgid;
		req_msg->alt_remote_gid = param->alternate_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  param->alternate_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
		req_msg->alt_traffic_class = param->alternate_path->traffic_class;
		req_msg->alt_hop_limit = param->alternate_path->hop_limit;
		cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
		cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
		cm_req_set_alt_local_ack_timeout(req_msg,
			min(31, param->alternate_path->packet_life_time + 1));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}

int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);

static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information. Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}
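
/*
 * For peer-to-peer connections, the side with the larger CA GUID (or the
 * larger QPN when the GUIDs match) is treated as the active peer.
 */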
  943. static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
  944. __be32 local_qpn, __be32 remote_qpn)
  945. {
  946. return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
  947. ((local_ca_guid == remote_ca_guid) &&
  948. (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
  949. }
  950. static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
  951. struct ib_sa_path_rec *primary_path,
  952. struct ib_sa_path_rec *alt_path)
  953. {
  954. memset(primary_path, 0, sizeof *primary_path);
  955. primary_path->dgid = req_msg->primary_local_gid;
  956. primary_path->sgid = req_msg->primary_remote_gid;
  957. primary_path->dlid = req_msg->primary_local_lid;
  958. primary_path->slid = req_msg->primary_remote_lid;
  959. primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
  960. primary_path->hop_limit = req_msg->primary_hop_limit;
  961. primary_path->traffic_class = req_msg->primary_traffic_class;
  962. primary_path->reversible = 1;
  963. primary_path->pkey = req_msg->pkey;
  964. primary_path->sl = cm_req_get_primary_sl(req_msg);
  965. primary_path->mtu_selector = IB_SA_EQ;
  966. primary_path->mtu = cm_req_get_path_mtu(req_msg);
  967. primary_path->rate_selector = IB_SA_EQ;
  968. primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
  969. primary_path->packet_life_time_selector = IB_SA_EQ;
  970. primary_path->packet_life_time =
  971. cm_req_get_primary_local_ack_timeout(req_msg);
  972. primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
  973. if (req_msg->alt_local_lid) {
  974. memset(alt_path, 0, sizeof *alt_path);
  975. alt_path->dgid = req_msg->alt_local_gid;
  976. alt_path->sgid = req_msg->alt_remote_gid;
  977. alt_path->dlid = req_msg->alt_local_lid;
  978. alt_path->slid = req_msg->alt_remote_lid;
  979. alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
  980. alt_path->hop_limit = req_msg->alt_hop_limit;
  981. alt_path->traffic_class = req_msg->alt_traffic_class;
  982. alt_path->reversible = 1;
  983. alt_path->pkey = req_msg->pkey;
  984. alt_path->sl = cm_req_get_alt_sl(req_msg);
  985. alt_path->mtu_selector = IB_SA_EQ;
  986. alt_path->mtu = cm_req_get_path_mtu(req_msg);
  987. alt_path->rate_selector = IB_SA_EQ;
  988. alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
  989. alt_path->packet_life_time_selector = IB_SA_EQ;
  990. alt_path->packet_life_time =
  991. cm_req_get_alt_local_ack_timeout(req_msg);
  992. alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
  993. }
  994. }
  995. static void cm_format_req_event(struct cm_work *work,
  996. struct cm_id_private *cm_id_priv,
  997. struct ib_cm_id *listen_id)
  998. {
  999. struct cm_req_msg *req_msg;
  1000. struct ib_cm_req_event_param *param;
  1001. req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
  1002. param = &work->cm_event.param.req_rcvd;
  1003. param->listen_id = listen_id;
  1004. param->port = cm_id_priv->av.port->port_num;
  1005. param->primary_path = &work->path[0];
  1006. if (req_msg->alt_local_lid)
  1007. param->alternate_path = &work->path[1];
  1008. else
  1009. param->alternate_path = NULL;
  1010. param->remote_ca_guid = req_msg->local_ca_guid;
  1011. param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
  1012. param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
  1013. param->qp_type = cm_req_get_qp_type(req_msg);
  1014. param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
  1015. param->responder_resources = cm_req_get_init_depth(req_msg);
  1016. param->initiator_depth = cm_req_get_resp_res(req_msg);
  1017. param->local_cm_response_timeout =
  1018. cm_req_get_remote_resp_timeout(req_msg);
  1019. param->flow_control = cm_req_get_flow_ctrl(req_msg);
  1020. param->remote_cm_response_timeout =
  1021. cm_req_get_local_resp_timeout(req_msg);
  1022. param->retry_count = cm_req_get_retry_count(req_msg);
  1023. param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
  1024. param->srq = cm_req_get_srq(req_msg);
  1025. work->cm_event.private_data = &req_msg->private_data;
  1026. }
  1027. static void cm_process_work(struct cm_id_private *cm_id_priv,
  1028. struct cm_work *work)
  1029. {
  1030. unsigned long flags;
  1031. int ret;
  1032. /* We will typically only have the current event to report. */
  1033. ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
  1034. cm_free_work(work);
  1035. while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
  1036. spin_lock_irqsave(&cm_id_priv->lock, flags);
  1037. work = cm_dequeue_work(cm_id_priv);
  1038. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1039. BUG_ON(!work);
  1040. ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
  1041. &work->cm_event);
  1042. cm_free_work(work);
  1043. }
  1044. cm_deref_id(cm_id_priv);
  1045. if (ret)
  1046. cm_destroy_id(&cm_id_priv->id, ret);
  1047. }
  1048. static void cm_format_mra(struct cm_mra_msg *mra_msg,
  1049. struct cm_id_private *cm_id_priv,
  1050. enum cm_msg_response msg_mraed, u8 service_timeout,
  1051. const void *private_data, u8 private_data_len)
  1052. {
  1053. cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
  1054. cm_mra_set_msg_mraed(mra_msg, msg_mraed);
  1055. mra_msg->local_comm_id = cm_id_priv->id.local_id;
  1056. mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
  1057. cm_mra_set_service_timeout(mra_msg, service_timeout);
  1058. if (private_data && private_data_len)
  1059. memcpy(mra_msg->private_data, private_data, private_data_len);
  1060. }
  1061. static void cm_format_rej(struct cm_rej_msg *rej_msg,
  1062. struct cm_id_private *cm_id_priv,
  1063. enum ib_cm_rej_reason reason,
  1064. void *ari,
  1065. u8 ari_length,
  1066. const void *private_data,
  1067. u8 private_data_len)
  1068. {
  1069. cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
  1070. rej_msg->remote_comm_id = cm_id_priv->id.remote_id;
  1071. switch(cm_id_priv->id.state) {
  1072. case IB_CM_REQ_RCVD:
  1073. rej_msg->local_comm_id = 0;
  1074. cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
  1075. break;
  1076. case IB_CM_MRA_REQ_SENT:
  1077. rej_msg->local_comm_id = cm_id_priv->id.local_id;
  1078. cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
  1079. break;
  1080. case IB_CM_REP_RCVD:
  1081. case IB_CM_MRA_REP_SENT:
  1082. rej_msg->local_comm_id = cm_id_priv->id.local_id;
  1083. cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
  1084. break;
  1085. default:
  1086. rej_msg->local_comm_id = cm_id_priv->id.local_id;
  1087. cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
  1088. break;
  1089. }
  1090. rej_msg->reason = cpu_to_be16(reason);
  1091. if (ari && ari_length) {
  1092. cm_rej_set_reject_info_len(rej_msg, ari_length);
  1093. memcpy(rej_msg->ari, ari, ari_length);
  1094. }
  1095. if (private_data && private_data_len)
  1096. memcpy(rej_msg->private_data, private_data, private_data_len);
  1097. }
static void cm_dup_req_handler(struct cm_work *work,
                               struct cm_id_private *cm_id_priv)
{
        struct ib_mad_send_buf *msg = NULL;
        unsigned long flags;
        int ret;

        /* Quick state check to discard duplicate REQs. */
        if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
                return;

        ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
        if (ret)
                return;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_MRA_REQ_SENT:
                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
                break;
        case IB_CM_TIMEWAIT:
                cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
                              IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
                break;
        default:
                goto unlock;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                goto free;
        return;

unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:   cm_free_msg(msg);
}

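/*
 * Match an incoming REQ against existing state: detect duplicate REQs and
 * stale connections through the timewait tables, then look up the listening
 * cm_id for the requested service.  Returns the listener with a reference
 * held, or NULL if the REQ was a duplicate, stale, or unmatched.
 */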
static struct cm_id_private * cm_match_req(struct cm_work *work,
                                           struct cm_id_private *cm_id_priv)
{
        struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
        struct cm_timewait_info *timewait_info;
        struct cm_req_msg *req_msg;
        unsigned long flags;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

        /* Check for possible duplicate REQ. */
        spin_lock_irqsave(&cm.lock, flags);
        timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
        if (timewait_info) {
                cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
                                           timewait_info->work.remote_id);
                spin_unlock_irqrestore(&cm.lock, flags);
                if (cur_cm_id_priv) {
                        cm_dup_req_handler(work, cur_cm_id_priv);
                        cm_deref_id(cur_cm_id_priv);
                }
                return NULL;
        }

        /* Check for stale connections. */
        timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
        if (timewait_info) {
                cm_cleanup_timewait(cm_id_priv->timewait_info);
                spin_unlock_irqrestore(&cm.lock, flags);
                cm_issue_rej(work->port, work->mad_recv_wc,
                             IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
                             NULL, 0);
                return NULL;
        }

        /* Find matching listen request. */
        listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
                                           req_msg->service_id,
                                           req_msg->private_data);
        if (!listen_cm_id_priv) {
                cm_cleanup_timewait(cm_id_priv->timewait_info);
                spin_unlock_irqrestore(&cm.lock, flags);
                cm_issue_rej(work->port, work->mad_recv_wc,
                             IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
                             NULL, 0);
                goto out;
        }
        atomic_inc(&listen_cm_id_priv->refcount);
        atomic_inc(&cm_id_priv->refcount);
        cm_id_priv->id.state = IB_CM_REQ_RCVD;
        atomic_inc(&cm_id_priv->work_count);
        spin_unlock_irqrestore(&cm.lock, flags);
out:
        return listen_cm_id_priv;
}

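/*
 * Handle a new connection request: create a cm_id for the passive side,
 * record the remote CM and QP information, match the REQ to a listener,
 * resolve the primary and alternate paths, and hand the request to the
 * listener's callback.
 */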
static int cm_req_handler(struct cm_work *work)
{
        struct ib_cm_id *cm_id;
        struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
        struct cm_req_msg *req_msg;
        int ret;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

        cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
        if (IS_ERR(cm_id))
                return PTR_ERR(cm_id);

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        cm_id_priv->id.remote_id = req_msg->local_comm_id;
        cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
                                work->mad_recv_wc->recv_buf.grh,
                                &cm_id_priv->av);
        cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
                                                            id.local_id);
        if (IS_ERR(cm_id_priv->timewait_info)) {
                ret = PTR_ERR(cm_id_priv->timewait_info);
                goto destroy;
        }
        cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
        cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
        cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

        listen_cm_id_priv = cm_match_req(work, cm_id_priv);
        if (!listen_cm_id_priv) {
                ret = -EINVAL;
                kfree(cm_id_priv->timewait_info);
                goto destroy;
        }

        cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
        cm_id_priv->id.context = listen_cm_id_priv->id.context;
        cm_id_priv->id.service_id = req_msg->service_id;
        cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

        cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
        ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
        if (ret) {
                ib_get_cached_gid(work->port->cm_dev->device,
                                  work->port->port_num, 0, &work->path[0].sgid);
                ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
                               &work->path[0].sgid, sizeof work->path[0].sgid,
                               NULL, 0);
                goto rejected;
        }
        if (req_msg->alt_local_lid) {
                ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
                if (ret) {
                        ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
                                       &work->path[0].sgid,
                                       sizeof work->path[0].sgid, NULL, 0);
                        goto rejected;
                }
        }
        cm_id_priv->tid = req_msg->hdr.tid;
        cm_id_priv->timeout_ms = cm_convert_to_ms(
                                        cm_req_get_local_resp_timeout(req_msg));
        cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
        cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
        cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
        cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
        cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
        cm_id_priv->pkey = req_msg->pkey;
        cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
        cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
        cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
        cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

        cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
        cm_process_work(cm_id_priv, work);
        cm_deref_id(listen_cm_id_priv);
        return 0;

rejected:
        atomic_dec(&cm_id_priv->refcount);
        cm_deref_id(listen_cm_id_priv);
destroy:
        ib_destroy_cm_id(cm_id);
        return ret;
}

static void cm_format_rep(struct cm_rep_msg *rep_msg,
                          struct cm_id_private *cm_id_priv,
                          struct ib_cm_rep_param *param)
{
        cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
        rep_msg->local_comm_id = cm_id_priv->id.local_id;
        rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
        cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
        rep_msg->resp_resources = param->responder_resources;
        rep_msg->initiator_depth = param->initiator_depth;
        cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
        cm_rep_set_failover(rep_msg, param->failover_accepted);
        cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
        cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
        cm_rep_set_srq(rep_msg, param->srq);
        rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;

        if (param->private_data && param->private_data_len)
                memcpy(rep_msg->private_data, param->private_data,
                       param->private_data_len);
}

int ib_send_cm_rep(struct ib_cm_id *cm_id,
                   struct ib_cm_rep_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        struct cm_rep_msg *rep_msg;
        unsigned long flags;
        int ret;

        if (param->private_data &&
            param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_REQ_RCVD &&
            cm_id->state != IB_CM_MRA_REQ_SENT) {
                ret = -EINVAL;
                goto out;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto out;

        rep_msg = (struct cm_rep_msg *) msg->mad;
        cm_format_rep(rep_msg, cm_id_priv, param);
        msg->timeout_ms = cm_id_priv->timeout_ms;
        msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }

        cm_id->state = IB_CM_REP_SENT;
        cm_id_priv->msg = msg;
        cm_id_priv->initiator_depth = param->initiator_depth;
        cm_id_priv->responder_resources = param->responder_resources;
        cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
        cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);

static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
                          struct cm_id_private *cm_id_priv,
                          const void *private_data,
                          u8 private_data_len)
{
        cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
        rtu_msg->local_comm_id = cm_id_priv->id.local_id;
        rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

        if (private_data && private_data_len)
                memcpy(rtu_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_rtu(struct ib_cm_id *cm_id,
                   const void *private_data,
                   u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        void *data;
        int ret;

        if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
                return -EINVAL;

        data = cm_copy_private_data(private_data, private_data_len);
        if (IS_ERR(data))
                return PTR_ERR(data);

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_REP_RCVD &&
            cm_id->state != IB_CM_MRA_REP_SENT) {
                ret = -EINVAL;
                goto error;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto error;

        cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
                      private_data, private_data_len);

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                kfree(data);
                return ret;
        }

        cm_id->state = IB_CM_ESTABLISHED;
        cm_set_private_data(cm_id_priv, data, private_data_len);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return 0;

error:  spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        kfree(data);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);

static void cm_format_rep_event(struct cm_work *work)
{
        struct cm_rep_msg *rep_msg;
        struct ib_cm_rep_event_param *param;

        rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.rep_rcvd;
        param->remote_ca_guid = rep_msg->local_ca_guid;
        param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
        param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
        param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
        param->responder_resources = rep_msg->initiator_depth;
        param->initiator_depth = rep_msg->resp_resources;
        param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
        param->failover_accepted = cm_rep_get_failover(rep_msg);
        param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
        param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
        param->srq = cm_rep_get_srq(rep_msg);
        work->cm_event.private_data = &rep_msg->private_data;
}

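/*
 * A retransmitted REP means our RTU or MRA may have been lost; resend
 * whichever response matches the current connection state.
 */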
static void cm_dup_rep_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_rep_msg *rep_msg;
        struct ib_mad_send_buf *msg = NULL;
        unsigned long flags;
        int ret;

        rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
                                   rep_msg->local_comm_id);
        if (!cm_id_priv)
                return;

        ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
        if (ret)
                goto deref;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
                cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
        else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
        else
                goto unlock;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                goto free;
        goto deref;

unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:   cm_free_msg(msg);
deref:  cm_deref_id(cm_id_priv);
}

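/*
 * Process a REP on the active side: validate the connection state, check
 * the timewait tables for duplicate or stale replies, record the remote
 * QP parameters, and queue the event for the consumer's callback.
 */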
static int cm_rep_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_rep_msg *rep_msg;
        unsigned long flags;
        int ret;

        rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
        if (!cm_id_priv) {
                cm_dup_rep_handler(work);
                return -EINVAL;
        }

        cm_format_rep_event(work);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
                break;
        default:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = -EINVAL;
                goto error;
        }

        cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
        cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
        cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

        spin_lock(&cm.lock);
        /* Check for duplicate REP. */
        if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
                spin_unlock(&cm.lock);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = -EINVAL;
                goto error;
        }
        /* Check for a stale connection. */
        if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
                rb_erase(&cm_id_priv->timewait_info->remote_id_node,
                         &cm.remote_id_table);
                cm_id_priv->timewait_info->inserted_remote_id = 0;
                spin_unlock(&cm.lock);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_issue_rej(work->port, work->mad_recv_wc,
                             IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
                             NULL, 0);
                ret = -EINVAL;
                goto error;
        }
        spin_unlock(&cm.lock);

        cm_id_priv->id.state = IB_CM_REP_RCVD;
        cm_id_priv->id.remote_id = rep_msg->local_comm_id;
        cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
        cm_id_priv->initiator_depth = rep_msg->resp_resources;
        cm_id_priv->responder_resources = rep_msg->initiator_depth;
        cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
        cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);

        /* todo: handle peer_to_peer */

        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;

error:
        cm_deref_id(cm_id_priv);
        return ret;
}

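/*
 * Handle a user-generated establish event: if the cm_id is still in the
 * established state, cancel any outstanding MAD and deliver the event to
 * the consumer.
 */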
static int cm_establish_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        unsigned long flags;
        int ret;

        /* See comment in cm_establish about lookup. */
        cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
        if (!cm_id_priv)
                return -EINVAL;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto out;
        }

        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}

static int cm_rtu_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_rtu_msg *rtu_msg;
        unsigned long flags;
        int ret;

        rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
                                   rtu_msg->local_comm_id);
        if (!cm_id_priv)
                return -EINVAL;

        work->cm_event.private_data = &rtu_msg->private_data;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_REP_SENT &&
            cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto out;
        }
        cm_id_priv->id.state = IB_CM_ESTABLISHED;

        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}

static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
                           struct cm_id_private *cm_id_priv,
                           const void *private_data,
                           u8 private_data_len)
{
        cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
                          cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
        dreq_msg->local_comm_id = cm_id_priv->id.local_id;
        dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

        if (private_data && private_data_len)
                memcpy(dreq_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_dreq(struct ib_cm_id *cm_id,
                    const void *private_data,
                    u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_ESTABLISHED) {
                ret = -EINVAL;
                goto out;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret) {
                cm_enter_timewait(cm_id_priv);
                goto out;
        }

        cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
                       private_data, private_data_len);
        msg->timeout_ms = cm_id_priv->timeout_ms;
        msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                cm_enter_timewait(cm_id_priv);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }

        cm_id->state = IB_CM_DREQ_SENT;
        cm_id_priv->msg = msg;
out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);

static void cm_format_drep(struct cm_drep_msg *drep_msg,
                           struct cm_id_private *cm_id_priv,
                           const void *private_data,
                           u8 private_data_len)
{
        cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
        drep_msg->local_comm_id = cm_id_priv->id.local_id;
        drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

        if (private_data && private_data_len)
                memcpy(drep_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_drep(struct ib_cm_id *cm_id,
                    const void *private_data,
                    u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        void *data;
        int ret;

        if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
                return -EINVAL;

        data = cm_copy_private_data(private_data, private_data_len);
        if (IS_ERR(data))
                return PTR_ERR(data);

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_DREQ_RCVD) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                kfree(data);
                return -EINVAL;
        }

        cm_set_private_data(cm_id_priv, data, private_data_len);
        cm_enter_timewait(cm_id_priv);

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto out;

        cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
                       private_data, private_data_len);

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }

out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);

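/*
 * Send a DREP directly from the DREQ's addressing information when no
 * matching cm_id exists, so the remote side can finish disconnecting.
 */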
static int cm_issue_drep(struct cm_port *port,
                         struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_mad_send_buf *msg = NULL;
        struct cm_dreq_msg *dreq_msg;
        struct cm_drep_msg *drep_msg;
        int ret;

        ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
        if (ret)
                return ret;

        dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
        drep_msg = (struct cm_drep_msg *) msg->mad;

        cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
        drep_msg->remote_comm_id = dreq_msg->local_comm_id;
        drep_msg->local_comm_id = dreq_msg->remote_comm_id;

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                cm_free_msg(msg);

        return ret;
}

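/*
 * Handle a disconnect request: cancel any outstanding REP or DREQ of our
 * own, answer duplicates received in timewait with a DREP, and move the
 * connection to the DREQ received state for the consumer.
 */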
static int cm_dreq_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_dreq_msg *dreq_msg;
        struct ib_mad_send_buf *msg = NULL;
        unsigned long flags;
        int ret;

        dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
                                   dreq_msg->local_comm_id);
        if (!cm_id_priv) {
                cm_issue_drep(work->port, work->mad_recv_wc);
                return -EINVAL;
        }

        work->cm_event.private_data = &dreq_msg->private_data;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
                goto unlock;

        switch (cm_id_priv->id.state) {
        case IB_CM_REP_SENT:
        case IB_CM_DREQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                break;
        case IB_CM_ESTABLISHED:
        case IB_CM_MRA_REP_RCVD:
                break;
        case IB_CM_TIMEWAIT:
                if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
                        goto unlock;

                cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
                               cm_id_priv->private_data,
                               cm_id_priv->private_data_len);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);

                if (ib_post_send_mad(msg, NULL))
                        cm_free_msg(msg);
                goto deref;
        default:
                goto unlock;
        }
        cm_id_priv->id.state = IB_CM_DREQ_RCVD;
        cm_id_priv->tid = dreq_msg->hdr.tid;
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;

unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:  cm_deref_id(cm_id_priv);
        return -EINVAL;
}

static int cm_drep_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_drep_msg *drep_msg;
        unsigned long flags;
        int ret;

        drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
                                   drep_msg->local_comm_id);
        if (!cm_id_priv)
                return -EINVAL;

        work->cm_event.private_data = &drep_msg->private_data;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
            cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto out;
        }
        cm_enter_timewait(cm_id_priv);

        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}

int ib_send_cm_rej(struct ib_cm_id *cm_id,
                   enum ib_cm_rej_reason reason,
                   void *ari,
                   u8 ari_length,
                   const void *private_data,
                   u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
            (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id->state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REQ_RCVD:
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                ret = cm_alloc_msg(cm_id_priv, &msg);
                if (!ret)
                        cm_format_rej((struct cm_rej_msg *) msg->mad,
                                      cm_id_priv, reason, ari, ari_length,
                                      private_data, private_data_len);
                cm_reset_to_idle(cm_id_priv);
                break;
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                ret = cm_alloc_msg(cm_id_priv, &msg);
                if (!ret)
                        cm_format_rej((struct cm_rej_msg *) msg->mad,
                                      cm_id_priv, reason, ari, ari_length,
                                      private_data, private_data_len);
                cm_enter_timewait(cm_id_priv);
                break;
        default:
                ret = -EINVAL;
                goto out;
        }

        if (ret)
                goto out;

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                cm_free_msg(msg);

out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);

static void cm_format_rej_event(struct cm_work *work)
{
        struct cm_rej_msg *rej_msg;
        struct ib_cm_rej_event_param *param;

        rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.rej_rcvd;
        param->ari = rej_msg->ari;
        param->ari_length = cm_rej_get_reject_info_len(rej_msg);
        param->reason = __be16_to_cpu(rej_msg->reason);
        work->cm_event.private_data = &rej_msg->private_data;
}

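/*
 * Look up the cm_id targeted by a REJ.  A timeout rejection identifies the
 * connection through the ARI and the timewait table; other rejections are
 * matched directly by communication ID.
 */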
static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
        struct cm_timewait_info *timewait_info;
        struct cm_id_private *cm_id_priv;
        unsigned long flags;
        __be32 remote_id;

        remote_id = rej_msg->local_comm_id;

        if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
                spin_lock_irqsave(&cm.lock, flags);
                timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
                                                   remote_id);
                if (!timewait_info) {
                        spin_unlock_irqrestore(&cm.lock, flags);
                        return NULL;
                }
                cm_id_priv = idr_find(&cm.local_id_table, (__force int)
                                      (timewait_info->work.local_id ^
                                       cm.random_id_operand));
                if (cm_id_priv) {
                        if (cm_id_priv->id.remote_id == remote_id)
                                atomic_inc(&cm_id_priv->refcount);
                        else
                                cm_id_priv = NULL;
                }
                spin_unlock_irqrestore(&cm.lock, flags);
        } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
                cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
        else
                cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

        return cm_id_priv;
}

static int cm_rej_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_rej_msg *rej_msg;
        unsigned long flags;
        int ret;

        rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_rejected_id(rej_msg);
        if (!cm_id_priv)
                return -EINVAL;

        cm_format_rej_event(work);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                /* fall through */
        case IB_CM_REQ_RCVD:
        case IB_CM_MRA_REQ_SENT:
                if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
                        cm_enter_timewait(cm_id_priv);
                else
                        cm_reset_to_idle(cm_id_priv);
                break;
        case IB_CM_DREQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                /* fall through */
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
        case IB_CM_ESTABLISHED:
                cm_enter_timewait(cm_id_priv);
                break;
        default:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = -EINVAL;
                goto out;
        }

        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}

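/*
 * Send an MRA to extend the peer's timeout for an outstanding REQ, REP,
 * or LAP, and remember the service timeout so duplicates can be answered
 * with the same value.
 */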
int ib_send_cm_mra(struct ib_cm_id *cm_id,
                   u8 service_timeout,
                   const void *private_data,
                   u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        void *data;
        unsigned long flags;
        int ret;

        if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
                return -EINVAL;

        data = cm_copy_private_data(private_data, private_data_len);
        if (IS_ERR(data))
                return PTR_ERR(data);

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_RCVD:
                ret = cm_alloc_msg(cm_id_priv, &msg);
                if (ret)
                        goto error1;

                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_REQ, service_timeout,
                              private_data, private_data_len);
                ret = ib_post_send_mad(msg, NULL);
                if (ret)
                        goto error2;
                cm_id->state = IB_CM_MRA_REQ_SENT;
                break;
        case IB_CM_REP_RCVD:
                ret = cm_alloc_msg(cm_id_priv, &msg);
                if (ret)
                        goto error1;

                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_REP, service_timeout,
                              private_data, private_data_len);
                ret = ib_post_send_mad(msg, NULL);
                if (ret)
                        goto error2;
                cm_id->state = IB_CM_MRA_REP_SENT;
                break;
        case IB_CM_ESTABLISHED:
                ret = cm_alloc_msg(cm_id_priv, &msg);
                if (ret)
                        goto error1;

                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_OTHER, service_timeout,
                              private_data, private_data_len);
                ret = ib_post_send_mad(msg, NULL);
                if (ret)
                        goto error2;
                cm_id->lap_state = IB_CM_MRA_LAP_SENT;
                break;
        default:
                ret = -EINVAL;
                goto error1;
        }
        cm_id_priv->service_timeout = service_timeout;
        cm_set_private_data(cm_id_priv, data, private_data_len);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return 0;

error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        kfree(data);
        return ret;

error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        kfree(data);
        cm_free_msg(msg);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);

static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
        switch (cm_mra_get_msg_mraed(mra_msg)) {
        case CM_MSG_RESPONSE_REQ:
                return cm_acquire_id(mra_msg->remote_comm_id, 0);
        case CM_MSG_RESPONSE_REP:
        case CM_MSG_RESPONSE_OTHER:
                return cm_acquire_id(mra_msg->remote_comm_id,
                                     mra_msg->local_comm_id);
        default:
                return NULL;
        }
}

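/*
 * Handle a received MRA: extend the timeout of the MRA'd message and move
 * the cm_id (or its LAP state) to the corresponding "MRA received" state.
 */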
static int cm_mra_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_mra_msg *mra_msg;
        unsigned long flags;
        int timeout, ret;

        mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_mraed_id(mra_msg);
        if (!cm_id_priv)
                return -EINVAL;

        work->cm_event.private_data = &mra_msg->private_data;
        work->cm_event.param.mra_rcvd.service_timeout =
                                        cm_mra_get_service_timeout(mra_msg);
        timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
                  cm_convert_to_ms(cm_id_priv->av.packet_life_time);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_SENT:
                if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
                    ib_modify_mad(cm_id_priv->av.port->mad_agent,
                                  cm_id_priv->msg, timeout))
                        goto out;
                cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
                break;
        case IB_CM_REP_SENT:
                if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
                    ib_modify_mad(cm_id_priv->av.port->mad_agent,
                                  cm_id_priv->msg, timeout))
                        goto out;
                cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
                break;
        case IB_CM_ESTABLISHED:
                if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
                    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
                    ib_modify_mad(cm_id_priv->av.port->mad_agent,
                                  cm_id_priv->msg, timeout))
                        goto out;
                cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
                break;
        default:
                goto out;
        }

        cm_id_priv->msg->context[1] = (void *) (unsigned long)
                                      cm_id_priv->id.state;
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}

static void cm_format_lap(struct cm_lap_msg *lap_msg,
                          struct cm_id_private *cm_id_priv,
                          struct ib_sa_path_rec *alternate_path,
                          const void *private_data,
                          u8 private_data_len)
{
        cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
                          cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
        lap_msg->local_comm_id = cm_id_priv->id.local_id;
        lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
        /* todo: need remote CM response timeout */
        cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
        lap_msg->alt_local_lid = alternate_path->slid;
        lap_msg->alt_remote_lid = alternate_path->dlid;
        lap_msg->alt_local_gid = alternate_path->sgid;
        lap_msg->alt_remote_gid = alternate_path->dgid;
        cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
        cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
        lap_msg->alt_hop_limit = alternate_path->hop_limit;
        cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
        cm_lap_set_sl(lap_msg, alternate_path->sl);
        cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
        cm_lap_set_local_ack_timeout(lap_msg,
                min(31, alternate_path->packet_life_time + 1));

        if (private_data && private_data_len)
                memcpy(lap_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_lap(struct ib_cm_id *cm_id,
                   struct ib_sa_path_rec *alternate_path,
                   const void *private_data,
                   u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_ESTABLISHED ||
            (cm_id->lap_state != IB_CM_LAP_UNINIT &&
             cm_id->lap_state != IB_CM_LAP_IDLE)) {
                ret = -EINVAL;
                goto out;
        }

        ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
        if (ret)
                goto out;

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto out;

        cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
                      alternate_path, private_data, private_data_len);
        msg->timeout_ms = cm_id_priv->timeout_ms;
        msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }

        cm_id->lap_state = IB_CM_LAP_SENT;
        cm_id_priv->msg = msg;
out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);

static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
                                    struct ib_sa_path_rec *path,
                                    struct cm_lap_msg *lap_msg)
{
        memset(path, 0, sizeof *path);
        path->dgid = lap_msg->alt_local_gid;
        path->sgid = lap_msg->alt_remote_gid;
        path->dlid = lap_msg->alt_local_lid;
        path->slid = lap_msg->alt_remote_lid;
        path->flow_label = cm_lap_get_flow_label(lap_msg);
        path->hop_limit = lap_msg->alt_hop_limit;
        path->traffic_class = cm_lap_get_traffic_class(lap_msg);
        path->reversible = 1;
        path->pkey = cm_id_priv->pkey;
        path->sl = cm_lap_get_sl(lap_msg);
        path->mtu_selector = IB_SA_EQ;
        path->mtu = cm_id_priv->path_mtu;
        path->rate_selector = IB_SA_EQ;
        path->rate = cm_lap_get_packet_rate(lap_msg);
        path->packet_life_time_selector = IB_SA_EQ;
        path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
        path->packet_life_time -= (path->packet_life_time > 0);
}

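/*
 * Handle a load alternate path request: build the alternate path record
 * from the LAP, resend an MRA if one is already outstanding, and queue
 * the event so the consumer can accept or reject the new path.
 */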
static int cm_lap_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_lap_msg *lap_msg;
        struct ib_cm_lap_event_param *param;
        struct ib_mad_send_buf *msg = NULL;
        unsigned long flags;
        int ret;

        /* todo: verify LAP request and send reject APR if invalid. */
        lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
                                   lap_msg->local_comm_id);
        if (!cm_id_priv)
                return -EINVAL;

        param = &work->cm_event.param.lap_rcvd;
        param->alternate_path = &work->path[0];
        cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
        work->cm_event.private_data = &lap_msg->private_data;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
                goto unlock;

        switch (cm_id_priv->id.lap_state) {
        case IB_CM_LAP_UNINIT:
        case IB_CM_LAP_IDLE:
                break;
        case IB_CM_MRA_LAP_SENT:
                if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
                        goto unlock;

                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_OTHER,
                              cm_id_priv->service_timeout,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);

                if (ib_post_send_mad(msg, NULL))
                        cm_free_msg(msg);
                goto deref;
        default:
                goto unlock;
        }

        cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
        cm_id_priv->tid = lap_msg->hdr.tid;
        cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
                                work->mad_recv_wc->recv_buf.grh,
                                &cm_id_priv->av);
        cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;

unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:  cm_deref_id(cm_id_priv);
        return -EINVAL;
}

static void cm_format_apr(struct cm_apr_msg *apr_msg,
                          struct cm_id_private *cm_id_priv,
                          enum ib_cm_apr_status status,
                          void *info,
                          u8 info_length,
                          const void *private_data,
                          u8 private_data_len)
{
        cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
        apr_msg->local_comm_id = cm_id_priv->id.local_id;
        apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
        apr_msg->ap_status = (u8) status;

        if (info && info_length) {
                apr_msg->info_length = info_length;
                memcpy(apr_msg->info, info, info_length);
        }

        if (private_data && private_data_len)
                memcpy(apr_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_apr(struct ib_cm_id *cm_id,
                   enum ib_cm_apr_status status,
                   void *info,
                   u8 info_length,
                   const void *private_data,
                   u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
            (info && info_length > IB_CM_APR_INFO_LENGTH))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_ESTABLISHED ||
            (cm_id->lap_state != IB_CM_LAP_RCVD &&
             cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
                ret = -EINVAL;
                goto out;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto out;

        cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
                      info, info_length, private_data, private_data_len);
        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }

        cm_id->lap_state = IB_CM_LAP_IDLE;
out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);

static int cm_apr_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_apr_msg *apr_msg;
        unsigned long flags;
        int ret;

        apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
                                   apr_msg->local_comm_id);
        if (!cm_id_priv)
                return -EINVAL; /* Unmatched reply. */

        work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
        work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
        work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
        work->cm_event.private_data = &apr_msg->private_data;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
            (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
             cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto out;
        }
        cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        cm_id_priv->msg = NULL;

        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}

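/*
 * The timewait period for a connection has expired: remove it from the
 * timewait list and, if the cm_id is still in timewait, return it to the
 * idle state and notify the consumer.
 */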
static int cm_timewait_handler(struct cm_work *work)
{
        struct cm_timewait_info *timewait_info;
        struct cm_id_private *cm_id_priv;
        int ret;

        timewait_info = (struct cm_timewait_info *)work;
        spin_lock_irq(&cm.lock);
        list_del(&timewait_info->list);
        spin_unlock_irq(&cm.lock);

        cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
                                   timewait_info->work.remote_id);
        if (!cm_id_priv)
                return -EINVAL;

        spin_lock_irq(&cm_id_priv->lock);
        if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
            cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
                spin_unlock_irq(&cm_id_priv->lock);
                goto out;
        }
        cm_id_priv->id.state = IB_CM_IDLE;
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irq(&cm_id_priv->lock);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}

static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
                               struct cm_id_private *cm_id_priv,
                               struct ib_cm_sidr_req_param *param)
{
        cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
                          cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
        sidr_req_msg->request_id = cm_id_priv->id.local_id;
        sidr_req_msg->pkey = cpu_to_be16(param->path->pkey);
        sidr_req_msg->service_id = param->service_id;

        if (param->private_data && param->private_data_len)
                memcpy(sidr_req_msg->private_data, param->private_data,
                       param->private_data_len);
}

int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
                        struct ib_cm_sidr_req_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if (!param->path || (param->private_data &&
             param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
        if (ret)
                goto out;

        cm_id->service_id = param->service_id;
        cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
        cm_id_priv->timeout_ms = param->timeout_ms;
        cm_id_priv->max_cm_retries = param->max_cm_retries;
        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto out;

        cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
                           param);
        msg->timeout_ms = cm_id_priv->timeout_ms;
        msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state == IB_CM_IDLE)
                ret = ib_post_send_mad(msg, NULL);
        else
                ret = -EINVAL;

        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                goto out;
        }
        cm_id->state = IB_CM_SIDR_REQ_SENT;
        cm_id_priv->msg = msg;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);

static void cm_format_sidr_req_event(struct cm_work *work,
                                     struct ib_cm_id *listen_id)
{
        struct cm_sidr_req_msg *sidr_req_msg;
        struct ib_cm_sidr_req_event_param *param;

        sidr_req_msg = (struct cm_sidr_req_msg *)
                                work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.sidr_req_rcvd;
        param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
        param->listen_id = listen_id;
        param->port = work->port->port_num;
        work->cm_event.private_data = &sidr_req_msg->private_data;
}

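/*
 * Handle a service ID resolution request: create a cm_id for the reply,
 * drop duplicates already in the remote SIDR table, match the request to
 * a listener, and pass it to the listener's callback.
 */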
static int cm_sidr_req_handler(struct cm_work *work)
{
        struct ib_cm_id *cm_id;
        struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
        struct cm_sidr_req_msg *sidr_req_msg;
        struct ib_wc *wc;
        unsigned long flags;

        cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
        if (IS_ERR(cm_id))
                return PTR_ERR(cm_id);
        cm_id_priv = container_of(cm_id, struct cm_id_private, id);

        /* Record SGID/SLID and request ID for lookup. */
        sidr_req_msg = (struct cm_sidr_req_msg *)
                                work->mad_recv_wc->recv_buf.mad;
        wc = work->mad_recv_wc->wc;
        cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
        cm_id_priv->av.dgid.global.interface_id = 0;
        cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
                                work->mad_recv_wc->recv_buf.grh,
                                &cm_id_priv->av);
        cm_id_priv->id.remote_id = sidr_req_msg->request_id;
        cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
        cm_id_priv->tid = sidr_req_msg->hdr.tid;
        atomic_inc(&cm_id_priv->work_count);

        spin_lock_irqsave(&cm.lock, flags);
        cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
        if (cur_cm_id_priv) {
                spin_unlock_irqrestore(&cm.lock, flags);
                goto out; /* Duplicate message. */
        }
        cur_cm_id_priv = cm_find_listen(cm_id->device,
                                        sidr_req_msg->service_id,
                                        sidr_req_msg->private_data);
        if (!cur_cm_id_priv) {
                rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
                spin_unlock_irqrestore(&cm.lock, flags);
                /* todo: reply with no match */
                goto out; /* No match. */
        }
        atomic_inc(&cur_cm_id_priv->refcount);
        spin_unlock_irqrestore(&cm.lock, flags);

        cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
        cm_id_priv->id.context = cur_cm_id_priv->id.context;
        cm_id_priv->id.service_id = sidr_req_msg->service_id;
        cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

        cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
        cm_process_work(cm_id_priv, work);
        cm_deref_id(cur_cm_id_priv);
        return 0;
out:
        ib_destroy_cm_id(&cm_id_priv->id);
        return -EINVAL;
}

static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
                               struct cm_id_private *cm_id_priv,
                               struct ib_cm_sidr_rep_param *param)
{
        cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
                          cm_id_priv->tid);
        sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
        sidr_rep_msg->status = param->status;
        cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
        sidr_rep_msg->service_id = cm_id_priv->id.service_id;
        sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

        if (param->info && param->info_length)
                memcpy(sidr_rep_msg->info, param->info, param->info_length);

        if (param->private_data && param->private_data_len)
                memcpy(sidr_rep_msg->private_data, param->private_data,
                       param->private_data_len);
}

int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
                        struct ib_cm_sidr_rep_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
            (param->private_data &&
             param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
                ret = -EINVAL;
                goto error;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto error;

        cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
                           param);
        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }
        cm_id->state = IB_CM_IDLE;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        spin_lock_irqsave(&cm.lock, flags);
        rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
        spin_unlock_irqrestore(&cm.lock, flags);
        return 0;

error:  spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);

static void cm_format_sidr_rep_event(struct cm_work *work)
{
        struct cm_sidr_rep_msg *sidr_rep_msg;
        struct ib_cm_sidr_rep_event_param *param;

        sidr_rep_msg = (struct cm_sidr_rep_msg *)
                                work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.sidr_rep_rcvd;
        param->status = sidr_rep_msg->status;
        param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
        param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
        param->info = &sidr_rep_msg->info;
        param->info_len = sidr_rep_msg->info_length;
        work->cm_event.private_data = &sidr_rep_msg->private_data;
}

static int cm_sidr_rep_handler(struct cm_work *work)
{
        struct cm_sidr_rep_msg *sidr_rep_msg;
        struct cm_id_private *cm_id_priv;
        unsigned long flags;

        sidr_rep_msg = (struct cm_sidr_rep_msg *)
                                work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
        if (!cm_id_priv)
                return -EINVAL; /* Unmatched reply. */

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto out;
        }
        cm_id_priv->id.state = IB_CM_IDLE;
        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        cm_format_sidr_rep_event(work);
        cm_process_work(cm_id_priv, work);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}

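/*
 * A send completed in error.  If the failed MAD is still the active
 * message for its cm_id and the state has not changed, unwind the state
 * transition and report the corresponding error event to the consumer.
 */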
static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	unsigned long flags;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_free_msg(msg);
}
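/*
 * Send completion handler for the MAD agent registered on each port.
 * Successful or flushed sends just free the message; other errors are
 * routed to cm_process_send_error() when the message carries context.
 */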
static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}
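/*
 * Workqueue handler: dispatch a queued CM event to the handler for its
 * message type.  If the handler returns an error, the work item is freed
 * here.
 */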
static void cm_work_handler(struct work_struct *_work)
{
	struct cm_work *work = container_of(_work, struct cm_work, work.work);
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}
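/*
 * The consumer has signalled that communication is established (for
 * example, data arrived before the RTU).  Move the cm_id to ESTABLISHED
 * immediately and deliver the IB_CM_USER_ESTABLISHED event from the
 * workqueue.
 */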
static int cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item.  To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;
	queue_delayed_work(cm.wq, &work->work, 0);
out:
	return ret;
}
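/* Switch the primary address vector to the alternate path after path migration. */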
static int cm_migrate(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_ESTABLISHED &&
	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
		cm_id->lap_state = IB_CM_LAP_IDLE;
		cm_id_priv->av = cm_id_priv->alt_av;
	} else
		ret = -EINVAL;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}
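/*
 * Notify the CM of a QP event reported by the hardware: COMM_EST maps to
 * cm_establish() and PATH_MIG to cm_migrate().
 */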
int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
{
	int ret;

	switch (event) {
	case IB_EVENT_COMM_EST:
		ret = cm_establish(cm_id);
		break;
	case IB_EVENT_PATH_MIG:
		ret = cm_migrate(cm_id);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_notify);
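/*
 * Receive completion handler: map the incoming MAD's attribute ID to a
 * CM event, allocate a work item (with room for any path records), and
 * queue it for cm_work_handler().
 */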
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_work *work;
	enum ib_cm_event_type event;
	int paths = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
						    alt_local_lid != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = (struct cm_port *)mad_agent->context;
	queue_delayed_work(cm.wq, &work->work, 0);
}
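/* Fill in the QP attributes and mask needed to move the QP to the INIT state. */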
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
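/* Fill in the QP attributes and mask needed to move the QP to the RTR state. */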
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout =
					cm_id_priv->alt_av.packet_life_time + 1;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
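/* Fill in the QP attributes and mask needed to move the QP to the RTS state. */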
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	/* Allow transition to RTS before sending REP */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
			if (cm_id_priv->qp_type == IB_QPT_RC) {
				*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
						 IB_QP_RNR_RETRY |
						 IB_QP_MAX_QP_RD_ATOMIC;
				qp_attr->timeout =
					cm_id_priv->av.packet_life_time + 1;
				qp_attr->retry_cnt = cm_id_priv->retry_count;
				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
				qp_attr->max_rd_atomic =
					cm_id_priv->initiator_depth;
			}
			if (cm_id_priv->alt_av.ah_attr.dlid) {
				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
				qp_attr->path_mig_state = IB_MIG_REARM;
			}
		} else {
			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout =
				cm_id_priv->alt_av.packet_life_time + 1;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
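/*
 * Return the QP attributes and attribute mask that a consumer should pass
 * to ib_modify_qp() for the requested QP state, based on the connection's
 * current CM state.
 */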
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
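/*
 * Device-add callback for the CM client: register a GSI MAD agent on
 * every physical port, advertise CM support in the port capability mask,
 * and add the device to the global CM device list.
 */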
static void cm_add_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	u8 i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
			 device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->device = device;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		port->cm_dev = cm_dev;
		port->port_num = i;
		port->mad_agent = ib_register_mad_agent(device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port);
		if (IS_ERR(port->mad_agent))
			goto error1;

		ret = ib_modify_port(device, i, 0, &port_modify);
		if (ret)
			goto error2;
	}
	ib_set_client_data(device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error2:
	ib_unregister_mad_agent(port->mad_agent);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}
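/* Device-removal callback: undo everything done in cm_add_one(). */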
static void cm_remove_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	cm_dev = ib_get_client_data(device, &cm_client);
	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
	kfree(cm_dev);
}
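/*
 * Module init: set up the global CM state (lookup tables, ID allocator,
 * timewait list) and the event workqueue, then register as an IB client.
 */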
static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	idr_pre_get(&cm.local_id_table, GFP_KERNEL);
	INIT_LIST_HEAD(&cm.timewait_list);

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq)
		return -ENOMEM;

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error;

	return 0;
error:
	destroy_workqueue(cm.wq);
	return ret;
}
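/*
 * Module exit: cancel pending timewait work, tear down the workqueue,
 * free any remaining timewait entries, and unregister the client.
 */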
static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	ib_unregister_client(&cm_client);
	idr_destroy(&cm.local_id_table);
}
module_init(ib_cm_init);
module_exit(ib_cm_cleanup);