cm.c

  1. /*
  2. * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #include <linux/module.h>
  33. #include <linux/list.h>
  34. #include <linux/workqueue.h>
  35. #include <linux/skbuff.h>
  36. #include <linux/timer.h>
  37. #include <linux/notifier.h>
  38. #include <linux/inetdevice.h>
  39. #include <linux/ip.h>
  40. #include <linux/tcp.h>
  41. #include <net/neighbour.h>
  42. #include <net/netevent.h>
  43. #include <net/route.h>
  44. #include "iw_cxgb4.h"
  45. static char *states[] = {
  46. "idle",
  47. "listen",
  48. "connecting",
  49. "mpa_wait_req",
  50. "mpa_req_sent",
  51. "mpa_req_rcvd",
  52. "mpa_rep_sent",
  53. "fpdu_mode",
  54. "aborting",
  55. "closing",
  56. "moribund",
  57. "dead",
  58. NULL,
  59. };
  60. static int dack_mode = 1;
  61. module_param(dack_mode, int, 0644);
  62. MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
  63. int c4iw_max_read_depth = 8;
  64. module_param(c4iw_max_read_depth, int, 0644);
  65. MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
  66. static int enable_tcp_timestamps;
  67. module_param(enable_tcp_timestamps, int, 0644);
  68. MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");
  69. static int enable_tcp_sack;
  70. module_param(enable_tcp_sack, int, 0644);
  71. MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");
  72. static int enable_tcp_window_scaling = 1;
  73. module_param(enable_tcp_window_scaling, int, 0644);
  74. MODULE_PARM_DESC(enable_tcp_window_scaling,
  75. "Enable tcp window scaling (default=1)");
  76. int c4iw_debug;
  77. module_param(c4iw_debug, int, 0644);
  78. MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");
  79. static int peer2peer;
  80. module_param(peer2peer, int, 0644);
  81. MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
  82. static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
  83. module_param(p2p_type, int, 0644);
  84. MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
  85. "1=RDMA_READ 0=RDMA_WRITE (default 1)");
  86. static int ep_timeout_secs = 60;
  87. module_param(ep_timeout_secs, int, 0644);
  88. MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
  89. "in seconds (default=60)");
  90. static int mpa_rev = 1;
  91. module_param(mpa_rev, int, 0644);
  92. MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
  93. "1 is RFC0544 spec compliant, 2 is IETF MPA Peer Connect Draft"
  94. " compliant (default=1)");
  95. static int markers_enabled;
  96. module_param(markers_enabled, int, 0644);
  97. MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");
  98. static int crc_enabled = 1;
  99. module_param(crc_enabled, int, 0644);
  100. MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");
  101. static int rcv_win = 256 * 1024;
  102. module_param(rcv_win, int, 0644);
  103. MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
  104. static int snd_win = 128 * 1024;
  105. module_param(snd_win, int, 0644);
  106. MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");
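/*
 * All of the parameters above can be set at load time, e.g. (illustrative
 * invocation for the iw_cxgb4 module):
 *
 *   modprobe iw_cxgb4 mpa_rev=2 peer2peer=1 rcv_win=262144
 *
 * rcv_win and snd_win feed the RCV_BUFSIZ and FLOWC SNDBUF programming
 * below, while the mpa_*, markers_enabled and crc_enabled knobs control the
 * MPA negotiation done in send_mpa_req()/process_mpa_reply().
 */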
  107. static struct workqueue_struct *workq;
  108. static struct sk_buff_head rxq;
  109. static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
  110. static void ep_timeout(unsigned long arg);
  111. static void connect_reply_upcall(struct c4iw_ep *ep, int status);
  112. static LIST_HEAD(timeout_list);
  113. static spinlock_t timeout_lock;
  114. static void start_ep_timer(struct c4iw_ep *ep)
  115. {
  116. PDBG("%s ep %p\n", __func__, ep);
  117. if (timer_pending(&ep->timer)) {
  118. PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
  119. del_timer_sync(&ep->timer);
  120. } else
  121. c4iw_get_ep(&ep->com);
  122. ep->timer.expires = jiffies + ep_timeout_secs * HZ;
  123. ep->timer.data = (unsigned long)ep;
  124. ep->timer.function = ep_timeout;
  125. add_timer(&ep->timer);
  126. }
  127. static void stop_ep_timer(struct c4iw_ep *ep)
  128. {
  129. PDBG("%s ep %p\n", __func__, ep);
  130. if (!timer_pending(&ep->timer)) {
  131. WARN(1, "%s timer stopped when its not running! "
  132. "ep %p state %u\n", __func__, ep, ep->com.state);
  133. return;
  134. }
  135. del_timer_sync(&ep->timer);
  136. c4iw_put_ep(&ep->com);
  137. }
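/*
 * Timer/refcount convention used above: start_ep_timer() takes a reference
 * on the endpoint unless a timer was already pending (in which case the
 * existing reference is reused), and stop_ep_timer() drops that reference.
 * The expiry handler, ep_timeout() (later in this file), is expected to
 * drop it on the expiry path instead.
 */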
  138. static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
  139. struct l2t_entry *l2e)
  140. {
  141. int error = 0;
  142. if (c4iw_fatal_error(rdev)) {
  143. kfree_skb(skb);
  144. PDBG("%s - device in error state - dropping\n", __func__);
  145. return -EIO;
  146. }
  147. error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
  148. if (error < 0)
  149. kfree_skb(skb);
  150. return error < 0 ? error : 0;
  151. }
  152. int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
  153. {
  154. int error = 0;
  155. if (c4iw_fatal_error(rdev)) {
  156. kfree_skb(skb);
  157. PDBG("%s - device in error state - dropping\n", __func__);
  158. return -EIO;
  159. }
  160. error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
  161. if (error < 0)
  162. kfree_skb(skb);
  163. return error < 0 ? error : 0;
  164. }
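/*
 * Both send helpers above share the same contract: the skb is always
 * consumed (freed on any error path, handed to the LLD otherwise) and the
 * return value is 0 on success or a negative errno. A fatal adapter error
 * short-circuits the send entirely with -EIO.
 */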
  165. static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
  166. {
  167. struct cpl_tid_release *req;
  168. skb = get_skb(skb, sizeof *req, GFP_KERNEL);
  169. if (!skb)
  170. return;
  171. req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
  172. INIT_TP_WR(req, hwtid);
  173. OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
  174. set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
  175. c4iw_ofld_send(rdev, skb);
  176. return;
  177. }
  178. static void set_emss(struct c4iw_ep *ep, u16 opt)
  179. {
  180. ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
  181. ep->mss = ep->emss;
  182. if (GET_TCPOPT_TSTAMP(opt))
  183. ep->emss -= 12;
  184. if (ep->emss < 128)
  185. ep->emss = 128;
  186. PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
  187. ep->mss, ep->emss);
  188. }
  189. static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
  190. {
  191. enum c4iw_ep_state state;
  192. mutex_lock(&epc->mutex);
  193. state = epc->state;
  194. mutex_unlock(&epc->mutex);
  195. return state;
  196. }
  197. static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
  198. {
  199. epc->state = new;
  200. }
  201. static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
  202. {
  203. mutex_lock(&epc->mutex);
  204. PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
  205. __state_set(epc, new);
  206. mutex_unlock(&epc->mutex);
  207. return;
  208. }
  209. static void *alloc_ep(int size, gfp_t gfp)
  210. {
  211. struct c4iw_ep_common *epc;
  212. epc = kzalloc(size, gfp);
  213. if (epc) {
  214. kref_init(&epc->kref);
  215. mutex_init(&epc->mutex);
  216. c4iw_init_wr_wait(&epc->wr_wait);
  217. }
  218. PDBG("%s alloc ep %p\n", __func__, epc);
  219. return epc;
  220. }
  221. void _c4iw_free_ep(struct kref *kref)
  222. {
  223. struct c4iw_ep *ep;
  224. ep = container_of(kref, struct c4iw_ep, com.kref);
  225. PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
  226. if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
  227. cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
  228. dst_release(ep->dst);
  229. cxgb4_l2t_release(ep->l2t);
  230. }
  231. kfree(ep);
  232. }
  233. static void release_ep_resources(struct c4iw_ep *ep)
  234. {
  235. set_bit(RELEASE_RESOURCES, &ep->com.flags);
  236. c4iw_put_ep(&ep->com);
  237. }
  238. static int status2errno(int status)
  239. {
  240. switch (status) {
  241. case CPL_ERR_NONE:
  242. return 0;
  243. case CPL_ERR_CONN_RESET:
  244. return -ECONNRESET;
  245. case CPL_ERR_ARP_MISS:
  246. return -EHOSTUNREACH;
  247. case CPL_ERR_CONN_TIMEDOUT:
  248. return -ETIMEDOUT;
  249. case CPL_ERR_TCAM_FULL:
  250. return -ENOMEM;
  251. case CPL_ERR_CONN_EXIST:
  252. return -EADDRINUSE;
  253. default:
  254. return -EIO;
  255. }
  256. }
  257. /*
  258. * Try and reuse skbs already allocated...
  259. */
  260. static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
  261. {
  262. if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
  263. skb_trim(skb, 0);
  264. skb_get(skb);
  265. skb_reset_transport_header(skb);
  266. } else {
  267. skb = alloc_skb(len, gfp);
  268. }
  269. return skb;
  270. }
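/*
 * get_skb() reuse rules, as implemented above: an skb passed in is recycled
 * only if it is linear and not cloned; it is trimmed to zero length and an
 * extra reference is taken so the caller's reference accounting matches the
 * allocation case. Otherwise a fresh skb of 'len' bytes is allocated.
 * Callers must still check for a NULL return when allocation can fail.
 */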
  271. static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
  272. __be32 peer_ip, __be16 local_port,
  273. __be16 peer_port, u8 tos)
  274. {
  275. struct rtable *rt;
  276. struct flowi4 fl4;
  277. rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
  278. peer_port, local_port, IPPROTO_TCP,
  279. tos, 0);
  280. if (IS_ERR(rt))
  281. return NULL;
  282. return rt;
  283. }
  284. static void arp_failure_discard(void *handle, struct sk_buff *skb)
  285. {
  286. PDBG("%s c4iw_dev %p\n", __func__, handle);
  287. kfree_skb(skb);
  288. }
  289. /*
  290. * Handle an ARP failure for an active open.
  291. */
  292. static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
  293. {
  294. printk(KERN_ERR MOD "ARP failure duing connect\n");
  295. kfree_skb(skb);
  296. }
  297. /*
  298. * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
  299. * and send it along.
  300. */
  301. static void abort_arp_failure(void *handle, struct sk_buff *skb)
  302. {
  303. struct c4iw_rdev *rdev = handle;
  304. struct cpl_abort_req *req = cplhdr(skb);
  305. PDBG("%s rdev %p\n", __func__, rdev);
  306. req->cmd = CPL_ABORT_NO_RST;
  307. c4iw_ofld_send(rdev, skb);
  308. }
  309. static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
  310. {
  311. unsigned int flowclen = 80;
  312. struct fw_flowc_wr *flowc;
  313. int i;
  314. skb = get_skb(skb, flowclen, GFP_KERNEL);
  315. flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
  316. flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
  317. FW_FLOWC_WR_NPARAMS(8));
  318. flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
  319. 16)) | FW_WR_FLOWID(ep->hwtid));
  320. flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
  321. flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
  322. flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
  323. flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
  324. flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
  325. flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
  326. flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
  327. flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
  328. flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
  329. flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
  330. flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
  331. flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
  332. flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
  333. flowc->mnemval[6].val = cpu_to_be32(snd_win);
  334. flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
  335. flowc->mnemval[7].val = cpu_to_be32(ep->emss);
  336. /* Pad WR to 16 byte boundary */
  337. flowc->mnemval[8].mnemonic = 0;
  338. flowc->mnemval[8].val = 0;
  339. for (i = 0; i < 9; i++) {
  340. flowc->mnemval[i].r4[0] = 0;
  341. flowc->mnemval[i].r4[1] = 0;
  342. flowc->mnemval[i].r4[2] = 0;
  343. }
  344. set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
  345. c4iw_ofld_send(&ep->com.dev->rdev, skb);
  346. }
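/*
 * The hard-coded flowclen of 80 bytes appears to cover the fw_flowc_wr
 * header plus nine 8-byte mnemonic/value slots (eight real parameters and
 * one pad entry to keep the WR 16-byte aligned); FW_WR_LEN16() then
 * advertises 80/16 = 5 sixteen-byte units to the firmware. If the parameter
 * count changes, flowclen and FW_FLOWC_WR_NPARAMS() must change together.
 */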
  347. static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
  348. {
  349. struct cpl_close_con_req *req;
  350. struct sk_buff *skb;
  351. int wrlen = roundup(sizeof *req, 16);
  352. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  353. skb = get_skb(NULL, wrlen, gfp);
  354. if (!skb) {
  355. printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
  356. return -ENOMEM;
  357. }
  358. set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
  359. t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
  360. req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
  361. memset(req, 0, wrlen);
  362. INIT_TP_WR(req, ep->hwtid);
  363. OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
  364. ep->hwtid));
  365. return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
  366. }
  367. static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
  368. {
  369. struct cpl_abort_req *req;
  370. int wrlen = roundup(sizeof *req, 16);
  371. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  372. skb = get_skb(skb, wrlen, gfp);
  373. if (!skb) {
  374. printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
  375. __func__);
  376. return -ENOMEM;
  377. }
  378. set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
  379. t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
  380. req = (struct cpl_abort_req *) skb_put(skb, wrlen);
  381. memset(req, 0, wrlen);
  382. INIT_TP_WR(req, ep->hwtid);
  383. OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
  384. req->cmd = CPL_ABORT_SEND_RST;
  385. return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
  386. }
  387. static int send_connect(struct c4iw_ep *ep)
  388. {
  389. struct cpl_act_open_req *req;
  390. struct sk_buff *skb;
  391. u64 opt0;
  392. u32 opt2;
  393. unsigned int mtu_idx;
  394. int wscale;
  395. int wrlen = roundup(sizeof *req, 16);
  396. PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
  397. skb = get_skb(NULL, wrlen, GFP_KERNEL);
  398. if (!skb) {
  399. printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
  400. __func__);
  401. return -ENOMEM;
  402. }
  403. set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
  404. cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
  405. wscale = compute_wscale(rcv_win);
  406. opt0 = KEEP_ALIVE(1) |
  407. DELACK(1) |
  408. WND_SCALE(wscale) |
  409. MSS_IDX(mtu_idx) |
  410. L2T_IDX(ep->l2t->idx) |
  411. TX_CHAN(ep->tx_chan) |
  412. SMAC_SEL(ep->smac_idx) |
  413. DSCP(ep->tos) |
  414. ULP_MODE(ULP_MODE_TCPDDP) |
  415. RCV_BUFSIZ(rcv_win>>10);
  416. opt2 = RX_CHANNEL(0) |
  417. RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
  418. if (enable_tcp_timestamps)
  419. opt2 |= TSTAMPS_EN(1);
  420. if (enable_tcp_sack)
  421. opt2 |= SACK_EN(1);
  422. if (wscale && enable_tcp_window_scaling)
  423. opt2 |= WND_SCALE_EN(1);
  424. t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
  425. req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
  426. INIT_TP_WR(req, 0);
  427. OPCODE_TID(req) = cpu_to_be32(
  428. MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
  429. req->local_port = ep->com.local_addr.sin_port;
  430. req->peer_port = ep->com.remote_addr.sin_port;
  431. req->local_ip = ep->com.local_addr.sin_addr.s_addr;
  432. req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
  433. req->opt0 = cpu_to_be64(opt0);
  434. req->params = 0;
  435. req->opt2 = cpu_to_be32(opt2);
  436. return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
  437. }
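/*
 * Worked example for the opt0 fields above, with the module defaults:
 * rcv_win = 256KB, so compute_wscale() (defined in iw_cxgb4.h, not shown
 * here) should return 3, the smallest shift for which 65535 << shift covers
 * 262144, and RCV_BUFSIZ is programmed with rcv_win >> 10 = 256, i.e. the
 * receive buffer size in 1KB units. mtu_idx selects the closest entry in
 * the adapter's MTU table for the path MTU cached in ep->mtu.
 */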
  438. static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
  439. u8 mpa_rev_to_use)
  440. {
  441. int mpalen, wrlen;
  442. struct fw_ofld_tx_data_wr *req;
  443. struct mpa_message *mpa;
  444. struct mpa_v2_conn_params mpa_v2_params;
  445. PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
  446. BUG_ON(skb_cloned(skb));
  447. mpalen = sizeof(*mpa) + ep->plen;
  448. if (mpa_rev_to_use == 2)
  449. mpalen += sizeof(struct mpa_v2_conn_params);
  450. wrlen = roundup(mpalen + sizeof *req, 16);
  451. skb = get_skb(skb, wrlen, GFP_KERNEL);
  452. if (!skb) {
  453. connect_reply_upcall(ep, -ENOMEM);
  454. return;
  455. }
  456. set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
  457. req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
  458. memset(req, 0, wrlen);
  459. req->op_to_immdlen = cpu_to_be32(
  460. FW_WR_OP(FW_OFLD_TX_DATA_WR) |
  461. FW_WR_COMPL(1) |
  462. FW_WR_IMMDLEN(mpalen));
  463. req->flowid_len16 = cpu_to_be32(
  464. FW_WR_FLOWID(ep->hwtid) |
  465. FW_WR_LEN16(wrlen >> 4));
  466. req->plen = cpu_to_be32(mpalen);
  467. req->tunnel_to_proxy = cpu_to_be32(
  468. FW_OFLD_TX_DATA_WR_FLUSH(1) |
  469. FW_OFLD_TX_DATA_WR_SHOVE(1));
  470. mpa = (struct mpa_message *)(req + 1);
  471. memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
  472. mpa->flags = (crc_enabled ? MPA_CRC : 0) |
  473. (markers_enabled ? MPA_MARKERS : 0) |
  474. (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
  475. mpa->private_data_size = htons(ep->plen);
  476. mpa->revision = mpa_rev_to_use;
  477. if (mpa_rev_to_use == 1) {
  478. ep->tried_with_mpa_v1 = 1;
  479. ep->retry_with_mpa_v1 = 0;
  480. }
  481. if (mpa_rev_to_use == 2) {
  482. mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
  483. sizeof (struct mpa_v2_conn_params));
  484. mpa_v2_params.ird = htons((u16)ep->ird);
  485. mpa_v2_params.ord = htons((u16)ep->ord);
  486. if (peer2peer) {
  487. mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
  488. if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
  489. mpa_v2_params.ord |=
  490. htons(MPA_V2_RDMA_WRITE_RTR);
  491. else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
  492. mpa_v2_params.ord |=
  493. htons(MPA_V2_RDMA_READ_RTR);
  494. }
  495. memcpy(mpa->private_data, &mpa_v2_params,
  496. sizeof(struct mpa_v2_conn_params));
  497. if (ep->plen)
  498. memcpy(mpa->private_data +
  499. sizeof(struct mpa_v2_conn_params),
  500. ep->mpa_pkt + sizeof(*mpa), ep->plen);
  501. } else
  502. if (ep->plen)
  503. memcpy(mpa->private_data,
  504. ep->mpa_pkt + sizeof(*mpa), ep->plen);
  505. /*
  506. * Reference the mpa skb. This ensures the data area
  507. * will remain in memory until the hw acks the tx.
  508. * Function fw4_ack() will deref it.
  509. */
  510. skb_get(skb);
  511. t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
  512. BUG_ON(ep->mpa_skb);
  513. ep->mpa_skb = skb;
  514. c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
  515. start_ep_timer(ep);
  516. state_set(&ep->com, MPA_REQ_SENT);
  517. ep->mpa_attr.initiator = 1;
  518. return;
  519. }
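/*
 * Wire layout produced above: an fw_ofld_tx_data_wr header, immediately
 * followed by the MPA start request (struct mpa_message), then, for MPA
 * revision 2 only, a struct mpa_v2_conn_params carrying IRD/ORD and the
 * peer-to-peer RTR bits, and finally any ULP private data. Note that
 * private_data_size is grown to include the v2 parameter block, which is
 * why the receive side subtracts it back out in connect_request_upcall()
 * and connect_reply_upcall().
 */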
  520. static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
  521. {
  522. int mpalen, wrlen;
  523. struct fw_ofld_tx_data_wr *req;
  524. struct mpa_message *mpa;
  525. struct sk_buff *skb;
  526. struct mpa_v2_conn_params mpa_v2_params;
  527. PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
  528. mpalen = sizeof(*mpa) + plen;
  529. if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
  530. mpalen += sizeof(struct mpa_v2_conn_params);
  531. wrlen = roundup(mpalen + sizeof *req, 16);
  532. skb = get_skb(NULL, wrlen, GFP_KERNEL);
  533. if (!skb) {
  534. printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
  535. return -ENOMEM;
  536. }
  537. set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
  538. req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
  539. memset(req, 0, wrlen);
  540. req->op_to_immdlen = cpu_to_be32(
  541. FW_WR_OP(FW_OFLD_TX_DATA_WR) |
  542. FW_WR_COMPL(1) |
  543. FW_WR_IMMDLEN(mpalen));
  544. req->flowid_len16 = cpu_to_be32(
  545. FW_WR_FLOWID(ep->hwtid) |
  546. FW_WR_LEN16(wrlen >> 4));
  547. req->plen = cpu_to_be32(mpalen);
  548. req->tunnel_to_proxy = cpu_to_be32(
  549. FW_OFLD_TX_DATA_WR_FLUSH(1) |
  550. FW_OFLD_TX_DATA_WR_SHOVE(1));
  551. mpa = (struct mpa_message *)(req + 1);
  552. memset(mpa, 0, sizeof(*mpa));
  553. memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
  554. mpa->flags = MPA_REJECT;
  555. mpa->revision = mpa_rev;
  556. mpa->private_data_size = htons(plen);
  557. if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
  558. mpa->flags |= MPA_ENHANCED_RDMA_CONN;
  559. mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
  560. sizeof (struct mpa_v2_conn_params));
  561. mpa_v2_params.ird = htons(((u16)ep->ird) |
  562. (peer2peer ? MPA_V2_PEER2PEER_MODEL :
  563. 0));
  564. mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
  565. (p2p_type ==
  566. FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
  567. MPA_V2_RDMA_WRITE_RTR : p2p_type ==
  568. FW_RI_INIT_P2PTYPE_READ_REQ ?
  569. MPA_V2_RDMA_READ_RTR : 0) : 0));
  570. memcpy(mpa->private_data, &mpa_v2_params,
  571. sizeof(struct mpa_v2_conn_params));
  572. if (ep->plen)
  573. memcpy(mpa->private_data +
  574. sizeof(struct mpa_v2_conn_params), pdata, plen);
  575. } else
  576. if (plen)
  577. memcpy(mpa->private_data, pdata, plen);
  578. /*
  579. * Reference the mpa skb again. This ensures the data area
  580. * will remain in memory until the hw acks the tx.
  581. * Function fw4_ack() will deref it.
  582. */
  583. skb_get(skb);
  584. set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
  585. t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
  586. BUG_ON(ep->mpa_skb);
  587. ep->mpa_skb = skb;
  588. return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
  589. }
  590. static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
  591. {
  592. int mpalen, wrlen;
  593. struct fw_ofld_tx_data_wr *req;
  594. struct mpa_message *mpa;
  595. struct sk_buff *skb;
  596. struct mpa_v2_conn_params mpa_v2_params;
  597. PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
  598. mpalen = sizeof(*mpa) + plen;
  599. if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
  600. mpalen += sizeof(struct mpa_v2_conn_params);
  601. wrlen = roundup(mpalen + sizeof *req, 16);
  602. skb = get_skb(NULL, wrlen, GFP_KERNEL);
  603. if (!skb) {
  604. printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
  605. return -ENOMEM;
  606. }
  607. set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
  608. req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
  609. memset(req, 0, wrlen);
  610. req->op_to_immdlen = cpu_to_be32(
  611. FW_WR_OP(FW_OFLD_TX_DATA_WR) |
  612. FW_WR_COMPL(1) |
  613. FW_WR_IMMDLEN(mpalen));
  614. req->flowid_len16 = cpu_to_be32(
  615. FW_WR_FLOWID(ep->hwtid) |
  616. FW_WR_LEN16(wrlen >> 4));
  617. req->plen = cpu_to_be32(mpalen);
  618. req->tunnel_to_proxy = cpu_to_be32(
  619. FW_OFLD_TX_DATA_WR_FLUSH(1) |
  620. FW_OFLD_TX_DATA_WR_SHOVE(1));
  621. mpa = (struct mpa_message *)(req + 1);
  622. memset(mpa, 0, sizeof(*mpa));
  623. memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
  624. mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
  625. (markers_enabled ? MPA_MARKERS : 0);
  626. mpa->revision = ep->mpa_attr.version;
  627. mpa->private_data_size = htons(plen);
  628. if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
  629. mpa->flags |= MPA_ENHANCED_RDMA_CONN;
  630. mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
  631. sizeof (struct mpa_v2_conn_params));
  632. mpa_v2_params.ird = htons((u16)ep->ird);
  633. mpa_v2_params.ord = htons((u16)ep->ord);
  634. if (peer2peer && (ep->mpa_attr.p2p_type !=
  635. FW_RI_INIT_P2PTYPE_DISABLED)) {
  636. mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
  637. if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
  638. mpa_v2_params.ord |=
  639. htons(MPA_V2_RDMA_WRITE_RTR);
  640. else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
  641. mpa_v2_params.ord |=
  642. htons(MPA_V2_RDMA_READ_RTR);
  643. }
  644. memcpy(mpa->private_data, &mpa_v2_params,
  645. sizeof(struct mpa_v2_conn_params));
  646. if (ep->plen)
  647. memcpy(mpa->private_data +
  648. sizeof(struct mpa_v2_conn_params), pdata, plen);
  649. } else
  650. if (plen)
  651. memcpy(mpa->private_data, pdata, plen);
  652. /*
  653. * Reference the mpa skb. This ensures the data area
  654. * will remain in memory until the hw acks the tx.
  655. * Function fw4_ack() will deref it.
  656. */
  657. skb_get(skb);
  658. t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
  659. ep->mpa_skb = skb;
  660. state_set(&ep->com, MPA_REP_SENT);
  661. return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
  662. }
  663. static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
  664. {
  665. struct c4iw_ep *ep;
  666. struct cpl_act_establish *req = cplhdr(skb);
  667. unsigned int tid = GET_TID(req);
  668. unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
  669. struct tid_info *t = dev->rdev.lldi.tids;
  670. ep = lookup_atid(t, atid);
  671. PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
  672. be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
  673. dst_confirm(ep->dst);
  674. /* setup the hwtid for this connection */
  675. ep->hwtid = tid;
  676. cxgb4_insert_tid(t, ep, tid);
  677. ep->snd_seq = be32_to_cpu(req->snd_isn);
  678. ep->rcv_seq = be32_to_cpu(req->rcv_isn);
  679. set_emss(ep, ntohs(req->tcp_opt));
  680. /* dealloc the atid */
  681. cxgb4_free_atid(t, atid);
  682. /* start MPA negotiation */
  683. send_flowc(ep, NULL);
  684. if (ep->retry_with_mpa_v1)
  685. send_mpa_req(ep, skb, 1);
  686. else
  687. send_mpa_req(ep, skb, mpa_rev);
  688. return 0;
  689. }
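/*
 * act_establish() runs when the TCP connection of an active open completes:
 * the atid is exchanged for a hardware tid, the initial send/receive
 * sequence numbers and effective MSS are recorded, a FLOWC work request
 * primes the firmware for the new flow, and the MPA start request is sent
 * (falling back to MPA revision 1 when a v2 attempt is being retried).
 */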
  690. static void close_complete_upcall(struct c4iw_ep *ep)
  691. {
  692. struct iw_cm_event event;
  693. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  694. memset(&event, 0, sizeof(event));
  695. event.event = IW_CM_EVENT_CLOSE;
  696. if (ep->com.cm_id) {
  697. PDBG("close complete delivered ep %p cm_id %p tid %u\n",
  698. ep, ep->com.cm_id, ep->hwtid);
  699. ep->com.cm_id->event_handler(ep->com.cm_id, &event);
  700. ep->com.cm_id->rem_ref(ep->com.cm_id);
  701. ep->com.cm_id = NULL;
  702. ep->com.qp = NULL;
  703. }
  704. }
  705. static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
  706. {
  707. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  708. close_complete_upcall(ep);
  709. state_set(&ep->com, ABORTING);
  710. return send_abort(ep, skb, gfp);
  711. }
  712. static void peer_close_upcall(struct c4iw_ep *ep)
  713. {
  714. struct iw_cm_event event;
  715. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  716. memset(&event, 0, sizeof(event));
  717. event.event = IW_CM_EVENT_DISCONNECT;
  718. if (ep->com.cm_id) {
  719. PDBG("peer close delivered ep %p cm_id %p tid %u\n",
  720. ep, ep->com.cm_id, ep->hwtid);
  721. ep->com.cm_id->event_handler(ep->com.cm_id, &event);
  722. }
  723. }
  724. static void peer_abort_upcall(struct c4iw_ep *ep)
  725. {
  726. struct iw_cm_event event;
  727. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  728. memset(&event, 0, sizeof(event));
  729. event.event = IW_CM_EVENT_CLOSE;
  730. event.status = -ECONNRESET;
  731. if (ep->com.cm_id) {
  732. PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
  733. ep->com.cm_id, ep->hwtid);
  734. ep->com.cm_id->event_handler(ep->com.cm_id, &event);
  735. ep->com.cm_id->rem_ref(ep->com.cm_id);
  736. ep->com.cm_id = NULL;
  737. ep->com.qp = NULL;
  738. }
  739. }
  740. static void connect_reply_upcall(struct c4iw_ep *ep, int status)
  741. {
  742. struct iw_cm_event event;
  743. PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
  744. memset(&event, 0, sizeof(event));
  745. event.event = IW_CM_EVENT_CONNECT_REPLY;
  746. event.status = status;
  747. event.local_addr = ep->com.local_addr;
  748. event.remote_addr = ep->com.remote_addr;
  749. if ((status == 0) || (status == -ECONNREFUSED)) {
  750. if (!ep->tried_with_mpa_v1) {
  751. /* this means MPA_v2 is used */
  752. event.private_data_len = ep->plen -
  753. sizeof(struct mpa_v2_conn_params);
  754. event.private_data = ep->mpa_pkt +
  755. sizeof(struct mpa_message) +
  756. sizeof(struct mpa_v2_conn_params);
  757. } else {
  758. /* this means MPA_v1 is used */
  759. event.private_data_len = ep->plen;
  760. event.private_data = ep->mpa_pkt +
  761. sizeof(struct mpa_message);
  762. }
  763. }
  764. PDBG("%s ep %p tid %u status %d\n", __func__, ep,
  765. ep->hwtid, status);
  766. ep->com.cm_id->event_handler(ep->com.cm_id, &event);
  767. if (status < 0) {
  768. ep->com.cm_id->rem_ref(ep->com.cm_id);
  769. ep->com.cm_id = NULL;
  770. ep->com.qp = NULL;
  771. }
  772. }
  773. static void connect_request_upcall(struct c4iw_ep *ep)
  774. {
  775. struct iw_cm_event event;
  776. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  777. memset(&event, 0, sizeof(event));
  778. event.event = IW_CM_EVENT_CONNECT_REQUEST;
  779. event.local_addr = ep->com.local_addr;
  780. event.remote_addr = ep->com.remote_addr;
  781. event.provider_data = ep;
  782. if (!ep->tried_with_mpa_v1) {
  783. /* this means MPA_v2 is used */
  784. event.ord = ep->ord;
  785. event.ird = ep->ird;
  786. event.private_data_len = ep->plen -
  787. sizeof(struct mpa_v2_conn_params);
  788. event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
  789. sizeof(struct mpa_v2_conn_params);
  790. } else {
  791. /* this means MPA_v1 is used. Send max supported */
  792. event.ord = c4iw_max_read_depth;
  793. event.ird = c4iw_max_read_depth;
  794. event.private_data_len = ep->plen;
  795. event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
  796. }
  797. if (state_read(&ep->parent_ep->com) != DEAD) {
  798. c4iw_get_ep(&ep->com);
  799. ep->parent_ep->com.cm_id->event_handler(
  800. ep->parent_ep->com.cm_id,
  801. &event);
  802. }
  803. c4iw_put_ep(&ep->parent_ep->com);
  804. ep->parent_ep = NULL;
  805. }
  806. static void established_upcall(struct c4iw_ep *ep)
  807. {
  808. struct iw_cm_event event;
  809. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  810. memset(&event, 0, sizeof(event));
  811. event.event = IW_CM_EVENT_ESTABLISHED;
  812. event.ird = ep->ird;
  813. event.ord = ep->ord;
  814. if (ep->com.cm_id) {
  815. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  816. ep->com.cm_id->event_handler(ep->com.cm_id, &event);
  817. }
  818. }
  819. static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
  820. {
  821. struct cpl_rx_data_ack *req;
  822. struct sk_buff *skb;
  823. int wrlen = roundup(sizeof *req, 16);
  824. PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
  825. skb = get_skb(NULL, wrlen, GFP_KERNEL);
  826. if (!skb) {
  827. printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
  828. return 0;
  829. }
  830. req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
  831. memset(req, 0, wrlen);
  832. INIT_TP_WR(req, ep->hwtid);
  833. OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
  834. ep->hwtid));
  835. req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
  836. F_RX_DACK_CHANGE |
  837. V_RX_DACK_MODE(dack_mode));
  838. set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
  839. c4iw_ofld_send(&ep->com.dev->rdev, skb);
  840. return credits;
  841. }
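/*
 * update_rx_credits() returns freed receive-window credits to the hardware
 * with CPL_RX_DATA_ACK. RX_FORCE_ACK reopens the advertised window
 * immediately, and RX_DACK_MODE carries the dack_mode module parameter so
 * delayed-ack behaviour can be tuned at runtime. The credit count handed
 * back is simply the number of payload bytes consumed by the caller (see
 * rx_data() below).
 */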
  842. static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
  843. {
  844. struct mpa_message *mpa;
  845. struct mpa_v2_conn_params *mpa_v2_params;
  846. u16 plen;
  847. u16 resp_ird, resp_ord;
  848. u8 rtr_mismatch = 0, insuff_ird = 0;
  849. struct c4iw_qp_attributes attrs;
  850. enum c4iw_qp_attr_mask mask;
  851. int err;
  852. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  853. /*
  854. * Stop mpa timer. If it expired, then the state has
  855. * changed and we bail since ep_timeout already aborted
  856. * the connection.
  857. */
  858. stop_ep_timer(ep);
  859. if (state_read(&ep->com) != MPA_REQ_SENT)
  860. return;
  861. /*
  862. * If we get more than the supported amount of private data
  863. * then we must fail this connection.
  864. */
  865. if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
  866. err = -EINVAL;
  867. goto err;
  868. }
  869. /*
  870. * copy the new data into our accumulation buffer.
  871. */
  872. skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
  873. skb->len);
  874. ep->mpa_pkt_len += skb->len;
  875. /*
  876. * If we don't even have the mpa message, then bail.
  877. */
  878. if (ep->mpa_pkt_len < sizeof(*mpa))
  879. return;
  880. mpa = (struct mpa_message *) ep->mpa_pkt;
  881. /* Validate MPA header. */
  882. if (mpa->revision > mpa_rev) {
  883. printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
  884. " Received = %d\n", __func__, mpa_rev, mpa->revision);
  885. err = -EPROTO;
  886. goto err;
  887. }
  888. if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
  889. err = -EPROTO;
  890. goto err;
  891. }
  892. plen = ntohs(mpa->private_data_size);
  893. /*
  894. * Fail if there's too much private data.
  895. */
  896. if (plen > MPA_MAX_PRIVATE_DATA) {
  897. err = -EPROTO;
  898. goto err;
  899. }
  900. /*
  901. * Fail if plen does not account for the accumulated packet size.
  902. */
  903. if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
  904. err = -EPROTO;
  905. goto err;
  906. }
  907. ep->plen = (u8) plen;
  908. /*
  909. * If we don't have all the pdata yet, then bail.
  910. * We'll continue processing when more data arrives.
  911. */
  912. if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
  913. return;
  914. if (mpa->flags & MPA_REJECT) {
  915. err = -ECONNREFUSED;
  916. goto err;
  917. }
  918. /*
  919. * If we get here we have accumulated the entire mpa
  920. * start reply message including private data. And
  921. * the MPA header is valid.
  922. */
  923. state_set(&ep->com, FPDU_MODE);
  924. ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) || crc_enabled) ? 1 : 0;
  925. ep->mpa_attr.recv_marker_enabled = markers_enabled;
  926. ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
  927. ep->mpa_attr.version = mpa->revision;
  928. ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
  929. if (mpa->revision == 2) {
  930. ep->mpa_attr.enhanced_rdma_conn =
  931. mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
  932. if (ep->mpa_attr.enhanced_rdma_conn) {
  933. mpa_v2_params = (struct mpa_v2_conn_params *)
  934. (ep->mpa_pkt + sizeof(*mpa));
  935. resp_ird = ntohs(mpa_v2_params->ird) &
  936. MPA_V2_IRD_ORD_MASK;
  937. resp_ord = ntohs(mpa_v2_params->ord) &
  938. MPA_V2_IRD_ORD_MASK;
  939. /*
  940. * This is a double-check. Ideally, the checks below are
  941. * not required, since the ird/ord values have already been
  942. * validated in c4iw_accept_cr.
  943. */
  944. if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
  945. err = -ENOMEM;
  946. ep->ird = resp_ord;
  947. ep->ord = resp_ird;
  948. insuff_ird = 1;
  949. }
  950. if (ntohs(mpa_v2_params->ird) &
  951. MPA_V2_PEER2PEER_MODEL) {
  952. if (ntohs(mpa_v2_params->ord) &
  953. MPA_V2_RDMA_WRITE_RTR)
  954. ep->mpa_attr.p2p_type =
  955. FW_RI_INIT_P2PTYPE_RDMA_WRITE;
  956. else if (ntohs(mpa_v2_params->ord) &
  957. MPA_V2_RDMA_READ_RTR)
  958. ep->mpa_attr.p2p_type =
  959. FW_RI_INIT_P2PTYPE_READ_REQ;
  960. }
  961. }
  962. } else if (mpa->revision == 1)
  963. if (peer2peer)
  964. ep->mpa_attr.p2p_type = p2p_type;
  965. PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
  966. "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
  967. "%d\n", __func__, ep->mpa_attr.crc_enabled,
  968. ep->mpa_attr.recv_marker_enabled,
  969. ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
  970. ep->mpa_attr.p2p_type, p2p_type);
  971. /*
  972. * If the responder's RTR does not match the initiator's, set
  973. * FW_RI_INIT_P2PTYPE_DISABLED in the mpa attributes so that no RTR is
  974. * generated when the QP moves to RTS state.
  975. * A TERM message will be sent after the QP has moved to RTS state.
  976. */
  977. if ((ep->mpa_attr.version == 2) && peer2peer &&
  978. (ep->mpa_attr.p2p_type != p2p_type)) {
  979. ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
  980. rtr_mismatch = 1;
  981. }
  982. attrs.mpa_attr = ep->mpa_attr;
  983. attrs.max_ird = ep->ird;
  984. attrs.max_ord = ep->ord;
  985. attrs.llp_stream_handle = ep;
  986. attrs.next_state = C4IW_QP_STATE_RTS;
  987. mask = C4IW_QP_ATTR_NEXT_STATE |
  988. C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
  989. C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
  990. /* bind QP and TID with INIT_WR */
  991. err = c4iw_modify_qp(ep->com.qp->rhp,
  992. ep->com.qp, mask, &attrs, 1);
  993. if (err)
  994. goto err;
  995. /*
  996. * If the responder's RTR requirement did not match what the initiator
  997. * supports, generate a TERM message.
  998. */
  999. if (rtr_mismatch) {
  1000. printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
  1001. attrs.layer_etype = LAYER_MPA | DDP_LLP;
  1002. attrs.ecode = MPA_NOMATCH_RTR;
  1003. attrs.next_state = C4IW_QP_STATE_TERMINATE;
  1004. err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
  1005. C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
  1006. err = -ENOMEM;
  1007. goto out;
  1008. }
  1009. /*
  1010. * Generate TERM if initiator IRD is not sufficient for responder
  1011. * provided ORD. Currently, we behave the same way even when the
  1012. * responder-provided IRD is also insufficient with respect to the
  1013. * initiator ORD.
  1014. */
  1015. if (insuff_ird) {
  1016. printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
  1017. __func__);
  1018. attrs.layer_etype = LAYER_MPA | DDP_LLP;
  1019. attrs.ecode = MPA_INSUFF_IRD;
  1020. attrs.next_state = C4IW_QP_STATE_TERMINATE;
  1021. err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
  1022. C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
  1023. err = -ENOMEM;
  1024. goto out;
  1025. }
  1026. goto out;
  1027. err:
  1028. state_set(&ep->com, ABORTING);
  1029. send_abort(ep, skb, GFP_KERNEL);
  1030. out:
  1031. connect_reply_upcall(ep, err);
  1032. return;
  1033. }
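/*
 * Summary of process_mpa_reply(): accumulate the streaming data into
 * ep->mpa_pkt until a complete, valid MPA start reply (including private
 * data) is present, then move the QP to RTS with the negotiated attributes.
 * Two MPA v2 corner cases are resolved after the QP transition: an RTR
 * model mismatch and an insufficient IRD both move the QP to TERMINATE and
 * report the failure to the ULP through connect_reply_upcall().
 */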
  1034. static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
  1035. {
  1036. struct mpa_message *mpa;
  1037. struct mpa_v2_conn_params *mpa_v2_params;
  1038. u16 plen;
  1039. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  1040. if (state_read(&ep->com) != MPA_REQ_WAIT)
  1041. return;
  1042. /*
  1043. * If we get more than the supported amount of private data
  1044. * then we must fail this connection.
  1045. */
  1046. if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
  1047. stop_ep_timer(ep);
  1048. abort_connection(ep, skb, GFP_KERNEL);
  1049. return;
  1050. }
  1051. PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
  1052. /*
  1053. * Copy the new data into our accumulation buffer.
  1054. */
  1055. skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
  1056. skb->len);
  1057. ep->mpa_pkt_len += skb->len;
  1058. /*
  1059. * If we don't even have the mpa message, then bail.
  1060. * We'll continue processing when more data arrives.
  1061. */
  1062. if (ep->mpa_pkt_len < sizeof(*mpa))
  1063. return;
  1064. PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
  1065. stop_ep_timer(ep);
  1066. mpa = (struct mpa_message *) ep->mpa_pkt;
  1067. /*
  1068. * Validate MPA Header.
  1069. */
  1070. if (mpa->revision > mpa_rev) {
  1071. printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
  1072. " Received = %d\n", __func__, mpa_rev, mpa->revision);
  1073. abort_connection(ep, skb, GFP_KERNEL);
  1074. return;
  1075. }
  1076. if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
  1077. abort_connection(ep, skb, GFP_KERNEL);
  1078. return;
  1079. }
  1080. plen = ntohs(mpa->private_data_size);
  1081. /*
  1082. * Fail if there's too much private data.
  1083. */
  1084. if (plen > MPA_MAX_PRIVATE_DATA) {
  1085. abort_connection(ep, skb, GFP_KERNEL);
  1086. return;
  1087. }
  1088. /*
  1089. * Fail if plen does not account for the accumulated packet size.
  1090. */
  1091. if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
  1092. abort_connection(ep, skb, GFP_KERNEL);
  1093. return;
  1094. }
  1095. ep->plen = (u8) plen;
  1096. /*
  1097. * If we don't have all the pdata yet, then bail.
  1098. */
  1099. if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
  1100. return;
  1101. /*
  1102. * If we get here we have accumulated the entire mpa
  1103. * start request message including private data.
  1104. */
  1105. ep->mpa_attr.initiator = 0;
  1106. ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) || crc_enabled) ? 1 : 0;
  1107. ep->mpa_attr.recv_marker_enabled = markers_enabled;
  1108. ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
  1109. ep->mpa_attr.version = mpa->revision;
  1110. if (mpa->revision == 1)
  1111. ep->tried_with_mpa_v1 = 1;
  1112. ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
  1113. if (mpa->revision == 2) {
  1114. ep->mpa_attr.enhanced_rdma_conn =
  1115. mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
  1116. if (ep->mpa_attr.enhanced_rdma_conn) {
  1117. mpa_v2_params = (struct mpa_v2_conn_params *)
  1118. (ep->mpa_pkt + sizeof(*mpa));
  1119. ep->ird = ntohs(mpa_v2_params->ird) &
  1120. MPA_V2_IRD_ORD_MASK;
  1121. ep->ord = ntohs(mpa_v2_params->ord) &
  1122. MPA_V2_IRD_ORD_MASK;
  1123. if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
  1124. if (peer2peer) {
  1125. if (ntohs(mpa_v2_params->ord) &
  1126. MPA_V2_RDMA_WRITE_RTR)
  1127. ep->mpa_attr.p2p_type =
  1128. FW_RI_INIT_P2PTYPE_RDMA_WRITE;
  1129. else if (ntohs(mpa_v2_params->ord) &
  1130. MPA_V2_RDMA_READ_RTR)
  1131. ep->mpa_attr.p2p_type =
  1132. FW_RI_INIT_P2PTYPE_READ_REQ;
  1133. }
  1134. }
  1135. } else if (mpa->revision == 1)
  1136. if (peer2peer)
  1137. ep->mpa_attr.p2p_type = p2p_type;
  1138. PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
  1139. "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
  1140. ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
  1141. ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
  1142. ep->mpa_attr.p2p_type);
  1143. state_set(&ep->com, MPA_REQ_RCVD);
  1144. /* drive upcall */
  1145. connect_request_upcall(ep);
  1146. return;
  1147. }
  1148. static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
  1149. {
  1150. struct c4iw_ep *ep;
  1151. struct cpl_rx_data *hdr = cplhdr(skb);
  1152. unsigned int dlen = ntohs(hdr->len);
  1153. unsigned int tid = GET_TID(hdr);
  1154. struct tid_info *t = dev->rdev.lldi.tids;
  1155. ep = lookup_tid(t, tid);
  1156. PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
  1157. skb_pull(skb, sizeof(*hdr));
  1158. skb_trim(skb, dlen);
  1159. ep->rcv_seq += dlen;
  1160. BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
  1161. /* update RX credits */
  1162. update_rx_credits(ep, dlen);
  1163. switch (state_read(&ep->com)) {
  1164. case MPA_REQ_SENT:
  1165. process_mpa_reply(ep, skb);
  1166. break;
  1167. case MPA_REQ_WAIT:
  1168. process_mpa_request(ep, skb);
  1169. break;
  1170. case MPA_REP_SENT:
  1171. break;
  1172. default:
  1173. printk(KERN_ERR MOD "%s Unexpected streaming data."
  1174. " ep %p state %d tid %u\n",
  1175. __func__, ep, state_read(&ep->com), ep->hwtid);
  1176. /*
  1177. * The ep will timeout and inform the ULP of the failure.
  1178. * See ep_timeout().
  1179. */
  1180. break;
  1181. }
  1182. return 0;
  1183. }
  1184. static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
  1185. {
  1186. struct c4iw_ep *ep;
  1187. struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
  1188. int release = 0;
  1189. unsigned int tid = GET_TID(rpl);
  1190. struct tid_info *t = dev->rdev.lldi.tids;
  1191. ep = lookup_tid(t, tid);
  1192. if (!ep) {
  1193. printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
  1194. return 0;
  1195. }
  1196. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  1197. mutex_lock(&ep->com.mutex);
  1198. switch (ep->com.state) {
  1199. case ABORTING:
  1200. __state_set(&ep->com, DEAD);
  1201. release = 1;
  1202. break;
  1203. default:
  1204. printk(KERN_ERR "%s ep %p state %d\n",
  1205. __func__, ep, ep->com.state);
  1206. break;
  1207. }
  1208. mutex_unlock(&ep->com.mutex);
  1209. if (release)
  1210. release_ep_resources(ep);
  1211. return 0;
  1212. }
  1213. /*
  1214. * Return whether a failed active open has allocated a TID
  1215. */
  1216. static inline int act_open_has_tid(int status)
  1217. {
  1218. return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
  1219. status != CPL_ERR_ARP_MISS;
  1220. }
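/*
 * TCAM-full, connection-exists and ARP-miss failures are reported before
 * the hardware commits a tid, so act_open_rpl() below only calls
 * cxgb4_remove_tid() for the remaining failure statuses.
 */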
static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct c4iw_ep *ep;
        struct cpl_act_open_rpl *rpl = cplhdr(skb);
        unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
                                        ntohl(rpl->atid_status)));
        struct tid_info *t = dev->rdev.lldi.tids;
        int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));

        ep = lookup_atid(t, atid);

        PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
             status, status2errno(status));

        if (status == CPL_ERR_RTX_NEG_ADVICE) {
                printk(KERN_WARNING MOD "Connection problems for atid %u\n",
                       atid);
                return 0;
        }

        /*
         * Log interesting failures.
         */
        switch (status) {
        case CPL_ERR_CONN_RESET:
        case CPL_ERR_CONN_TIMEDOUT:
                break;
        default:
                printk(KERN_INFO MOD "Active open failure - "
                       "atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
                       atid, status, status2errno(status),
                       &ep->com.local_addr.sin_addr.s_addr,
                       ntohs(ep->com.local_addr.sin_port),
                       &ep->com.remote_addr.sin_addr.s_addr,
                       ntohs(ep->com.remote_addr.sin_port));
                break;
        }

        connect_reply_upcall(ep, status2errno(status));
        state_set(&ep->com, DEAD);

        if (status && act_open_has_tid(status))
                cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));

        cxgb4_free_atid(t, atid);
        dst_release(ep->dst);
        cxgb4_l2t_release(ep->l2t);
        c4iw_put_ep(&ep->com);

        return 0;
}

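/*
 * PASS_OPEN_RPL handler: wakes up c4iw_create_listen(), which is blocked
 * in c4iw_wait_for_reply() waiting for the server TID to be established.
 */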
static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_pass_open_rpl *rpl = cplhdr(skb);
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int stid = GET_TID(rpl);
        struct c4iw_listen_ep *ep = lookup_stid(t, stid);

        if (!ep) {
                printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
                return 0;
        }
        PDBG("%s ep %p status %d error %d\n", __func__, ep,
             rpl->status, status2errno(rpl->status));
        c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));

        return 0;
}

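/*
 * Send a CLOSE_LISTSRV_REQ for this listening endpoint's stid. The
 * matching CLOSE_LISTSRV_RPL is handled below and wakes the waiter in
 * c4iw_destroy_listen().
 */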
static int listen_stop(struct c4iw_listen_ep *ep)
{
        struct sk_buff *skb;
        struct cpl_close_listsvr_req *req;

        PDBG("%s ep %p\n", __func__, ep);
        skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
        if (!skb) {
                printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
                return -ENOMEM;
        }
        req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
        INIT_TP_WR(req, 0);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
                                                    ep->stid));
        req->reply_ctrl = cpu_to_be16(
                          QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
        set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
        return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int stid = GET_TID(rpl);
        struct c4iw_listen_ep *ep = lookup_stid(t, stid);

        PDBG("%s ep %p\n", __func__, ep);
        c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
        return 0;
}

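/*
 * Build and send the PASS_ACCEPT_RPL that accepts an incoming connection
 * request. opt0/opt2 carry the TCP and RSS options (MSS index, window
 * scale, timestamps, SACK, queue selection) derived from the module
 * parameters and the peer's advertised TCP options.
 */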
static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
                      struct cpl_pass_accept_req *req)
{
        struct cpl_pass_accept_rpl *rpl;
        unsigned int mtu_idx;
        u64 opt0;
        u32 opt2;
        int wscale;

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        BUG_ON(skb_cloned(skb));
        skb_trim(skb, sizeof(*rpl));
        skb_get(skb);
        cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
        wscale = compute_wscale(rcv_win);
        opt0 = KEEP_ALIVE(1) |
               DELACK(1) |
               WND_SCALE(wscale) |
               MSS_IDX(mtu_idx) |
               L2T_IDX(ep->l2t->idx) |
               TX_CHAN(ep->tx_chan) |
               SMAC_SEL(ep->smac_idx) |
               DSCP(ep->tos) |
               ULP_MODE(ULP_MODE_TCPDDP) |
               RCV_BUFSIZ(rcv_win>>10);
        opt2 = RX_CHANNEL(0) |
               RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);

        if (enable_tcp_timestamps && req->tcpopt.tstamp)
                opt2 |= TSTAMPS_EN(1);
        if (enable_tcp_sack && req->tcpopt.sack)
                opt2 |= SACK_EN(1);
        if (wscale && enable_tcp_window_scaling)
                opt2 |= WND_SCALE_EN(1);

        rpl = cplhdr(skb);
        INIT_TP_WR(rpl, ep->hwtid);
        OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
                                                    ep->hwtid));
        rpl->opt0 = cpu_to_be64(opt0);
        rpl->opt2 = cpu_to_be32(opt2);
        set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
        c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);

        return;
}

static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
                      struct sk_buff *skb)
{
        PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
             peer_ip);
        BUG_ON(skb_cloned(skb));
        skb_trim(skb, sizeof(struct cpl_tid_release));
        skb_get(skb);
        release_tid(&dev->rdev, hwtid, skb);
        return;
}

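/*
 * Extract the TCP 4-tuple from the packed Ethernet/IP/TCP headers that
 * follow the PASS_ACCEPT_REQ CPL message.
 */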
static void get_4tuple(struct cpl_pass_accept_req *req,
                       __be32 *local_ip, __be32 *peer_ip,
                       __be16 *local_port, __be16 *peer_port)
{
        int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
        int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
        struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
        struct tcphdr *tcp = (struct tcphdr *)
                             ((u8 *)(req + 1) + eth_len + ip_len);

        PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
             ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
             ntohs(tcp->dest));

        *peer_ip = ip->saddr;
        *local_ip = ip->daddr;
        *peer_port = tcp->source;
        *local_port = tcp->dest;

        return;
}

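/*
 * Resolve the neighbour for the destination and fill in the endpoint's
 * transmit resources (L2T entry, MTU, TX channel, SMAC index, TX/RSS
 * queue indices). Loopback destinations are resolved via ip_dev_find();
 * clear_mpa_v1 resets the MPA version retry flags for a fresh connect.
 */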
static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
                     struct c4iw_dev *cdev, bool clear_mpa_v1)
{
        struct neighbour *n;
        int err, step;

        n = dst_neigh_lookup(dst, &peer_ip);
        if (!n)
                return -ENODEV;

        rcu_read_lock();
        err = -ENOMEM;
        if (n->dev->flags & IFF_LOOPBACK) {
                struct net_device *pdev;

                pdev = ip_dev_find(&init_net, peer_ip);
                if (!pdev) {
                        err = -ENODEV;
                        goto out;
                }
                ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
                                        n, pdev, 0);
                if (!ep->l2t)
                        goto out;
                ep->mtu = pdev->mtu;
                ep->tx_chan = cxgb4_port_chan(pdev);
                ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
                step = cdev->rdev.lldi.ntxq /
                       cdev->rdev.lldi.nchan;
                ep->txq_idx = cxgb4_port_idx(pdev) * step;
                step = cdev->rdev.lldi.nrxq /
                       cdev->rdev.lldi.nchan;
                ep->ctrlq_idx = cxgb4_port_idx(pdev);
                ep->rss_qid = cdev->rdev.lldi.rxq_ids[
                              cxgb4_port_idx(pdev) * step];
                dev_put(pdev);
        } else {
                ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
                                        n, n->dev, 0);
                if (!ep->l2t)
                        goto out;
                ep->mtu = dst_mtu(dst);
                ep->tx_chan = cxgb4_port_chan(n->dev);
                ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
                step = cdev->rdev.lldi.ntxq /
                       cdev->rdev.lldi.nchan;
                ep->txq_idx = cxgb4_port_idx(n->dev) * step;
                ep->ctrlq_idx = cxgb4_port_idx(n->dev);
                step = cdev->rdev.lldi.nrxq /
                       cdev->rdev.lldi.nchan;
                ep->rss_qid = cdev->rdev.lldi.rxq_ids[
                              cxgb4_port_idx(n->dev) * step];

                if (clear_mpa_v1) {
                        ep->retry_with_mpa_v1 = 0;
                        ep->tried_with_mpa_v1 = 0;
                }
        }
        err = 0;
out:
        rcu_read_unlock();
        neigh_release(n);

        return err;
}

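/*
 * PASS_ACCEPT_REQ handler: an incoming connection request matched one of
 * our server TIDs. Allocate and initialize a child endpoint, resolve its
 * route and L2T entry, insert the hardware TID, and reply with accept_cr().
 * Any failure along the way rejects the request by releasing the TID.
 */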
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct c4iw_ep *child_ep, *parent_ep;
        struct cpl_pass_accept_req *req = cplhdr(skb);
        unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int hwtid = GET_TID(req);
        struct dst_entry *dst;
        struct rtable *rt;
        __be32 local_ip, peer_ip;
        __be16 local_port, peer_port;
        int err;

        parent_ep = lookup_stid(t, stid);
        PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);

        get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);

        if (state_read(&parent_ep->com) != LISTEN) {
                printk(KERN_ERR "%s - listening ep not in LISTEN\n",
                       __func__);
                goto reject;
        }

        /* Find output route */
        rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
                        GET_POPEN_TOS(ntohl(req->tos_stid)));
        if (!rt) {
                printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
                       __func__);
                goto reject;
        }
        dst = &rt->dst;

        child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
        if (!child_ep) {
                printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
                       __func__);
                dst_release(dst);
                goto reject;
        }

        err = import_ep(child_ep, peer_ip, dst, dev, false);
        if (err) {
                printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
                       __func__);
                dst_release(dst);
                kfree(child_ep);
                goto reject;
        }

        state_set(&child_ep->com, CONNECTING);
        child_ep->com.dev = dev;
        child_ep->com.cm_id = NULL;
        child_ep->com.local_addr.sin_family = PF_INET;
        child_ep->com.local_addr.sin_port = local_port;
        child_ep->com.local_addr.sin_addr.s_addr = local_ip;
        child_ep->com.remote_addr.sin_family = PF_INET;
        child_ep->com.remote_addr.sin_port = peer_port;
        child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
        c4iw_get_ep(&parent_ep->com);
        child_ep->parent_ep = parent_ep;
        child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
        child_ep->dst = dst;
        child_ep->hwtid = hwtid;

        PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
             child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);

        init_timer(&child_ep->timer);
        cxgb4_insert_tid(t, child_ep, hwtid);
        accept_cr(child_ep, peer_ip, skb, req);
        goto out;
reject:
        reject_cr(dev, hwtid, peer_ip, skb);
out:
        return 0;
}

static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct c4iw_ep *ep;
        struct cpl_pass_establish *req = cplhdr(skb);
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int tid = GET_TID(req);

        ep = lookup_tid(t, tid);
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        ep->snd_seq = be32_to_cpu(req->snd_isn);
        ep->rcv_seq = be32_to_cpu(req->rcv_isn);

        set_emss(ep, ntohs(req->tcp_opt));

        dst_confirm(ep->dst);
        state_set(&ep->com, MPA_REQ_WAIT);
        start_ep_timer(ep);
        send_flowc(ep, skb);

        return 0;
}

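/*
 * PEER_CLOSE handler: the peer has closed its half of the connection.
 * Drive the endpoint state machine towards CLOSING/MORIBUND/DEAD, move
 * the QP to CLOSING or IDLE as appropriate, and issue the matching
 * upcalls to the ULP.
 */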
static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_peer_close *hdr = cplhdr(skb);
        struct c4iw_ep *ep;
        struct c4iw_qp_attributes attrs;
        int disconnect = 1;
        int release = 0;
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int tid = GET_TID(hdr);
        int ret;

        ep = lookup_tid(t, tid);
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        dst_confirm(ep->dst);

        mutex_lock(&ep->com.mutex);
        switch (ep->com.state) {
        case MPA_REQ_WAIT:
                __state_set(&ep->com, CLOSING);
                break;
        case MPA_REQ_SENT:
                __state_set(&ep->com, CLOSING);
                connect_reply_upcall(ep, -ECONNRESET);
                break;
        case MPA_REQ_RCVD:

                /*
                 * We're gonna mark this puppy DEAD, but keep
                 * the reference on it until the ULP accepts or
                 * rejects the CR. Also wake up anyone waiting
                 * in rdma connection migration (see c4iw_accept_cr()).
                 */
                __state_set(&ep->com, CLOSING);
                PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
                c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
                break;
        case MPA_REP_SENT:
                __state_set(&ep->com, CLOSING);
                PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
                c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
                break;
        case FPDU_MODE:
                start_ep_timer(ep);
                __state_set(&ep->com, CLOSING);
                attrs.next_state = C4IW_QP_STATE_CLOSING;
                ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
                                     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
                if (ret != -ECONNRESET) {
                        peer_close_upcall(ep);
                        disconnect = 1;
                }
                break;
        case ABORTING:
                disconnect = 0;
                break;
        case CLOSING:
                __state_set(&ep->com, MORIBUND);
                disconnect = 0;
                break;
        case MORIBUND:
                stop_ep_timer(ep);
                if (ep->com.cm_id && ep->com.qp) {
                        attrs.next_state = C4IW_QP_STATE_IDLE;
                        c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
                                       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
                }
                close_complete_upcall(ep);
                __state_set(&ep->com, DEAD);
                release = 1;
                disconnect = 0;
                break;
        case DEAD:
                disconnect = 0;
                break;
        default:
                BUG_ON(1);
        }
        mutex_unlock(&ep->com.mutex);
        if (disconnect)
                c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
        if (release)
                release_ep_resources(ep);
        return 0;
}

/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 */
static int is_neg_adv_abort(unsigned int status)
{
        return status == CPL_ERR_RTX_NEG_ADVICE ||
               status == CPL_ERR_PERSIST_NEG_ADVICE;
}

static int c4iw_reconnect(struct c4iw_ep *ep)
{
        struct rtable *rt;
        int err = 0;

        PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
        init_timer(&ep->timer);

        /*
         * Allocate an active TID to initiate a TCP connection.
         */
        ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
        if (ep->atid == -1) {
                printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
                err = -ENOMEM;
                goto fail2;
        }

        /* find a route */
        rt = find_route(ep->com.dev,
                        ep->com.cm_id->local_addr.sin_addr.s_addr,
                        ep->com.cm_id->remote_addr.sin_addr.s_addr,
                        ep->com.cm_id->local_addr.sin_port,
                        ep->com.cm_id->remote_addr.sin_port, 0);
        if (!rt) {
                printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
                err = -EHOSTUNREACH;
                goto fail3;
        }
        ep->dst = &rt->dst;

        err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr,
                        ep->dst, ep->com.dev, false);
        if (err) {
                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
                goto fail4;
        }

        PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
             __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
             ep->l2t->idx);

        state_set(&ep->com, CONNECTING);
        ep->tos = 0;

        /* send connect request to rnic */
        err = send_connect(ep);
        if (!err)
                goto out;

        cxgb4_l2t_release(ep->l2t);
fail4:
        dst_release(ep->dst);
fail3:
        cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
        /*
         * Remember to send notification to the upper layer.
         * We are in here, so the upper layer is not aware that this is
         * a re-connect attempt and is still waiting for the response to
         * the first connect request.
         */
        connect_reply_upcall(ep, -ECONNRESET);
        c4iw_put_ep(&ep->com);
out:
        return err;
}

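/*
 * ABORT_REQ_RSS handler: the peer reset the connection. Negative advice
 * is ignored. Otherwise wake any waiters, move the QP to ERROR if one is
 * bound, send an ABORT_RPL, and either release the endpoint or retry the
 * connection with MPA v1 if an enhanced (v2) exchange just failed.
 */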
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_abort_req_rss *req = cplhdr(skb);
        struct c4iw_ep *ep;
        struct cpl_abort_rpl *rpl;
        struct sk_buff *rpl_skb;
        struct c4iw_qp_attributes attrs;
        int ret;
        int release = 0;
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int tid = GET_TID(req);

        ep = lookup_tid(t, tid);
        if (is_neg_adv_abort(req->status)) {
                PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
                     ep->hwtid);
                return 0;
        }
        PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
             ep->com.state);

        /*
         * Wake up any threads in rdma_init() or rdma_fini().
         * However, this is not needed if com state is just
         * MPA_REQ_SENT
         */
        if (ep->com.state != MPA_REQ_SENT)
                c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);

        mutex_lock(&ep->com.mutex);
        switch (ep->com.state) {
        case CONNECTING:
                break;
        case MPA_REQ_WAIT:
                stop_ep_timer(ep);
                break;
        case MPA_REQ_SENT:
                stop_ep_timer(ep);
                if (mpa_rev == 2 && ep->tried_with_mpa_v1)
                        connect_reply_upcall(ep, -ECONNRESET);
                else {
                        /*
                         * we just don't send notification upwards because we
                         * want to retry with mpa_v1 without upper layers even
                         * knowing it.
                         *
                         * do some housekeeping so as to re-initiate the
                         * connection
                         */
                        PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
                             mpa_rev);
                        ep->retry_with_mpa_v1 = 1;
                }
                break;
        case MPA_REP_SENT:
                break;
        case MPA_REQ_RCVD:
                break;
        case MORIBUND:
        case CLOSING:
                stop_ep_timer(ep);
                /*FALLTHROUGH*/
        case FPDU_MODE:
                if (ep->com.cm_id && ep->com.qp) {
                        attrs.next_state = C4IW_QP_STATE_ERROR;
                        ret = c4iw_modify_qp(ep->com.qp->rhp,
                                     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
                                     &attrs, 1);
                        if (ret)
                                printk(KERN_ERR MOD
                                       "%s - qp <- error failed!\n",
                                       __func__);
                }
                peer_abort_upcall(ep);
                break;
        case ABORTING:
                break;
        case DEAD:
                PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
                mutex_unlock(&ep->com.mutex);
                return 0;
        default:
                BUG_ON(1);
                break;
        }
        dst_confirm(ep->dst);
        if (ep->com.state != ABORTING) {
                __state_set(&ep->com, DEAD);
                /* we don't release if we want to retry with mpa_v1 */
                if (!ep->retry_with_mpa_v1)
                        release = 1;
        }
        mutex_unlock(&ep->com.mutex);

        rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
        if (!rpl_skb) {
                printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
                       __func__);
                release = 1;
                goto out;
        }
        set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
        rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
        INIT_TP_WR(rpl, ep->hwtid);
        OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
        rpl->cmd = CPL_ABORT_NO_RST;
        c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
        if (release)
                release_ep_resources(ep);

        /* retry with mpa-v1 */
        if (ep && ep->retry_with_mpa_v1) {
                cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
                dst_release(ep->dst);
                cxgb4_l2t_release(ep->l2t);
                c4iw_reconnect(ep);
        }

        return 0;
}

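/*
 * CLOSE_CON_RPL handler: our half-close has completed. Advance
 * CLOSING -> MORIBUND, or finish a MORIBUND endpoint by idling the QP,
 * signalling close_complete_upcall() and releasing its resources.
 */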
static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct c4iw_ep *ep;
        struct c4iw_qp_attributes attrs;
        struct cpl_close_con_rpl *rpl = cplhdr(skb);
        int release = 0;
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int tid = GET_TID(rpl);

        ep = lookup_tid(t, tid);

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        BUG_ON(!ep);

        /* The cm_id may be null if we failed to connect */
        mutex_lock(&ep->com.mutex);
        switch (ep->com.state) {
        case CLOSING:
                __state_set(&ep->com, MORIBUND);
                break;
        case MORIBUND:
                stop_ep_timer(ep);
                if ((ep->com.cm_id) && (ep->com.qp)) {
                        attrs.next_state = C4IW_QP_STATE_IDLE;
                        c4iw_modify_qp(ep->com.qp->rhp,
                                       ep->com.qp,
                                       C4IW_QP_ATTR_NEXT_STATE,
                                       &attrs, 1);
                }
                close_complete_upcall(ep);
                __state_set(&ep->com, DEAD);
                release = 1;
                break;
        case ABORTING:
        case DEAD:
                break;
        default:
                BUG_ON(1);
                break;
        }
        mutex_unlock(&ep->com.mutex);
        if (release)
                release_ep_resources(ep);
        return 0;
}

static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_rdma_terminate *rpl = cplhdr(skb);
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int tid = GET_TID(rpl);
        struct c4iw_ep *ep;
        struct c4iw_qp_attributes attrs;

        ep = lookup_tid(t, tid);
        BUG_ON(!ep);

        if (ep && ep->com.qp) {
                printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
                       ep->com.qp->wq.sq.qid);
                attrs.next_state = C4IW_QP_STATE_TERMINATE;
                c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
                               C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
        } else
                printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);

        return 0;
}

/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply. We can now free
 * the skb holding the mpa message.
 */
static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct c4iw_ep *ep;
        struct cpl_fw4_ack *hdr = cplhdr(skb);
        u8 credits = hdr->credits;
        unsigned int tid = GET_TID(hdr);
        struct tid_info *t = dev->rdev.lldi.tids;

        ep = lookup_tid(t, tid);
        PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
        if (credits == 0) {
                PDBG("%s 0 credit ack ep %p tid %u state %u\n",
                     __func__, ep, ep->hwtid, state_read(&ep->com));
                return 0;
        }

        dst_confirm(ep->dst);
        if (ep->mpa_skb) {
                PDBG("%s last streaming msg ack ep %p tid %u state %u "
                     "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
                     state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
                kfree_skb(ep->mpa_skb);
                ep->mpa_skb = NULL;
        }
        return 0;
}

int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
        int err;
        struct c4iw_ep *ep = to_ep(cm_id);

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

        if (state_read(&ep->com) == DEAD) {
                c4iw_put_ep(&ep->com);
                return -ECONNRESET;
        }
        BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
        if (mpa_rev == 0)
                abort_connection(ep, NULL, GFP_KERNEL);
        else {
                err = send_mpa_reject(ep, pdata, pdata_len);
                err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
        }
        c4iw_put_ep(&ep->com);
        return 0;
}

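/*
 * iw_cm accept entry point. Validate and negotiate IRD/ORD (including the
 * MPA v2 enhanced-RDMA rules), bind the QP to the endpoint, move the QP to
 * RTS and send the MPA reply. On success the endpoint enters FPDU_MODE.
 */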
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
        int err;
        struct c4iw_qp_attributes attrs;
        enum c4iw_qp_attr_mask mask;
        struct c4iw_ep *ep = to_ep(cm_id);
        struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
        struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

        if (state_read(&ep->com) == DEAD) {
                err = -ECONNRESET;
                goto err;
        }

        BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
        BUG_ON(!qp);

        if ((conn_param->ord > c4iw_max_read_depth) ||
            (conn_param->ird > c4iw_max_read_depth)) {
                abort_connection(ep, NULL, GFP_KERNEL);
                err = -EINVAL;
                goto err;
        }

        if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
                if (conn_param->ord > ep->ird) {
                        ep->ird = conn_param->ird;
                        ep->ord = conn_param->ord;
                        send_mpa_reject(ep, conn_param->private_data,
                                        conn_param->private_data_len);
                        abort_connection(ep, NULL, GFP_KERNEL);
                        err = -ENOMEM;
                        goto err;
                }
                if (conn_param->ird > ep->ord) {
                        if (!ep->ord)
                                conn_param->ird = 1;
                        else {
                                abort_connection(ep, NULL, GFP_KERNEL);
                                err = -ENOMEM;
                                goto err;
                        }
                }
        }
        ep->ird = conn_param->ird;
        ep->ord = conn_param->ord;

        if (ep->mpa_attr.version != 2)
                if (peer2peer && ep->ird == 0)
                        ep->ird = 1;

        PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

        cm_id->add_ref(cm_id);
        ep->com.cm_id = cm_id;
        ep->com.qp = qp;

        /* bind QP to EP and move to RTS */
        attrs.mpa_attr = ep->mpa_attr;
        attrs.max_ird = ep->ird;
        attrs.max_ord = ep->ord;
        attrs.llp_stream_handle = ep;
        attrs.next_state = C4IW_QP_STATE_RTS;

        /* bind QP and TID with INIT_WR */
        mask = C4IW_QP_ATTR_NEXT_STATE |
               C4IW_QP_ATTR_LLP_STREAM_HANDLE |
               C4IW_QP_ATTR_MPA_ATTR |
               C4IW_QP_ATTR_MAX_IRD |
               C4IW_QP_ATTR_MAX_ORD;

        err = c4iw_modify_qp(ep->com.qp->rhp,
                             ep->com.qp, mask, &attrs, 1);
        if (err)
                goto err1;
        err = send_mpa_reply(ep, conn_param->private_data,
                             conn_param->private_data_len);
        if (err)
                goto err1;

        state_set(&ep->com, FPDU_MODE);
        established_upcall(ep);
        c4iw_put_ep(&ep->com);
        return 0;
err1:
        ep->com.cm_id = NULL;
        ep->com.qp = NULL;
        cm_id->rem_ref(cm_id);
err:
        c4iw_put_ep(&ep->com);
        return err;
}

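/*
 * iw_cm connect entry point (active open). Allocate an endpoint and atid,
 * resolve the route and L2T entry for the destination, then issue the
 * connect request to the adapter via send_connect().
 */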
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
        struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
        struct c4iw_ep *ep;
        struct rtable *rt;
        int err = 0;

        if ((conn_param->ord > c4iw_max_read_depth) ||
            (conn_param->ird > c4iw_max_read_depth)) {
                err = -EINVAL;
                goto out;
        }
        ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
        if (!ep) {
                printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
                err = -ENOMEM;
                goto out;
        }
        init_timer(&ep->timer);
        ep->plen = conn_param->private_data_len;
        if (ep->plen)
                memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
                       conn_param->private_data, ep->plen);
        ep->ird = conn_param->ird;
        ep->ord = conn_param->ord;

        if (peer2peer && ep->ord == 0)
                ep->ord = 1;

        cm_id->add_ref(cm_id);
        ep->com.dev = dev;
        ep->com.cm_id = cm_id;
        ep->com.qp = get_qhp(dev, conn_param->qpn);
        BUG_ON(!ep->com.qp);
        PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
             ep->com.qp, cm_id);

        /*
         * Allocate an active TID to initiate a TCP connection.
         */
        ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
        if (ep->atid == -1) {
                printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
                err = -ENOMEM;
                goto fail2;
        }

        PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
             ntohl(cm_id->local_addr.sin_addr.s_addr),
             ntohs(cm_id->local_addr.sin_port),
             ntohl(cm_id->remote_addr.sin_addr.s_addr),
             ntohs(cm_id->remote_addr.sin_port));

        /* find a route */
        rt = find_route(dev,
                        cm_id->local_addr.sin_addr.s_addr,
                        cm_id->remote_addr.sin_addr.s_addr,
                        cm_id->local_addr.sin_port,
                        cm_id->remote_addr.sin_port, 0);
        if (!rt) {
                printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
                err = -EHOSTUNREACH;
                goto fail3;
        }
        ep->dst = &rt->dst;

        err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr,
                        ep->dst, ep->com.dev, true);
        if (err) {
                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
                goto fail4;
        }

        PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
             __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
             ep->l2t->idx);

        state_set(&ep->com, CONNECTING);
        ep->tos = 0;
        ep->com.local_addr = cm_id->local_addr;
        ep->com.remote_addr = cm_id->remote_addr;

        /* send connect request to rnic */
        err = send_connect(ep);
        if (!err)
                goto out;

        cxgb4_l2t_release(ep->l2t);
fail4:
        dst_release(ep->dst);
fail3:
        cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
        cm_id->rem_ref(cm_id);
        c4iw_put_ep(&ep->com);
out:
        return err;
}

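/*
 * iw_cm listen entry point. Allocate a listening endpoint and server TID,
 * create the hardware server and block until pass_open_rpl() reports the
 * result.
 */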
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
        int err = 0;
        struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
        struct c4iw_listen_ep *ep;

        might_sleep();

        ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
        if (!ep) {
                printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
                err = -ENOMEM;
                goto fail1;
        }
        PDBG("%s ep %p\n", __func__, ep);
        cm_id->add_ref(cm_id);
        ep->com.cm_id = cm_id;
        ep->com.dev = dev;
        ep->backlog = backlog;
        ep->com.local_addr = cm_id->local_addr;

        /*
         * Allocate a server TID.
         */
        ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
        if (ep->stid == -1) {
                printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
                err = -ENOMEM;
                goto fail2;
        }

        state_set(&ep->com, LISTEN);
        c4iw_init_wr_wait(&ep->com.wr_wait);
        err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
                                  ep->com.local_addr.sin_addr.s_addr,
                                  ep->com.local_addr.sin_port,
                                  ep->com.dev->rdev.lldi.rxq_ids[0]);
        if (err)
                goto fail3;

        /* wait for pass_open_rpl */
        err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
                                  __func__);
        if (!err) {
                cm_id->provider_data = ep;
                goto out;
        }
fail3:
        cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
fail2:
        cm_id->rem_ref(cm_id);
        c4iw_put_ep(&ep->com);
fail1:
out:
        return err;
}

int c4iw_destroy_listen(struct iw_cm_id *cm_id)
{
        int err;
        struct c4iw_listen_ep *ep = to_listen_ep(cm_id);

        PDBG("%s ep %p\n", __func__, ep);

        might_sleep();
        state_set(&ep->com, DEAD);
        c4iw_init_wr_wait(&ep->com.wr_wait);
        err = listen_stop(ep);
        if (err)
                goto done;
        err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
                                  __func__);
        cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
done:
        cm_id->rem_ref(cm_id);
        c4iw_put_ep(&ep->com);
        return err;
}

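/*
 * Initiate a close of the connection, abortively if 'abrupt' is set or a
 * fatal adapter error is pending. Returns non-zero only if sending the
 * close/abort itself failed, in which case the endpoint's resources are
 * released here.
 */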
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
        int ret = 0;
        int close = 0;
        int fatal = 0;
        struct c4iw_rdev *rdev;

        mutex_lock(&ep->com.mutex);

        PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
             states[ep->com.state], abrupt);

        rdev = &ep->com.dev->rdev;
        if (c4iw_fatal_error(rdev)) {
                fatal = 1;
                close_complete_upcall(ep);
                ep->com.state = DEAD;
        }
        switch (ep->com.state) {
        case MPA_REQ_WAIT:
        case MPA_REQ_SENT:
        case MPA_REQ_RCVD:
        case MPA_REP_SENT:
        case FPDU_MODE:
                close = 1;
                if (abrupt)
                        ep->com.state = ABORTING;
                else {
                        ep->com.state = CLOSING;
                        start_ep_timer(ep);
                }
                set_bit(CLOSE_SENT, &ep->com.flags);
                break;
        case CLOSING:
                if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
                        close = 1;
                        if (abrupt) {
                                stop_ep_timer(ep);
                                ep->com.state = ABORTING;
                        } else
                                ep->com.state = MORIBUND;
                }
                break;
        case MORIBUND:
        case ABORTING:
        case DEAD:
                PDBG("%s ignoring disconnect ep %p state %u\n",
                     __func__, ep, ep->com.state);
                break;
        default:
                BUG();
                break;
        }

        if (close) {
                if (abrupt) {
                        close_complete_upcall(ep);
                        ret = send_abort(ep, NULL, gfp);
                } else
                        ret = send_halfclose(ep, gfp);
                if (ret)
                        fatal = 1;
        }
        mutex_unlock(&ep->com.mutex);
        if (fatal)
                release_ep_resources(ep);
        return ret;
}

static int async_event(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_fw6_msg *rpl = cplhdr(skb);
        c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
        return 0;
}

/*
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
        [CPL_ACT_ESTABLISH] = act_establish,
        [CPL_ACT_OPEN_RPL] = act_open_rpl,
        [CPL_RX_DATA] = rx_data,
        [CPL_ABORT_RPL_RSS] = abort_rpl,
        [CPL_ABORT_RPL] = abort_rpl,
        [CPL_PASS_OPEN_RPL] = pass_open_rpl,
        [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
        [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
        [CPL_PASS_ESTABLISH] = pass_establish,
        [CPL_PEER_CLOSE] = peer_close,
        [CPL_ABORT_REQ_RSS] = peer_abort,
        [CPL_CLOSE_CON_RPL] = close_con_rpl,
        [CPL_RDMA_TERMINATE] = terminate,
        [CPL_FW4_ACK] = fw4_ack,
        [CPL_FW6_MSG] = async_event
};

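/*
 * Handle an endpoint whose timer expired: abort the connection, report
 * -ETIMEDOUT to the ULP for a pending active open, and move any bound QP
 * to ERROR. Called from the work queue via process_timedout_eps().
 */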
static void process_timeout(struct c4iw_ep *ep)
{
        struct c4iw_qp_attributes attrs;
        int abort = 1;

        mutex_lock(&ep->com.mutex);
        PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
             ep->com.state);
        switch (ep->com.state) {
        case MPA_REQ_SENT:
                __state_set(&ep->com, ABORTING);
                connect_reply_upcall(ep, -ETIMEDOUT);
                break;
        case MPA_REQ_WAIT:
                __state_set(&ep->com, ABORTING);
                break;
        case CLOSING:
        case MORIBUND:
                if (ep->com.cm_id && ep->com.qp) {
                        attrs.next_state = C4IW_QP_STATE_ERROR;
                        c4iw_modify_qp(ep->com.qp->rhp,
                                     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
                                     &attrs, 1);
                }
                __state_set(&ep->com, ABORTING);
                break;
        default:
                WARN(1, "%s unexpected state ep %p tid %u state %u\n",
                     __func__, ep, ep->hwtid, ep->com.state);
                abort = 0;
        }
        mutex_unlock(&ep->com.mutex);
        if (abort)
                abort_connection(ep, NULL, GFP_KERNEL);
        c4iw_put_ep(&ep->com);
}

static void process_timedout_eps(void)
{
        struct c4iw_ep *ep;

        spin_lock_irq(&timeout_lock);
        while (!list_empty(&timeout_list)) {
                struct list_head *tmp;

                tmp = timeout_list.next;
                list_del(tmp);
                spin_unlock_irq(&timeout_lock);
                ep = list_entry(tmp, struct c4iw_ep, entry);
                process_timeout(ep);
                spin_lock_irq(&timeout_lock);
        }
        spin_unlock_irq(&timeout_lock);
}

static void process_work(struct work_struct *work)
{
        struct sk_buff *skb = NULL;
        struct c4iw_dev *dev;
        struct cpl_act_establish *rpl;
        unsigned int opcode;
        int ret;

        while ((skb = skb_dequeue(&rxq))) {
                rpl = cplhdr(skb);
                dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
                opcode = rpl->ot.opcode;

                BUG_ON(!work_handlers[opcode]);
                ret = work_handlers[opcode](dev, skb);
                if (!ret)
                        kfree_skb(skb);
        }
        process_timedout_eps();
}

static DECLARE_WORK(skb_work, process_work);

static void ep_timeout(unsigned long arg)
{
        struct c4iw_ep *ep = (struct c4iw_ep *)arg;

        spin_lock(&timeout_lock);
        list_add_tail(&ep->entry, &timeout_list);
        spin_unlock(&timeout_lock);
        queue_work(workq, &skb_work);
}

/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{
        /*
         * Save dev in the skb->cb area.
         */
        *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

        /*
         * Queue the skb and schedule the worker thread.
         */
        skb_queue_tail(&rxq, skb);
        queue_work(workq, &skb_work);
        return 0;
}

static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

        if (rpl->status != CPL_ERR_NONE) {
                printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
                       "for tid %u\n", rpl->status, GET_TID(rpl));
        }
        kfree_skb(skb);
        return 0;
}

static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_fw6_msg *rpl = cplhdr(skb);
        struct c4iw_wr_wait *wr_waitp;
        int ret;

        PDBG("%s type %u\n", __func__, rpl->type);

        switch (rpl->type) {
        case 1:
                ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
                wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
                PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
                if (wr_waitp)
                        c4iw_wake_up(wr_waitp, ret ? -ret : 0);
                kfree_skb(skb);
                break;
        case 2:
                sched(dev, skb);
                break;
        default:
                printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
                       rpl->type);
                kfree_skb(skb);
                break;
        }
        return 0;
}

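/*
 * ABORT_REQ_RSS handler invoked directly from the LLD upcall path (not on
 * the work queue): filter aborts for unknown TIDs and negative advice,
 * wake any threads blocked in rdma_init()/rdma_fini(), then hand the
 * message to sched() for the full peer_abort() processing.
 */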
static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_abort_req_rss *req = cplhdr(skb);
        struct c4iw_ep *ep;
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int tid = GET_TID(req);

        ep = lookup_tid(t, tid);
        if (!ep) {
                printk(KERN_WARNING MOD
                       "Abort on non-existent endpoint, tid %d\n", tid);
                kfree_skb(skb);
                return 0;
        }
        if (is_neg_adv_abort(req->status)) {
                PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
                     ep->hwtid);
                kfree_skb(skb);
                return 0;
        }
        PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
             ep->com.state);

        /*
         * Wake up any threads in rdma_init() or rdma_fini().
         */
        c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
        sched(dev, skb);
        return 0;
}

/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 */
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
        [CPL_ACT_ESTABLISH] = sched,
        [CPL_ACT_OPEN_RPL] = sched,
        [CPL_RX_DATA] = sched,
        [CPL_ABORT_RPL_RSS] = sched,
        [CPL_ABORT_RPL] = sched,
        [CPL_PASS_OPEN_RPL] = sched,
        [CPL_CLOSE_LISTSRV_RPL] = sched,
        [CPL_PASS_ACCEPT_REQ] = sched,
        [CPL_PASS_ESTABLISH] = sched,
        [CPL_PEER_CLOSE] = sched,
        [CPL_CLOSE_CON_RPL] = sched,
        [CPL_ABORT_REQ_RSS] = peer_abort_intr,
        [CPL_RDMA_TERMINATE] = sched,
        [CPL_FW4_ACK] = sched,
        [CPL_SET_TCB_RPL] = set_tcb_rpl,
        [CPL_FW6_MSG] = fw6_msg
};

int __init c4iw_cm_init(void)
{
        spin_lock_init(&timeout_lock);
        skb_queue_head_init(&rxq);

        workq = create_singlethread_workqueue("iw_cxgb4");
        if (!workq)
                return -ENOMEM;

        return 0;
}

void __exit c4iw_cm_term(void)
{
        WARN_ON(!list_empty(&timeout_list));
        flush_workqueue(workq);
        destroy_workqueue(workq);
}