iwch_cm.c

/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "tcb.h"
#include "cxgb3_offload.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};
int peer2peer = 0;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				   "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		 "1 is spec compliant. (default=1)");

static int markers_enabled = 0;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 32 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");

static unsigned int nocong = 0;
module_param(nocong, uint, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static unsigned int cong_flavor = 1;
module_param(cong_flavor, uint, 0644);
MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");
static struct workqueue_struct *workq;
static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct iwch_ep *ep, int status);
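/*
 * Arm the per-endpoint timer. The running timer holds a reference on
 * the ep; if it is already pending we rearm it and keep the existing
 * reference rather than taking another one.
 */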
static void start_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
		del_timer_sync(&ep->timer);
	} else
		get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}
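/*
 * Stop the per-endpoint timer and drop the reference it held.
 * Warn if the timer was not actually running.
 */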
static void stop_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (!timer_pending(&ep->timer)) {
		WARN(1, "%s timer stopped when it's not running! ep %p state %u\n",
			__func__, ep, ep->com.state);
		return;
	}
	del_timer_sync(&ep->timer);
	put_ep(&ep->com);
}
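/*
 * Send wrappers that free the skb and fail fast with -EIO when the
 * rdev has hit a fatal error, instead of queueing to dead hardware.
 */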
static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
{
	int error = 0;
	struct cxio_rdev *rdev;

	rdev = (struct cxio_rdev *)tdev->ulp;
	if (cxio_fatal_error(rdev)) {
		kfree_skb(skb);
		return -EIO;
	}
	error = l2t_send(tdev, skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error;
}

int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
{
	int error = 0;
	struct cxio_rdev *rdev;

	rdev = (struct cxio_rdev *)tdev->ulp;
	if (cxio_fatal_error(rdev)) {
		kfree_skb(skb);
		return -EIO;
	}
	error = cxgb3_ofld_send(tdev, skb);
	if (error < 0)
		kfree_skb(skb);
	return error;
}

static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	skb->priority = CPL_PRIORITY_SETUP;
	iwch_cxgb3_ofld_send(tdev, skb);
	return;
}

int iwch_quiesce_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);

	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

int iwch_resume_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = 0;

	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}
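/*
 * Derive the effective MSS from the negotiated TCP options: take the
 * MTU-table entry the hardware selected, subtract 40 bytes of IP+TCP
 * headers, and 12 more if timestamps were negotiated. Clamp at 128.
 */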
static void set_emss(struct iwch_ep *ep, u16 opt)
{
	PDBG("%s ep %p opt %u\n", __func__, ep, opt);
	ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
	if (G_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("emss=%d\n", ep->emss);
}

static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
{
	unsigned long flags;
	enum iwch_ep_state state;

	spin_lock_irqsave(&epc->lock, flags);
	state = epc->state;
	spin_unlock_irqrestore(&epc->lock, flags);
	return state;
}

static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	epc->state = new;
}

static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	unsigned long flags;

	spin_lock_irqsave(&epc->lock, flags);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	spin_unlock_irqrestore(&epc->lock, flags);
	return;
}
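/*
 * Allocate a zeroed endpoint and initialize its refcount, lock and
 * wait queue. Callers release it with put_ep()/__free_ep().
 */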
static void *alloc_ep(int size, gfp_t gfp)
{
	struct iwch_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		spin_lock_init(&epc->lock);
		init_waitqueue_head(&epc->waitq);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}

void __free_ep(struct kref *kref)
{
	struct iwch_ep *ep;
	ep = container_of(container_of(kref, struct iwch_ep_common, kref),
			  struct iwch_ep, com);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
		dst_release(ep->dst);
		l2t_release(ep->com.tdev, ep->l2t);
	}
	kfree(ep);
}

static void release_ep_resources(struct iwch_ep *ep)
{
	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	put_ep(&ep->com);
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}

static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi4 fl4;

	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
				   peer_port, local_port, IPPROTO_TCP,
				   tos, 0);
	if (IS_ERR(rt))
		return NULL;
	return rt;
}
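/*
 * Return the index of the largest entry in the adapter's MTU table
 * that does not exceed the given path MTU.
 */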
static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
{
	int i = 0;

	while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
		++i;
	return i;
}

static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
{
	PDBG("%s t3cdev %p\n", __func__, dev);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s t3cdev %p\n", __func__, dev);
	req->cmd = CPL_ABORT_NO_RST;
	iwch_cxgb3_ofld_send(dev, skb);
}
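/*
 * Send a CPL_CLOSE_CON_REQ to begin an orderly half-close of the
 * offloaded TCP connection.
 */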
static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}
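/*
 * Send a CPL_ABORT_REQ that resets the connection. On ARP failure the
 * request is downgraded to the no-RST variant by abort_arp_failure().
 */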
static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(skb, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}
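/*
 * Build and send the CPL_ACT_OPEN_REQ for an active connect, encoding
 * the MTU index, window scale, receive buffer size and congestion
 * control settings into opt0/opt2.
 */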
static int send_connect(struct iwch_ep *ep)
{
	struct cpl_act_open_req *req;
	struct sk_buff *skb;
	u32 opt0h, opt0l, opt2;
	unsigned int mtu_idx;
	int wscale;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
	       V_CONG_CONTROL_FLAVOR(cong_flavor);
	skb->priority = CPL_PRIORITY_SETUP;
	set_arp_failure_handler(skb, act_open_req_arp_failure);

	req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
	req->local_port = ep->com.local_addr.sin_port;
	req->peer_port = ep->com.remote_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
	req->opt0h = htonl(opt0h);
	req->opt0l = htonl(opt0l);
	req->params = 0;
	req->opt2 = htonl(opt2);
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}
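/*
 * Send the MPA start request as streaming-mode TX data, prepending a
 * tx_data_wr work request. The skb is kept referenced until the
 * hardware acks it in tx_ack().
 */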
static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;

	PDBG("%s ep %p pd_len %d\n", __func__, ep, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
		kfree_skb(skb);
		skb = alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
		if (!skb) {
			connect_reply_upcall(ep, -ENOMEM);
			return;
		}
	}
	skb_trim(skb, 0);
	skb_reserve(skb, sizeof(*req));
	skb_put(skb, mpalen);
	skb->priority = CPL_PRIORITY_DATA;
	mpa = (struct mpa_message *) skb->data;
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev;

	if (ep->plen)
		memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	return;
}
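/*
 * Send an MPA start reply carrying the MPA_REJECT flag plus any
 * private data supplied by the ULP.
 */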
static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __func__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(mpalen);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}
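/*
 * Send the accepting MPA start reply, echoing the negotiated CRC and
 * marker settings, and move the endpoint to MPA_REP_SENT.
 */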
static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __func__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}
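/*
 * Active open established: record the hardware TID and initial
 * sequence numbers, free the atid, and kick off MPA negotiation.
 */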
static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);

	PDBG("%s ep %p tid %d\n", __func__, ep, tid);

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);

	ep->snd_seq = ntohl(req->snd_isn);
	ep->rcv_seq = ntohl(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	cxgb3_free_atid(ep->com.tdev, ep->atid);

	/* start MPA negotiation */
	send_mpa_req(ep, skb);

	return 0;
}

static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p\n", __func__, ep);
	state_set(&ep->com, ABORTING);
	send_abort(ep, skb, gfp);
}
static void close_complete_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void peer_close_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

static void peer_abort_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %d\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_reply_upcall(struct iwch_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p status %d\n", __func__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d status %d\n", __func__, ep,
		     ep->hwtid, status);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_request_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.private_data_len = ep->plen;
	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	event.provider_data = ep;
	/*
	 * Until ird/ord negotiation via MPAv2 support is added, send max
	 * supported values
	 */
	event.ird = event.ord = 8;
	if (state_read(&ep->parent_ep->com) != DEAD) {
		get_ep(&ep->com);
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	}
	put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}

static void established_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	/*
	 * Until ird/ord negotiation via MPAv2 support is added, send max
	 * supported values
	 */
	event.ird = event.ord = 8;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}
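/*
 * Return RX credits to the hardware so it can reopen the receive
 * window, forcing an ACK out with them.
 */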
static int update_rx_credits(struct iwch_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;

	PDBG("%s ep %p credits %u\n", __func__, ep, credits);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
	req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
	skb->priority = CPL_PRIORITY_ACK;
	iwch_cxgb3_ofld_send(ep->com.tdev, skb);
	return credits;
}
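/*
 * Accumulate and validate the peer's MPA start reply. Once the full
 * reply (including private data) has arrived, record the negotiated
 * attributes, move the QP to RTS and deliver the connect reply upcall.
 */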
static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p\n", __func__, ep);

	/*
	 * Stop mpa timer. If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision != mpa_rev) {
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.initiator = 1;
	ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	mask = IWCH_QP_ATTR_NEXT_STATE |
	    IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |
	    IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
		iwch_post_zb_read(ep);
	}

	goto out;
err:
	abort_connection(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return;
}
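/*
 * Accumulate and validate the peer's MPA start request. Once the full
 * request has arrived, record the negotiated attributes and deliver
 * the connect request upcall to the listening endpoint.
 */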
static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;

	PDBG("%s ep %p\n", __func__, ep);

	/*
	 * Stop mpa timer. If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision != mpa_rev) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
	return;
}
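/*
 * Streaming-mode RX: feed incoming bytes into the MPA state machine
 * according to the endpoint state, then return the credits.
 */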
static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);

	PDBG("%s ep %p dlen %u\n", __func__, ep, dlen);

	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	ep->rcv_seq += dlen;
	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	case MPA_REP_SENT:
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %d\n",
		       __func__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 * See ep_timeout().
		 */
		break;
	}

	/* update RX credits */
	update_rx_credits(ep, dlen);

	return CPL_RET_BUF_DONE;
}
/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply. We can now free
 * the skb holding the mpa message.
 */
static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_wr_ack *hdr = cplhdr(skb);
	unsigned int credits = ntohs(hdr->credits);
	unsigned long flags;
	int post_zb = 0;

	PDBG("%s ep %p credits %u\n", __func__, ep, credits);

	if (credits == 0) {
		PDBG("%s 0 credit ack ep %p state %u\n",
		     __func__, ep, state_read(&ep->com));
		return CPL_RET_BUF_DONE;
	}

	spin_lock_irqsave(&ep->com.lock, flags);
	BUG_ON(credits != 1);
	dst_confirm(ep->dst);
	if (!ep->mpa_skb) {
		PDBG("%s rdma_init wr_ack ep %p state %u\n",
		     __func__, ep, ep->com.state);
		if (ep->mpa_attr.initiator) {
			PDBG("%s initiator ep %p state %u\n",
			     __func__, ep, ep->com.state);
			if (peer2peer && ep->com.state == FPDU_MODE)
				post_zb = 1;
		} else {
			PDBG("%s responder ep %p state %u\n",
			     __func__, ep, ep->com.state);
			if (ep->com.state == MPA_REQ_RCVD) {
				ep->com.rpl_done = 1;
				wake_up(&ep->com.waitq);
			}
		}
	} else {
		PDBG("%s lsm ack ep %p state %u freeing skb\n",
		     __func__, ep, ep->com.state);
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (post_zb)
		iwch_post_zb_read(ep);
	return CPL_RET_BUF_DONE;
}

static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	unsigned long flags;
	int release = 0;

	PDBG("%s ep %p\n", __func__, ep);
	BUG_ON(!ep);

	/*
	 * We get 2 abort replies from the HW. The first one must
	 * be ignored except for scribbling that we need one more.
	 */
	if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) {
		return CPL_RET_BUF_DONE;
	}

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case ABORTING:
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		       __func__, ep, ep->com.state);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);

	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}

static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %u errno %d\n", __func__, ep, rpl->status,
	     status2errno(rpl->status));
	connect_reply_upcall(ep, status2errno(rpl->status));
	state_set(&ep->com, DEAD);
	if (ep->com.tdev->type != T3A && act_open_has_tid(rpl->status))
		release_tid(ep->com.tdev, GET_TID(rpl), NULL);
	cxgb3_free_atid(ep->com.tdev, ep->atid);
	dst_release(ep->dst);
	l2t_release(ep->com.tdev, ep->l2t);
	put_ep(&ep->com);
	return CPL_RET_BUF_DONE;
}
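/*
 * Program the hardware to listen: send a CPL_PASS_OPEN_REQ bound to
 * the server TID for the local address and port.
 */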
static int listen_start(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_pass_open_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");
		return -ENOMEM;
	}

	req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
	req->local_port = ep->com.local_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_port = 0;
	req->peer_ip = 0;
	req->peer_netmask = 0;
	req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
	req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
	req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));

	skb->priority = 1;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);

	return CPL_RET_BUF_DONE;
}

static int listen_stop(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_close_listserv_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->cpu_idx = 0;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
	skb->priority = 1;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}

static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
			     void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_close_listserv_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p\n", __func__, ep);
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);
	return CPL_RET_BUF_DONE;
}
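/*
 * Accept an incoming connection request by reusing the request skb
 * to send a CPL_PASS_ACCEPT_RPL carrying our TCP options.
 */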
static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u32 opt0h, opt0l, opt2;
	int wscale;

	PDBG("%s ep %p\n", __func__, ep);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
	       V_CONG_CONTROL_FLAVOR(cong_flavor);

	rpl = cplhdr(skb);
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
	rpl->peer_ip = peer_ip;
	rpl->opt0h = htonl(opt0h);
	rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
	rpl->opt2 = htonl(opt2);
	rpl->rsvd = rpl->opt2;	/* workaround for HW bug */
	skb->priority = CPL_PRIORITY_SETUP;
	iwch_l2t_send(ep->com.tdev, skb, ep->l2t);

	return;
}

static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s t3cdev %p tid %u peer_ip %x\n", __func__, tdev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);

	if (tdev->type != T3A)
		release_tid(tdev, hwtid, skb);
	else {
		struct cpl_pass_accept_rpl *rpl;

		rpl = cplhdr(skb);
		skb->priority = CPL_PRIORITY_SETUP;
		rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						      hwtid));
		rpl->peer_ip = peer_ip;
		rpl->opt0h = htonl(F_TCAM_BYPASS);
		rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
		rpl->opt2 = 0;
		rpl->rsvd = rpl->opt2;
		iwch_cxgb3_ofld_send(tdev, skb);
	}
}
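/*
 * Handle an incoming SYN: resolve the ingress netdev, route and L2
 * entry, allocate a child endpoint linked to the listening parent,
 * and accept or reject the connection request.
 */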
static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *child_ep, *parent_ep = ctx;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	struct l2t_entry *l2t;
	struct rtable *rt;
	struct iff_mac tim;

	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __func__);
		goto reject;
	}

	/*
	 * Find the netdev for this connection request.
	 */
	tim.mac_addr = req->dst_mac;
	tim.vlan_tag = ntohs(req->vlan_tag);
	if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
		printk(KERN_ERR "%s bad dst mac %pM\n",
		       __func__, req->dst_mac);
		goto reject;
	}

	/* Find output route */
	rt = find_route(tdev,
			req->local_ip,
			req->peer_ip,
			req->local_port,
			req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
	if (!rt) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	dst = &rt->dst;
	l2t = t3_l2t_get(tdev, dst, NULL, &req->peer_ip);
	if (!l2t) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}
	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		l2t_release(tdev, l2t);
		dst_release(dst);
		goto reject;
	}
	state_set(&child_ep->com, CONNECTING);
	child_ep->com.tdev = tdev;
	child_ep->com.cm_id = NULL;
	child_ep->com.local_addr.sin_family = PF_INET;
	child_ep->com.local_addr.sin_port = req->local_port;
	child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
	child_ep->com.remote_addr.sin_family = PF_INET;
	child_ep->com.remote_addr.sin_port = req->peer_port;
	child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
	get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
	child_ep->l2t = l2t;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;
	init_timer(&child_ep->timer);
	cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
	accept_cr(child_ep, req->peer_ip, skb);
	goto out;
reject:
	reject_cr(tdev, hwtid, req->peer_ip, skb);
out:
	return CPL_RET_BUF_DONE;
}

static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_pass_establish *req = cplhdr(skb);

	PDBG("%s ep %p\n", __func__, ep);
	ep->snd_seq = ntohl(req->snd_isn);
	ep->rcv_seq = ntohl(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);

	return CPL_RET_BUF_DONE;
}
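/*
 * The peer has closed its half of the connection. Advance the ep
 * state machine accordingly and, where required, move the QP toward
 * CLOSING or IDLE and deliver the matching upcalls.
 */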
static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int disconnect = 1;
	int release = 0;

	PDBG("%s ep %p\n", __func__, ep);
	dst_confirm(ep->dst);

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see iwch_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		attrs.next_state = IWCH_QP_STATE_CLOSING;
		iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		peer_close_upcall(ep);
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (disconnect)
		iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 */
static int is_neg_adv_abort(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}
static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct iwch_ep *ep = ctx;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct iwch_qp_attributes attrs;
	int ret;
	int release = 0;
	unsigned long flags;

	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %d\n", __func__, ep,
		     ep->hwtid);
		t3_l2t_send_event(ep->com.tdev, ep->l2t);
		return CPL_RET_BUF_DONE;
	}

	/*
	 * We get 2 peer aborts from the HW.  The first one must
	 * be ignored except for scribbling that we need one more.
	 */
	if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) {
		return CPL_RET_BUF_DONE;
	}

	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p state %u\n", __func__, ep, ep->com.state);
	switch (ep->com.state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		stop_ep_timer(ep);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see iwch_accept_cr()).
		 */
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			ret = iwch_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		spin_unlock_irqrestore(&ep->com.lock, flags);
		return CPL_RET_BUF_DONE;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		release = 1;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __func__);
		release = 1;
		goto out;
	}
	rpl_skb->priority = CPL_PRIORITY_DATA;
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
	rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	iwch_cxgb3_ofld_send(ep->com.tdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}
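
/*
 * Handle CPL_CLOSE_CON_RPL: our half-close has completed.  In MORIBUND
 * the shutdown is now fully done, so idle the QP, deliver the close
 * complete upcall, and release the endpoint.
 */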
static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int release = 0;

	PDBG("%s ep %p\n", __func__, ep);
	BUG_ON(!ep);

	/* The cm_id may be null if we failed to connect */
	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
	case DEAD:
		break;
	default:
		BUG_ON(1);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * T3A does 3 things when a TERM is received:
 * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
 * 2) generate an async event on the QP with the TERMINATE opcode
 * 3) post a TERMINATE opcode cqe into the associated CQ.
 *
 * For (1), we save the message in the qp for later consumption by the ULP.
 * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
 * For (3), we toss the CQE in cxio_poll_cq().
 *
 * terminate() handles case (1)...
 */
static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;

	if (state_read(&ep->com) != FPDU_MODE)
		return CPL_RET_BUF_DONE;

	PDBG("%s ep %p\n", __func__, ep);
	skb_pull(skb, sizeof(struct cpl_rdma_terminate));
	PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
	skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
				  skb->len);
	ep->com.qp->attr.terminate_msg_len = skb->len;
	ep->com.qp->attr.is_terminate_local = 0;
	return CPL_RET_BUF_DONE;
}
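
/*
 * Handle CPL_RDMA_EC_STATUS: a non-zero status means the graceful close
 * failed in the HW, so move the QP to ERROR and abort the connection.
 */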
static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_rdma_ec_status *rep = cplhdr(skb);
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid,
	     rep->status);
	if (rep->status) {
		struct iwch_qp_attributes attrs;

		printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
		       __func__, ep->hwtid);
		stop_ep_timer(ep);
		attrs.next_state = IWCH_QP_STATE_ERROR;
		iwch_modify_qp(ep->com.qp->rhp,
			       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
			       &attrs, 1);
		abort_connection(ep, NULL, GFP_KERNEL);
	}
	return CPL_RET_BUF_DONE;
}
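
/*
 * Connection watchdog, armed by start_ep_timer().  If we are still
 * waiting on the MPA exchange or a graceful close when it fires, give
 * up and abort the connection.  Runs in timer (atomic) context, hence
 * GFP_ATOMIC for the abort.
 */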
static void ep_timeout(unsigned long arg)
{
	struct iwch_ep *ep = (struct iwch_ep *)arg;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int abort = 1;

	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		__state_set(&ep->com, ABORTING);
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		__state_set(&ep->com, ABORTING);
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		__state_set(&ep->com, ABORTING);
		break;
	default:
		WARN(1, "%s unexpected state ep %p state %u\n",
		     __func__, ep, ep->com.state);
		abort = 0;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (abort)
		abort_connection(ep, NULL, GFP_ATOMIC);
	put_ep(&ep->com);
}
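
/*
 * iw_cm reject handler: refuse an inbound connect request sitting in
 * MPA_REQ_RCVD.  MPA revision 0 has no way to convey a reject to the
 * peer, so we simply abort; otherwise an MPA reject frame (optionally
 * carrying private data) is sent and the connection is closed
 * gracefully.
 */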
int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;
	struct iwch_ep *ep = to_ep(cm_id);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	if (state_read(&ep->com) == DEAD) {
		put_ep(&ep->com);
		return -ECONNRESET;
	}
	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	}
	put_ep(&ep->com);
	return 0;
}
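
/*
 * iw_cm accept handler: bind the chosen QP to the endpoint, move the QP
 * to RTS, and send the MPA reply that completes the connection.  On any
 * failure the cm_id reference taken here is dropped again before
 * returning the error.
 */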
int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	struct iwch_ep *ep = to_ep(cm_id);
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	if (state_read(&ep->com) == DEAD) {
		err = -ECONNRESET;
		goto err;
	}

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);

	if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
	    (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
		abort_connection(ep, NULL, GFP_KERNEL);
		err = -EINVAL;
		goto err;
	}

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;

	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ird == 0)
		ep->ird = 1;

	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE |
	       IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD |
	       IWCH_QP_ATTR_MAX_ORD;

	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err1;

	/* if needed, wait for wr_ack */
	if (iwch_rqes_posted(qp)) {
		wait_event(ep->com.waitq, ep->com.rpl_done);
		err = ep->com.rpl_err;
		if (err)
			goto err1;
	}

	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err1;

	state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	put_ep(&ep->com);
	return 0;
err1:
	ep->com.cm_id = NULL;
	ep->com.qp = NULL;
	cm_id->rem_ref(cm_id);
err:
	put_ep(&ep->com);
	return err;
}
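
/*
 * Active opens to an address owned by the local host are not supported
 * (iwch_connect() fails them with -ENOSYS), so detect that case up
 * front by checking whether the remote address maps to a local device.
 */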
static int is_loopback_dst(struct iw_cm_id *cm_id)
{
	struct net_device *dev;

	dev = ip_dev_find(&init_net, cm_id->remote_addr.sin_addr.s_addr);
	if (!dev)
		return 0;
	dev_put(dev);
	return 1;
}
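
/*
 * iw_cm connect handler (active open): allocate an endpoint and an
 * active TID, resolve a route and an L2 entry for the destination, then
 * issue the TCP connect request to the RNIC.
 */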
int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_ep *ep;
	struct rtable *rt;
	int err = 0;

	if (is_loopback_dst(cm_id)) {
		err = -ENOSYS;
		goto out;
	}

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	ep->com.tdev = h->rdev.t3cdev_p;

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(h, conn_param->qpn);
	BUG_ON(!ep->com.qp);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	/* find a route */
	rt = find_route(h->rdev.t3cdev_p,
			cm_id->local_addr.sin_addr.s_addr,
			cm_id->remote_addr.sin_addr.s_addr,
			cm_id->local_addr.sin_port,
			cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->dst;
	ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL,
			     &cm_id->remote_addr.sin_addr.s_addr);
	if (!ep->l2t) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		err = -ENOMEM;
		goto fail4;
	}

	state_set(&ep->com, CONNECTING);
	ep->tos = IPTOS_LOWDELAY;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	l2t_release(h->rdev.t3cdev_p, ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb3_free_atid(ep->com.tdev, ep->atid);
fail2:
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
out:
	return err;
}
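
/*
 * iw_cm listen handler (passive open): allocate a listening endpoint
 * and a server TID, start the listen, then block until the HW's
 * PASS_OPEN_RPL tells us whether it took effect.
 */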
int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_listen_ep *ep;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __func__, ep);
	ep->com.tdev = h->rdev.t3cdev_p;
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;

	/*
	 * Allocate a server TID.
	 */
	ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	state_set(&ep->com, LISTEN);
	err = listen_start(ep);
	if (err)
		goto fail3;

	/* wait for pass_open_rpl */
	wait_event(ep->com.waitq, ep->com.rpl_done);
	err = ep->com.rpl_err;
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
fail3:
	cxgb3_free_stid(ep->com.tdev, ep->stid);
fail2:
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
fail1:
out:
	return err;
}
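
/*
 * Tear down a listening endpoint: mark it DEAD, ask the HW to stop
 * listening, wait for the CLOSE_LISTSRV_RPL, and free the server TID.
 */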
int iwch_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct iwch_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __func__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	err = listen_stop(ep);
	if (err)
		goto done;
	wait_event(ep->com.waitq, ep->com.rpl_done);
	cxgb3_free_stid(ep->com.tdev, ep->stid);
done:
	err = ep->com.rpl_err;
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
	return err;
}
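
/*
 * Start (or continue) teardown of a connection.  @abrupt selects an
 * RST-style abort instead of a graceful half-close.  If the underlying
 * rdev has hit a fatal error, or the close/abort send itself fails,
 * skip straight to releasing the endpoint's resources.
 */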
int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	unsigned long flags;
	int close = 0;
	int fatal = 0;
	struct t3cdev *tdev;
	struct cxio_rdev *rdev;

	spin_lock_irqsave(&ep->com.lock, flags);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	tdev = (struct t3cdev *)ep->com.tdev;
	rdev = (struct cxio_rdev *)tdev->ulp;
	if (cxio_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;
			start_ep_timer(ep);
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				stop_ep_timer(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		PDBG("%s ignoring disconnect ep %p state %u\n",
		     __func__, ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (close) {
		if (abrupt)
			ret = send_abort(ep, NULL, gfp);
		else
			ret = send_halfclose(ep, gfp);
		if (ret)
			fatal = 1;
	}
	if (fatal)
		release_ep_resources(ep);
	return ret;
}
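
/*
 * Called when the route/neighbour for an endpoint changes: switch the
 * endpoint over to the new dst and L2 entry.  Returns 1 if the redirect
 * was applied, 0 if this endpoint was not using the old dst.
 */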
int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
		     struct l2t_entry *l2t)
{
	struct iwch_ep *ep = ctx;

	if (ep->dst != old)
		return 0;

	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
	     l2t);
	dst_hold(new);
	l2t_release(ep->com.tdev, ep->l2t);
	ep->l2t = l2t;
	dst_release(old);
	ep->dst = new;
	return 1;
}

/*
 * All the CM events are handled on a work queue to have a safe context.
 * These are the real handlers that are called from the work queue.
 */
static const cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
	[CPL_TX_DMA_ACK] = tx_ack,
	[CPL_ABORT_RPL_RSS] = abort_rpl,
	[CPL_ABORT_RPL] = abort_rpl,
	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
	[CPL_PASS_ESTABLISH] = pass_establish,
	[CPL_PEER_CLOSE] = peer_close,
	[CPL_ABORT_REQ_RSS] = peer_abort,
	[CPL_CLOSE_CON_RPL] = close_con_rpl,
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_RDMA_EC_STATUS] = ec_status,
};
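
/*
 * Worker side of the deferred CPL path: drain rxq, recover the endpoint
 * and t3cdev that sched() stashed in skb->cb, and dispatch to the
 * work_handlers[] entry for the CPL opcode carried in skb->csum.
 */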
static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	void *ep;
	struct t3cdev *tdev;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		ep = *((void **) (skb->cb));
		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
		ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);

		/*
		 * ep was referenced in sched(), and is freed here.
		 */
		put_ep((struct iwch_ep_common *)ep);
	}
}

static DECLARE_WORK(skb_work, process_work);

static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep_common *epc = ctx;

	get_ep(epc);

	/*
	 * Save ctx and tdev in the skb->cb area.
	 */
	*((void **) skb->cb) = ctx;
	*((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}
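
/*
 * Unlike the handlers above, this runs directly in the interrupt path
 * rather than being deferred through sched(), presumably because
 * checking the reply status is all there is to do.
 */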
static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	return CPL_RET_BUF_DONE;
}

/*
 * All upcalls from the T3 Core go to sched() to schedule the
 * processing on a work queue.
 */
cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = sched,
	[CPL_ACT_OPEN_RPL] = sched,
	[CPL_RX_DATA] = sched,
	[CPL_TX_DMA_ACK] = sched,
	[CPL_ABORT_RPL_RSS] = sched,
	[CPL_ABORT_RPL] = sched,
	[CPL_PASS_OPEN_RPL] = sched,
	[CPL_CLOSE_LISTSRV_RPL] = sched,
	[CPL_PASS_ACCEPT_REQ] = sched,
	[CPL_PASS_ESTABLISH] = sched,
	[CPL_PEER_CLOSE] = sched,
	[CPL_CLOSE_CON_RPL] = sched,
	[CPL_ABORT_REQ_RSS] = sched,
	[CPL_RDMA_TERMINATE] = sched,
	[CPL_RDMA_EC_STATUS] = sched,
	[CPL_SET_TCB_RPL] = set_tcb_rpl,
};

int __init iwch_cm_init(void)
{
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb3");
	if (!workq)
		return -ENOMEM;

	return 0;
}

void __exit iwch_cm_term(void)
{
	flush_workqueue(workq);
	destroy_workqueue(workq);
}