/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "tcb.h"
#include "cxgb3_offload.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int ep_timeout_secs = 10;
module_param(ep_timeout_secs, int, 0444);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
		 "in seconds (default=10)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0444);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		 "1 is spec compliant. (default=1)");

static int markers_enabled = 0;
module_param(markers_enabled, int, 0444);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0444);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0444);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 32 * 1024;
module_param(snd_win, int, 0444);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");

static unsigned int nocong = 0;
module_param(nocong, uint, 0444);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static unsigned int cong_flavor = 1;
module_param(cong_flavor, uint, 0444);
MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");

static void process_work(struct work_struct *work);
static struct workqueue_struct *workq;
static DECLARE_WORK(skb_work, process_work);

static struct sk_buff_head rxq;
static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS];

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct iwch_ep *ep, int status);
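
/*
 * Arm the per-endpoint watchdog timer.  The first arm takes an extra
 * reference on the ep so it cannot be freed while the timer is pending;
 * if the timer is already running it is simply restarted.
 */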
static void start_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	if (timer_pending(&ep->timer)) {
		PDBG("%s stopped / restarted timer ep %p\n", __FUNCTION__, ep);
		del_timer_sync(&ep->timer);
	} else
		get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static void stop_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	del_timer_sync(&ep->timer);
	put_ep(&ep->com);
}

static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	skb->priority = CPL_PRIORITY_SETUP;
	tdev->send(tdev, skb);
	return;
}

int iwch_quiesce_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);

	skb->priority = CPL_PRIORITY_DATA;
	ep->com.tdev->send(ep->com.tdev, skb);
	return 0;
}

int iwch_resume_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = 0;

	skb->priority = CPL_PRIORITY_DATA;
	ep->com.tdev->send(ep->com.tdev, skb);
	return 0;
}
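
/*
 * Derive the effective MSS from the negotiated TCP options: start from
 * the MTU-table entry selected by hardware, subtract 40 bytes of IP/TCP
 * headers, another 12 if timestamps are in use, and clamp at 128.
 */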
static void set_emss(struct iwch_ep *ep, u16 opt)
{
	PDBG("%s ep %p opt %u\n", __FUNCTION__, ep, opt);
	ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
	if (G_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("emss=%d\n", ep->emss);
}

static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
{
	unsigned long flags;
	enum iwch_ep_state state;

	spin_lock_irqsave(&epc->lock, flags);
	state = epc->state;
	spin_unlock_irqrestore(&epc->lock, flags);
	return state;
}

static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	epc->state = new;
}

static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	unsigned long flags;

	spin_lock_irqsave(&epc->lock, flags);
	PDBG("%s - %s -> %s\n", __FUNCTION__, states[epc->state], states[new]);
	__state_set(epc, new);
	spin_unlock_irqrestore(&epc->lock, flags);
	return;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct iwch_ep_common *epc;

	epc = kmalloc(size, gfp);
	if (epc) {
		memset(epc, 0, size);
		kref_init(&epc->kref);
		spin_lock_init(&epc->lock);
		init_waitqueue_head(&epc->waitq);
	}
	PDBG("%s alloc ep %p\n", __FUNCTION__, epc);
	return epc;
}

void __free_ep(struct kref *kref)
{
	struct iwch_ep_common *epc;

	epc = container_of(kref, struct iwch_ep_common, kref);
	PDBG("%s ep %p state %s\n", __FUNCTION__, epc, states[state_read(epc)]);
	kfree(epc);
}

static void release_ep_resources(struct iwch_ep *ep)
{
	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
	cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
	dst_release(ep->dst);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	if (ep->com.tdev->type == T3B)
		release_tid(ep->com.tdev, ep->hwtid, NULL);
	put_ep(&ep->com);
}

static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	void *ep;
	struct t3cdev *tdev;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		ep = *((void **) (skb->cb));
		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
		ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);

		/*
		 * ep was referenced in sched(), and is freed here.
		 */
		put_ep((struct iwch_ep_common *)ep);
	}
}
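
/*
 * Map a CPL hardware status code to the errno reported to the ULP.
 */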
static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}

static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi fl = {
		.oif = 0,
		.nl_u = {
			 .ip4_u = {
				   .daddr = peer_ip,
				   .saddr = local_ip,
				   .tos = tos}
			 },
		.proto = IPPROTO_TCP,
		.uli_u = {
			  .ports = {
				    .sport = local_port,
				    .dport = peer_port}
			  }
	};

	if (ip_route_output_flow(&rt, &fl, NULL, 0))
		return NULL;
	return rt;
}
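
/*
 * Return the index of the largest entry in the adapter's MTU table
 * that does not exceed the given path MTU.
 */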
static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
{
	int i = 0;

	while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
		++i;
	return i;
}

static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
{
	PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
	req->cmd = CPL_ABORT_NO_RST;
	cxgb3_ofld_send(dev, skb);
}

static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(NULL, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(skb, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __FUNCTION__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}
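
/*
 * Build and send the CPL_ACT_OPEN_REQ that asks the adapter to open an
 * active (client-side) TCP connection, using the ep's route and L2T
 * entry plus the module's window/congestion settings.
 */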
static int send_connect(struct iwch_ep *ep)
{
	struct cpl_act_open_req *req;
	struct sk_buff *skb;
	u32 opt0h, opt0l, opt2;
	unsigned int mtu_idx;
	int wscale;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __FUNCTION__);
		return -ENOMEM;
	}
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
	skb->priority = CPL_PRIORITY_SETUP;
	set_arp_failure_handler(skb, act_open_req_arp_failure);

	req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
	req->local_port = ep->com.local_addr.sin_port;
	req->peer_port = ep->com.remote_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
	req->opt0h = htonl(opt0h);
	req->opt0l = htonl(opt0l);
	req->params = 0;
	req->opt2 = htonl(opt2);
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}
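
/*
 * Send the MPA start request as offloaded TX data: the MPA header plus
 * any private data is framed with a TX_DATA work request and handed to
 * the L2T for transmit.  The skb is held in ep->mpa_skb until tx_ack()
 * confirms the hardware has consumed it.
 */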
static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;

	PDBG("%s ep %p pd_len %d\n", __FUNCTION__, ep, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
		kfree_skb(skb);
		skb = alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
		if (!skb) {
			connect_reply_upcall(ep, -ENOMEM);
			return;
		}
	}
	skb_trim(skb, 0);
	skb_reserve(skb, sizeof(*req));
	skb_put(skb, mpalen);
	skb->priority = CPL_PRIORITY_DATA;
	mpa = (struct mpa_message *) skb->data;
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev;

	if (ep->plen)
		memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_IMM_ACK|F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	return;
}

static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
		return -ENOMEM;
	}
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(mpalen);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_IMM_ACK|F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_MORE | F_TX_IMM_ACK | F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}
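
/*
 * The adapter completed the active-open TCP handshake
 * (CPL_ACT_ESTABLISH): record the hardware TID, free the atid, and
 * kick off MPA negotiation by sending the MPA request.
 */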
static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);

	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, tid);

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);

	ep->snd_seq = ntohl(req->snd_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	cxgb3_free_atid(ep->com.tdev, ep->atid);

	/* start MPA negotiation */
	send_mpa_req(ep, skb);

	return 0;
}

static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	state_set(&ep->com, ABORTING);
	send_abort(ep, skb, gfp);
}

static void close_complete_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void peer_close_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

static void peer_abort_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %d\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_reply_upcall(struct iwch_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p status %d\n", __FUNCTION__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d status %d\n", __FUNCTION__, ep,
		     ep->hwtid, status);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_request_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.private_data_len = ep->plen;
	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	event.provider_data = ep;
	if (state_read(&ep->parent_ep->com) != DEAD)
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}

static void established_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}
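
/*
 * Return RX credits to the adapter with a CPL_RX_DATA_ACK so the TCP
 * receive window reopens; called as streaming MPA data is consumed.
 */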
static int update_rx_credits(struct iwch_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;

	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
	req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
	skb->priority = CPL_PRIORITY_ACK;
	ep->com.tdev->send(ep->com.tdev, skb);
	return credits;
}
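
/*
 * Accumulate and validate the peer's MPA start reply.  Once the full
 * message (header plus private data) has arrived and checks out, move
 * the QP to RTS and deliver the connect reply upcall.
 */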
static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision != mpa_rev) {
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __FUNCTION__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	mask = IWCH_QP_ATTR_NEXT_STATE |
	    IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |
	    IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (!err)
		goto out;
err:
	abort_connection(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return;
}

static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision != mpa_rev) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __FUNCTION__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
	return;
}

static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);

	PDBG("%s ep %p dlen %u\n", __FUNCTION__, ep, dlen);

	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	case MPA_REP_SENT:
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %d\n",
		       __FUNCTION__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 * See ep_timeout().
		 */
		break;
	}

	/* update RX credits */
	update_rx_credits(ep, dlen);

	return CPL_RET_BUF_DONE;
}

/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_wr_ack *hdr = cplhdr(skb);
	unsigned int credits = ntohs(hdr->credits);
	enum iwch_qp_attr_mask mask;

	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);

	if (credits == 0)
		return CPL_RET_BUF_DONE;
	BUG_ON(credits != 1);
	BUG_ON(ep->mpa_skb == NULL);
	kfree_skb(ep->mpa_skb);
	ep->mpa_skb = NULL;
	dst_confirm(ep->dst);
	if (state_read(&ep->com) == MPA_REP_SENT) {
		struct iwch_qp_attributes attrs;

		/* bind QP to EP and move to RTS */
		attrs.mpa_attr = ep->mpa_attr;
		attrs.max_ird = ep->ird;
		attrs.max_ord = ep->ord;
		attrs.llp_stream_handle = ep;
		attrs.next_state = IWCH_QP_STATE_RTS;

		/* bind QP and TID with INIT_WR */
		mask = IWCH_QP_ATTR_NEXT_STATE |
		       IWCH_QP_ATTR_LLP_STREAM_HANDLE |
		       IWCH_QP_ATTR_MPA_ATTR |
		       IWCH_QP_ATTR_MAX_IRD |
		       IWCH_QP_ATTR_MAX_ORD;

		ep->com.rpl_err = iwch_modify_qp(ep->com.qp->rhp,
						 ep->com.qp, mask, &attrs, 1);

		if (!ep->com.rpl_err) {
			state_set(&ep->com, FPDU_MODE);
			established_upcall(ep);
		}

		ep->com.rpl_done = 1;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
	}
	return CPL_RET_BUF_DONE;
}

static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	close_complete_upcall(ep);
	state_set(&ep->com, DEAD);
	release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %u errno %d\n", __FUNCTION__, ep, rpl->status,
	     status2errno(rpl->status));
	connect_reply_upcall(ep, status2errno(rpl->status));
	state_set(&ep->com, DEAD);
	if (ep->com.tdev->type == T3B)
		release_tid(ep->com.tdev, GET_TID(rpl), NULL);
	cxgb3_free_atid(ep->com.tdev, ep->atid);
	dst_release(ep->dst);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	put_ep(&ep->com);
	return CPL_RET_BUF_DONE;
}
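
/*
 * Ask the adapter to start listening: send a CPL_PASS_OPEN_REQ for the
 * server TID bound to the listening endpoint's local address and port.
 */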
static int listen_start(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_pass_open_req *req;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");
		return -ENOMEM;
	}

	req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
	req->local_port = ep->com.local_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_port = 0;
	req->peer_ip = 0;
	req->peer_netmask = 0;
	req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
	req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
	req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));

	skb->priority = 1;
	ep->com.tdev->send(ep->com.tdev, skb);
	return 0;
}

static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %d error %d\n", __FUNCTION__, ep,
	     rpl->status, status2errno(rpl->status));
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);

	return CPL_RET_BUF_DONE;
}

static int listen_stop(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_close_listserv_req *req;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
		return -ENOMEM;
	}
	req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
	skb->priority = 1;
	ep->com.tdev->send(ep->com.tdev, skb);
	return 0;
}

static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
			     void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_close_listserv_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);
	return CPL_RET_BUF_DONE;
}
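
/*
 * Accept an incoming connection request: reuse the PASS_ACCEPT_REQ skb
 * to send back a CPL_PASS_ACCEPT_RPL carrying the connection options.
 */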
static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u32 opt0h, opt0l, opt2;
	int wscale;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);

	rpl = cplhdr(skb);
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
	rpl->peer_ip = peer_ip;
	rpl->opt0h = htonl(opt0h);
	rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
	rpl->opt2 = htonl(opt2);
	rpl->rsvd = rpl->opt2;	/* workaround for HW bug */
	skb->priority = CPL_PRIORITY_SETUP;
	l2t_send(ep->com.tdev, skb, ep->l2t);

	return;
}

static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s t3cdev %p tid %u peer_ip %x\n", __FUNCTION__, tdev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);

	if (tdev->type == T3B)
		release_tid(tdev, hwtid, skb);
	else {
		struct cpl_pass_accept_rpl *rpl;

		rpl = cplhdr(skb);
		skb->priority = CPL_PRIORITY_SETUP;
		rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						      hwtid));
		rpl->peer_ip = peer_ip;
		rpl->opt0h = htonl(F_TCAM_BYPASS);
		rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
		rpl->opt2 = 0;
		rpl->rsvd = rpl->opt2;
		tdev->send(tdev, skb);
	}
}
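
/*
 * An incoming SYN hit the listening server TID (CPL_PASS_ACCEPT_REQ):
 * resolve the ingress netdev, route and L2T entry, allocate a child
 * endpoint for the new connection, and accept or reject it.
 */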
static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *child_ep, *parent_ep = ctx;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	struct l2t_entry *l2t;
	struct rtable *rt;
	struct iff_mac tim;

	PDBG("%s parent ep %p tid %u\n", __FUNCTION__, parent_ep, hwtid);

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __FUNCTION__);
		goto reject;
	}

	/*
	 * Find the netdev for this connection request.
	 */
	tim.mac_addr = req->dst_mac;
	tim.vlan_tag = ntohs(req->vlan_tag);
	if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
		printk(KERN_ERR
		       "%s bad dst mac %02x %02x %02x %02x %02x %02x\n",
		       __FUNCTION__,
		       req->dst_mac[0],
		       req->dst_mac[1],
		       req->dst_mac[2],
		       req->dst_mac[3],
		       req->dst_mac[4],
		       req->dst_mac[5]);
		goto reject;
	}

	/* Find output route */
	rt = find_route(tdev,
			req->local_ip,
			req->peer_ip,
			req->local_port,
			req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
	if (!rt) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __FUNCTION__);
		goto reject;
	}
	dst = &rt->u.dst;
	l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
	if (!l2t) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __FUNCTION__);
		dst_release(dst);
		goto reject;
	}
	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __FUNCTION__);
		l2t_release(L2DATA(tdev), l2t);
		dst_release(dst);
		goto reject;
	}
	state_set(&child_ep->com, CONNECTING);
	child_ep->com.tdev = tdev;
	child_ep->com.cm_id = NULL;
	child_ep->com.local_addr.sin_family = PF_INET;
	child_ep->com.local_addr.sin_port = req->local_port;
	child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
	child_ep->com.remote_addr.sin_family = PF_INET;
	child_ep->com.remote_addr.sin_port = req->peer_port;
	child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
	get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
	child_ep->l2t = l2t;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;
	init_timer(&child_ep->timer);
	cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
	accept_cr(child_ep, req->peer_ip, skb);
	goto out;
reject:
	reject_cr(tdev, hwtid, req->peer_ip, skb);
out:
	return CPL_RET_BUF_DONE;
}

static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_pass_establish *req = cplhdr(skb);

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	ep->snd_seq = ntohl(req->snd_isn);
	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);

	return CPL_RET_BUF_DONE;
}
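
/*
 * The peer sent a FIN (CPL_PEER_CLOSE).  Advance the CM state machine,
 * notifying the ULP and moving the QP as appropriate for the current
 * state, then either drive our half-close or release the endpoint.
 */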
static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int disconnect = 1;
	int release = 0;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	dst_confirm(ep->dst);

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR.
		 */
		__state_set(&ep->com, CLOSING);
		get_ep(&ep->com);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		attrs.next_state = IWCH_QP_STATE_CLOSING;
		iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		peer_close_upcall(ep);
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (disconnect)
		iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 */
static int is_neg_adv_abort(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}
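
/*
 * The peer reset the connection (CPL_ABORT_REQ_RSS).  Negative advice
 * is ignored; otherwise tear the connection down according to the
 * current state and reply with a no-RST CPL_ABORT_RPL.
 */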
  1284. static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
  1285. {
  1286. struct cpl_abort_req_rss *req = cplhdr(skb);
  1287. struct iwch_ep *ep = ctx;
  1288. struct cpl_abort_rpl *rpl;
  1289. struct sk_buff *rpl_skb;
  1290. struct iwch_qp_attributes attrs;
  1291. int ret;
  1292. int state;
  1293. if (is_neg_adv_abort(req->status)) {
  1294. PDBG("%s neg_adv_abort ep %p tid %d\n", __FUNCTION__, ep,
  1295. ep->hwtid);
  1296. t3_l2t_send_event(ep->com.tdev, ep->l2t);
  1297. return CPL_RET_BUF_DONE;
  1298. }
  1299. state = state_read(&ep->com);
  1300. PDBG("%s ep %p state %u\n", __FUNCTION__, ep, state);
  1301. switch (state) {
  1302. case CONNECTING:
  1303. break;
  1304. case MPA_REQ_WAIT:
  1305. stop_ep_timer(ep);
  1306. break;
  1307. case MPA_REQ_SENT:
  1308. stop_ep_timer(ep);
  1309. connect_reply_upcall(ep, -ECONNRESET);
  1310. break;
  1311. case MPA_REP_SENT:
  1312. ep->com.rpl_done = 1;
  1313. ep->com.rpl_err = -ECONNRESET;
  1314. PDBG("waking up ep %p\n", ep);
  1315. wake_up(&ep->com.waitq);
  1316. break;
  1317. case MPA_REQ_RCVD:
  1318. /*
  1319. * We're gonna mark this puppy DEAD, but keep
  1320. * the reference on it until the ULP accepts or
  1321. * rejects the CR.
  1322. */
  1323. get_ep(&ep->com);
  1324. break;
  1325. case MORIBUND:
  1326. case CLOSING:
  1327. stop_ep_timer(ep);
  1328. /*FALLTHROUGH*/
  1329. case FPDU_MODE:
  1330. if (ep->com.cm_id && ep->com.qp) {
  1331. attrs.next_state = IWCH_QP_STATE_ERROR;
  1332. ret = iwch_modify_qp(ep->com.qp->rhp,
  1333. ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
  1334. &attrs, 1);
  1335. if (ret)
  1336. printk(KERN_ERR MOD
  1337. "%s - qp <- error failed!\n",
  1338. __FUNCTION__);
  1339. }
  1340. peer_abort_upcall(ep);
  1341. break;
  1342. case ABORTING:
  1343. break;
  1344. case DEAD:
  1345. PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __FUNCTION__);
  1346. return CPL_RET_BUF_DONE;
  1347. default:
  1348. BUG_ON(1);
  1349. break;
  1350. }
  1351. dst_confirm(ep->dst);
  1352. rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
  1353. if (!rpl_skb) {
  1354. printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
  1355. __FUNCTION__);
  1356. dst_release(ep->dst);
  1357. l2t_release(L2DATA(ep->com.tdev), ep->l2t);
  1358. put_ep(&ep->com);
  1359. return CPL_RET_BUF_DONE;
  1360. }
  1361. rpl_skb->priority = CPL_PRIORITY_DATA;
  1362. rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
  1363. rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
  1364. rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
  1365. OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
  1366. rpl->cmd = CPL_ABORT_NO_RST;
  1367. ep->com.tdev->send(ep->com.tdev, rpl_skb);
  1368. if (state != ABORTING) {
  1369. state_set(&ep->com, DEAD);
  1370. release_ep_resources(ep);
  1371. }
  1372. return CPL_RET_BUF_DONE;
  1373. }
static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int release = 0;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	BUG_ON(!ep);

	/* The cm_id may be null if we failed to connect */
	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
		break;
	case DEAD:
	default:
		BUG_ON(1);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * T3A does 3 things when a TERM is received:
 * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
 * 2) generate an async event on the QP with the TERMINATE opcode
 * 3) post a TERMINATE opcode cqe into the associated CQ.
 *
 * For (1), we save the message in the qp for the consumer to retrieve later.
 * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
 * For (3), we toss the CQE in cxio_poll_cq().
 *
 * terminate() handles case (1)...
 */
static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb_pull(skb, sizeof(struct cpl_rdma_terminate));
	PDBG("%s saving %d bytes of term msg\n", __FUNCTION__, skb->len);
	skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
				  skb->len);
	ep->com.qp->attr.terminate_msg_len = skb->len;
	ep->com.qp->attr.is_terminate_local = 0;
	return CPL_RET_BUF_DONE;
}
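
/*
 * Handle a CPL_RDMA_EC_STATUS message.  A non-zero status means the
 * graceful close failed, so stop the endpoint timer, move the QP to
 * ERROR, and abort the connection instead.
 */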
static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_rdma_ec_status *rep = cplhdr(skb);
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p tid %u status %d\n", __FUNCTION__, ep, ep->hwtid,
	     rep->status);
	if (rep->status) {
		struct iwch_qp_attributes attrs;

		printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
		       __FUNCTION__, ep->hwtid);
		stop_ep_timer(ep);
		attrs.next_state = IWCH_QP_STATE_ERROR;
		iwch_modify_qp(ep->com.qp->rhp,
			       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
			       &attrs, 1);
		abort_connection(ep, NULL, GFP_KERNEL);
	}
	return CPL_RET_BUF_DONE;
}
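
/*
 * Endpoint timer callback: the MPA exchange or graceful close did not
 * complete in time.  Fail the pending operation (connect reply with
 * -ETIMEDOUT, or QP -> ERROR for a stuck close) and abort the
 * connection.
 */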
static void ep_timeout(unsigned long arg)
{
	struct iwch_ep *ep = (struct iwch_ep *)arg;
	struct iwch_qp_attributes attrs;
	unsigned long flags;

	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p tid %u state %d\n", __FUNCTION__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		break;
	default:
		BUG();
	}
	__state_set(&ep->com, CLOSING);
	spin_unlock_irqrestore(&ep->com.lock, flags);
	abort_connection(ep, NULL, GFP_ATOMIC);
	put_ep(&ep->com);
}
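
/*
 * Reject a pending connection request on behalf of the ULP.  With MPA
 * revision 0 there is no reject frame, so the connection is simply
 * aborted; otherwise an MPA reject carrying any private data is sent
 * and the connection is closed gracefully.
 */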
int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;
	struct iwch_ep *ep = to_ep(cm_id);

	PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);

	if (state_read(&ep->com) == DEAD) {
		put_ep(&ep->com);
		return -ECONNRESET;
	}
	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	}
	return 0;
}
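
/*
 * Accept a pending connection request: validate the requested IRD/ORD
 * against the device limits, send the MPA reply, then bind the QP to
 * the endpoint and move it to RTS in a single modify-QP operation.
 */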
int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	struct iwch_ep *ep = to_ep(cm_id);
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);
	if (state_read(&ep->com) == DEAD) {
		put_ep(&ep->com);
		return -ECONNRESET;
	}

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);

	if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
	    (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
		abort_connection(ep, NULL, GFP_KERNEL);
		return -EINVAL;
	}

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;

	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;
	PDBG("%s %d ird %d ord %d\n", __FUNCTION__, __LINE__, ep->ird, ep->ord);

	get_ep(&ep->com);
	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err) {
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
		cm_id->rem_ref(cm_id);
		abort_connection(ep, NULL, GFP_KERNEL);
		put_ep(&ep->com);
		return err;
	}

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE |
	       IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD |
	       IWCH_QP_ATTR_MAX_ORD;

	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err) {
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
		cm_id->rem_ref(cm_id);
		abort_connection(ep, NULL, GFP_KERNEL);
	} else {
		state_set(&ep->com, FPDU_MODE);
		established_upcall(ep);
	}
	put_ep(&ep->com);
	return err;
}
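
/*
 * Initiate an active connection on behalf of the ULP: allocate an
 * endpoint and an active TID, resolve a route and L2T entry for the
 * destination, then hand the connect request to the RNIC.
 */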
int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_ep *ep;
	struct rtable *rt;

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;
	ep->com.tdev = h->rdev.t3cdev_p;

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(h, conn_param->qpn);
	BUG_ON(!ep->com.qp);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __FUNCTION__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail2;
	}

	/* find a route */
	rt = find_route(h->rdev.t3cdev_p,
			cm_id->local_addr.sin_addr.s_addr,
			cm_id->remote_addr.sin_addr.s_addr,
			cm_id->local_addr.sin_port,
			cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __FUNCTION__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->u.dst;

	/* get a l2t entry */
	ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
			     ep->dst->neighbour->dev);
	if (!ep->l2t) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail4;
	}

	state_set(&ep->com, CONNECTING);
	ep->tos = IPTOS_LOWDELAY;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb3_free_atid(ep->com.tdev, ep->atid);
fail2:
	put_ep(&ep->com);
out:
	return err;
}
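
/*
 * Start listening for inbound connection requests: allocate a listening
 * endpoint and a server TID, issue the listen, and block until the
 * hardware's PASS_OPEN_RPL reports success or failure.
 */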
int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_listen_ep *ep;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	ep->com.tdev = h->rdev.t3cdev_p;
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;

	/*
	 * Allocate a server TID.
	 */
	ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail2;
	}

	state_set(&ep->com, LISTEN);
	err = listen_start(ep);
	if (err)
		goto fail3;

	/* wait for pass_open_rpl */
	wait_event(ep->com.waitq, ep->com.rpl_done);
	err = ep->com.rpl_err;
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
fail3:
	cxgb3_free_stid(ep->com.tdev, ep->stid);
fail2:
	put_ep(&ep->com);
fail1:
out:
	return err;
}
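
/*
 * Tear down a listening endpoint: stop the listen, wait for the
 * hardware's reply, then free the server TID and drop the references.
 */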
int iwch_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct iwch_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	err = listen_stop(ep);
	wait_event(ep->com.waitq, ep->com.rpl_done);
	cxgb3_free_stid(ep->com.tdev, ep->stid);
	err = ep->com.rpl_err;
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
	return err;
}
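
/*
 * Begin closing a connection.  If 'abrupt' is set the connection is
 * aborted; otherwise a graceful TCP half-close is initiated and the
 * state machine advances toward MORIBUND under the endpoint timer.
 */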
int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	unsigned long flags;
	int close = 0;

	spin_lock_irqsave(&ep->com.lock, flags);

	PDBG("%s ep %p state %s, abrupt %d\n", __FUNCTION__, ep,
	     states[ep->com.state], abrupt);

	if (ep->com.state == DEAD) {
		PDBG("%s already dead ep %p\n", __FUNCTION__, ep);
		goto out;
	}

	if (abrupt) {
		if (ep->com.state != ABORTING) {
			ep->com.state = ABORTING;
			close = 1;
		}
		goto out;
	}

	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		start_ep_timer(ep);
		ep->com.state = CLOSING;
		close = 1;
		break;
	case CLOSING:
		ep->com.state = MORIBUND;
		close = 1;
		break;
	case MORIBUND:
		break;
	default:
		BUG();
		break;
	}
out:
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (close) {
		if (abrupt)
			ret = send_abort(ep, NULL, gfp);
		else
			ret = send_halfclose(ep, gfp);
	}
	return ret;
}
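
/*
 * Route-redirect notification: if this endpoint is using the old dst
 * entry, swap in the new dst and L2T entry and report that the
 * redirect was taken.
 */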
int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
		     struct l2t_entry *l2t)
{
	struct iwch_ep *ep = ctx;

	if (ep->dst != old)
		return 0;

	PDBG("%s ep %p redirect to dst %p l2t %p\n", __FUNCTION__, ep, new,
	     l2t);
	dst_hold(new);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	ep->l2t = l2t;
	dst_release(old);
	ep->dst = new;
	return 1;
}

/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep_common *epc = ctx;

	get_ep(epc);

	/*
	 * Save ctx and tdev in the skb->cb area.
	 */
	*((void **) skb->cb) = ctx;
	*((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}
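
/*
 * SET_TCB_RPL needs no work beyond sanity-checking the status, so it
 * is handled directly in the interrupt path rather than deferred via
 * sched().
 */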
static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	return CPL_RET_BUF_DONE;
}

int __init iwch_cm_init(void)
{
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb3");
	if (!workq)
		return -ENOMEM;

	/*
	 * All upcalls from the T3 Core go to sched() to
	 * schedule the processing on a work queue.
	 */
	t3c_handlers[CPL_ACT_ESTABLISH] = sched;
	t3c_handlers[CPL_ACT_OPEN_RPL] = sched;
	t3c_handlers[CPL_RX_DATA] = sched;
	t3c_handlers[CPL_TX_DMA_ACK] = sched;
	t3c_handlers[CPL_ABORT_RPL_RSS] = sched;
	t3c_handlers[CPL_ABORT_RPL] = sched;
	t3c_handlers[CPL_PASS_OPEN_RPL] = sched;
	t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
	t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched;
	t3c_handlers[CPL_PASS_ESTABLISH] = sched;
	t3c_handlers[CPL_PEER_CLOSE] = sched;
	t3c_handlers[CPL_CLOSE_CON_RPL] = sched;
	t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
	t3c_handlers[CPL_RDMA_TERMINATE] = sched;
	t3c_handlers[CPL_RDMA_EC_STATUS] = sched;
	t3c_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;

	/*
	 * These are the real handlers that are called from a
	 * work queue.
	 */
	work_handlers[CPL_ACT_ESTABLISH] = act_establish;
	work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
	work_handlers[CPL_RX_DATA] = rx_data;
	work_handlers[CPL_TX_DMA_ACK] = tx_ack;
	work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
	work_handlers[CPL_ABORT_RPL] = abort_rpl;
	work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
	work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
	work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
	work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
	work_handlers[CPL_PEER_CLOSE] = peer_close;
	work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
	work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
	work_handlers[CPL_RDMA_TERMINATE] = terminate;
	work_handlers[CPL_RDMA_EC_STATUS] = ec_status;
	return 0;
}
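
/*
 * Module-unload counterpart to iwch_cm_init(): flush any CM work still
 * queued, then destroy the workqueue.
 */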
void __exit iwch_cm_term(void)
{
	flush_workqueue(workq);
	destroy_workqueue(workq);
}