iwch_cm.c

/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "tcb.h"
#include "cxgb3_offload.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int ep_timeout_secs = 10;
module_param(ep_timeout_secs, int, 0444);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				   "in seconds (default=10)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0444);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		 "1 is spec compliant. (default=1)");

static int markers_enabled = 0;
module_param(markers_enabled, int, 0444);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0444);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0444);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 32 * 1024;
module_param(snd_win, int, 0444);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");

static unsigned int nocong = 0;
module_param(nocong, uint, 0444);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static unsigned int cong_flavor = 1;
module_param(cong_flavor, uint, 0444);
MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");

static void process_work(struct work_struct *work);
static struct workqueue_struct *workq;
static DECLARE_WORK(skb_work, process_work);

static struct sk_buff_head rxq;
static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS];

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct iwch_ep *ep, int status);
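
/*
 * Endpoint timer plumbing.  A running timer holds a reference on the
 * endpoint: start_ep_timer() takes a ref unless it is merely restarting a
 * pending timer, and stop_ep_timer() drops it.  ep_timeout() fires if an
 * operation does not complete within ep_timeout_secs.
 */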
static void start_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	if (timer_pending(&ep->timer)) {
		PDBG("%s stopped / restarted timer ep %p\n", __FUNCTION__, ep);
		del_timer_sync(&ep->timer);
	} else
		get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static void stop_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	del_timer_sync(&ep->timer);
	put_ep(&ep->com);
}

static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	skb->priority = CPL_PRIORITY_SETUP;
	tdev->send(tdev, skb);
	return;
}

int iwch_quiesce_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);

	skb->priority = CPL_PRIORITY_DATA;
	ep->com.tdev->send(ep->com.tdev, skb);
	return 0;
}

int iwch_resume_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = 0;

	skb->priority = CPL_PRIORITY_DATA;
	ep->com.tdev->send(ep->com.tdev, skb);
	return 0;
}
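
/*
 * Compute the effective MSS: the MTU-table entry selected by the peer's
 * TCP options, less 40 bytes for the IPv4 and TCP headers, less another
 * 12 bytes if timestamps were negotiated, clamped to a minimum of 128.
 */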
static void set_emss(struct iwch_ep *ep, u16 opt)
{
	PDBG("%s ep %p opt %u\n", __FUNCTION__, ep, opt);
	ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
	if (G_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("emss=%d\n", ep->emss);
}

static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
{
	unsigned long flags;
	enum iwch_ep_state state;

	spin_lock_irqsave(&epc->lock, flags);
	state = epc->state;
	spin_unlock_irqrestore(&epc->lock, flags);
	return state;
}

static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	epc->state = new;
}

static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	unsigned long flags;

	spin_lock_irqsave(&epc->lock, flags);
	PDBG("%s - %s -> %s\n", __FUNCTION__, states[epc->state], states[new]);
	__state_set(epc, new);
	spin_unlock_irqrestore(&epc->lock, flags);
	return;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct iwch_ep_common *epc;

	epc = kmalloc(size, gfp);
	if (epc) {
		memset(epc, 0, size);
		kref_init(&epc->kref);
		spin_lock_init(&epc->lock);
		init_waitqueue_head(&epc->waitq);
	}
	PDBG("%s alloc ep %p\n", __FUNCTION__, epc);
	return epc;
}

void __free_ep(struct kref *kref)
{
	struct iwch_ep_common *epc;

	epc = container_of(kref, struct iwch_ep_common, kref);
	PDBG("%s ep %p state %s\n", __FUNCTION__, epc, states[state_read(epc)]);
	kfree(epc);
}

static void release_ep_resources(struct iwch_ep *ep)
{
	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
	cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
	dst_release(ep->dst);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	if (ep->com.tdev->type == T3B)
		release_tid(ep->com.tdev, ep->hwtid, NULL);
	put_ep(&ep->com);
}
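
/*
 * CPL messages that cannot be handled in interrupt context are queued on
 * rxq by sched() and dispatched here in workqueue context via the
 * work_handlers[] table.  The endpoint and device pointers were stashed
 * in skb->cb at queue time.
 */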
static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	void *ep;
	struct t3cdev *tdev;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		ep = *((void **) (skb->cb));
		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
		ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);

		/*
		 * ep was referenced in sched(), and is freed here.
		 */
		put_ep((struct iwch_ep_common *)ep);
	}
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}

static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi fl = {
		.oif = 0,
		.nl_u = {
			 .ip4_u = {
				   .daddr = peer_ip,
				   .saddr = local_ip,
				   .tos = tos}
			 },
		.proto = IPPROTO_TCP,
		.uli_u = {
			  .ports = {
				    .sport = local_port,
				    .dport = peer_port}
			  }
	};

	if (ip_route_output_flow(&rt, &fl, NULL, 0))
		return NULL;
	return rt;
}
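
/*
 * Return the index of the largest entry in the adapter's MTU table that
 * does not exceed the path MTU; the index is encoded into opt0 via
 * V_MSS_IDX() when opening a connection.
 */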
static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
{
	int i = 0;

	while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
		++i;
	return i;
}

static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
{
	PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
	req->cmd = CPL_ABORT_NO_RST;
	cxgb3_ofld_send(dev, skb);
}

static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(NULL, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(skb, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __FUNCTION__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}
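
/*
 * Kick off an active open: build a CPL_ACT_OPEN_REQ carrying the 4-tuple
 * plus the opt0/opt2 connection options (Nagle off, keepalive on, TCAM
 * bypass, window scale, MSS index, congestion control flavor) and send it
 * via the endpoint's L2T entry.
 */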
static int send_connect(struct iwch_ep *ep)
{
	struct cpl_act_open_req *req;
	struct sk_buff *skb;
	u32 opt0h, opt0l, opt2;
	unsigned int mtu_idx;
	int wscale;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __FUNCTION__);
		return -ENOMEM;
	}
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
	skb->priority = CPL_PRIORITY_SETUP;
	set_arp_failure_handler(skb, act_open_req_arp_failure);

	req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
	req->local_port = ep->com.local_addr.sin_port;
	req->peer_port = ep->com.remote_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
	req->opt0h = htonl(opt0h);
	req->opt0l = htonl(opt0l);
	req->params = 0;
	req->opt2 = htonl(opt2);
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}
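
/*
 * MPA negotiation helpers.  The MPA start request/reply/reject is sent as
 * streaming-mode TCP payload wrapped in a TX_DATA work request.  The
 * sender takes an extra reference on the skb so the payload stays pinned
 * until the hardware acks the transmit; tx_ack() frees it.
 */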
static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;

	PDBG("%s ep %p pd_len %d\n", __FUNCTION__, ep, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
		kfree_skb(skb);
		skb = alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
		if (!skb) {
			connect_reply_upcall(ep, -ENOMEM);
			return;
		}
	}
	skb_trim(skb, 0);
	skb_reserve(skb, sizeof(*req));
	skb_put(skb, mpalen);
	skb->priority = CPL_PRIORITY_DATA;
	mpa = (struct mpa_message *) skb->data;
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev;

	if (ep->plen)
		memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_IMM_ACK|F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	return;
}

static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
		return -ENOMEM;
	}
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(mpalen);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_IMM_ACK|F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_MORE | F_TX_IMM_ACK | F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);

	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, tid);

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);

	ep->snd_seq = ntohl(req->snd_isn);
	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	cxgb3_free_atid(ep->com.tdev, ep->atid);

	/* start MPA negotiation */
	send_mpa_req(ep, skb);

	return 0;
}

static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	state_set(&ep->com, ABORTING);
	send_abort(ep, skb, gfp);
}
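
/*
 * Upcalls into the iw_cm layer.  Each helper builds an iw_cm_event and
 * invokes the consumer's event handler; the terminal ones (close, abort,
 * failed connect reply) also drop the cm_id reference and clear the
 * cm_id/qp back-pointers.
 */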
static void close_complete_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void peer_close_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

static void peer_abort_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %d\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_reply_upcall(struct iwch_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p status %d\n", __FUNCTION__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d status %d\n", __FUNCTION__, ep,
		     ep->hwtid, status);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_request_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.private_data_len = ep->plen;
	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	event.provider_data = ep;
	if (state_read(&ep->parent_ep->com) != DEAD)
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}

static void established_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

static int update_rx_credits(struct iwch_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;

	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
	req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
	skb->priority = CPL_PRIORITY_ACK;
	ep->com.tdev->send(ep->com.tdev, skb);
	return credits;
}

static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision != mpa_rev) {
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __FUNCTION__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (!err)
		goto out;
err:
	abort_connection(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return;
}

static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision != mpa_rev) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start request message including private data.
	 */
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __FUNCTION__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
	return;
}
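
/*
 * Streaming-mode receive.  Until the QP enters FPDU mode the only inbound
 * TCP payload we expect is the peer's MPA message, so demux on endpoint
 * state and feed the accumulation logic above, then return the payload
 * length as RX credits.
 */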
static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);

	PDBG("%s ep %p dlen %u\n", __FUNCTION__, ep, dlen);

	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	case MPA_REP_SENT:
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %d\n",
		       __FUNCTION__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 * See ep_timeout().
		 */
		break;
	}

	/* update RX credits */
	update_rx_credits(ep, dlen);

	return CPL_RET_BUF_DONE;
}

/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_wr_ack *hdr = cplhdr(skb);
	unsigned int credits = ntohs(hdr->credits);
	enum iwch_qp_attr_mask mask;

	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);

	if (credits == 0)
		return CPL_RET_BUF_DONE;
	BUG_ON(credits != 1);
	BUG_ON(ep->mpa_skb == NULL);
	kfree_skb(ep->mpa_skb);
	ep->mpa_skb = NULL;
	dst_confirm(ep->dst);
	if (state_read(&ep->com) == MPA_REP_SENT) {
		struct iwch_qp_attributes attrs;

		/* bind QP to EP and move to RTS */
		attrs.mpa_attr = ep->mpa_attr;
		attrs.max_ird = ep->ird;
		attrs.max_ord = ep->ord;
		attrs.llp_stream_handle = ep;
		attrs.next_state = IWCH_QP_STATE_RTS;

		/* bind QP and TID with INIT_WR */
		mask = IWCH_QP_ATTR_NEXT_STATE |
		       IWCH_QP_ATTR_LLP_STREAM_HANDLE |
		       IWCH_QP_ATTR_MPA_ATTR |
		       IWCH_QP_ATTR_MAX_IRD |
		       IWCH_QP_ATTR_MAX_ORD;

		ep->com.rpl_err = iwch_modify_qp(ep->com.qp->rhp,
						 ep->com.qp, mask, &attrs, 1);

		if (!ep->com.rpl_err) {
			state_set(&ep->com, FPDU_MODE);
			established_upcall(ep);
		}

		ep->com.rpl_done = 1;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
	}
	return CPL_RET_BUF_DONE;
}

static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	close_complete_upcall(ep);
	state_set(&ep->com, DEAD);
	release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %u errno %d\n", __FUNCTION__, ep, rpl->status,
	     status2errno(rpl->status));
	connect_reply_upcall(ep, status2errno(rpl->status));
	state_set(&ep->com, DEAD);
	if (ep->com.tdev->type == T3B)
		release_tid(ep->com.tdev, GET_TID(rpl), NULL);
	cxgb3_free_atid(ep->com.tdev, ep->atid);
	dst_release(ep->dst);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	put_ep(&ep->com);
	return CPL_RET_BUF_DONE;
}

static int listen_start(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_pass_open_req *req;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");
		return -ENOMEM;
	}

	req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
	req->local_port = ep->com.local_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_port = 0;
	req->peer_ip = 0;
	req->peer_netmask = 0;
	req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
	req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
	req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));

	skb->priority = 1;
	ep->com.tdev->send(ep->com.tdev, skb);
	return 0;
}

static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %d error %d\n", __FUNCTION__, ep,
	     rpl->status, status2errno(rpl->status));
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);

	return CPL_RET_BUF_DONE;
}

static int listen_stop(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_close_listserv_req *req;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
		return -ENOMEM;
	}
	req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->cpu_idx = 0;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
	skb->priority = 1;
	ep->com.tdev->send(ep->com.tdev, skb);
	return 0;
}

static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
			     void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_close_listserv_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);
	return CPL_RET_BUF_DONE;
}

static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u32 opt0h, opt0l, opt2;
	int wscale;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);

	rpl = cplhdr(skb);
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
	rpl->peer_ip = peer_ip;
	rpl->opt0h = htonl(opt0h);
	rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
	rpl->opt2 = htonl(opt2);
	rpl->rsvd = rpl->opt2;	/* workaround for HW bug */
	skb->priority = CPL_PRIORITY_SETUP;
	l2t_send(ep->com.tdev, skb, ep->l2t);

	return;
}

static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s t3cdev %p tid %u peer_ip %x\n", __FUNCTION__, tdev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);

	if (tdev->type == T3B)
		release_tid(tdev, hwtid, skb);
	else {
		struct cpl_pass_accept_rpl *rpl;

		rpl = cplhdr(skb);
		skb->priority = CPL_PRIORITY_SETUP;
		rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						      hwtid));
		rpl->peer_ip = peer_ip;
		rpl->opt0h = htonl(F_TCAM_BYPASS);
		rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
		rpl->opt2 = 0;
		rpl->rsvd = rpl->opt2;
		tdev->send(tdev, skb);
	}
}

static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *child_ep, *parent_ep = ctx;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	struct l2t_entry *l2t;
	struct rtable *rt;
	struct iff_mac tim;

	PDBG("%s parent ep %p tid %u\n", __FUNCTION__, parent_ep, hwtid);

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR MOD "%s - listening ep not in LISTEN\n",
		       __FUNCTION__);
		goto reject;
	}

	/*
	 * Find the netdev for this connection request.
	 */
	tim.mac_addr = req->dst_mac;
	tim.vlan_tag = ntohs(req->vlan_tag);
	if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
		printk(KERN_ERR
		       "%s bad dst mac %02x %02x %02x %02x %02x %02x\n",
		       __FUNCTION__,
		       req->dst_mac[0],
		       req->dst_mac[1],
		       req->dst_mac[2],
		       req->dst_mac[3],
		       req->dst_mac[4],
		       req->dst_mac[5]);
		goto reject;
	}

	/* Find output route */
	rt = find_route(tdev,
			req->local_ip,
			req->peer_ip,
			req->local_port,
			req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
	if (!rt) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __FUNCTION__);
		goto reject;
	}
	dst = &rt->u.dst;
	l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
	if (!l2t) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __FUNCTION__);
		dst_release(dst);
		goto reject;
	}
	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __FUNCTION__);
		l2t_release(L2DATA(tdev), l2t);
		dst_release(dst);
		goto reject;
	}
	state_set(&child_ep->com, CONNECTING);
	child_ep->com.tdev = tdev;
	child_ep->com.cm_id = NULL;
	child_ep->com.local_addr.sin_family = PF_INET;
	child_ep->com.local_addr.sin_port = req->local_port;
	child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
	child_ep->com.remote_addr.sin_family = PF_INET;
	child_ep->com.remote_addr.sin_port = req->peer_port;
	child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
	get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
	child_ep->l2t = l2t;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;
	init_timer(&child_ep->timer);
	cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
	accept_cr(child_ep, req->peer_ip, skb);
	goto out;
reject:
	reject_cr(tdev, hwtid, req->peer_ip, skb);
out:
	return CPL_RET_BUF_DONE;
}

static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_pass_establish *req = cplhdr(skb);

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	ep->snd_seq = ntohl(req->snd_isn);
	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);

	return CPL_RET_BUF_DONE;
}
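
/*
 * The peer sent a FIN.  What happens next depends on where the endpoint
 * state machine is: fail an in-progress MPA exchange, move an established
 * connection into CLOSING (driving the QP to CLOSING as well), or, if we
 * were already closing, finish the shutdown and release the endpoint.
 */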
static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int disconnect = 1;
	int release = 0;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	dst_confirm(ep->dst);

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR.
		 */
		__state_set(&ep->com, CLOSING);
		get_ep(&ep->com);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		attrs.next_state = IWCH_QP_STATE_CLOSING;
		iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		peer_close_upcall(ep);
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (disconnect)
		iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 */
static int is_neg_adv_abort(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}
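
/*
 * The peer aborted the connection.  Negative advice is ignored apart from
 * poking the L2T entry; otherwise fail any in-progress MPA exchange, move
 * an established QP to ERROR, and answer with a CPL_ABORT_RPL that
 * suppresses a further RST.
 */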
static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct iwch_ep *ep = ctx;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct iwch_qp_attributes attrs;
	int ret;
	int state;

	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %d\n", __FUNCTION__, ep,
		     ep->hwtid);
		t3_l2t_send_event(ep->com.tdev, ep->l2t);
		return CPL_RET_BUF_DONE;
	}

	state = state_read(&ep->com);
	PDBG("%s ep %p state %u\n", __FUNCTION__, ep, state);
	switch (state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		stop_ep_timer(ep);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REQ_RCVD:
		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR.
		 */
		get_ep(&ep->com);
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			ret = iwch_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __FUNCTION__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __FUNCTION__);
		return CPL_RET_BUF_DONE;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __FUNCTION__);
		dst_release(ep->dst);
		l2t_release(L2DATA(ep->com.tdev), ep->l2t);
		put_ep(&ep->com);
		return CPL_RET_BUF_DONE;
	}
	rpl_skb->priority = CPL_PRIORITY_DATA;
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
	rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	ep->com.tdev->send(ep->com.tdev, rpl_skb);
	if (state != ABORTING) {
		state_set(&ep->com, DEAD);
		release_ep_resources(ep);
	}
	return CPL_RET_BUF_DONE;
}

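/*
 * The hardware has completed our half of the close.  In CLOSING the peer
 * hasn't closed yet, so wait in MORIBUND; in MORIBUND both halves are
 * done, so idle the QP, tell the consumer, and release the endpoint.
 */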
static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int release = 0;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	BUG_ON(!ep);

	/* The cm_id may be null if we failed to connect */
	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
		break;
	case DEAD:
	default:
		BUG_ON(1);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * T3A does 3 things when a TERM is received:
 * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
 * 2) generate an async event on the QP with the TERMINATE opcode
 * 3) post a TERMINATE opcode cqe into the associated CQ.
 *
 * For (1), we save the message in the qp for later consumption by the
 * consumer.
 * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
 * For (3), we toss the CQE in cxio_poll_cq().
 *
 * terminate() handles case (1)...
 */
static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb_pull(skb, sizeof(struct cpl_rdma_terminate));
	PDBG("%s saving %d bytes of term msg\n", __FUNCTION__, skb->len);
	skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
				  skb->len);
	ep->com.qp->attr.terminate_msg_len = skb->len;
	ep->com.qp->attr.is_terminate_local = 0;
	return CPL_RET_BUF_DONE;
}

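/*
 * RDMA_EC_STATUS reports the result of a graceful close.  A non-zero
 * status means the close failed in hardware, so move the QP to ERROR
 * and abort the connection instead.
 */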
static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_rdma_ec_status *rep = cplhdr(skb);
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p tid %u status %d\n", __FUNCTION__, ep, ep->hwtid,
	     rep->status);
	if (rep->status) {
		struct iwch_qp_attributes attrs;

		printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
		       __FUNCTION__, ep->hwtid);
		stop_ep_timer(ep);
		attrs.next_state = IWCH_QP_STATE_ERROR;
		iwch_modify_qp(ep->com.qp->rhp,
			       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
			       &attrs, 1);
		abort_connection(ep, NULL, GFP_KERNEL);
	}
	return CPL_RET_BUF_DONE;
}

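/*
 * Connection-setup/teardown watchdog.  Runs in timer (softirq) context,
 * which is why abort_connection() below is called with GFP_ATOMIC.
 */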
static void ep_timeout(unsigned long arg)
{
	struct iwch_ep *ep = (struct iwch_ep *)arg;
	struct iwch_qp_attributes attrs;
	unsigned long flags;

	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p tid %u state %d\n", __FUNCTION__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			iwch_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		break;
	default:
		BUG();
	}
	__state_set(&ep->com, CLOSING);
	spin_unlock_irqrestore(&ep->com.lock, flags);
	abort_connection(ep, NULL, GFP_ATOMIC);
	put_ep(&ep->com);
}

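/*
 * iw_cm reject verb.  Only legal in MPA_REQ_RCVD; mpa_rev 0 has no reject
 * message, so the connection is simply aborted, otherwise an MPA reject
 * carrying the private data is sent and an orderly close started.
 */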
int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;
	struct iwch_ep *ep = to_ep(cm_id);

	PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);

	if (state_read(&ep->com) == DEAD) {
		put_ep(&ep->com);
		return -ECONNRESET;
	}
	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	}
	return 0;
}

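/*
 * iw_cm accept verb: validate the requested ird/ord against the device
 * limits, bind the cm_id and QP to the endpoint, send the MPA reply, and
 * move the QP to RTS with a single modify that also binds the LLP stream
 * (TID) via an INIT_WR.
 */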
int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	struct iwch_ep *ep = to_ep(cm_id);
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);
	if (state_read(&ep->com) == DEAD) {
		put_ep(&ep->com);
		return -ECONNRESET;
	}

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);

	if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
	    (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
		abort_connection(ep, NULL, GFP_KERNEL);
		return -EINVAL;
	}

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;
	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;
	PDBG("%s %d ird %d ord %d\n", __FUNCTION__, __LINE__, ep->ird, ep->ord);

	get_ep(&ep->com);
	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err) {
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
		cm_id->rem_ref(cm_id);
		abort_connection(ep, NULL, GFP_KERNEL);
		put_ep(&ep->com);
		return err;
	}

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE |
	       IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD |
	       IWCH_QP_ATTR_MAX_ORD;

	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err) {
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
		cm_id->rem_ref(cm_id);
		abort_connection(ep, NULL, GFP_KERNEL);
	} else {
		state_set(&ep->com, FPDU_MODE);
		established_upcall(ep);
	}
	put_ep(&ep->com);
	return err;
}

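/*
 * iw_cm connect verb (active open): allocate an endpoint and an active
 * TID, resolve a route and L2T entry to the peer, then issue the TCP
 * connect.  Setup continues in act_open_rpl()/act_establish() once the
 * hardware responds.
 */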
int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_ep *ep;
	struct rtable *rt;

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;
	ep->com.tdev = h->rdev.t3cdev_p;

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(h, conn_param->qpn);
	BUG_ON(!ep->com.qp);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __FUNCTION__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail2;
	}

	/* find a route */
	rt = find_route(h->rdev.t3cdev_p,
			cm_id->local_addr.sin_addr.s_addr,
			cm_id->remote_addr.sin_addr.s_addr,
			cm_id->local_addr.sin_port,
			cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __FUNCTION__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->u.dst;

	/* get a l2t entry */
	ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
			     ep->dst->neighbour->dev);
	if (!ep->l2t) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail4;
	}

	state_set(&ep->com, CONNECTING);
	ep->tos = IPTOS_LOWDELAY;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb3_free_atid(ep->com.tdev, ep->atid);
fail2:
	put_ep(&ep->com);
out:
	return err;
}

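/*
 * iw_cm listen verb (passive open): allocate a listening endpoint and a
 * server TID, start the hardware listener, and block until
 * pass_open_rpl() reports the result via rpl_done/rpl_err.
 */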
int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_listen_ep *ep;

	might_sleep();
	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	ep->com.tdev = h->rdev.t3cdev_p;
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;

	/*
	 * Allocate a server TID.
	 */
	ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __FUNCTION__);
		err = -ENOMEM;
		goto fail2;
	}

	state_set(&ep->com, LISTEN);
	err = listen_start(ep);
	if (err)
		goto fail3;

	/* wait for pass_open_rpl */
	wait_event(ep->com.waitq, ep->com.rpl_done);
	err = ep->com.rpl_err;
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
fail3:
	cxgb3_free_stid(ep->com.tdev, ep->stid);
fail2:
	put_ep(&ep->com);
fail1:
out:
	return err;
}

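/*
 * Tear down a listening endpoint: mark it DEAD so no new connection
 * requests are delivered, wait for the close-listen reply from the
 * hardware, then free the server TID and drop the references.
 */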
int iwch_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct iwch_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	err = listen_stop(ep);
	wait_event(ep->com.waitq, ep->com.rpl_done);
	cxgb3_free_stid(ep->com.tdev, ep->stid);
	err = ep->com.rpl_err;
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
	return err;
}

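/*
 * Begin tearing down a connection.  abrupt != 0 requests an immediate
 * abort (TCP RST); abrupt == 0 starts an orderly close (FIN) and arms
 * the ep timer so ep_timeout() can abort a peer that never completes
 * the close.  Any error from send_abort()/send_halfclose() is returned.
 */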
int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	unsigned long flags;
	int close = 0;

	spin_lock_irqsave(&ep->com.lock, flags);

	PDBG("%s ep %p state %s, abrupt %d\n", __FUNCTION__, ep,
	     states[ep->com.state], abrupt);

	if (ep->com.state == DEAD) {
		PDBG("%s already dead ep %p\n", __FUNCTION__, ep);
		goto out;
	}

	if (abrupt) {
		if (ep->com.state != ABORTING) {
			ep->com.state = ABORTING;
			close = 1;
		}
		goto out;
	}

	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		start_ep_timer(ep);
		ep->com.state = CLOSING;
		close = 1;
		break;
	case CLOSING:
		ep->com.state = MORIBUND;
		close = 1;
		break;
	case MORIBUND:
		break;
	default:
		BUG();
		break;
	}
out:
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (close) {
		if (abrupt)
			ret = send_abort(ep, NULL, gfp);
		else
			ret = send_halfclose(ep, gfp);
	}
	return ret;
}

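/*
 * Route-redirect callback from the cxgb3 core: if the endpoint is using
 * the old dst entry, switch it over to the new dst/l2t pair.  Returns 1
 * if the endpoint was updated, 0 if it wasn't ours to update.
 */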
int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
		     struct l2t_entry *l2t)
{
	struct iwch_ep *ep = ctx;

	if (ep->dst != old)
		return 0;

	PDBG("%s ep %p redirect to dst %p l2t %p\n", __FUNCTION__, ep, new,
	     l2t);
	dst_hold(new);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	ep->l2t = l2t;
	dst_release(old);
	ep->dst = new;
	return 1;
}

/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep_common *epc = ctx;

	get_ep(epc);

	/*
	 * Save ctx and tdev in the skb->cb area.
	 */
	*((void **) skb->cb) = ctx;
	*((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}

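/*
 * The skb_work handler that drains rxq (not shown in this excerpt) is
 * expected to undo the layout above, roughly:
 *
 *	void *ctx = *((void **) skb->cb);
 *	struct t3cdev *tdev =
 *		*((struct t3cdev **) (skb->cb + sizeof(void *)));
 *
 * The sketch only mirrors what sched() stores; the exact retrieval code
 * lives with the worker.
 */

/*
 * SET_TCB_RPL is handled inline (no work queue hop) since it only needs
 * to log unexpected statuses.
 */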
static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	return CPL_RET_BUF_DONE;
}

int __init iwch_cm_init(void)
{
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb3");
	if (!workq)
		return -ENOMEM;

	/*
	 * All upcalls from the T3 Core go to sched() to
	 * schedule the processing on a work queue.
	 */
	t3c_handlers[CPL_ACT_ESTABLISH] = sched;
	t3c_handlers[CPL_ACT_OPEN_RPL] = sched;
	t3c_handlers[CPL_RX_DATA] = sched;
	t3c_handlers[CPL_TX_DMA_ACK] = sched;
	t3c_handlers[CPL_ABORT_RPL_RSS] = sched;
	t3c_handlers[CPL_ABORT_RPL] = sched;
	t3c_handlers[CPL_PASS_OPEN_RPL] = sched;
	t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
	t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched;
	t3c_handlers[CPL_PASS_ESTABLISH] = sched;
	t3c_handlers[CPL_PEER_CLOSE] = sched;
	t3c_handlers[CPL_CLOSE_CON_RPL] = sched;
	t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
	t3c_handlers[CPL_RDMA_TERMINATE] = sched;
	t3c_handlers[CPL_RDMA_EC_STATUS] = sched;
	t3c_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;

	/*
	 * These are the real handlers that are called from a
	 * work queue.
	 */
	work_handlers[CPL_ACT_ESTABLISH] = act_establish;
	work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
	work_handlers[CPL_RX_DATA] = rx_data;
	work_handlers[CPL_TX_DMA_ACK] = tx_ack;
	work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
	work_handlers[CPL_ABORT_RPL] = abort_rpl;
	work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
	work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
	work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
	work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
	work_handlers[CPL_PEER_CLOSE] = peer_close;
	work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
	work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
	work_handlers[CPL_RDMA_TERMINATE] = terminate;
	work_handlers[CPL_RDMA_EC_STATUS] = ec_status;
	return 0;
}

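/*
 * Module teardown: drain any queued CPL work before destroying the
 * workqueue so no handler runs against a freed queue.
 */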
void __exit iwch_cm_term(void)
{
	flush_workqueue(workq);
	destroy_workqueue(workq);
}