cm.c

  1. /*
  2. * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #include <linux/module.h>
  33. #include <linux/list.h>
  34. #include <linux/workqueue.h>
  35. #include <linux/skbuff.h>
  36. #include <linux/timer.h>
  37. #include <linux/notifier.h>
  38. #include <linux/inetdevice.h>
  39. #include <linux/ip.h>
  40. #include <linux/tcp.h>
  41. #include <net/neighbour.h>
  42. #include <net/netevent.h>
  43. #include <net/route.h>
  44. #include "iw_cxgb4.h"
  45. static char *states[] = {
  46. "idle",
  47. "listen",
  48. "connecting",
  49. "mpa_wait_req",
  50. "mpa_req_sent",
  51. "mpa_req_rcvd",
  52. "mpa_rep_sent",
  53. "fpdu_mode",
  54. "aborting",
  55. "closing",
  56. "moribund",
  57. "dead",
  58. NULL,
  59. };
  60. static int nocong;
  61. module_param(nocong, int, 0644);
  62. MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");
  63. static int enable_ecn;
  64. module_param(enable_ecn, int, 0644);
  65. MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");
  66. static int dack_mode = 1;
  67. module_param(dack_mode, int, 0644);
  68. MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
  69. int c4iw_max_read_depth = 8;
  70. module_param(c4iw_max_read_depth, int, 0644);
  71. MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
  72. static int enable_tcp_timestamps;
  73. module_param(enable_tcp_timestamps, int, 0644);
  74. MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");
  75. static int enable_tcp_sack;
  76. module_param(enable_tcp_sack, int, 0644);
  77. MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");
  78. static int enable_tcp_window_scaling = 1;
  79. module_param(enable_tcp_window_scaling, int, 0644);
  80. MODULE_PARM_DESC(enable_tcp_window_scaling,
  81. "Enable tcp window scaling (default=1)");
  82. int c4iw_debug;
  83. module_param(c4iw_debug, int, 0644);
  84. MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");
  85. static int peer2peer;
  86. module_param(peer2peer, int, 0644);
  87. MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
  88. static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
  89. module_param(p2p_type, int, 0644);
  90. MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
  91. "1=RDMA_READ 0=RDMA_WRITE (default 1)");
  92. static int ep_timeout_secs = 60;
  93. module_param(ep_timeout_secs, int, 0644);
  94. MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
  95. "in seconds (default=60)");
  96. static int mpa_rev = 1;
  97. module_param(mpa_rev, int, 0644);
  98. MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
  99. "1 is RFC 5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
  100. " compliant (default=1)");
  101. static int markers_enabled;
  102. module_param(markers_enabled, int, 0644);
  103. MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");
  104. static int crc_enabled = 1;
  105. module_param(crc_enabled, int, 0644);
  106. MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");
  107. static int rcv_win = 256 * 1024;
  108. module_param(rcv_win, int, 0644);
  109. MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
  110. static int snd_win = 128 * 1024;
  111. module_param(snd_win, int, 0644);
  112. MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");
  113. static struct workqueue_struct *workq;
  114. static struct sk_buff_head rxq;
  115. static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
  116. static void ep_timeout(unsigned long arg);
  117. static void connect_reply_upcall(struct c4iw_ep *ep, int status);
  118. static LIST_HEAD(timeout_list);
  119. static spinlock_t timeout_lock;
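/*
 * Endpoint timer helpers.  start_ep_timer() takes a reference on the
 * endpoint (unless a timer is already pending, in which case it is
 * simply re-armed) so the ep cannot be freed while the timer is live;
 * stop_ep_timer() drops that reference.  ep_timeout() fires if the peer
 * does not complete the CM exchange within ep_timeout_secs.
 */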
  120. static void start_ep_timer(struct c4iw_ep *ep)
  121. {
  122. PDBG("%s ep %p\n", __func__, ep);
  123. if (timer_pending(&ep->timer)) {
  124. PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
  125. del_timer_sync(&ep->timer);
  126. } else
  127. c4iw_get_ep(&ep->com);
  128. ep->timer.expires = jiffies + ep_timeout_secs * HZ;
  129. ep->timer.data = (unsigned long)ep;
  130. ep->timer.function = ep_timeout;
  131. add_timer(&ep->timer);
  132. }
  133. static void stop_ep_timer(struct c4iw_ep *ep)
  134. {
  135. PDBG("%s ep %p\n", __func__, ep);
  136. if (!timer_pending(&ep->timer)) {
  137. WARN(1, "%s timer stopped when it's not running! "
  138. "ep %p state %u\n", __func__, ep, ep->com.state);
  139. return;
  140. }
  141. del_timer_sync(&ep->timer);
  142. c4iw_put_ep(&ep->com);
  143. }
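/*
 * Thin wrappers around the LLD transmit hooks: if the device is in a
 * fatal error state the skb is freed and -EIO returned, otherwise the
 * packet is handed to cxgb4 (via the L2T entry or the plain offload
 * queue) and the skb is freed on a negative return value.
 */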
  144. static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
  145. struct l2t_entry *l2e)
  146. {
  147. int error = 0;
  148. if (c4iw_fatal_error(rdev)) {
  149. kfree_skb(skb);
  150. PDBG("%s - device in error state - dropping\n", __func__);
  151. return -EIO;
  152. }
  153. error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
  154. if (error < 0)
  155. kfree_skb(skb);
  156. return error < 0 ? error : 0;
  157. }
  158. int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
  159. {
  160. int error = 0;
  161. if (c4iw_fatal_error(rdev)) {
  162. kfree_skb(skb);
  163. PDBG("%s - device in error state - dropping\n", __func__);
  164. return -EIO;
  165. }
  166. error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
  167. if (error < 0)
  168. kfree_skb(skb);
  169. return error < 0 ? error : 0;
  170. }
  171. static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
  172. {
  173. struct cpl_tid_release *req;
  174. skb = get_skb(skb, sizeof *req, GFP_KERNEL);
  175. if (!skb)
  176. return;
  177. req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
  178. INIT_TP_WR(req, hwtid);
  179. OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
  180. set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
  181. c4iw_ofld_send(rdev, skb);
  182. return;
  183. }
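/*
 * Derive the effective MSS from the TCP options echoed in the CPL:
 * start from the firmware MTU table entry selected by the MSS index,
 * subtract 40 bytes of IPv4 + TCP headers, and a further 12 bytes if
 * TCP timestamps were negotiated, with a floor of 128 bytes.  For
 * example, a 1500-byte MTU gives emss = 1460, or 1448 with timestamps.
 */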
  184. static void set_emss(struct c4iw_ep *ep, u16 opt)
  185. {
  186. ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
  187. ep->mss = ep->emss;
  188. if (GET_TCPOPT_TSTAMP(opt))
  189. ep->emss -= 12;
  190. if (ep->emss < 128)
  191. ep->emss = 128;
  192. PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
  193. ep->mss, ep->emss);
  194. }
  195. static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
  196. {
  197. enum c4iw_ep_state state;
  198. mutex_lock(&epc->mutex);
  199. state = epc->state;
  200. mutex_unlock(&epc->mutex);
  201. return state;
  202. }
  203. static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
  204. {
  205. epc->state = new;
  206. }
  207. static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
  208. {
  209. mutex_lock(&epc->mutex);
  210. PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
  211. __state_set(epc, new);
  212. mutex_unlock(&epc->mutex);
  213. return;
  214. }
  215. static void *alloc_ep(int size, gfp_t gfp)
  216. {
  217. struct c4iw_ep_common *epc;
  218. epc = kzalloc(size, gfp);
  219. if (epc) {
  220. kref_init(&epc->kref);
  221. mutex_init(&epc->mutex);
  222. c4iw_init_wr_wait(&epc->wr_wait);
  223. }
  224. PDBG("%s alloc ep %p\n", __func__, epc);
  225. return epc;
  226. }
  227. void _c4iw_free_ep(struct kref *kref)
  228. {
  229. struct c4iw_ep *ep;
  230. ep = container_of(kref, struct c4iw_ep, com.kref);
  231. PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
  232. if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
  233. cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
  234. dst_release(ep->dst);
  235. cxgb4_l2t_release(ep->l2t);
  236. }
  237. kfree(ep);
  238. }
  239. static void release_ep_resources(struct c4iw_ep *ep)
  240. {
  241. set_bit(RELEASE_RESOURCES, &ep->com.flags);
  242. c4iw_put_ep(&ep->com);
  243. }
  244. static int status2errno(int status)
  245. {
  246. switch (status) {
  247. case CPL_ERR_NONE:
  248. return 0;
  249. case CPL_ERR_CONN_RESET:
  250. return -ECONNRESET;
  251. case CPL_ERR_ARP_MISS:
  252. return -EHOSTUNREACH;
  253. case CPL_ERR_CONN_TIMEDOUT:
  254. return -ETIMEDOUT;
  255. case CPL_ERR_TCAM_FULL:
  256. return -ENOMEM;
  257. case CPL_ERR_CONN_EXIST:
  258. return -EADDRINUSE;
  259. default:
  260. return -EIO;
  261. }
  262. }
  263. /*
  264. * Try and reuse skbs already allocated...
  265. */
  266. static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
  267. {
  268. if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
  269. skb_trim(skb, 0);
  270. skb_get(skb);
  271. skb_reset_transport_header(skb);
  272. } else {
  273. skb = alloc_skb(len, gfp);
  274. }
  275. return skb;
  276. }
  277. static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
  278. __be32 peer_ip, __be16 local_port,
  279. __be16 peer_port, u8 tos)
  280. {
  281. struct rtable *rt;
  282. struct flowi4 fl4;
  283. rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
  284. peer_port, local_port, IPPROTO_TCP,
  285. tos, 0);
  286. if (IS_ERR(rt))
  287. return NULL;
  288. return rt;
  289. }
  290. static void arp_failure_discard(void *handle, struct sk_buff *skb)
  291. {
  292. PDBG("%s c4iw_dev %p\n", __func__, handle);
  293. kfree_skb(skb);
  294. }
  295. /*
  296. * Handle an ARP failure for an active open.
  297. */
  298. static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
  299. {
  300. printk(KERN_ERR MOD "ARP failure during connect\n");
  301. kfree_skb(skb);
  302. }
  303. /*
  304. * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
  305. * and send it along.
  306. */
  307. static void abort_arp_failure(void *handle, struct sk_buff *skb)
  308. {
  309. struct c4iw_rdev *rdev = handle;
  310. struct cpl_abort_req *req = cplhdr(skb);
  311. PDBG("%s rdev %p\n", __func__, rdev);
  312. req->cmd = CPL_ABORT_NO_RST;
  313. c4iw_ofld_send(rdev, skb);
  314. }
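/*
 * Send a FW_FLOWC_WR describing this offloaded flow to the hardware.
 * The 80-byte WR is an 8-byte header followed by nine 8-byte
 * mnemonic/value pairs: eight real parameters (PFN/VFN, channel, port,
 * ingress queue id, send/receive sequence numbers, send buffer and MSS)
 * plus one zeroed entry that pads the WR to a 16-byte boundary.
 */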
  315. static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
  316. {
  317. unsigned int flowclen = 80;
  318. struct fw_flowc_wr *flowc;
  319. int i;
  320. skb = get_skb(skb, flowclen, GFP_KERNEL);
  321. flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
  322. flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
  323. FW_FLOWC_WR_NPARAMS(8));
  324. flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
  325. 16)) | FW_WR_FLOWID(ep->hwtid));
  326. flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
  327. flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
  328. flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
  329. flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
  330. flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
  331. flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
  332. flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
  333. flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
  334. flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
  335. flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
  336. flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
  337. flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
  338. flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
  339. flowc->mnemval[6].val = cpu_to_be32(snd_win);
  340. flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
  341. flowc->mnemval[7].val = cpu_to_be32(ep->emss);
  342. /* Pad WR to 16 byte boundary */
  343. flowc->mnemval[8].mnemonic = 0;
  344. flowc->mnemval[8].val = 0;
  345. for (i = 0; i < 9; i++) {
  346. flowc->mnemval[i].r4[0] = 0;
  347. flowc->mnemval[i].r4[1] = 0;
  348. flowc->mnemval[i].r4[2] = 0;
  349. }
  350. set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
  351. c4iw_ofld_send(&ep->com.dev->rdev, skb);
  352. }
  353. static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
  354. {
  355. struct cpl_close_con_req *req;
  356. struct sk_buff *skb;
  357. int wrlen = roundup(sizeof *req, 16);
  358. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  359. skb = get_skb(NULL, wrlen, gfp);
  360. if (!skb) {
  361. printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
  362. return -ENOMEM;
  363. }
  364. set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
  365. t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
  366. req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
  367. memset(req, 0, wrlen);
  368. INIT_TP_WR(req, ep->hwtid);
  369. OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
  370. ep->hwtid));
  371. return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
  372. }
  373. static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
  374. {
  375. struct cpl_abort_req *req;
  376. int wrlen = roundup(sizeof *req, 16);
  377. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  378. skb = get_skb(skb, wrlen, gfp);
  379. if (!skb) {
  380. printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
  381. __func__);
  382. return -ENOMEM;
  383. }
  384. set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
  385. t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
  386. req = (struct cpl_abort_req *) skb_put(skb, wrlen);
  387. memset(req, 0, wrlen);
  388. INIT_TP_WR(req, ep->hwtid);
  389. OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
  390. req->cmd = CPL_ABORT_SEND_RST;
  391. return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
  392. }
  393. #define VLAN_NONE 0xfff
  394. #define FILTER_SEL_VLAN_NONE 0xffff
  395. #define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
  396. #define FILTER_SEL_WIDTH_VIN_P_FC \
  397. (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
  398. #define FILTER_SEL_WIDTH_TAG_P_FC \
  399. (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
  400. #define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
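/*
 * Build the filter tuple ("ntuple") the hardware matches for this
 * connection, laid out according to the adapter's filter mode.  In the
 * default HW_TPL_FR_MT_PR_IV_P_FC mode the VLAN tag (or the VLAN_NONE
 * sentinel) is placed above the port/FCoE bits and the protocol is
 * folded in at the valid-tag offset; the outer-VLAN mode instead
 * encodes the VIN/PFN/VIVLD fields taken from the port's VIID.
 */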
  401. static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
  402. struct l2t_entry *l2t)
  403. {
  404. unsigned int ntuple = 0;
  405. u32 viid;
  406. switch (dev->rdev.lldi.filt_mode) {
  407. /* default filter mode */
  408. case HW_TPL_FR_MT_PR_IV_P_FC:
  409. if (l2t->vlan == VLAN_NONE)
  410. ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
  411. else {
  412. ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
  413. ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
  414. }
  415. ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
  416. FILTER_SEL_WIDTH_VLD_TAG_P_FC;
  417. break;
  418. case HW_TPL_FR_MT_PR_OV_P_FC: {
  419. viid = cxgb4_port_viid(l2t->neigh->dev);
  420. ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
  421. ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
  422. ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
  423. ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
  424. FILTER_SEL_WIDTH_VLD_TAG_P_FC;
  425. break;
  426. }
  427. default:
  428. break;
  429. }
  430. return ntuple;
  431. }
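/*
 * Issue a CPL_ACT_OPEN_REQ to start an active TCP connection through
 * the hardware.  opt0 encodes per-connection TCB settings (MSS index,
 * window scale, L2T and Tx channel indices, DSCP, DDP ULP mode and the
 * receive buffer size in 1KB units); opt2 selects the RSS queue and the
 * negotiated TCP options.  compute_wscale() is expected to pick the
 * smallest shift that lets rcv_win fit in a 16-bit window, e.g. a
 * shift of 3 for the default 256KB receive window.
 */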
  432. static int send_connect(struct c4iw_ep *ep)
  433. {
  434. struct cpl_act_open_req *req;
  435. struct sk_buff *skb;
  436. u64 opt0;
  437. u32 opt2;
  438. unsigned int mtu_idx;
  439. int wscale;
  440. int wrlen = roundup(sizeof *req, 16);
  441. PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
  442. skb = get_skb(NULL, wrlen, GFP_KERNEL);
  443. if (!skb) {
  444. printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
  445. __func__);
  446. return -ENOMEM;
  447. }
  448. set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
  449. cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
  450. wscale = compute_wscale(rcv_win);
  451. opt0 = (nocong ? NO_CONG(1) : 0) |
  452. KEEP_ALIVE(1) |
  453. DELACK(1) |
  454. WND_SCALE(wscale) |
  455. MSS_IDX(mtu_idx) |
  456. L2T_IDX(ep->l2t->idx) |
  457. TX_CHAN(ep->tx_chan) |
  458. SMAC_SEL(ep->smac_idx) |
  459. DSCP(ep->tos) |
  460. ULP_MODE(ULP_MODE_TCPDDP) |
  461. RCV_BUFSIZ(rcv_win>>10);
  462. opt2 = RX_CHANNEL(0) |
  463. CCTRL_ECN(enable_ecn) |
  464. RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
  465. if (enable_tcp_timestamps)
  466. opt2 |= TSTAMPS_EN(1);
  467. if (enable_tcp_sack)
  468. opt2 |= SACK_EN(1);
  469. if (wscale && enable_tcp_window_scaling)
  470. opt2 |= WND_SCALE_EN(1);
  471. t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
  472. req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
  473. INIT_TP_WR(req, 0);
  474. OPCODE_TID(req) = cpu_to_be32(
  475. MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
  476. req->local_port = ep->com.local_addr.sin_port;
  477. req->peer_port = ep->com.remote_addr.sin_port;
  478. req->local_ip = ep->com.local_addr.sin_addr.s_addr;
  479. req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
  480. req->opt0 = cpu_to_be64(opt0);
  481. req->params = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, ep->l2t));
  482. req->opt2 = cpu_to_be32(opt2);
  483. return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
  484. }
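/*
 * Transmit the MPA start request as immediate data in a
 * FW_OFLD_TX_DATA_WR.  The payload is the mpa_message header (16-byte
 * key, flags, revision, private-data length), an optional
 * mpa_v2_conn_params block when negotiating MPA rev 2 (carrying our
 * IRD/ORD and the peer-to-peer RTR bits), and finally any ULP private
 * data; the whole WR is rounded up to a 16-byte multiple.
 */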
  485. static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
  486. u8 mpa_rev_to_use)
  487. {
  488. int mpalen, wrlen;
  489. struct fw_ofld_tx_data_wr *req;
  490. struct mpa_message *mpa;
  491. struct mpa_v2_conn_params mpa_v2_params;
  492. PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
  493. BUG_ON(skb_cloned(skb));
  494. mpalen = sizeof(*mpa) + ep->plen;
  495. if (mpa_rev_to_use == 2)
  496. mpalen += sizeof(struct mpa_v2_conn_params);
  497. wrlen = roundup(mpalen + sizeof *req, 16);
  498. skb = get_skb(skb, wrlen, GFP_KERNEL);
  499. if (!skb) {
  500. connect_reply_upcall(ep, -ENOMEM);
  501. return;
  502. }
  503. set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
  504. req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
  505. memset(req, 0, wrlen);
  506. req->op_to_immdlen = cpu_to_be32(
  507. FW_WR_OP(FW_OFLD_TX_DATA_WR) |
  508. FW_WR_COMPL(1) |
  509. FW_WR_IMMDLEN(mpalen));
  510. req->flowid_len16 = cpu_to_be32(
  511. FW_WR_FLOWID(ep->hwtid) |
  512. FW_WR_LEN16(wrlen >> 4));
  513. req->plen = cpu_to_be32(mpalen);
  514. req->tunnel_to_proxy = cpu_to_be32(
  515. FW_OFLD_TX_DATA_WR_FLUSH(1) |
  516. FW_OFLD_TX_DATA_WR_SHOVE(1));
  517. mpa = (struct mpa_message *)(req + 1);
  518. memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
  519. mpa->flags = (crc_enabled ? MPA_CRC : 0) |
  520. (markers_enabled ? MPA_MARKERS : 0) |
  521. (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
  522. mpa->private_data_size = htons(ep->plen);
  523. mpa->revision = mpa_rev_to_use;
  524. if (mpa_rev_to_use == 1) {
  525. ep->tried_with_mpa_v1 = 1;
  526. ep->retry_with_mpa_v1 = 0;
  527. }
  528. if (mpa_rev_to_use == 2) {
  529. mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
  530. sizeof (struct mpa_v2_conn_params));
  531. mpa_v2_params.ird = htons((u16)ep->ird);
  532. mpa_v2_params.ord = htons((u16)ep->ord);
  533. if (peer2peer) {
  534. mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
  535. if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
  536. mpa_v2_params.ord |=
  537. htons(MPA_V2_RDMA_WRITE_RTR);
  538. else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
  539. mpa_v2_params.ord |=
  540. htons(MPA_V2_RDMA_READ_RTR);
  541. }
  542. memcpy(mpa->private_data, &mpa_v2_params,
  543. sizeof(struct mpa_v2_conn_params));
  544. if (ep->plen)
  545. memcpy(mpa->private_data +
  546. sizeof(struct mpa_v2_conn_params),
  547. ep->mpa_pkt + sizeof(*mpa), ep->plen);
  548. } else
  549. if (ep->plen)
  550. memcpy(mpa->private_data,
  551. ep->mpa_pkt + sizeof(*mpa), ep->plen);
  552. /*
  553. * Reference the mpa skb. This ensures the data area
  554. * will remain in memory until the hw acks the tx.
  555. * Function fw4_ack() will deref it.
  556. */
  557. skb_get(skb);
  558. t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
  559. BUG_ON(ep->mpa_skb);
  560. ep->mpa_skb = skb;
  561. c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
  562. start_ep_timer(ep);
  563. state_set(&ep->com, MPA_REQ_SENT);
  564. ep->mpa_attr.initiator = 1;
  565. return;
  566. }
  567. static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
  568. {
  569. int mpalen, wrlen;
  570. struct fw_ofld_tx_data_wr *req;
  571. struct mpa_message *mpa;
  572. struct sk_buff *skb;
  573. struct mpa_v2_conn_params mpa_v2_params;
  574. PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
  575. mpalen = sizeof(*mpa) + plen;
  576. if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
  577. mpalen += sizeof(struct mpa_v2_conn_params);
  578. wrlen = roundup(mpalen + sizeof *req, 16);
  579. skb = get_skb(NULL, wrlen, GFP_KERNEL);
  580. if (!skb) {
  581. printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
  582. return -ENOMEM;
  583. }
  584. set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
  585. req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
  586. memset(req, 0, wrlen);
  587. req->op_to_immdlen = cpu_to_be32(
  588. FW_WR_OP(FW_OFLD_TX_DATA_WR) |
  589. FW_WR_COMPL(1) |
  590. FW_WR_IMMDLEN(mpalen));
  591. req->flowid_len16 = cpu_to_be32(
  592. FW_WR_FLOWID(ep->hwtid) |
  593. FW_WR_LEN16(wrlen >> 4));
  594. req->plen = cpu_to_be32(mpalen);
  595. req->tunnel_to_proxy = cpu_to_be32(
  596. FW_OFLD_TX_DATA_WR_FLUSH(1) |
  597. FW_OFLD_TX_DATA_WR_SHOVE(1));
  598. mpa = (struct mpa_message *)(req + 1);
  599. memset(mpa, 0, sizeof(*mpa));
  600. memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
  601. mpa->flags = MPA_REJECT;
  602. mpa->revision = mpa_rev;
  603. mpa->private_data_size = htons(plen);
  604. if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
  605. mpa->flags |= MPA_ENHANCED_RDMA_CONN;
  606. mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
  607. sizeof (struct mpa_v2_conn_params));
  608. mpa_v2_params.ird = htons(((u16)ep->ird) |
  609. (peer2peer ? MPA_V2_PEER2PEER_MODEL :
  610. 0));
  611. mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
  612. (p2p_type ==
  613. FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
  614. MPA_V2_RDMA_WRITE_RTR : p2p_type ==
  615. FW_RI_INIT_P2PTYPE_READ_REQ ?
  616. MPA_V2_RDMA_READ_RTR : 0) : 0));
  617. memcpy(mpa->private_data, &mpa_v2_params,
  618. sizeof(struct mpa_v2_conn_params));
  619. if (ep->plen)
  620. memcpy(mpa->private_data +
  621. sizeof(struct mpa_v2_conn_params), pdata, plen);
  622. } else
  623. if (plen)
  624. memcpy(mpa->private_data, pdata, plen);
  625. /*
  626. * Reference the mpa skb again. This ensures the data area
  627. * will remain in memory until the hw acks the tx.
  628. * Function fw4_ack() will deref it.
  629. */
  630. skb_get(skb);
  631. set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
  632. t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
  633. BUG_ON(ep->mpa_skb);
  634. ep->mpa_skb = skb;
  635. return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
  636. }
  637. static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
  638. {
  639. int mpalen, wrlen;
  640. struct fw_ofld_tx_data_wr *req;
  641. struct mpa_message *mpa;
  642. struct sk_buff *skb;
  643. struct mpa_v2_conn_params mpa_v2_params;
  644. PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
  645. mpalen = sizeof(*mpa) + plen;
  646. if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
  647. mpalen += sizeof(struct mpa_v2_conn_params);
  648. wrlen = roundup(mpalen + sizeof *req, 16);
  649. skb = get_skb(NULL, wrlen, GFP_KERNEL);
  650. if (!skb) {
  651. printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
  652. return -ENOMEM;
  653. }
  654. set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
  655. req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
  656. memset(req, 0, wrlen);
  657. req->op_to_immdlen = cpu_to_be32(
  658. FW_WR_OP(FW_OFLD_TX_DATA_WR) |
  659. FW_WR_COMPL(1) |
  660. FW_WR_IMMDLEN(mpalen));
  661. req->flowid_len16 = cpu_to_be32(
  662. FW_WR_FLOWID(ep->hwtid) |
  663. FW_WR_LEN16(wrlen >> 4));
  664. req->plen = cpu_to_be32(mpalen);
  665. req->tunnel_to_proxy = cpu_to_be32(
  666. FW_OFLD_TX_DATA_WR_FLUSH(1) |
  667. FW_OFLD_TX_DATA_WR_SHOVE(1));
  668. mpa = (struct mpa_message *)(req + 1);
  669. memset(mpa, 0, sizeof(*mpa));
  670. memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
  671. mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
  672. (markers_enabled ? MPA_MARKERS : 0);
  673. mpa->revision = ep->mpa_attr.version;
  674. mpa->private_data_size = htons(plen);
  675. if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
  676. mpa->flags |= MPA_ENHANCED_RDMA_CONN;
  677. mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
  678. sizeof (struct mpa_v2_conn_params));
  679. mpa_v2_params.ird = htons((u16)ep->ird);
  680. mpa_v2_params.ord = htons((u16)ep->ord);
  681. if (peer2peer && (ep->mpa_attr.p2p_type !=
  682. FW_RI_INIT_P2PTYPE_DISABLED)) {
  683. mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
  684. if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
  685. mpa_v2_params.ord |=
  686. htons(MPA_V2_RDMA_WRITE_RTR);
  687. else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
  688. mpa_v2_params.ord |=
  689. htons(MPA_V2_RDMA_READ_RTR);
  690. }
  691. memcpy(mpa->private_data, &mpa_v2_params,
  692. sizeof(struct mpa_v2_conn_params));
  693. if (ep->plen)
  694. memcpy(mpa->private_data +
  695. sizeof(struct mpa_v2_conn_params), pdata, plen);
  696. } else
  697. if (plen)
  698. memcpy(mpa->private_data, pdata, plen);
  699. /*
  700. * Reference the mpa skb. This ensures the data area
  701. * will remain in memory until the hw acks the tx.
  702. * Function fw4_ack() will deref it.
  703. */
  704. skb_get(skb);
  705. t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
  706. ep->mpa_skb = skb;
  707. state_set(&ep->com, MPA_REP_SENT);
  708. return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
  709. }
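/*
 * CPL_ACT_ESTABLISH: the active open completed.  Move the endpoint
 * from its atid to the hardware tid assigned by the chip, record the
 * initial send/receive sequence numbers, derive the MSS from the
 * echoed TCP options, then send the FLOWC and the MPA start request
 * (falling back to MPA rev 1 if a previous rev 2 attempt failed).
 */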
  710. static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
  711. {
  712. struct c4iw_ep *ep;
  713. struct cpl_act_establish *req = cplhdr(skb);
  714. unsigned int tid = GET_TID(req);
  715. unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
  716. struct tid_info *t = dev->rdev.lldi.tids;
  717. ep = lookup_atid(t, atid);
  718. PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
  719. be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
  720. dst_confirm(ep->dst);
  721. /* setup the hwtid for this connection */
  722. ep->hwtid = tid;
  723. cxgb4_insert_tid(t, ep, tid);
  724. ep->snd_seq = be32_to_cpu(req->snd_isn);
  725. ep->rcv_seq = be32_to_cpu(req->rcv_isn);
  726. set_emss(ep, ntohs(req->tcp_opt));
  727. /* dealloc the atid */
  728. cxgb4_free_atid(t, atid);
  729. /* start MPA negotiation */
  730. send_flowc(ep, NULL);
  731. if (ep->retry_with_mpa_v1)
  732. send_mpa_req(ep, skb, 1);
  733. else
  734. send_mpa_req(ep, skb, mpa_rev);
  735. return 0;
  736. }
  737. static void close_complete_upcall(struct c4iw_ep *ep)
  738. {
  739. struct iw_cm_event event;
  740. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  741. memset(&event, 0, sizeof(event));
  742. event.event = IW_CM_EVENT_CLOSE;
  743. if (ep->com.cm_id) {
  744. PDBG("close complete delivered ep %p cm_id %p tid %u\n",
  745. ep, ep->com.cm_id, ep->hwtid);
  746. ep->com.cm_id->event_handler(ep->com.cm_id, &event);
  747. ep->com.cm_id->rem_ref(ep->com.cm_id);
  748. ep->com.cm_id = NULL;
  749. ep->com.qp = NULL;
  750. }
  751. }
  752. static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
  753. {
  754. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  755. close_complete_upcall(ep);
  756. state_set(&ep->com, ABORTING);
  757. return send_abort(ep, skb, gfp);
  758. }
  759. static void peer_close_upcall(struct c4iw_ep *ep)
  760. {
  761. struct iw_cm_event event;
  762. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  763. memset(&event, 0, sizeof(event));
  764. event.event = IW_CM_EVENT_DISCONNECT;
  765. if (ep->com.cm_id) {
  766. PDBG("peer close delivered ep %p cm_id %p tid %u\n",
  767. ep, ep->com.cm_id, ep->hwtid);
  768. ep->com.cm_id->event_handler(ep->com.cm_id, &event);
  769. }
  770. }
  771. static void peer_abort_upcall(struct c4iw_ep *ep)
  772. {
  773. struct iw_cm_event event;
  774. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  775. memset(&event, 0, sizeof(event));
  776. event.event = IW_CM_EVENT_CLOSE;
  777. event.status = -ECONNRESET;
  778. if (ep->com.cm_id) {
  779. PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
  780. ep->com.cm_id, ep->hwtid);
  781. ep->com.cm_id->event_handler(ep->com.cm_id, &event);
  782. ep->com.cm_id->rem_ref(ep->com.cm_id);
  783. ep->com.cm_id = NULL;
  784. ep->com.qp = NULL;
  785. }
  786. }
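/*
 * Deliver IW_CM_EVENT_CONNECT_REPLY to the ULP.  On success or
 * -ECONNREFUSED the peer's private data is passed up as well; for an
 * MPA v2 exchange it starts after the mpa_v2_conn_params block, for
 * MPA v1 immediately after the mpa_message header.  On failure the
 * cm_id reference is dropped and the endpoint is unlinked from it.
 */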
  787. static void connect_reply_upcall(struct c4iw_ep *ep, int status)
  788. {
  789. struct iw_cm_event event;
  790. PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
  791. memset(&event, 0, sizeof(event));
  792. event.event = IW_CM_EVENT_CONNECT_REPLY;
  793. event.status = status;
  794. event.local_addr = ep->com.local_addr;
  795. event.remote_addr = ep->com.remote_addr;
  796. if ((status == 0) || (status == -ECONNREFUSED)) {
  797. if (!ep->tried_with_mpa_v1) {
  798. /* this means MPA_v2 is used */
  799. event.private_data_len = ep->plen -
  800. sizeof(struct mpa_v2_conn_params);
  801. event.private_data = ep->mpa_pkt +
  802. sizeof(struct mpa_message) +
  803. sizeof(struct mpa_v2_conn_params);
  804. } else {
  805. /* this means MPA_v1 is used */
  806. event.private_data_len = ep->plen;
  807. event.private_data = ep->mpa_pkt +
  808. sizeof(struct mpa_message);
  809. }
  810. }
  811. PDBG("%s ep %p tid %u status %d\n", __func__, ep,
  812. ep->hwtid, status);
  813. ep->com.cm_id->event_handler(ep->com.cm_id, &event);
  814. if (status < 0) {
  815. ep->com.cm_id->rem_ref(ep->com.cm_id);
  816. ep->com.cm_id = NULL;
  817. ep->com.qp = NULL;
  818. }
  819. }
  820. static void connect_request_upcall(struct c4iw_ep *ep)
  821. {
  822. struct iw_cm_event event;
  823. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  824. memset(&event, 0, sizeof(event));
  825. event.event = IW_CM_EVENT_CONNECT_REQUEST;
  826. event.local_addr = ep->com.local_addr;
  827. event.remote_addr = ep->com.remote_addr;
  828. event.provider_data = ep;
  829. if (!ep->tried_with_mpa_v1) {
  830. /* this means MPA_v2 is used */
  831. event.ord = ep->ord;
  832. event.ird = ep->ird;
  833. event.private_data_len = ep->plen -
  834. sizeof(struct mpa_v2_conn_params);
  835. event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
  836. sizeof(struct mpa_v2_conn_params);
  837. } else {
  838. /* this means MPA_v1 is used. Send max supported */
  839. event.ord = c4iw_max_read_depth;
  840. event.ird = c4iw_max_read_depth;
  841. event.private_data_len = ep->plen;
  842. event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
  843. }
  844. if (state_read(&ep->parent_ep->com) != DEAD) {
  845. c4iw_get_ep(&ep->com);
  846. ep->parent_ep->com.cm_id->event_handler(
  847. ep->parent_ep->com.cm_id,
  848. &event);
  849. }
  850. c4iw_put_ep(&ep->parent_ep->com);
  851. ep->parent_ep = NULL;
  852. }
  853. static void established_upcall(struct c4iw_ep *ep)
  854. {
  855. struct iw_cm_event event;
  856. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  857. memset(&event, 0, sizeof(event));
  858. event.event = IW_CM_EVENT_ESTABLISHED;
  859. event.ird = ep->ird;
  860. event.ord = ep->ord;
  861. if (ep->com.cm_id) {
  862. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  863. ep->com.cm_id->event_handler(ep->com.cm_id, &event);
  864. }
  865. }
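/*
 * Return receive credits to the hardware with a CPL_RX_DATA_ACK so it
 * keeps opening the TCP receive window as MPA data is consumed; the
 * delayed-ack behaviour follows the dack_mode module parameter.
 */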
  866. static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
  867. {
  868. struct cpl_rx_data_ack *req;
  869. struct sk_buff *skb;
  870. int wrlen = roundup(sizeof *req, 16);
  871. PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
  872. skb = get_skb(NULL, wrlen, GFP_KERNEL);
  873. if (!skb) {
  874. printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
  875. return 0;
  876. }
  877. req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
  878. memset(req, 0, wrlen);
  879. INIT_TP_WR(req, ep->hwtid);
  880. OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
  881. ep->hwtid));
  882. req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
  883. F_RX_DACK_CHANGE |
  884. V_RX_DACK_MODE(dack_mode));
  885. set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
  886. c4iw_ofld_send(&ep->com.dev->rdev, skb);
  887. return credits;
  888. }
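/*
 * Parse the peer's MPA start reply (active side).  Data may arrive in
 * several CPL_RX_DATA chunks, so it is accumulated in ep->mpa_pkt
 * until the full header plus private data is present.  The header is
 * then validated (revision, MPA_KEY_REP, private-data length), the
 * negotiated attributes are recorded and the QP is moved to RTS; an
 * MPA v2 RTR mismatch or insufficient IRD instead moves the QP to
 * TERMINATE and fails the connect.
 */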
  889. static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
  890. {
  891. struct mpa_message *mpa;
  892. struct mpa_v2_conn_params *mpa_v2_params;
  893. u16 plen;
  894. u16 resp_ird, resp_ord;
  895. u8 rtr_mismatch = 0, insuff_ird = 0;
  896. struct c4iw_qp_attributes attrs;
  897. enum c4iw_qp_attr_mask mask;
  898. int err;
  899. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  900. /*
  901. * Stop mpa timer. If it expired, then the state has
  902. * changed and we bail since ep_timeout already aborted
  903. * the connection.
  904. */
  905. stop_ep_timer(ep);
  906. if (state_read(&ep->com) != MPA_REQ_SENT)
  907. return;
  908. /*
  909. * If we get more than the supported amount of private data
  910. * then we must fail this connection.
  911. */
  912. if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
  913. err = -EINVAL;
  914. goto err;
  915. }
  916. /*
  917. * copy the new data into our accumulation buffer.
  918. */
  919. skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
  920. skb->len);
  921. ep->mpa_pkt_len += skb->len;
  922. /*
  923. * if we don't even have the mpa message, then bail.
  924. */
  925. if (ep->mpa_pkt_len < sizeof(*mpa))
  926. return;
  927. mpa = (struct mpa_message *) ep->mpa_pkt;
  928. /* Validate MPA header. */
  929. if (mpa->revision > mpa_rev) {
  930. printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
  931. " Received = %d\n", __func__, mpa_rev, mpa->revision);
  932. err = -EPROTO;
  933. goto err;
  934. }
  935. if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
  936. err = -EPROTO;
  937. goto err;
  938. }
  939. plen = ntohs(mpa->private_data_size);
  940. /*
  941. * Fail if there's too much private data.
  942. */
  943. if (plen > MPA_MAX_PRIVATE_DATA) {
  944. err = -EPROTO;
  945. goto err;
  946. }
  947. /*
  948. * If plen does not account for pkt size
  949. */
  950. if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
  951. err = -EPROTO;
  952. goto err;
  953. }
  954. ep->plen = (u8) plen;
  955. /*
  956. * If we don't have all the pdata yet, then bail.
  957. * We'll continue process when more data arrives.
  958. */
  959. if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
  960. return;
  961. if (mpa->flags & MPA_REJECT) {
  962. err = -ECONNREFUSED;
  963. goto err;
  964. }
  965. /*
  966. * If we get here we have accumulated the entire mpa
  967. * start reply message including private data. And
  968. * the MPA header is valid.
  969. */
  970. state_set(&ep->com, FPDU_MODE);
  971. ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
  972. ep->mpa_attr.recv_marker_enabled = markers_enabled;
  973. ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
  974. ep->mpa_attr.version = mpa->revision;
  975. ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
  976. if (mpa->revision == 2) {
  977. ep->mpa_attr.enhanced_rdma_conn =
  978. mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
  979. if (ep->mpa_attr.enhanced_rdma_conn) {
  980. mpa_v2_params = (struct mpa_v2_conn_params *)
  981. (ep->mpa_pkt + sizeof(*mpa));
  982. resp_ird = ntohs(mpa_v2_params->ird) &
  983. MPA_V2_IRD_ORD_MASK;
  984. resp_ord = ntohs(mpa_v2_params->ord) &
  985. MPA_V2_IRD_ORD_MASK;
  986. /*
  987. * This is a double-check. Ideally, below checks are
  988. * not required since ird/ord stuff has been taken
  989. * care of in c4iw_accept_cr
  990. */
  991. if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
  992. err = -ENOMEM;
  993. ep->ird = resp_ord;
  994. ep->ord = resp_ird;
  995. insuff_ird = 1;
  996. }
  997. if (ntohs(mpa_v2_params->ird) &
  998. MPA_V2_PEER2PEER_MODEL) {
  999. if (ntohs(mpa_v2_params->ord) &
  1000. MPA_V2_RDMA_WRITE_RTR)
  1001. ep->mpa_attr.p2p_type =
  1002. FW_RI_INIT_P2PTYPE_RDMA_WRITE;
  1003. else if (ntohs(mpa_v2_params->ord) &
  1004. MPA_V2_RDMA_READ_RTR)
  1005. ep->mpa_attr.p2p_type =
  1006. FW_RI_INIT_P2PTYPE_READ_REQ;
  1007. }
  1008. }
  1009. } else if (mpa->revision == 1)
  1010. if (peer2peer)
  1011. ep->mpa_attr.p2p_type = p2p_type;
  1012. PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
  1013. "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
  1014. "%d\n", __func__, ep->mpa_attr.crc_enabled,
  1015. ep->mpa_attr.recv_marker_enabled,
  1016. ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
  1017. ep->mpa_attr.p2p_type, p2p_type);
  1018. /*
  1019. * If responder's RTR does not match with that of initiator, assign
  1020. * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
  1021. * generated when moving QP to RTS state.
  1022. * A TERM message will be sent after QP has moved to RTS state
  1023. */
  1024. if ((ep->mpa_attr.version == 2) && peer2peer &&
  1025. (ep->mpa_attr.p2p_type != p2p_type)) {
  1026. ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
  1027. rtr_mismatch = 1;
  1028. }
  1029. attrs.mpa_attr = ep->mpa_attr;
  1030. attrs.max_ird = ep->ird;
  1031. attrs.max_ord = ep->ord;
  1032. attrs.llp_stream_handle = ep;
  1033. attrs.next_state = C4IW_QP_STATE_RTS;
  1034. mask = C4IW_QP_ATTR_NEXT_STATE |
  1035. C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
  1036. C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
  1037. /* bind QP and TID with INIT_WR */
  1038. err = c4iw_modify_qp(ep->com.qp->rhp,
  1039. ep->com.qp, mask, &attrs, 1);
  1040. if (err)
  1041. goto err;
  1042. /*
  1043. * If responder's RTR requirement did not match with what initiator
  1044. * supports, generate TERM message
  1045. */
  1046. if (rtr_mismatch) {
  1047. printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
  1048. attrs.layer_etype = LAYER_MPA | DDP_LLP;
  1049. attrs.ecode = MPA_NOMATCH_RTR;
  1050. attrs.next_state = C4IW_QP_STATE_TERMINATE;
  1051. err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
  1052. C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
  1053. err = -ENOMEM;
  1054. goto out;
  1055. }
  1056. /*
  1057. * Generate TERM if initiator IRD is not sufficient for responder
  1058. * provided ORD. Currently, we do the same behaviour even when
  1059. * responder provided IRD is also not sufficient as regards to
  1060. * initiator ORD.
  1061. */
  1062. if (insuff_ird) {
  1063. printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
  1064. __func__);
  1065. attrs.layer_etype = LAYER_MPA | DDP_LLP;
  1066. attrs.ecode = MPA_INSUFF_IRD;
  1067. attrs.next_state = C4IW_QP_STATE_TERMINATE;
  1068. err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
  1069. C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
  1070. err = -ENOMEM;
  1071. goto out;
  1072. }
  1073. goto out;
  1074. err:
  1075. state_set(&ep->com, ABORTING);
  1076. send_abort(ep, skb, GFP_KERNEL);
  1077. out:
  1078. connect_reply_upcall(ep, err);
  1079. return;
  1080. }
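/*
 * Passive-side counterpart of process_mpa_reply(): accumulate and
 * validate the peer's MPA start request, capture the requested IRD/ORD
 * and peer-to-peer type for MPA rev 2, then hand the connection to the
 * listening ULP via IW_CM_EVENT_CONNECT_REQUEST.
 */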
  1081. static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
  1082. {
  1083. struct mpa_message *mpa;
  1084. struct mpa_v2_conn_params *mpa_v2_params;
  1085. u16 plen;
  1086. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  1087. if (state_read(&ep->com) != MPA_REQ_WAIT)
  1088. return;
  1089. /*
  1090. * If we get more than the supported amount of private data
  1091. * then we must fail this connection.
  1092. */
  1093. if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
  1094. stop_ep_timer(ep);
  1095. abort_connection(ep, skb, GFP_KERNEL);
  1096. return;
  1097. }
  1098. PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
  1099. /*
  1100. * Copy the new data into our accumulation buffer.
  1101. */
  1102. skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
  1103. skb->len);
  1104. ep->mpa_pkt_len += skb->len;
  1105. /*
  1106. * If we don't even have the mpa message, then bail.
  1107. * We'll continue process when more data arrives.
  1108. */
  1109. if (ep->mpa_pkt_len < sizeof(*mpa))
  1110. return;
  1111. PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
  1112. stop_ep_timer(ep);
  1113. mpa = (struct mpa_message *) ep->mpa_pkt;
  1114. /*
  1115. * Validate MPA Header.
  1116. */
  1117. if (mpa->revision > mpa_rev) {
  1118. printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
  1119. " Received = %d\n", __func__, mpa_rev, mpa->revision);
  1120. abort_connection(ep, skb, GFP_KERNEL);
  1121. return;
  1122. }
  1123. if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
  1124. abort_connection(ep, skb, GFP_KERNEL);
  1125. return;
  1126. }
  1127. plen = ntohs(mpa->private_data_size);
  1128. /*
  1129. * Fail if there's too much private data.
  1130. */
  1131. if (plen > MPA_MAX_PRIVATE_DATA) {
  1132. abort_connection(ep, skb, GFP_KERNEL);
  1133. return;
  1134. }
  1135. /*
  1136. * If plen does not account for pkt size
  1137. */
  1138. if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
  1139. abort_connection(ep, skb, GFP_KERNEL);
  1140. return;
  1141. }
  1142. ep->plen = (u8) plen;
  1143. /*
  1144. * If we don't have all the pdata yet, then bail.
  1145. */
  1146. if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
  1147. return;
  1148. /*
  1149. * If we get here we have accumulated the entire mpa
  1150. * start reply message including private data.
  1151. */
  1152. ep->mpa_attr.initiator = 0;
  1153. ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
  1154. ep->mpa_attr.recv_marker_enabled = markers_enabled;
  1155. ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
  1156. ep->mpa_attr.version = mpa->revision;
  1157. if (mpa->revision == 1)
  1158. ep->tried_with_mpa_v1 = 1;
  1159. ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
  1160. if (mpa->revision == 2) {
  1161. ep->mpa_attr.enhanced_rdma_conn =
  1162. mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
  1163. if (ep->mpa_attr.enhanced_rdma_conn) {
  1164. mpa_v2_params = (struct mpa_v2_conn_params *)
  1165. (ep->mpa_pkt + sizeof(*mpa));
  1166. ep->ird = ntohs(mpa_v2_params->ird) &
  1167. MPA_V2_IRD_ORD_MASK;
  1168. ep->ord = ntohs(mpa_v2_params->ord) &
  1169. MPA_V2_IRD_ORD_MASK;
  1170. if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
  1171. if (peer2peer) {
  1172. if (ntohs(mpa_v2_params->ord) &
  1173. MPA_V2_RDMA_WRITE_RTR)
  1174. ep->mpa_attr.p2p_type =
  1175. FW_RI_INIT_P2PTYPE_RDMA_WRITE;
  1176. else if (ntohs(mpa_v2_params->ord) &
  1177. MPA_V2_RDMA_READ_RTR)
  1178. ep->mpa_attr.p2p_type =
  1179. FW_RI_INIT_P2PTYPE_READ_REQ;
  1180. }
  1181. }
  1182. } else if (mpa->revision == 1)
  1183. if (peer2peer)
  1184. ep->mpa_attr.p2p_type = p2p_type;
  1185. PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
  1186. "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
  1187. ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
  1188. ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
  1189. ep->mpa_attr.p2p_type);
  1190. state_set(&ep->com, MPA_REQ_RCVD);
  1191. /* drive upcall */
  1192. connect_request_upcall(ep);
  1193. return;
  1194. }
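/*
 * CPL_RX_DATA: streaming-mode data for a tid.  Only MPA negotiation
 * traffic is expected here; it is dispatched on the endpoint state,
 * and anything received in other states is dropped and left for
 * ep_timeout() to clean up.
 */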
  1195. static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
  1196. {
  1197. struct c4iw_ep *ep;
  1198. struct cpl_rx_data *hdr = cplhdr(skb);
  1199. unsigned int dlen = ntohs(hdr->len);
  1200. unsigned int tid = GET_TID(hdr);
  1201. struct tid_info *t = dev->rdev.lldi.tids;
  1202. ep = lookup_tid(t, tid);
  1203. PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
  1204. skb_pull(skb, sizeof(*hdr));
  1205. skb_trim(skb, dlen);
  1206. ep->rcv_seq += dlen;
  1207. BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
  1208. /* update RX credits */
  1209. update_rx_credits(ep, dlen);
  1210. switch (state_read(&ep->com)) {
  1211. case MPA_REQ_SENT:
  1212. process_mpa_reply(ep, skb);
  1213. break;
  1214. case MPA_REQ_WAIT:
  1215. process_mpa_request(ep, skb);
  1216. break;
  1217. case MPA_REP_SENT:
  1218. break;
  1219. default:
  1220. printk(KERN_ERR MOD "%s Unexpected streaming data."
  1221. " ep %p state %d tid %u\n",
  1222. __func__, ep, state_read(&ep->com), ep->hwtid);
  1223. /*
  1224. * The ep will timeout and inform the ULP of the failure.
  1225. * See ep_timeout().
  1226. */
  1227. break;
  1228. }
  1229. return 0;
  1230. }
  1231. static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
  1232. {
  1233. struct c4iw_ep *ep;
  1234. struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
  1235. int release = 0;
  1236. unsigned int tid = GET_TID(rpl);
  1237. struct tid_info *t = dev->rdev.lldi.tids;
  1238. ep = lookup_tid(t, tid);
  1239. if (!ep) {
  1240. printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
  1241. return 0;
  1242. }
  1243. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  1244. mutex_lock(&ep->com.mutex);
  1245. switch (ep->com.state) {
  1246. case ABORTING:
  1247. __state_set(&ep->com, DEAD);
  1248. release = 1;
  1249. break;
  1250. default:
1251. printk(KERN_ERR MOD "%s ep %p state %d\n",
  1252. __func__, ep, ep->com.state);
  1253. break;
  1254. }
  1255. mutex_unlock(&ep->com.mutex);
  1256. if (release)
  1257. release_ep_resources(ep);
  1258. return 0;
  1259. }
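/*
 * Retry an active open by handing the connection over to firmware with a
 * FW_OFLD_CONNECTION_WR.  Used by act_open_rpl() when the initial active
 * open fails with CPL_ERR_TCAM_FULL.
 */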
  1260. static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
  1261. {
  1262. struct sk_buff *skb;
  1263. struct fw_ofld_connection_wr *req;
  1264. unsigned int mtu_idx;
  1265. int wscale;
  1266. skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
  1267. req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
  1268. memset(req, 0, sizeof(*req));
  1269. req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
  1270. req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
  1271. req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst,
  1272. ep->l2t));
  1273. req->le.lport = ep->com.local_addr.sin_port;
  1274. req->le.pport = ep->com.remote_addr.sin_port;
  1275. req->le.u.ipv4.lip = ep->com.local_addr.sin_addr.s_addr;
  1276. req->le.u.ipv4.pip = ep->com.remote_addr.sin_addr.s_addr;
  1277. req->tcb.t_state_to_astid =
  1278. htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) |
  1279. V_FW_OFLD_CONNECTION_WR_ASTID(atid));
  1280. req->tcb.cplrxdataack_cplpassacceptrpl =
  1281. htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
  1282. req->tcb.tx_max = jiffies;
  1283. cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
  1284. wscale = compute_wscale(rcv_win);
  1285. req->tcb.opt0 = TCAM_BYPASS(1) |
  1286. (nocong ? NO_CONG(1) : 0) |
  1287. KEEP_ALIVE(1) |
  1288. DELACK(1) |
  1289. WND_SCALE(wscale) |
  1290. MSS_IDX(mtu_idx) |
  1291. L2T_IDX(ep->l2t->idx) |
  1292. TX_CHAN(ep->tx_chan) |
  1293. SMAC_SEL(ep->smac_idx) |
  1294. DSCP(ep->tos) |
  1295. ULP_MODE(ULP_MODE_TCPDDP) |
  1296. RCV_BUFSIZ(rcv_win >> 10);
  1297. req->tcb.opt2 = PACE(1) |
  1298. TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
  1299. RX_CHANNEL(0) |
  1300. CCTRL_ECN(enable_ecn) |
  1301. RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
  1302. if (enable_tcp_timestamps)
  1303. req->tcb.opt2 |= TSTAMPS_EN(1);
  1304. if (enable_tcp_sack)
  1305. req->tcb.opt2 |= SACK_EN(1);
  1306. if (wscale && enable_tcp_window_scaling)
  1307. req->tcb.opt2 |= WND_SCALE_EN(1);
  1308. req->tcb.opt0 = cpu_to_be64(req->tcb.opt0);
  1309. req->tcb.opt2 = cpu_to_be32(req->tcb.opt2);
  1310. set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
  1311. c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
  1312. }
  1313. /*
  1314. * Return whether a failed active open has allocated a TID
  1315. */
  1316. static inline int act_open_has_tid(int status)
  1317. {
  1318. return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
  1319. status != CPL_ERR_ARP_MISS;
  1320. }
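/*
 * Handle CPL_ACT_OPEN_RPL: the result of our active open request.  On a
 * TCAM-full failure, retry through firmware; otherwise report the failure
 * to the ULP and release the atid, L2T entry and route.
 */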
  1321. static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
  1322. {
  1323. struct c4iw_ep *ep;
  1324. struct cpl_act_open_rpl *rpl = cplhdr(skb);
  1325. unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
  1326. ntohl(rpl->atid_status)));
  1327. struct tid_info *t = dev->rdev.lldi.tids;
  1328. int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
  1329. ep = lookup_atid(t, atid);
  1330. PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
  1331. status, status2errno(status));
  1332. if (status == CPL_ERR_RTX_NEG_ADVICE) {
  1333. printk(KERN_WARNING MOD "Connection problems for atid %u\n",
  1334. atid);
  1335. return 0;
  1336. }
  1337. /*
  1338. * Log interesting failures.
  1339. */
  1340. switch (status) {
  1341. case CPL_ERR_CONN_RESET:
  1342. case CPL_ERR_CONN_TIMEDOUT:
  1343. break;
  1344. case CPL_ERR_TCAM_FULL:
  1345. mutex_lock(&dev->rdev.stats.lock);
  1346. dev->rdev.stats.tcam_full++;
  1347. mutex_unlock(&dev->rdev.stats.lock);
  1348. send_fw_act_open_req(ep,
  1349. GET_TID_TID(GET_AOPEN_ATID(ntohl(rpl->atid_status))));
  1350. return 0;
  1351. break;
  1352. default:
  1353. printk(KERN_INFO MOD "Active open failure - "
  1354. "atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
  1355. atid, status, status2errno(status),
  1356. &ep->com.local_addr.sin_addr.s_addr,
  1357. ntohs(ep->com.local_addr.sin_port),
  1358. &ep->com.remote_addr.sin_addr.s_addr,
  1359. ntohs(ep->com.remote_addr.sin_port));
  1360. break;
  1361. }
  1362. connect_reply_upcall(ep, status2errno(status));
  1363. state_set(&ep->com, DEAD);
  1364. if (status && act_open_has_tid(status))
  1365. cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
  1366. cxgb4_free_atid(t, atid);
  1367. dst_release(ep->dst);
  1368. cxgb4_l2t_release(ep->l2t);
  1369. c4iw_put_ep(&ep->com);
  1370. return 0;
  1371. }
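/*
 * Handle CPL_PASS_OPEN_RPL: completion of a listen request.  Wake up the
 * thread blocked in c4iw_create_listen() with the firmware status.
 */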
  1372. static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
  1373. {
  1374. struct cpl_pass_open_rpl *rpl = cplhdr(skb);
  1375. struct tid_info *t = dev->rdev.lldi.tids;
  1376. unsigned int stid = GET_TID(rpl);
  1377. struct c4iw_listen_ep *ep = lookup_stid(t, stid);
  1378. if (!ep) {
  1379. printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
  1380. return 0;
  1381. }
  1382. PDBG("%s ep %p status %d error %d\n", __func__, ep,
  1383. rpl->status, status2errno(rpl->status));
  1384. c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
  1385. return 0;
  1386. }
  1387. static int listen_stop(struct c4iw_listen_ep *ep)
  1388. {
  1389. struct sk_buff *skb;
  1390. struct cpl_close_listsvr_req *req;
  1391. PDBG("%s ep %p\n", __func__, ep);
  1392. skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
  1393. if (!skb) {
  1394. printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
  1395. return -ENOMEM;
  1396. }
  1397. req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
  1398. INIT_TP_WR(req, 0);
  1399. OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
  1400. ep->stid));
  1401. req->reply_ctrl = cpu_to_be16(
  1402. QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
  1403. set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
  1404. return c4iw_ofld_send(&ep->com.dev->rdev, skb);
  1405. }
  1406. static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
  1407. {
  1408. struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
  1409. struct tid_info *t = dev->rdev.lldi.tids;
  1410. unsigned int stid = GET_TID(rpl);
  1411. struct c4iw_listen_ep *ep = lookup_stid(t, stid);
  1412. PDBG("%s ep %p\n", __func__, ep);
  1413. c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
  1414. return 0;
  1415. }
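/*
 * Send the CPL_PASS_ACCEPT_RPL that accepts an incoming connection, with
 * MSS, window scaling, timestamps, SACK and ECN options derived from the
 * incoming SYN and the module parameters.
 */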
  1416. static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
  1417. struct cpl_pass_accept_req *req)
  1418. {
  1419. struct cpl_pass_accept_rpl *rpl;
  1420. unsigned int mtu_idx;
  1421. u64 opt0;
  1422. u32 opt2;
  1423. int wscale;
  1424. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  1425. BUG_ON(skb_cloned(skb));
  1426. skb_trim(skb, sizeof(*rpl));
  1427. skb_get(skb);
  1428. cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
  1429. wscale = compute_wscale(rcv_win);
  1430. opt0 = (nocong ? NO_CONG(1) : 0) |
  1431. KEEP_ALIVE(1) |
  1432. DELACK(1) |
  1433. WND_SCALE(wscale) |
  1434. MSS_IDX(mtu_idx) |
  1435. L2T_IDX(ep->l2t->idx) |
  1436. TX_CHAN(ep->tx_chan) |
  1437. SMAC_SEL(ep->smac_idx) |
  1438. DSCP(ep->tos >> 2) |
  1439. ULP_MODE(ULP_MODE_TCPDDP) |
  1440. RCV_BUFSIZ(rcv_win>>10);
  1441. opt2 = RX_CHANNEL(0) |
  1442. RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
  1443. if (enable_tcp_timestamps && req->tcpopt.tstamp)
  1444. opt2 |= TSTAMPS_EN(1);
  1445. if (enable_tcp_sack && req->tcpopt.sack)
  1446. opt2 |= SACK_EN(1);
  1447. if (wscale && enable_tcp_window_scaling)
  1448. opt2 |= WND_SCALE_EN(1);
  1449. if (enable_ecn) {
  1450. const struct tcphdr *tcph;
  1451. u32 hlen = ntohl(req->hdr_len);
  1452. tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
  1453. G_IP_HDR_LEN(hlen);
  1454. if (tcph->ece && tcph->cwr)
  1455. opt2 |= CCTRL_ECN(1);
  1456. }
  1457. rpl = cplhdr(skb);
  1458. INIT_TP_WR(rpl, ep->hwtid);
  1459. OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
  1460. ep->hwtid));
  1461. rpl->opt0 = cpu_to_be64(opt0);
  1462. rpl->opt2 = cpu_to_be32(opt2);
  1463. set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
  1464. c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
  1465. return;
  1466. }
  1467. static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
  1468. struct sk_buff *skb)
  1469. {
  1470. PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
  1471. peer_ip);
  1472. BUG_ON(skb_cloned(skb));
  1473. skb_trim(skb, sizeof(struct cpl_tid_release));
  1474. skb_get(skb);
  1475. release_tid(&dev->rdev, hwtid, skb);
  1476. return;
  1477. }
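/*
 * Extract the IPv4/TCP 4-tuple from the packet headers that follow the
 * CPL_PASS_ACCEPT_REQ.  Addresses and ports are returned in network byte
 * order.
 */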
  1478. static void get_4tuple(struct cpl_pass_accept_req *req,
  1479. __be32 *local_ip, __be32 *peer_ip,
  1480. __be16 *local_port, __be16 *peer_port)
  1481. {
  1482. int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
  1483. int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
  1484. struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
  1485. struct tcphdr *tcp = (struct tcphdr *)
  1486. ((u8 *)(req + 1) + eth_len + ip_len);
  1487. PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
  1488. ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
  1489. ntohs(tcp->dest));
  1490. *peer_ip = ip->saddr;
  1491. *local_ip = ip->daddr;
  1492. *peer_port = tcp->source;
  1493. *local_port = tcp->dest;
  1494. return;
  1495. }
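/*
 * Resolve the neighbour for the destination, grab an L2T entry and fill in
 * the endpoint's MTU, transmit channel, SMAC index and queue indices.
 * Loopback destinations are mapped back to the local port via ip_dev_find().
 */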
  1496. static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
  1497. struct c4iw_dev *cdev, bool clear_mpa_v1)
  1498. {
  1499. struct neighbour *n;
  1500. int err, step;
  1501. n = dst_neigh_lookup(dst, &peer_ip);
  1502. if (!n)
  1503. return -ENODEV;
  1504. rcu_read_lock();
  1505. err = -ENOMEM;
  1506. if (n->dev->flags & IFF_LOOPBACK) {
  1507. struct net_device *pdev;
  1508. pdev = ip_dev_find(&init_net, peer_ip);
  1509. if (!pdev) {
  1510. err = -ENODEV;
  1511. goto out;
  1512. }
  1513. ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
  1514. n, pdev, 0);
  1515. if (!ep->l2t)
  1516. goto out;
  1517. ep->mtu = pdev->mtu;
  1518. ep->tx_chan = cxgb4_port_chan(pdev);
  1519. ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
  1520. step = cdev->rdev.lldi.ntxq /
  1521. cdev->rdev.lldi.nchan;
  1522. ep->txq_idx = cxgb4_port_idx(pdev) * step;
  1523. step = cdev->rdev.lldi.nrxq /
  1524. cdev->rdev.lldi.nchan;
  1525. ep->ctrlq_idx = cxgb4_port_idx(pdev);
  1526. ep->rss_qid = cdev->rdev.lldi.rxq_ids[
  1527. cxgb4_port_idx(pdev) * step];
  1528. dev_put(pdev);
  1529. } else {
  1530. ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
  1531. n, n->dev, 0);
  1532. if (!ep->l2t)
  1533. goto out;
  1534. ep->mtu = dst_mtu(dst);
  1535. ep->tx_chan = cxgb4_port_chan(n->dev);
  1536. ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
  1537. step = cdev->rdev.lldi.ntxq /
  1538. cdev->rdev.lldi.nchan;
  1539. ep->txq_idx = cxgb4_port_idx(n->dev) * step;
  1540. ep->ctrlq_idx = cxgb4_port_idx(n->dev);
  1541. step = cdev->rdev.lldi.nrxq /
  1542. cdev->rdev.lldi.nchan;
  1543. ep->rss_qid = cdev->rdev.lldi.rxq_ids[
  1544. cxgb4_port_idx(n->dev) * step];
  1545. if (clear_mpa_v1) {
  1546. ep->retry_with_mpa_v1 = 0;
  1547. ep->tried_with_mpa_v1 = 0;
  1548. }
  1549. }
  1550. err = 0;
  1551. out:
  1552. rcu_read_unlock();
  1553. neigh_release(n);
  1554. return err;
  1555. }
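/*
 * Handle CPL_PASS_ACCEPT_REQ: a SYN arrived for one of our listening
 * endpoints.  Allocate a child endpoint, resolve the return route and L2T
 * entry, bind it to the hardware TID and send the accept reply; on any
 * failure release the TID instead.
 */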
  1556. static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
  1557. {
  1558. struct c4iw_ep *child_ep, *parent_ep;
  1559. struct cpl_pass_accept_req *req = cplhdr(skb);
  1560. unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
  1561. struct tid_info *t = dev->rdev.lldi.tids;
  1562. unsigned int hwtid = GET_TID(req);
  1563. struct dst_entry *dst;
  1564. struct rtable *rt;
  1565. __be32 local_ip, peer_ip;
  1566. __be16 local_port, peer_port;
  1567. int err;
  1568. parent_ep = lookup_stid(t, stid);
  1569. PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
  1570. get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);
  1571. if (state_read(&parent_ep->com) != LISTEN) {
1572. printk(KERN_ERR MOD "%s - listening ep not in LISTEN\n",
  1573. __func__);
  1574. goto reject;
  1575. }
  1576. /* Find output route */
  1577. rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
  1578. GET_POPEN_TOS(ntohl(req->tos_stid)));
  1579. if (!rt) {
  1580. printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
  1581. __func__);
  1582. goto reject;
  1583. }
  1584. dst = &rt->dst;
  1585. child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
  1586. if (!child_ep) {
  1587. printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
  1588. __func__);
  1589. dst_release(dst);
  1590. goto reject;
  1591. }
  1592. err = import_ep(child_ep, peer_ip, dst, dev, false);
  1593. if (err) {
  1594. printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
  1595. __func__);
  1596. dst_release(dst);
  1597. kfree(child_ep);
  1598. goto reject;
  1599. }
  1600. state_set(&child_ep->com, CONNECTING);
  1601. child_ep->com.dev = dev;
  1602. child_ep->com.cm_id = NULL;
  1603. child_ep->com.local_addr.sin_family = PF_INET;
  1604. child_ep->com.local_addr.sin_port = local_port;
  1605. child_ep->com.local_addr.sin_addr.s_addr = local_ip;
  1606. child_ep->com.remote_addr.sin_family = PF_INET;
  1607. child_ep->com.remote_addr.sin_port = peer_port;
  1608. child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
  1609. c4iw_get_ep(&parent_ep->com);
  1610. child_ep->parent_ep = parent_ep;
  1611. child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
  1612. child_ep->dst = dst;
  1613. child_ep->hwtid = hwtid;
  1614. PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
  1615. child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
  1616. init_timer(&child_ep->timer);
  1617. cxgb4_insert_tid(t, child_ep, hwtid);
  1618. accept_cr(child_ep, peer_ip, skb, req);
  1619. goto out;
  1620. reject:
  1621. reject_cr(dev, hwtid, peer_ip, skb);
  1622. out:
  1623. return 0;
  1624. }
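/*
 * Handle CPL_PASS_ESTABLISH: the passive TCP handshake completed.  Record
 * the initial sequence numbers, move to MPA_REQ_WAIT, start the MPA timer
 * and send the flowc work request.
 */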
  1625. static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
  1626. {
  1627. struct c4iw_ep *ep;
  1628. struct cpl_pass_establish *req = cplhdr(skb);
  1629. struct tid_info *t = dev->rdev.lldi.tids;
  1630. unsigned int tid = GET_TID(req);
  1631. ep = lookup_tid(t, tid);
  1632. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  1633. ep->snd_seq = be32_to_cpu(req->snd_isn);
  1634. ep->rcv_seq = be32_to_cpu(req->rcv_isn);
  1635. set_emss(ep, ntohs(req->tcp_opt));
  1636. dst_confirm(ep->dst);
  1637. state_set(&ep->com, MPA_REQ_WAIT);
  1638. start_ep_timer(ep);
  1639. send_flowc(ep, skb);
  1640. return 0;
  1641. }
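/*
 * Handle CPL_PEER_CLOSE: the peer sent a FIN.  Advance the close state
 * machine, move the QP towards CLOSING/IDLE as appropriate and issue the
 * peer-close/close-complete upcalls.
 */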
  1642. static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
  1643. {
  1644. struct cpl_peer_close *hdr = cplhdr(skb);
  1645. struct c4iw_ep *ep;
  1646. struct c4iw_qp_attributes attrs;
  1647. int disconnect = 1;
  1648. int release = 0;
  1649. struct tid_info *t = dev->rdev.lldi.tids;
  1650. unsigned int tid = GET_TID(hdr);
  1651. int ret;
  1652. ep = lookup_tid(t, tid);
  1653. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  1654. dst_confirm(ep->dst);
  1655. mutex_lock(&ep->com.mutex);
  1656. switch (ep->com.state) {
  1657. case MPA_REQ_WAIT:
  1658. __state_set(&ep->com, CLOSING);
  1659. break;
  1660. case MPA_REQ_SENT:
  1661. __state_set(&ep->com, CLOSING);
  1662. connect_reply_upcall(ep, -ECONNRESET);
  1663. break;
  1664. case MPA_REQ_RCVD:
  1665. /*
  1666. * We're gonna mark this puppy DEAD, but keep
  1667. * the reference on it until the ULP accepts or
  1668. * rejects the CR. Also wake up anyone waiting
  1669. * in rdma connection migration (see c4iw_accept_cr()).
  1670. */
  1671. __state_set(&ep->com, CLOSING);
  1672. PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
  1673. c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
  1674. break;
  1675. case MPA_REP_SENT:
  1676. __state_set(&ep->com, CLOSING);
  1677. PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
  1678. c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
  1679. break;
  1680. case FPDU_MODE:
  1681. start_ep_timer(ep);
  1682. __state_set(&ep->com, CLOSING);
  1683. attrs.next_state = C4IW_QP_STATE_CLOSING;
  1684. ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
  1685. C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
  1686. if (ret != -ECONNRESET) {
  1687. peer_close_upcall(ep);
  1688. disconnect = 1;
  1689. }
  1690. break;
  1691. case ABORTING:
  1692. disconnect = 0;
  1693. break;
  1694. case CLOSING:
  1695. __state_set(&ep->com, MORIBUND);
  1696. disconnect = 0;
  1697. break;
  1698. case MORIBUND:
  1699. stop_ep_timer(ep);
  1700. if (ep->com.cm_id && ep->com.qp) {
  1701. attrs.next_state = C4IW_QP_STATE_IDLE;
  1702. c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
  1703. C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
  1704. }
  1705. close_complete_upcall(ep);
  1706. __state_set(&ep->com, DEAD);
  1707. release = 1;
  1708. disconnect = 0;
  1709. break;
  1710. case DEAD:
  1711. disconnect = 0;
  1712. break;
  1713. default:
  1714. BUG_ON(1);
  1715. }
  1716. mutex_unlock(&ep->com.mutex);
  1717. if (disconnect)
  1718. c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
  1719. if (release)
  1720. release_ep_resources(ep);
  1721. return 0;
  1722. }
  1723. /*
  1724. * Returns whether an ABORT_REQ_RSS message is a negative advice.
  1725. */
  1726. static int is_neg_adv_abort(unsigned int status)
  1727. {
  1728. return status == CPL_ERR_RTX_NEG_ADVICE ||
  1729. status == CPL_ERR_PERSIST_NEG_ADVICE;
  1730. }
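/*
 * Re-initiate the TCP connection (new atid, route lookup, L2T entry) so the
 * MPA exchange can be retried with MPA v1.  Called from peer_abort() when
 * the peer aborted an MPA v2 attempt.
 */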
  1731. static int c4iw_reconnect(struct c4iw_ep *ep)
  1732. {
  1733. struct rtable *rt;
  1734. int err = 0;
  1735. PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
  1736. init_timer(&ep->timer);
  1737. /*
  1738. * Allocate an active TID to initiate a TCP connection.
  1739. */
  1740. ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
  1741. if (ep->atid == -1) {
  1742. printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
  1743. err = -ENOMEM;
  1744. goto fail2;
  1745. }
  1746. /* find a route */
  1747. rt = find_route(ep->com.dev,
  1748. ep->com.cm_id->local_addr.sin_addr.s_addr,
  1749. ep->com.cm_id->remote_addr.sin_addr.s_addr,
  1750. ep->com.cm_id->local_addr.sin_port,
  1751. ep->com.cm_id->remote_addr.sin_port, 0);
  1752. if (!rt) {
  1753. printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
  1754. err = -EHOSTUNREACH;
  1755. goto fail3;
  1756. }
  1757. ep->dst = &rt->dst;
  1758. err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr,
  1759. ep->dst, ep->com.dev, false);
  1760. if (err) {
  1761. printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
  1762. goto fail4;
  1763. }
  1764. PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
  1765. __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
  1766. ep->l2t->idx);
  1767. state_set(&ep->com, CONNECTING);
  1768. ep->tos = 0;
  1769. /* send connect request to rnic */
  1770. err = send_connect(ep);
  1771. if (!err)
  1772. goto out;
  1773. cxgb4_l2t_release(ep->l2t);
  1774. fail4:
  1775. dst_release(ep->dst);
  1776. fail3:
  1777. cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
  1778. fail2:
1779. /*
1780. * Remember to send a notification to the upper layer.
1781. * We got here because the upper layer is not aware that this is a
1782. * re-connect attempt, so it is still waiting for the response to
1783. * its first connect request.
1784. */
  1785. connect_reply_upcall(ep, -ECONNRESET);
  1786. c4iw_put_ep(&ep->com);
  1787. out:
  1788. return err;
  1789. }
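/*
 * Handle CPL_ABORT_REQ_RSS: the peer reset the connection.  Ignore negative
 * advice, move a bound QP to ERROR, notify the ULP and answer with a
 * CPL_ABORT_RPL.  If this was an MPA v2 attempt, retry with MPA v1 instead
 * of reporting the failure upwards.
 */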
  1790. static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
  1791. {
  1792. struct cpl_abort_req_rss *req = cplhdr(skb);
  1793. struct c4iw_ep *ep;
  1794. struct cpl_abort_rpl *rpl;
  1795. struct sk_buff *rpl_skb;
  1796. struct c4iw_qp_attributes attrs;
  1797. int ret;
  1798. int release = 0;
  1799. struct tid_info *t = dev->rdev.lldi.tids;
  1800. unsigned int tid = GET_TID(req);
  1801. ep = lookup_tid(t, tid);
  1802. if (is_neg_adv_abort(req->status)) {
  1803. PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
  1804. ep->hwtid);
  1805. return 0;
  1806. }
  1807. PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
  1808. ep->com.state);
1809. /*
1810. * Wake up any threads blocked in rdma_init() or rdma_fini().
1811. * This is not needed if the connection is still in the
1812. * MPA_REQ_SENT state.
1813. */
  1814. if (ep->com.state != MPA_REQ_SENT)
  1815. c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
  1816. mutex_lock(&ep->com.mutex);
  1817. switch (ep->com.state) {
  1818. case CONNECTING:
  1819. break;
  1820. case MPA_REQ_WAIT:
  1821. stop_ep_timer(ep);
  1822. break;
  1823. case MPA_REQ_SENT:
  1824. stop_ep_timer(ep);
  1825. if (mpa_rev == 2 && ep->tried_with_mpa_v1)
  1826. connect_reply_upcall(ep, -ECONNRESET);
  1827. else {
1828. /*
1829. * Don't send a notification upwards: we want to retry
1830. * with MPA v1 without the upper layers ever knowing
1831. * the first attempt failed.
1832. *
1833. * Do some housekeeping so the connection can be
1834. * re-initiated.
1835. */
  1836. PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
  1837. mpa_rev);
  1838. ep->retry_with_mpa_v1 = 1;
  1839. }
  1840. break;
  1841. case MPA_REP_SENT:
  1842. break;
  1843. case MPA_REQ_RCVD:
  1844. break;
  1845. case MORIBUND:
  1846. case CLOSING:
  1847. stop_ep_timer(ep);
  1848. /*FALLTHROUGH*/
  1849. case FPDU_MODE:
  1850. if (ep->com.cm_id && ep->com.qp) {
  1851. attrs.next_state = C4IW_QP_STATE_ERROR;
  1852. ret = c4iw_modify_qp(ep->com.qp->rhp,
  1853. ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
  1854. &attrs, 1);
  1855. if (ret)
  1856. printk(KERN_ERR MOD
  1857. "%s - qp <- error failed!\n",
  1858. __func__);
  1859. }
  1860. peer_abort_upcall(ep);
  1861. break;
  1862. case ABORTING:
  1863. break;
  1864. case DEAD:
  1865. PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
  1866. mutex_unlock(&ep->com.mutex);
  1867. return 0;
  1868. default:
  1869. BUG_ON(1);
  1870. break;
  1871. }
  1872. dst_confirm(ep->dst);
  1873. if (ep->com.state != ABORTING) {
  1874. __state_set(&ep->com, DEAD);
  1875. /* we don't release if we want to retry with mpa_v1 */
  1876. if (!ep->retry_with_mpa_v1)
  1877. release = 1;
  1878. }
  1879. mutex_unlock(&ep->com.mutex);
  1880. rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
  1881. if (!rpl_skb) {
  1882. printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
  1883. __func__);
  1884. release = 1;
  1885. goto out;
  1886. }
  1887. set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
  1888. rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
  1889. INIT_TP_WR(rpl, ep->hwtid);
  1890. OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
  1891. rpl->cmd = CPL_ABORT_NO_RST;
  1892. c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
  1893. out:
  1894. if (release)
  1895. release_ep_resources(ep);
  1896. /* retry with mpa-v1 */
  1897. if (ep && ep->retry_with_mpa_v1) {
  1898. cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
  1899. dst_release(ep->dst);
  1900. cxgb4_l2t_release(ep->l2t);
  1901. c4iw_reconnect(ep);
  1902. }
  1903. return 0;
  1904. }
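/*
 * Handle CPL_CLOSE_CON_RPL: our half-close completed.  In MORIBUND the
 * close is now fully done, so idle the QP, signal close-complete and
 * release the endpoint.
 */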
  1905. static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
  1906. {
  1907. struct c4iw_ep *ep;
  1908. struct c4iw_qp_attributes attrs;
  1909. struct cpl_close_con_rpl *rpl = cplhdr(skb);
  1910. int release = 0;
  1911. struct tid_info *t = dev->rdev.lldi.tids;
  1912. unsigned int tid = GET_TID(rpl);
  1913. ep = lookup_tid(t, tid);
  1914. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  1915. BUG_ON(!ep);
  1916. /* The cm_id may be null if we failed to connect */
  1917. mutex_lock(&ep->com.mutex);
  1918. switch (ep->com.state) {
  1919. case CLOSING:
  1920. __state_set(&ep->com, MORIBUND);
  1921. break;
  1922. case MORIBUND:
  1923. stop_ep_timer(ep);
  1924. if ((ep->com.cm_id) && (ep->com.qp)) {
  1925. attrs.next_state = C4IW_QP_STATE_IDLE;
  1926. c4iw_modify_qp(ep->com.qp->rhp,
  1927. ep->com.qp,
  1928. C4IW_QP_ATTR_NEXT_STATE,
  1929. &attrs, 1);
  1930. }
  1931. close_complete_upcall(ep);
  1932. __state_set(&ep->com, DEAD);
  1933. release = 1;
  1934. break;
  1935. case ABORTING:
  1936. case DEAD:
  1937. break;
  1938. default:
  1939. BUG_ON(1);
  1940. break;
  1941. }
  1942. mutex_unlock(&ep->com.mutex);
  1943. if (release)
  1944. release_ep_resources(ep);
  1945. return 0;
  1946. }
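/*
 * Handle CPL_RDMA_TERMINATE: the peer sent an RDMA TERMINATE message, so
 * move the associated QP into the TERMINATE state.
 */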
  1947. static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
  1948. {
  1949. struct cpl_rdma_terminate *rpl = cplhdr(skb);
  1950. struct tid_info *t = dev->rdev.lldi.tids;
  1951. unsigned int tid = GET_TID(rpl);
  1952. struct c4iw_ep *ep;
  1953. struct c4iw_qp_attributes attrs;
  1954. ep = lookup_tid(t, tid);
  1955. BUG_ON(!ep);
  1956. if (ep && ep->com.qp) {
  1957. printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
  1958. ep->com.qp->wq.sq.qid);
  1959. attrs.next_state = C4IW_QP_STATE_TERMINATE;
  1960. c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
  1961. C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
  1962. } else
  1963. printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
  1964. return 0;
  1965. }
1966. /*
1967. * Upcall from the adapter indicating data has been transmitted.
1968. * For us it's just the single MPA request or reply.  We can now free
1969. * the skb holding the MPA message.
1970. */
  1971. static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
  1972. {
  1973. struct c4iw_ep *ep;
  1974. struct cpl_fw4_ack *hdr = cplhdr(skb);
  1975. u8 credits = hdr->credits;
  1976. unsigned int tid = GET_TID(hdr);
  1977. struct tid_info *t = dev->rdev.lldi.tids;
  1978. ep = lookup_tid(t, tid);
  1979. PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
  1980. if (credits == 0) {
  1981. PDBG("%s 0 credit ack ep %p tid %u state %u\n",
  1982. __func__, ep, ep->hwtid, state_read(&ep->com));
  1983. return 0;
  1984. }
  1985. dst_confirm(ep->dst);
  1986. if (ep->mpa_skb) {
  1987. PDBG("%s last streaming msg ack ep %p tid %u state %u "
  1988. "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
  1989. state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
  1990. kfree_skb(ep->mpa_skb);
  1991. ep->mpa_skb = NULL;
  1992. }
  1993. return 0;
  1994. }
  1995. int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
  1996. {
  1997. int err;
  1998. struct c4iw_ep *ep = to_ep(cm_id);
  1999. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  2000. if (state_read(&ep->com) == DEAD) {
  2001. c4iw_put_ep(&ep->com);
  2002. return -ECONNRESET;
  2003. }
  2004. BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
  2005. if (mpa_rev == 0)
  2006. abort_connection(ep, NULL, GFP_KERNEL);
  2007. else {
  2008. err = send_mpa_reject(ep, pdata, pdata_len);
  2009. err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
  2010. }
  2011. c4iw_put_ep(&ep->com);
  2012. return 0;
  2013. }
  2014. int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
  2015. {
  2016. int err;
  2017. struct c4iw_qp_attributes attrs;
  2018. enum c4iw_qp_attr_mask mask;
  2019. struct c4iw_ep *ep = to_ep(cm_id);
  2020. struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
  2021. struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
  2022. PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  2023. if (state_read(&ep->com) == DEAD) {
  2024. err = -ECONNRESET;
  2025. goto err;
  2026. }
  2027. BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
  2028. BUG_ON(!qp);
  2029. if ((conn_param->ord > c4iw_max_read_depth) ||
  2030. (conn_param->ird > c4iw_max_read_depth)) {
  2031. abort_connection(ep, NULL, GFP_KERNEL);
  2032. err = -EINVAL;
  2033. goto err;
  2034. }
  2035. if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
  2036. if (conn_param->ord > ep->ird) {
  2037. ep->ird = conn_param->ird;
  2038. ep->ord = conn_param->ord;
  2039. send_mpa_reject(ep, conn_param->private_data,
  2040. conn_param->private_data_len);
  2041. abort_connection(ep, NULL, GFP_KERNEL);
  2042. err = -ENOMEM;
  2043. goto err;
  2044. }
  2045. if (conn_param->ird > ep->ord) {
  2046. if (!ep->ord)
  2047. conn_param->ird = 1;
  2048. else {
  2049. abort_connection(ep, NULL, GFP_KERNEL);
  2050. err = -ENOMEM;
  2051. goto err;
  2052. }
  2053. }
  2054. }
  2055. ep->ird = conn_param->ird;
  2056. ep->ord = conn_param->ord;
  2057. if (ep->mpa_attr.version != 2)
  2058. if (peer2peer && ep->ird == 0)
  2059. ep->ird = 1;
  2060. PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
  2061. cm_id->add_ref(cm_id);
  2062. ep->com.cm_id = cm_id;
  2063. ep->com.qp = qp;
  2064. /* bind QP to EP and move to RTS */
  2065. attrs.mpa_attr = ep->mpa_attr;
  2066. attrs.max_ird = ep->ird;
  2067. attrs.max_ord = ep->ord;
  2068. attrs.llp_stream_handle = ep;
  2069. attrs.next_state = C4IW_QP_STATE_RTS;
  2070. /* bind QP and TID with INIT_WR */
  2071. mask = C4IW_QP_ATTR_NEXT_STATE |
  2072. C4IW_QP_ATTR_LLP_STREAM_HANDLE |
  2073. C4IW_QP_ATTR_MPA_ATTR |
  2074. C4IW_QP_ATTR_MAX_IRD |
  2075. C4IW_QP_ATTR_MAX_ORD;
  2076. err = c4iw_modify_qp(ep->com.qp->rhp,
  2077. ep->com.qp, mask, &attrs, 1);
  2078. if (err)
  2079. goto err1;
  2080. err = send_mpa_reply(ep, conn_param->private_data,
  2081. conn_param->private_data_len);
  2082. if (err)
  2083. goto err1;
  2084. state_set(&ep->com, FPDU_MODE);
  2085. established_upcall(ep);
  2086. c4iw_put_ep(&ep->com);
  2087. return 0;
  2088. err1:
  2089. ep->com.cm_id = NULL;
  2090. ep->com.qp = NULL;
  2091. cm_id->rem_ref(cm_id);
  2092. err:
  2093. c4iw_put_ep(&ep->com);
  2094. return err;
  2095. }
  2096. int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
  2097. {
  2098. struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
  2099. struct c4iw_ep *ep;
  2100. struct rtable *rt;
  2101. int err = 0;
  2102. if ((conn_param->ord > c4iw_max_read_depth) ||
  2103. (conn_param->ird > c4iw_max_read_depth)) {
  2104. err = -EINVAL;
  2105. goto out;
  2106. }
  2107. ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
  2108. if (!ep) {
  2109. printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
  2110. err = -ENOMEM;
  2111. goto out;
  2112. }
  2113. init_timer(&ep->timer);
  2114. ep->plen = conn_param->private_data_len;
  2115. if (ep->plen)
  2116. memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
  2117. conn_param->private_data, ep->plen);
  2118. ep->ird = conn_param->ird;
  2119. ep->ord = conn_param->ord;
  2120. if (peer2peer && ep->ord == 0)
  2121. ep->ord = 1;
  2122. cm_id->add_ref(cm_id);
  2123. ep->com.dev = dev;
  2124. ep->com.cm_id = cm_id;
  2125. ep->com.qp = get_qhp(dev, conn_param->qpn);
  2126. BUG_ON(!ep->com.qp);
  2127. PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
  2128. ep->com.qp, cm_id);
  2129. /*
  2130. * Allocate an active TID to initiate a TCP connection.
  2131. */
  2132. ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
  2133. if (ep->atid == -1) {
  2134. printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
  2135. err = -ENOMEM;
  2136. goto fail2;
  2137. }
  2138. PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
  2139. ntohl(cm_id->local_addr.sin_addr.s_addr),
  2140. ntohs(cm_id->local_addr.sin_port),
  2141. ntohl(cm_id->remote_addr.sin_addr.s_addr),
  2142. ntohs(cm_id->remote_addr.sin_port));
  2143. /* find a route */
  2144. rt = find_route(dev,
  2145. cm_id->local_addr.sin_addr.s_addr,
  2146. cm_id->remote_addr.sin_addr.s_addr,
  2147. cm_id->local_addr.sin_port,
  2148. cm_id->remote_addr.sin_port, 0);
  2149. if (!rt) {
  2150. printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
  2151. err = -EHOSTUNREACH;
  2152. goto fail3;
  2153. }
  2154. ep->dst = &rt->dst;
  2155. err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr,
  2156. ep->dst, ep->com.dev, true);
  2157. if (err) {
  2158. printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
  2159. goto fail4;
  2160. }
  2161. PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
  2162. __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
  2163. ep->l2t->idx);
  2164. state_set(&ep->com, CONNECTING);
  2165. ep->tos = 0;
  2166. ep->com.local_addr = cm_id->local_addr;
  2167. ep->com.remote_addr = cm_id->remote_addr;
  2168. /* send connect request to rnic */
  2169. err = send_connect(ep);
  2170. if (!err)
  2171. goto out;
  2172. cxgb4_l2t_release(ep->l2t);
  2173. fail4:
  2174. dst_release(ep->dst);
  2175. fail3:
  2176. cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
  2177. fail2:
  2178. cm_id->rem_ref(cm_id);
  2179. c4iw_put_ep(&ep->com);
  2180. out:
  2181. return err;
  2182. }
  2183. int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
  2184. {
  2185. int err = 0;
  2186. struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
  2187. struct c4iw_listen_ep *ep;
  2188. might_sleep();
  2189. ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
  2190. if (!ep) {
  2191. printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
  2192. err = -ENOMEM;
  2193. goto fail1;
  2194. }
  2195. PDBG("%s ep %p\n", __func__, ep);
  2196. cm_id->add_ref(cm_id);
  2197. ep->com.cm_id = cm_id;
  2198. ep->com.dev = dev;
  2199. ep->backlog = backlog;
  2200. ep->com.local_addr = cm_id->local_addr;
  2201. /*
  2202. * Allocate a server TID.
  2203. */
  2204. ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
  2205. if (ep->stid == -1) {
  2206. printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
  2207. err = -ENOMEM;
  2208. goto fail2;
  2209. }
  2210. state_set(&ep->com, LISTEN);
  2211. c4iw_init_wr_wait(&ep->com.wr_wait);
  2212. err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
  2213. ep->com.local_addr.sin_addr.s_addr,
  2214. ep->com.local_addr.sin_port,
  2215. ep->com.dev->rdev.lldi.rxq_ids[0]);
  2216. if (err)
  2217. goto fail3;
  2218. /* wait for pass_open_rpl */
  2219. err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
  2220. __func__);
  2221. if (!err) {
  2222. cm_id->provider_data = ep;
  2223. goto out;
  2224. }
  2225. fail3:
  2226. cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
  2227. fail2:
  2228. cm_id->rem_ref(cm_id);
  2229. c4iw_put_ep(&ep->com);
  2230. fail1:
  2231. out:
  2232. return err;
  2233. }
  2234. int c4iw_destroy_listen(struct iw_cm_id *cm_id)
  2235. {
  2236. int err;
  2237. struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
  2238. PDBG("%s ep %p\n", __func__, ep);
  2239. might_sleep();
  2240. state_set(&ep->com, DEAD);
  2241. c4iw_init_wr_wait(&ep->com.wr_wait);
  2242. err = listen_stop(ep);
  2243. if (err)
  2244. goto done;
  2245. err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
  2246. __func__);
  2247. cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
  2248. done:
  2249. cm_id->rem_ref(cm_id);
  2250. c4iw_put_ep(&ep->com);
  2251. return err;
  2252. }
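/*
 * Start an orderly (half-close) or abortive disconnect of an endpoint,
 * depending on 'abrupt', guarding against duplicate closes with the
 * CLOSE_SENT flag.  On a fatal rdev error the endpoint is torn down
 * immediately.
 */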
  2253. int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
  2254. {
  2255. int ret = 0;
  2256. int close = 0;
  2257. int fatal = 0;
  2258. struct c4iw_rdev *rdev;
  2259. mutex_lock(&ep->com.mutex);
  2260. PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
  2261. states[ep->com.state], abrupt);
  2262. rdev = &ep->com.dev->rdev;
  2263. if (c4iw_fatal_error(rdev)) {
  2264. fatal = 1;
  2265. close_complete_upcall(ep);
  2266. ep->com.state = DEAD;
  2267. }
  2268. switch (ep->com.state) {
  2269. case MPA_REQ_WAIT:
  2270. case MPA_REQ_SENT:
  2271. case MPA_REQ_RCVD:
  2272. case MPA_REP_SENT:
  2273. case FPDU_MODE:
  2274. close = 1;
  2275. if (abrupt)
  2276. ep->com.state = ABORTING;
  2277. else {
  2278. ep->com.state = CLOSING;
  2279. start_ep_timer(ep);
  2280. }
  2281. set_bit(CLOSE_SENT, &ep->com.flags);
  2282. break;
  2283. case CLOSING:
  2284. if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
  2285. close = 1;
  2286. if (abrupt) {
  2287. stop_ep_timer(ep);
  2288. ep->com.state = ABORTING;
  2289. } else
  2290. ep->com.state = MORIBUND;
  2291. }
  2292. break;
  2293. case MORIBUND:
  2294. case ABORTING:
  2295. case DEAD:
  2296. PDBG("%s ignoring disconnect ep %p state %u\n",
  2297. __func__, ep, ep->com.state);
  2298. break;
  2299. default:
  2300. BUG();
  2301. break;
  2302. }
  2303. if (close) {
  2304. if (abrupt) {
  2305. close_complete_upcall(ep);
  2306. ret = send_abort(ep, NULL, gfp);
  2307. } else
  2308. ret = send_halfclose(ep, gfp);
  2309. if (ret)
  2310. fatal = 1;
  2311. }
  2312. mutex_unlock(&ep->com.mutex);
  2313. if (fatal)
  2314. release_ep_resources(ep);
  2315. return ret;
  2316. }
  2317. static int async_event(struct c4iw_dev *dev, struct sk_buff *skb)
  2318. {
  2319. struct cpl_fw6_msg *rpl = cplhdr(skb);
  2320. c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
  2321. return 0;
  2322. }
  2323. /*
  2324. * These are the real handlers that are called from a
  2325. * work queue.
  2326. */
  2327. static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
  2328. [CPL_ACT_ESTABLISH] = act_establish,
  2329. [CPL_ACT_OPEN_RPL] = act_open_rpl,
  2330. [CPL_RX_DATA] = rx_data,
  2331. [CPL_ABORT_RPL_RSS] = abort_rpl,
  2332. [CPL_ABORT_RPL] = abort_rpl,
  2333. [CPL_PASS_OPEN_RPL] = pass_open_rpl,
  2334. [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
  2335. [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
  2336. [CPL_PASS_ESTABLISH] = pass_establish,
  2337. [CPL_PEER_CLOSE] = peer_close,
  2338. [CPL_ABORT_REQ_RSS] = peer_abort,
  2339. [CPL_CLOSE_CON_RPL] = close_con_rpl,
  2340. [CPL_RDMA_TERMINATE] = terminate,
  2341. [CPL_FW4_ACK] = fw4_ack,
  2342. [CPL_FW6_MSG] = async_event
  2343. };
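/*
 * Handle an endpoint whose timer expired: report the timeout to the ULP
 * where appropriate, move the QP to ERROR for half-closed connections and
 * abort the connection.
 */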
  2344. static void process_timeout(struct c4iw_ep *ep)
  2345. {
  2346. struct c4iw_qp_attributes attrs;
  2347. int abort = 1;
  2348. mutex_lock(&ep->com.mutex);
  2349. PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
  2350. ep->com.state);
  2351. switch (ep->com.state) {
  2352. case MPA_REQ_SENT:
  2353. __state_set(&ep->com, ABORTING);
  2354. connect_reply_upcall(ep, -ETIMEDOUT);
  2355. break;
  2356. case MPA_REQ_WAIT:
  2357. __state_set(&ep->com, ABORTING);
  2358. break;
  2359. case CLOSING:
  2360. case MORIBUND:
  2361. if (ep->com.cm_id && ep->com.qp) {
  2362. attrs.next_state = C4IW_QP_STATE_ERROR;
  2363. c4iw_modify_qp(ep->com.qp->rhp,
  2364. ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
  2365. &attrs, 1);
  2366. }
  2367. __state_set(&ep->com, ABORTING);
  2368. break;
  2369. default:
  2370. WARN(1, "%s unexpected state ep %p tid %u state %u\n",
  2371. __func__, ep, ep->hwtid, ep->com.state);
  2372. abort = 0;
  2373. }
  2374. mutex_unlock(&ep->com.mutex);
  2375. if (abort)
  2376. abort_connection(ep, NULL, GFP_KERNEL);
  2377. c4iw_put_ep(&ep->com);
  2378. }
  2379. static void process_timedout_eps(void)
  2380. {
  2381. struct c4iw_ep *ep;
  2382. spin_lock_irq(&timeout_lock);
  2383. while (!list_empty(&timeout_list)) {
  2384. struct list_head *tmp;
  2385. tmp = timeout_list.next;
  2386. list_del(tmp);
  2387. spin_unlock_irq(&timeout_lock);
  2388. ep = list_entry(tmp, struct c4iw_ep, entry);
  2389. process_timeout(ep);
  2390. spin_lock_irq(&timeout_lock);
  2391. }
  2392. spin_unlock_irq(&timeout_lock);
  2393. }
  2394. static void process_work(struct work_struct *work)
  2395. {
  2396. struct sk_buff *skb = NULL;
  2397. struct c4iw_dev *dev;
  2398. struct cpl_act_establish *rpl;
  2399. unsigned int opcode;
  2400. int ret;
  2401. while ((skb = skb_dequeue(&rxq))) {
  2402. rpl = cplhdr(skb);
  2403. dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
  2404. opcode = rpl->ot.opcode;
  2405. BUG_ON(!work_handlers[opcode]);
  2406. ret = work_handlers[opcode](dev, skb);
  2407. if (!ret)
  2408. kfree_skb(skb);
  2409. }
  2410. process_timedout_eps();
  2411. }
  2412. static DECLARE_WORK(skb_work, process_work);
  2413. static void ep_timeout(unsigned long arg)
  2414. {
  2415. struct c4iw_ep *ep = (struct c4iw_ep *)arg;
  2416. spin_lock(&timeout_lock);
  2417. list_add_tail(&ep->entry, &timeout_list);
  2418. spin_unlock(&timeout_lock);
  2419. queue_work(workq, &skb_work);
  2420. }
  2421. /*
  2422. * All the CM events are handled on a work queue to have a safe context.
  2423. */
  2424. static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
  2425. {
  2426. /*
  2427. * Save dev in the skb->cb area.
  2428. */
  2429. *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;
  2430. /*
  2431. * Queue the skb and schedule the worker thread.
  2432. */
  2433. skb_queue_tail(&rxq, skb);
  2434. queue_work(workq, &skb_work);
  2435. return 0;
  2436. }
  2437. static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
  2438. {
  2439. struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
  2440. if (rpl->status != CPL_ERR_NONE) {
  2441. printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
  2442. "for tid %u\n", rpl->status, GET_TID(rpl));
  2443. }
  2444. kfree_skb(skb);
  2445. return 0;
  2446. }
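/*
 * Demultiplex CPL_FW6_MSG: wake up wr_wait completions for firmware
 * work-request replies, hand CQE notifications to the work queue, and
 * retry FW_OFLD_CONNECTION_WRs that failed with FW_ENOMEM/FW_EADDRINUSE
 * while in SYN_SENT.
 */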
  2447. static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
  2448. {
  2449. struct cpl_fw6_msg *rpl = cplhdr(skb);
  2450. struct c4iw_wr_wait *wr_waitp;
  2451. int ret;
  2452. u8 opcode;
  2453. struct cpl_fw6_msg_ofld_connection_wr_rpl *req;
  2454. struct c4iw_ep *ep;
  2455. PDBG("%s type %u\n", __func__, rpl->type);
  2456. switch (rpl->type) {
  2457. case FW6_TYPE_WR_RPL:
  2458. ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
  2459. wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
  2460. PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
  2461. if (wr_waitp)
  2462. c4iw_wake_up(wr_waitp, ret ? -ret : 0);
  2463. kfree_skb(skb);
  2464. break;
  2465. case FW6_TYPE_CQE:
  2466. sched(dev, skb);
  2467. break;
  2468. case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
  2469. opcode = *(const u8 *)rpl->data;
  2470. if (opcode == FW_OFLD_CONNECTION_WR) {
  2471. req =
  2472. (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
  2473. if (req->t_state == TCP_SYN_SENT
  2474. && (req->retval == FW_ENOMEM
  2475. || req->retval == FW_EADDRINUSE)) {
  2476. ep = (struct c4iw_ep *)
  2477. lookup_atid(dev->rdev.lldi.tids,
  2478. req->tid);
  2479. c4iw_l2t_send(&dev->rdev, skb, ep->l2t);
  2480. return 0;
  2481. }
  2482. }
  2483. break;
  2484. default:
  2485. printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
  2486. rpl->type);
  2487. kfree_skb(skb);
  2488. break;
  2489. }
  2490. return 0;
  2491. }
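/*
 * Handle CPL_ABORT_REQ_RSS in the receive path (rather than via sched())
 * so that any thread blocked in rdma_init()/rdma_fini() is woken up
 * immediately; the abort itself is then processed by peer_abort() from
 * the work queue.
 */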
  2492. static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
  2493. {
  2494. struct cpl_abort_req_rss *req = cplhdr(skb);
  2495. struct c4iw_ep *ep;
  2496. struct tid_info *t = dev->rdev.lldi.tids;
  2497. unsigned int tid = GET_TID(req);
  2498. ep = lookup_tid(t, tid);
  2499. if (!ep) {
  2500. printk(KERN_WARNING MOD
  2501. "Abort on non-existent endpoint, tid %d\n", tid);
  2502. kfree_skb(skb);
  2503. return 0;
  2504. }
  2505. if (is_neg_adv_abort(req->status)) {
  2506. PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
  2507. ep->hwtid);
  2508. kfree_skb(skb);
  2509. return 0;
  2510. }
  2511. PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
  2512. ep->com.state);
  2513. /*
  2514. * Wake up any threads in rdma_init() or rdma_fini().
  2515. */
  2516. c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
  2517. sched(dev, skb);
  2518. return 0;
  2519. }
  2520. /*
  2521. * Most upcalls from the T4 Core go to sched() to
  2522. * schedule the processing on a work queue.
  2523. */
  2524. c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
  2525. [CPL_ACT_ESTABLISH] = sched,
  2526. [CPL_ACT_OPEN_RPL] = sched,
  2527. [CPL_RX_DATA] = sched,
  2528. [CPL_ABORT_RPL_RSS] = sched,
  2529. [CPL_ABORT_RPL] = sched,
  2530. [CPL_PASS_OPEN_RPL] = sched,
  2531. [CPL_CLOSE_LISTSRV_RPL] = sched,
  2532. [CPL_PASS_ACCEPT_REQ] = sched,
  2533. [CPL_PASS_ESTABLISH] = sched,
  2534. [CPL_PEER_CLOSE] = sched,
  2535. [CPL_CLOSE_CON_RPL] = sched,
  2536. [CPL_ABORT_REQ_RSS] = peer_abort_intr,
  2537. [CPL_RDMA_TERMINATE] = sched,
  2538. [CPL_FW4_ACK] = sched,
  2539. [CPL_SET_TCB_RPL] = set_tcb_rpl,
  2540. [CPL_FW6_MSG] = fw6_msg
  2541. };
  2542. int __init c4iw_cm_init(void)
  2543. {
  2544. spin_lock_init(&timeout_lock);
  2545. skb_queue_head_init(&rxq);
  2546. workq = create_singlethread_workqueue("iw_cxgb4");
  2547. if (!workq)
  2548. return -ENOMEM;
  2549. return 0;
  2550. }
  2551. void __exit c4iw_cm_term(void)
  2552. {
  2553. WARN_ON(!list_empty(&timeout_list));
  2554. flush_workqueue(workq);
  2555. destroy_workqueue(workq);
  2556. }