/*
 * bnx2fc_els.c: Broadcom NetXtreme II Linux FCoE offload driver.
 * This file contains helper routines that handle ELS requests
 * and responses.
 *
 * Copyright (c) 2008 - 2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */
#include "bnx2fc.h"

static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
			     void *arg);
static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
			      void *arg);
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
			       void *data, u32 data_len,
			       void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
			       struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec);
/*
 * bnx2fc_rrq_compl() - completion callback for an ELS RRQ request.
 * @cb_arg: carries the RRQ command (io_req) and the aborted I/O request
 *	    (aborted_io_req) on whose behalf the RRQ was sent.
 *
 * Drops the reference held on the original (aborted) I/O request for the
 * duration of the RRQ exchange.  If the RRQ itself timed out, the exchange
 * is removed from the active queue and cleaned up with the firmware, and
 * the completion is dropped.  Frees @cb_arg in all cases.
 * NOTE(review): appears to be invoked from bnx2fc_process_els_compl() with
 * tgt->tgt_lock held - verify against the caller.
 */
static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *orig_io_req;
	struct bnx2fc_cmd *rrq_req;
	int rc = 0;

	BUG_ON(!cb_arg);
	rrq_req = cb_arg->io_req;
	orig_io_req = cb_arg->aborted_io_req;
	BUG_ON(!orig_io_req);
	BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n",
		       orig_io_req->xid, rrq_req->xid);

	/* Drop the hold taken on the aborted I/O when the RRQ was issued */
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);

	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) {
		/*
		 * els req is timed out. cleanup the IO with FW and
		 * drop the completion. Remove from active_cmd_queue.
		 */
		BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n",
			       rrq_req->xid);
		if (rrq_req->on_active_queue) {
			list_del_init(&rrq_req->link);
			rrq_req->on_active_queue = 0;
			rc = bnx2fc_initiate_cleanup(rrq_req);
			BUG_ON(rc);
		}
	}
	kfree(cb_arg);
}
  51. int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
  52. {
  53. struct fc_els_rrq rrq;
  54. struct bnx2fc_rport *tgt = aborted_io_req->tgt;
  55. struct fc_lport *lport = tgt->rdata->local_port;
  56. struct bnx2fc_els_cb_arg *cb_arg = NULL;
  57. u32 sid = tgt->sid;
  58. u32 r_a_tov = lport->r_a_tov;
  59. unsigned long start = jiffies;
  60. int rc;
  61. BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
  62. aborted_io_req->xid);
  63. memset(&rrq, 0, sizeof(rrq));
  64. cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO);
  65. if (!cb_arg) {
  66. printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n");
  67. rc = -ENOMEM;
  68. goto rrq_err;
  69. }
  70. cb_arg->aborted_io_req = aborted_io_req;
  71. rrq.rrq_cmd = ELS_RRQ;
  72. hton24(rrq.rrq_s_id, sid);
  73. rrq.rrq_ox_id = htons(aborted_io_req->xid);
  74. rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id);
  75. retry_rrq:
  76. rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
  77. bnx2fc_rrq_compl, cb_arg,
  78. r_a_tov);
  79. if (rc == -ENOMEM) {
  80. if (time_after(jiffies, start + (10 * HZ))) {
  81. BNX2FC_ELS_DBG("rrq Failed\n");
  82. rc = FAILED;
  83. goto rrq_err;
  84. }
  85. msleep(20);
  86. goto retry_rrq;
  87. }
  88. rrq_err:
  89. if (rc) {
  90. BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n",
  91. aborted_io_req->xid);
  92. kfree(cb_arg);
  93. spin_lock_bh(&tgt->tgt_lock);
  94. kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release);
  95. spin_unlock_bh(&tgt->tgt_lock);
  96. }
  97. return rc;
  98. }
/*
 * bnx2fc_l2_els_compl() - completion handler for libfc-originated (L2) ELS
 *	requests (ADISC/LOGO/RLS) that were offloaded to the firmware.
 * @cb_arg: carries the ELS command (io_req) and the original libfc OX_ID
 *	    (l2_oxid) so the response can be handed back to libfc.
 *
 * On normal completion, rebuilds a contiguous frame (FC header + payload)
 * in a PAGE_SIZE staging buffer and hands it to
 * bnx2fc_process_l2_frame_compl().  If the ELS timed out, the exchange is
 * cleaned up with the firmware and the completion is dropped (libfc owns
 * the ELS timeout handling).  Frees @cb_arg in all cases.
 */
static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *els_req;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	unsigned char *buf;
	void *resp_buf;
	u32 resp_len, hdr_len;
	u16 l2_oxid;
	int frame_len;
	int rc = 0;

	l2_oxid = cb_arg->l2_oxid;
	BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid);

	els_req = cb_arg->io_req;
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) {
		/*
		 * els req is timed out. cleanup the IO with FW and
		 * drop the completion. libfc will handle the els timeout
		 */
		if (els_req->on_active_queue) {
			list_del_init(&els_req->link);
			els_req->on_active_queue = 0;
			rc = bnx2fc_initiate_cleanup(els_req);
			BUG_ON(rc);
		}
		goto free_arg;
	}

	tgt = els_req->tgt;
	mp_req = &(els_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "Unable to alloc mp buf\n");
		goto free_arg;
	}
	hdr_len = sizeof(*fc_hdr);
	/* resp_len is firmware-reported; bound it before copying */
	if (hdr_len + resp_len > PAGE_SIZE) {
		printk(KERN_ERR PFX "l2_els_compl: resp len is "
				    "beyond page size\n");
		goto free_buf;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);
	frame_len = hdr_len + resp_len;

	bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid);

free_buf:
	kfree(buf);
free_arg:
	kfree(cb_arg);
}
  152. int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp)
  153. {
  154. struct fc_els_adisc *adisc;
  155. struct fc_frame_header *fh;
  156. struct bnx2fc_els_cb_arg *cb_arg;
  157. struct fc_lport *lport = tgt->rdata->local_port;
  158. u32 r_a_tov = lport->r_a_tov;
  159. int rc;
  160. fh = fc_frame_header_get(fp);
  161. cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
  162. if (!cb_arg) {
  163. printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n");
  164. return -ENOMEM;
  165. }
  166. cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
  167. BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
  168. adisc = fc_frame_payload_get(fp, sizeof(*adisc));
  169. /* adisc is initialized by libfc */
  170. rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc),
  171. bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
  172. if (rc)
  173. kfree(cb_arg);
  174. return rc;
  175. }
  176. int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp)
  177. {
  178. struct fc_els_logo *logo;
  179. struct fc_frame_header *fh;
  180. struct bnx2fc_els_cb_arg *cb_arg;
  181. struct fc_lport *lport = tgt->rdata->local_port;
  182. u32 r_a_tov = lport->r_a_tov;
  183. int rc;
  184. fh = fc_frame_header_get(fp);
  185. cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
  186. if (!cb_arg) {
  187. printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
  188. return -ENOMEM;
  189. }
  190. cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
  191. BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
  192. logo = fc_frame_payload_get(fp, sizeof(*logo));
  193. /* logo is initialized by libfc */
  194. rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo),
  195. bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
  196. if (rc)
  197. kfree(cb_arg);
  198. return rc;
  199. }
  200. int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
  201. {
  202. struct fc_els_rls *rls;
  203. struct fc_frame_header *fh;
  204. struct bnx2fc_els_cb_arg *cb_arg;
  205. struct fc_lport *lport = tgt->rdata->local_port;
  206. u32 r_a_tov = lport->r_a_tov;
  207. int rc;
  208. fh = fc_frame_header_get(fp);
  209. cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
  210. if (!cb_arg) {
  211. printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
  212. return -ENOMEM;
  213. }
  214. cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
  215. rls = fc_frame_payload_get(fp, sizeof(*rls));
  216. /* rls is initialized by libfc */
  217. rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls),
  218. bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
  219. if (rc)
  220. kfree(cb_arg);
  221. return rc;
  222. }
  223. void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
  224. {
  225. struct bnx2fc_mp_req *mp_req;
  226. struct fc_frame_header *fc_hdr, *fh;
  227. struct bnx2fc_cmd *srr_req;
  228. struct bnx2fc_cmd *orig_io_req;
  229. struct fc_frame *fp;
  230. unsigned char *buf;
  231. void *resp_buf;
  232. u32 resp_len, hdr_len;
  233. u8 opcode;
  234. int rc = 0;
  235. orig_io_req = cb_arg->aborted_io_req;
  236. srr_req = cb_arg->io_req;
  237. if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
  238. BNX2FC_IO_DBG(srr_req, "srr_compl: xid - 0x%x completed",
  239. orig_io_req->xid);
  240. goto srr_compl_done;
  241. }
  242. if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
  243. BNX2FC_IO_DBG(srr_req, "rec abts in prog "
  244. "orig_io - 0x%x\n",
  245. orig_io_req->xid);
  246. goto srr_compl_done;
  247. }
  248. if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
  249. /* SRR timedout */
  250. BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
  251. "orig_io - 0x%x\n",
  252. orig_io_req->xid);
  253. rc = bnx2fc_initiate_abts(srr_req);
  254. if (rc != SUCCESS) {
  255. BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
  256. "failed. issue cleanup\n");
  257. bnx2fc_initiate_cleanup(srr_req);
  258. }
  259. orig_io_req->srr_retry++;
  260. if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
  261. struct bnx2fc_rport *tgt = orig_io_req->tgt;
  262. spin_unlock_bh(&tgt->tgt_lock);
  263. rc = bnx2fc_send_srr(orig_io_req,
  264. orig_io_req->srr_offset,
  265. orig_io_req->srr_rctl);
  266. spin_lock_bh(&tgt->tgt_lock);
  267. if (!rc)
  268. goto srr_compl_done;
  269. }
  270. rc = bnx2fc_initiate_abts(orig_io_req);
  271. if (rc != SUCCESS) {
  272. BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
  273. "failed xid = 0x%x. issue cleanup\n",
  274. orig_io_req->xid);
  275. bnx2fc_initiate_cleanup(orig_io_req);
  276. }
  277. goto srr_compl_done;
  278. }
  279. mp_req = &(srr_req->mp_req);
  280. fc_hdr = &(mp_req->resp_fc_hdr);
  281. resp_len = mp_req->resp_len;
  282. resp_buf = mp_req->resp_buf;
  283. hdr_len = sizeof(*fc_hdr);
  284. buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
  285. if (!buf) {
  286. printk(KERN_ERR PFX "srr buf: mem alloc failure\n");
  287. goto srr_compl_done;
  288. }
  289. memcpy(buf, fc_hdr, hdr_len);
  290. memcpy(buf + hdr_len, resp_buf, resp_len);
  291. fp = fc_frame_alloc(NULL, resp_len);
  292. if (!fp) {
  293. printk(KERN_ERR PFX "fc_frame_alloc failure\n");
  294. goto free_buf;
  295. }
  296. fh = (struct fc_frame_header *) fc_frame_header_get(fp);
  297. /* Copy FC Frame header and payload into the frame */
  298. memcpy(fh, buf, hdr_len + resp_len);
  299. opcode = fc_frame_payload_op(fp);
  300. switch (opcode) {
  301. case ELS_LS_ACC:
  302. BNX2FC_IO_DBG(srr_req, "SRR success\n");
  303. break;
  304. case ELS_LS_RJT:
  305. BNX2FC_IO_DBG(srr_req, "SRR rejected\n");
  306. rc = bnx2fc_initiate_abts(orig_io_req);
  307. if (rc != SUCCESS) {
  308. BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
  309. "failed xid = 0x%x. issue cleanup\n",
  310. orig_io_req->xid);
  311. bnx2fc_initiate_cleanup(orig_io_req);
  312. }
  313. break;
  314. default:
  315. BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n",
  316. opcode);
  317. break;
  318. }
  319. fc_frame_free(fp);
  320. free_buf:
  321. kfree(buf);
  322. srr_compl_done:
  323. kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
  324. }
  325. void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
  326. {
  327. struct bnx2fc_cmd *orig_io_req, *new_io_req;
  328. struct bnx2fc_cmd *rec_req;
  329. struct bnx2fc_mp_req *mp_req;
  330. struct fc_frame_header *fc_hdr, *fh;
  331. struct fc_els_ls_rjt *rjt;
  332. struct fc_els_rec_acc *acc;
  333. struct bnx2fc_rport *tgt;
  334. struct fcoe_err_report_entry *err_entry;
  335. struct scsi_cmnd *sc_cmd;
  336. enum fc_rctl r_ctl;
  337. unsigned char *buf;
  338. void *resp_buf;
  339. struct fc_frame *fp;
  340. u8 opcode;
  341. u32 offset;
  342. u32 e_stat;
  343. u32 resp_len, hdr_len;
  344. int rc = 0;
  345. bool send_seq_clnp = false;
  346. bool abort_io = false;
  347. BNX2FC_MISC_DBG("Entered rec_compl callback\n");
  348. rec_req = cb_arg->io_req;
  349. orig_io_req = cb_arg->aborted_io_req;
  350. BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid);
  351. tgt = orig_io_req->tgt;
  352. if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
  353. BNX2FC_IO_DBG(rec_req, "completed"
  354. "orig_io - 0x%x\n",
  355. orig_io_req->xid);
  356. goto rec_compl_done;
  357. }
  358. if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
  359. BNX2FC_IO_DBG(rec_req, "abts in prog "
  360. "orig_io - 0x%x\n",
  361. orig_io_req->xid);
  362. goto rec_compl_done;
  363. }
  364. /* Handle REC timeout case */
  365. if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
  366. BNX2FC_IO_DBG(rec_req, "timed out, abort "
  367. "orig_io - 0x%x\n",
  368. orig_io_req->xid);
  369. /* els req is timed out. send abts for els */
  370. rc = bnx2fc_initiate_abts(rec_req);
  371. if (rc != SUCCESS) {
  372. BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
  373. "failed. issue cleanup\n");
  374. bnx2fc_initiate_cleanup(rec_req);
  375. }
  376. orig_io_req->rec_retry++;
  377. /* REC timedout. send ABTS to the orig IO req */
  378. if (orig_io_req->rec_retry <= REC_RETRY_COUNT) {
  379. spin_unlock_bh(&tgt->tgt_lock);
  380. rc = bnx2fc_send_rec(orig_io_req);
  381. spin_lock_bh(&tgt->tgt_lock);
  382. if (!rc)
  383. goto rec_compl_done;
  384. }
  385. rc = bnx2fc_initiate_abts(orig_io_req);
  386. if (rc != SUCCESS) {
  387. BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
  388. "failed xid = 0x%x. issue cleanup\n",
  389. orig_io_req->xid);
  390. bnx2fc_initiate_cleanup(orig_io_req);
  391. }
  392. goto rec_compl_done;
  393. }
  394. mp_req = &(rec_req->mp_req);
  395. fc_hdr = &(mp_req->resp_fc_hdr);
  396. resp_len = mp_req->resp_len;
  397. acc = resp_buf = mp_req->resp_buf;
  398. hdr_len = sizeof(*fc_hdr);
  399. buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
  400. if (!buf) {
  401. printk(KERN_ERR PFX "rec buf: mem alloc failure\n");
  402. goto rec_compl_done;
  403. }
  404. memcpy(buf, fc_hdr, hdr_len);
  405. memcpy(buf + hdr_len, resp_buf, resp_len);
  406. fp = fc_frame_alloc(NULL, resp_len);
  407. if (!fp) {
  408. printk(KERN_ERR PFX "fc_frame_alloc failure\n");
  409. goto free_buf;
  410. }
  411. fh = (struct fc_frame_header *) fc_frame_header_get(fp);
  412. /* Copy FC Frame header and payload into the frame */
  413. memcpy(fh, buf, hdr_len + resp_len);
  414. opcode = fc_frame_payload_op(fp);
  415. if (opcode == ELS_LS_RJT) {
  416. BNX2FC_IO_DBG(rec_req, "opcode is RJT\n");
  417. rjt = fc_frame_payload_get(fp, sizeof(*rjt));
  418. if ((rjt->er_reason == ELS_RJT_LOGIC ||
  419. rjt->er_reason == ELS_RJT_UNAB) &&
  420. rjt->er_explan == ELS_EXPL_OXID_RXID) {
  421. BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n");
  422. new_io_req = bnx2fc_cmd_alloc(tgt);
  423. if (!new_io_req)
  424. goto abort_io;
  425. new_io_req->sc_cmd = orig_io_req->sc_cmd;
  426. /* cleanup orig_io_req that is with the FW */
  427. set_bit(BNX2FC_FLAG_CMD_LOST,
  428. &orig_io_req->req_flags);
  429. bnx2fc_initiate_cleanup(orig_io_req);
  430. /* Post a new IO req with the same sc_cmd */
  431. BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
  432. spin_unlock_bh(&tgt->tgt_lock);
  433. rc = bnx2fc_post_io_req(tgt, new_io_req);
  434. spin_lock_bh(&tgt->tgt_lock);
  435. if (!rc)
  436. goto free_frame;
  437. BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
  438. }
  439. abort_io:
  440. rc = bnx2fc_initiate_abts(orig_io_req);
  441. if (rc != SUCCESS) {
  442. BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
  443. "failed. issue cleanup\n");
  444. bnx2fc_initiate_cleanup(orig_io_req);
  445. }
  446. } else if (opcode == ELS_LS_ACC) {
  447. /* REVISIT: Check if the exchange is already aborted */
  448. offset = ntohl(acc->reca_fc4value);
  449. e_stat = ntohl(acc->reca_e_stat);
  450. if (e_stat & ESB_ST_SEQ_INIT) {
  451. BNX2FC_IO_DBG(rec_req, "target has the seq init\n");
  452. goto free_frame;
  453. }
  454. BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n",
  455. e_stat, offset);
  456. /* Seq initiative is with us */
  457. err_entry = (struct fcoe_err_report_entry *)
  458. &orig_io_req->err_entry;
  459. sc_cmd = orig_io_req->sc_cmd;
  460. if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
  461. /* SCSI WRITE command */
  462. if (offset == orig_io_req->data_xfer_len) {
  463. BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n");
  464. /* FCP_RSP lost */
  465. r_ctl = FC_RCTL_DD_CMD_STATUS;
  466. offset = 0;
  467. } else {
  468. /* start transmitting from offset */
  469. BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n");
  470. send_seq_clnp = true;
  471. r_ctl = FC_RCTL_DD_DATA_DESC;
  472. if (bnx2fc_initiate_seq_cleanup(orig_io_req,
  473. offset, r_ctl))
  474. abort_io = true;
  475. /* XFER_RDY */
  476. }
  477. } else {
  478. /* SCSI READ command */
  479. if (err_entry->data.rx_buf_off ==
  480. orig_io_req->data_xfer_len) {
  481. /* FCP_RSP lost */
  482. BNX2FC_IO_DBG(rec_req, "READ - resp lost\n");
  483. r_ctl = FC_RCTL_DD_CMD_STATUS;
  484. offset = 0;
  485. } else {
  486. /* request retransmission from this offset */
  487. send_seq_clnp = true;
  488. offset = err_entry->data.rx_buf_off;
  489. BNX2FC_IO_DBG(rec_req, "RD DATA lost\n");
  490. /* FCP_DATA lost */
  491. r_ctl = FC_RCTL_DD_SOL_DATA;
  492. if (bnx2fc_initiate_seq_cleanup(orig_io_req,
  493. offset, r_ctl))
  494. abort_io = true;
  495. }
  496. }
  497. if (abort_io) {
  498. rc = bnx2fc_initiate_abts(orig_io_req);
  499. if (rc != SUCCESS) {
  500. BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts"
  501. " failed. issue cleanup\n");
  502. bnx2fc_initiate_cleanup(orig_io_req);
  503. }
  504. } else if (!send_seq_clnp) {
  505. BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n");
  506. spin_unlock_bh(&tgt->tgt_lock);
  507. rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
  508. spin_lock_bh(&tgt->tgt_lock);
  509. if (rc) {
  510. BNX2FC_IO_DBG(rec_req, "Unable to send SRR"
  511. " IO will abort\n");
  512. }
  513. }
  514. }
  515. free_frame:
  516. fc_frame_free(fp);
  517. free_buf:
  518. kfree(buf);
  519. rec_compl_done:
  520. kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
  521. kfree(cb_arg);
  522. }
  523. int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
  524. {
  525. struct fc_els_rec rec;
  526. struct bnx2fc_rport *tgt = orig_io_req->tgt;
  527. struct fc_lport *lport = tgt->rdata->local_port;
  528. struct bnx2fc_els_cb_arg *cb_arg = NULL;
  529. u32 sid = tgt->sid;
  530. u32 r_a_tov = lport->r_a_tov;
  531. int rc;
  532. BNX2FC_IO_DBG(orig_io_req, "Sending REC\n");
  533. memset(&rec, 0, sizeof(rec));
  534. cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
  535. if (!cb_arg) {
  536. printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n");
  537. rc = -ENOMEM;
  538. goto rec_err;
  539. }
  540. kref_get(&orig_io_req->refcount);
  541. cb_arg->aborted_io_req = orig_io_req;
  542. rec.rec_cmd = ELS_REC;
  543. hton24(rec.rec_s_id, sid);
  544. rec.rec_ox_id = htons(orig_io_req->xid);
  545. rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
  546. rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
  547. bnx2fc_rec_compl, cb_arg,
  548. r_a_tov);
  549. rec_err:
  550. if (rc) {
  551. BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n");
  552. spin_lock_bh(&tgt->tgt_lock);
  553. kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
  554. spin_unlock_bh(&tgt->tgt_lock);
  555. kfree(cb_arg);
  556. }
  557. return rc;
  558. }
  559. int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
  560. {
  561. struct fcp_srr srr;
  562. struct bnx2fc_rport *tgt = orig_io_req->tgt;
  563. struct fc_lport *lport = tgt->rdata->local_port;
  564. struct bnx2fc_els_cb_arg *cb_arg = NULL;
  565. u32 r_a_tov = lport->r_a_tov;
  566. int rc;
  567. BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n");
  568. memset(&srr, 0, sizeof(srr));
  569. cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
  570. if (!cb_arg) {
  571. printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n");
  572. rc = -ENOMEM;
  573. goto srr_err;
  574. }
  575. kref_get(&orig_io_req->refcount);
  576. cb_arg->aborted_io_req = orig_io_req;
  577. srr.srr_op = ELS_SRR;
  578. srr.srr_ox_id = htons(orig_io_req->xid);
  579. srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
  580. srr.srr_rel_off = htonl(offset);
  581. srr.srr_r_ctl = r_ctl;
  582. orig_io_req->srr_offset = offset;
  583. orig_io_req->srr_rctl = r_ctl;
  584. rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
  585. bnx2fc_srr_compl, cb_arg,
  586. r_a_tov);
  587. srr_err:
  588. if (rc) {
  589. BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n");
  590. spin_lock_bh(&tgt->tgt_lock);
  591. kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
  592. spin_unlock_bh(&tgt->tgt_lock);
  593. kfree(cb_arg);
  594. } else
  595. set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags);
  596. return rc;
  597. }
/*
 * bnx2fc_initiate_els() - build and post an offloaded ELS request to the
 *	firmware.
 * @tgt:        offloaded rport session
 * @op:         ELS opcode (ELS_RRQ, ELS_ADISC, ..., ELS_SRR)
 * @data:       ELS payload to copy into the MP request buffer
 * @data_len:   payload length in bytes
 * @cb_func:    completion callback invoked by bnx2fc_process_els_compl()
 * @cb_arg:     argument handed to @cb_func; io_req is filled in here
 * @timer_msec: optional ELS timeout in milliseconds (0 = no timer)
 *
 * Validates rport/lport/session state, allocates an ELS command and its
 * middle-path (MP) buffers, fills the FC header (SRR travels as an FC-4
 * ELS with FC_RCTL_ELS4_REQ/FC_TYPE_FCP; everything else as a plain ELS),
 * initializes the task context, and rings the doorbell under tgt_lock.
 *
 * Return: 0 on success, -EINVAL on state/opcode errors, -ENOMEM on
 * allocation failure.  On failure the caller still owns @cb_arg.
 */
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
			       void *data, u32 data_len,
			       void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
			       struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct fc_rport *rport = tgt->rport;
	struct fc_lport *lport = port->lport;
	struct bnx2fc_cmd *els_req;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int rc = 0;
	int task_idx, index;
	u32 did, sid;
	u16 xid;

	/* Reject if the rport, link, or offloaded session is not usable */
	rc = fc_remote_port_chkready(rport);
	if (rc) {
		printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) ||
	     (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags))) {
		printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
	if (!els_req) {
		rc = -ENOMEM;
		goto els_err;
	}

	els_req->sc_cmd = NULL;
	els_req->port = port;
	els_req->tgt = tgt;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	els_req->cb_arg = cb_arg;

	mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
	rc = bnx2fc_init_mp_req(els_req);
	if (rc == FAILED) {
		printk(KERN_ERR PFX "ELS MP request init failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -ENOMEM;
		goto els_err;
	} else {
		/* rc SUCCESS */
		rc = 0;
	}

	/* Set the data_xfer_len to the size of ELS payload */
	mp_req->req_len = data_len;
	els_req->data_xfer_len = mp_req->req_len;

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		/* Unknown opcode: undo the command allocation */
		printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -EINVAL;
	}
	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);
	did = tgt->rport->port_id;
	sid = tgt->sid;
	if (op == ELS_SRR)
		/* SRR is an FC-4 link service carried over type FCP */
		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid,
				 FC_TYPE_FCP, FC_FC_FIRST_SEQ |
				 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
	else
		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
				 FC_TYPE_ELS, FC_FC_FIRST_SEQ |
				 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(els_req, task);

	spin_lock_bh(&tgt->tgt_lock);
	/* Re-check session state now that the lock is held */
	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_els.. session not ready\n");
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EINVAL;
	}
	if (timer_msec)
		bnx2fc_cmd_timer_set(els_req, timer_msec);
	bnx2fc_add_2_sq(tgt, xid);

	els_req->on_active_queue = 1;
	list_add_tail(&els_req->link, &tgt->els_queue);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

els_err:
	return rc;
}
/*
 * bnx2fc_process_els_compl() - firmware completion processing for an
 *	offloaded ELS request.
 * @els_req: the completed ELS command
 * @task:    firmware task context entry carrying the response
 * @num_rq:  number of RQ entries for this completion (unused here)
 *
 * Resolves the race with the ELS timeout handler via BNX2FC_FLAG_ELS_DONE
 * (whichever context sets the bit first processes the request), cancels
 * the pending timeout work, copies the response FC header and payload
 * length out of the task context, and invokes the registered callback.
 * Drops the command reference when done.
 * NOTE(review): appears to run with tgt->tgt_lock held by the caller -
 * verify against the hardware-interface completion path.
 */
void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
			      struct fcoe_task_ctx_entry *task, u8 num_rq)
{
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	u64 *hdr;
	u64 *temp_hdr;

	BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x"
		       "cmd_type = %d\n", els_req->xid, els_req->cmd_type);

	if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
			     &els_req->req_flags)) {
		BNX2FC_ELS_DBG("Timer context finished processing this "
			       "els - 0x%x\n", els_req->xid);
		/* This IO doesn't receive cleanup completion */
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		return;
	}

	/* Cancel the timeout_work, as we received the response */
	if (cancel_delayed_work(&els_req->timeout_work))
		kref_put(&els_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */

	if (els_req->on_active_queue) {
		list_del_init(&els_req->link);
		els_req->on_active_queue = 0;
	}

	mp_req = &(els_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);

	/* Copy the response FC header out of the task context, one 64-bit
	 * word at a time, converting to big-endian wire order.
	 */
	hdr = (u64 *)fc_hdr;
	temp_hdr = (u64 *)
		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	mp_req->resp_len =
		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, bnx2fc_cmd_release);
}
/*
 * bnx2fc_flogi_resp() - FLOGI/FDISC response handler hooked in by
 *	bnx2fc_elsct_send().
 * @seq: exchange sequence the response arrived on
 * @fp:  response frame, or an ERR_PTR on exchange error/timeout
 * @arg: the fcoe_ctlr for this interface
 *
 * Programs the MAC address granted by the fabric (from the frame control
 * block, or extracted from the FLOGI LS_ACC via fcoe_ctlr_recv_flogi()
 * when none was recorded) through fip->update_mac(), then forwards the
 * response to libfc's fc_lport_flogi_resp().  Frames rejected during
 * validation are freed here and not passed on.
 */
static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
			      void *arg)
{
	struct fcoe_ctlr *fip = arg;
	struct fc_exch *exch = fc_seq_exch(seq);
	struct fc_lport *lport = exch->lp;
	u8 *mac;
	struct fc_frame_header *fh;
	u8 op;

	if (IS_ERR(fp))
		goto done;

	mac = fr_cb(fp)->granted_mac;
	if (is_zero_ether_addr(mac)) {
		/* No granted MAC recorded yet: validate the frame and let
		 * the FIP controller extract one from the FLOGI response.
		 */
		fh = fc_frame_header_get(fp);
		if (fh->fh_type != FC_TYPE_ELS) {
			printk(KERN_ERR PFX "bnx2fc_flogi_resp:"
			       "fh_type != FC_TYPE_ELS\n");
			fc_frame_free(fp);
			return;
		}
		op = fc_frame_payload_op(fp);
		if (lport->vport) {
			/* An LS_RJT on an NPIV port means the vport cannot
			 * come up - tear it down.
			 */
			if (op == ELS_LS_RJT) {
				printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n");
				fc_vport_terminate(lport->vport);
				fc_frame_free(fp);
				return;
			}
		}
		if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
			fc_frame_free(fp);
			return;
		}
	}
	fip->update_mac(lport, mac);
done:
	fc_lport_flogi_resp(seq, fp, lport);
}
  795. static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
  796. void *arg)
  797. {
  798. struct fcoe_ctlr *fip = arg;
  799. struct fc_exch *exch = fc_seq_exch(seq);
  800. struct fc_lport *lport = exch->lp;
  801. static u8 zero_mac[ETH_ALEN] = { 0 };
  802. if (!IS_ERR(fp))
  803. fip->update_mac(lport, zero_mac);
  804. fc_lport_logo_resp(seq, fp, lport);
  805. }
  806. struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
  807. struct fc_frame *fp, unsigned int op,
  808. void (*resp)(struct fc_seq *,
  809. struct fc_frame *,
  810. void *),
  811. void *arg, u32 timeout)
  812. {
  813. struct fcoe_port *port = lport_priv(lport);
  814. struct bnx2fc_interface *interface = port->priv;
  815. struct fcoe_ctlr *fip = &interface->ctlr;
  816. struct fc_frame_header *fh = fc_frame_header_get(fp);
  817. switch (op) {
  818. case ELS_FLOGI:
  819. case ELS_FDISC:
  820. return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp,
  821. fip, timeout);
  822. case ELS_LOGO:
  823. /* only hook onto fabric logouts, not port logouts */
  824. if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
  825. break;
  826. return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp,
  827. fip, timeout);
  828. }
  829. return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
  830. }