/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
 * IO manager and SCSI IO processing.
 *
 * Copyright (c) 2008 - 2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

#define RESERVE_FREE_LIST_INDEX num_possible_cpus()

static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
			   int bd_index);
static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
				 struct fcoe_fcp_rsp_payload *fcp_rsp,
				 u8 num_rq);

void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
			  unsigned int timer_msec)
{
	struct bnx2fc_interface *interface = io_req->port->priv;

	if (queue_delayed_work(interface->timer_work_queue,
			       &io_req->timeout_work,
			       msecs_to_jiffies(timer_msec)))
		kref_get(&io_req->refcount);
}
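
/*
 * bnx2fc_cmd_timeout - delayed-work handler for io_req timeouts.
 *
 * Runs in workqueue context with no locks held; it takes tgt->tgt_lock for
 * the duration of the timeout processing. The reference acquired in
 * bnx2fc_cmd_timer_set() above is released at the "done" label on the
 * common paths.
 */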
static void bnx2fc_cmd_timeout(struct work_struct *work)
{
	struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
						 timeout_work.work);
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	u8 cmd_type = io_req->cmd_type;
	struct bnx2fc_rport *tgt = io_req->tgt;
	int logo_issued;
	int rc;

	BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d, "
		      "req_flags = %lx\n", cmd_type, io_req->req_flags);

	spin_lock_bh(&tgt->tgt_lock);
	if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
		clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
		/*
		 * Ideally we should hold the io_req until RRQ completes,
		 * and release io_req from timeout hold.
		 */
		spin_unlock_bh(&tgt->tgt_lock);
		bnx2fc_send_rrq(io_req);
		return;
	}
	if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
		goto done;
	}

	switch (cmd_type) {
	case BNX2FC_SCSI_CMD:
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags)) {
			/* Handle eh_abort timeout */
			BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
			complete(&io_req->tm_done);
		} else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
				    &io_req->req_flags)) {
			/* Handle internally generated ABTS timeout */
			BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
				      io_req->refcount.refcount.counter);
			if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					       &io_req->req_flags))) {
				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				/* Explicitly logo the target */
				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicit "
						      "logo - tgt flags = 0x%lx\n",
						      tgt->flags);

					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			}
		} else {
			/* Handle IO timeout */
			BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n");
			if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL,
					     &io_req->req_flags)) {
				BNX2FC_IO_DBG(io_req, "IO completed before "
					      " timer expiry\n");
				goto done;
			}

			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &io_req->req_flags)) {
				rc = bnx2fc_initiate_abts(io_req);
				if (rc == SUCCESS)
					goto done;
				/*
				 * Explicitly logo the target if
				 * abts initiation fails
				 */
				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicit "
						      "logo - tgt flags = 0x%lx\n",
						      tgt->flags);

					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			} else {
				BNX2FC_IO_DBG(io_req, "IO already in "
					      "ABTS processing\n");
			}
		}
		break;
	case BNX2FC_ELS:
		if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
			BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");

			if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					      &io_req->req_flags)) {
				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				/* Explicitly logo the target */
				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicitly logo "
						      "(els)\n");
					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			}
		} else {
			/*
			 * Handle ELS timeout.
			 * tgt_lock is used to sync compl path and timeout
			 * path. If els compl path is processing this IO, we
			 * have nothing to do here, just release the timer hold
			 */
			BNX2FC_IO_DBG(io_req, "ELS timed out\n");
			if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
					     &io_req->req_flags))
				goto done;

			/* Indicate the cb_func that this ELS is timed out */
			set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags);

			if ((io_req->cb_func) && (io_req->cb_arg)) {
				io_req->cb_func(io_req->cb_arg);
				io_req->cb_arg = NULL;
			}
		}
		break;
	default:
		printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n",
		       cmd_type);
		break;
	}

done:
	/* release the cmd that was held when timer was set */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	spin_unlock_bh(&tgt->tgt_lock);
}
static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
{
	/* Called with host lock held */
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	/*
	 * active_cmd_queue may have other command types as well,
	 * and during flush operation, we want to error back only
	 * scsi commands.
	 */
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		return;

	BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
	if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) {
		/* Do not call scsi done for this IO */
		return;
	}

	bnx2fc_unmap_sg_list(io_req);
	io_req->sc_cmd = NULL;
	if (!sc_cmd) {
		printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
		       "IO(0x%x) already cleaned up\n",
		       io_req->xid);
		return;
	}
	sc_cmd->result = err_code << 16;

	BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n",
		      sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
		      sc_cmd->allowed);
	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
}
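
/*
 * bnx2fc_cmd_mgr_alloc - build the per-hba command manager.
 *
 * The cmgr structure and its cmds[] xid lookup array are carved from a
 * single allocation (cmgr->cmds points just past the cmgr). Free lists are
 * per possible CPU, with one extra "reserve" list at index
 * num_possible_cpus() that holds the slow-path (ELS/TM) requests.
 */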
struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
					    u16 min_xid, u16 max_xid)
{
	struct bnx2fc_cmd_mgr *cmgr;
	struct io_bdt *bdt_info;
	struct bnx2fc_cmd *io_req;
	size_t len;
	u32 mem_size;
	u16 xid;
	int i;
	int num_ios, num_pri_ios;
	size_t bd_tbl_sz;
	int arr_sz = num_possible_cpus() + 1;

	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
		printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x "
		       "and max_xid 0x%x\n", min_xid, max_xid);
		return NULL;
	}
	BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid);

	num_ios = max_xid - min_xid + 1;
	len = (num_ios * (sizeof(struct bnx2fc_cmd *)));
	len += sizeof(struct bnx2fc_cmd_mgr);

	cmgr = kzalloc(len, GFP_KERNEL);
	if (!cmgr) {
		printk(KERN_ERR PFX "failed to alloc cmgr\n");
		return NULL;
	}

	cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) *
				  arr_sz, GFP_KERNEL);
	if (!cmgr->free_list) {
		printk(KERN_ERR PFX "failed to alloc free_list\n");
		goto mem_err;
	}

	cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) *
				       arr_sz, GFP_KERNEL);
	if (!cmgr->free_list_lock) {
		printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
		goto mem_err;
	}

	cmgr->hba = hba;
	cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);

	for (i = 0; i < arr_sz; i++) {
		INIT_LIST_HEAD(&cmgr->free_list[i]);
		spin_lock_init(&cmgr->free_list_lock[i]);
	}

	/*
	 * Pre-allocated pool of bnx2fc_cmds.
	 * Last entry in the free list array is the free list
	 * of slow path requests.
	 */
	xid = BNX2FC_MIN_XID;
	num_pri_ios = num_ios - BNX2FC_ELSTM_XIDS;
	for (i = 0; i < num_ios; i++) {
		io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);
		if (!io_req) {
			printk(KERN_ERR PFX "failed to alloc io_req\n");
			goto mem_err;
		}

		INIT_LIST_HEAD(&io_req->link);
		INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);

		io_req->xid = xid++;
		if (i < num_pri_ios)
			list_add_tail(&io_req->link,
				      &cmgr->free_list[io_req->xid %
						       num_possible_cpus()]);
		else
			list_add_tail(&io_req->link,
				      &cmgr->free_list[num_possible_cpus()]);
		io_req++;
	}

	/* Allocate pool of io_bdts - one for each bnx2fc_cmd */
	mem_size = num_ios * sizeof(struct io_bdt *);
	cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
	if (!cmgr->io_bdt_pool) {
		printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
		goto mem_err;
	}

	mem_size = sizeof(struct io_bdt);
	for (i = 0; i < num_ios; i++) {
		cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL);
		if (!cmgr->io_bdt_pool[i]) {
			printk(KERN_ERR PFX "failed to alloc "
			       "io_bdt_pool[%d]\n", i);
			goto mem_err;
		}
	}

	/* Allocate and map fcoe_bdt_ctx structures */
	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
						      bd_tbl_sz,
						      &bdt_info->bd_tbl_dma,
						      GFP_KERNEL);
		if (!bdt_info->bd_tbl) {
			printk(KERN_ERR PFX "failed to alloc "
			       "bdt_tbl[%d]\n", i);
			goto mem_err;
		}
	}

	return cmgr;

mem_err:
	bnx2fc_cmd_mgr_free(cmgr);
	return NULL;
}
void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
{
	struct io_bdt *bdt_info;
	struct bnx2fc_hba *hba = cmgr->hba;
	size_t bd_tbl_sz;
	u16 min_xid = BNX2FC_MIN_XID;
	u16 max_xid = BNX2FC_MAX_XID;
	int num_ios;
	int i;

	num_ios = max_xid - min_xid + 1;

	/* Free fcoe_bdt_ctx structures */
	if (!cmgr->io_bdt_pool)
		goto free_cmd_pool;

	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		if (bdt_info->bd_tbl) {
			dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
					  bdt_info->bd_tbl,
					  bdt_info->bd_tbl_dma);
			bdt_info->bd_tbl = NULL;
		}
	}

	/* Destroy io_bdt pool */
	for (i = 0; i < num_ios; i++) {
		kfree(cmgr->io_bdt_pool[i]);
		cmgr->io_bdt_pool[i] = NULL;
	}

	kfree(cmgr->io_bdt_pool);
	cmgr->io_bdt_pool = NULL;

free_cmd_pool:
	kfree(cmgr->free_list_lock);

	/* Destroy cmd pool */
	if (!cmgr->free_list)
		goto free_cmgr;

	for (i = 0; i < num_possible_cpus() + 1; i++) {
		struct bnx2fc_cmd *tmp, *io_req;

		list_for_each_entry_safe(io_req, tmp,
					 &cmgr->free_list[i], link) {
			list_del(&io_req->link);
			kfree(io_req);
		}
	}
	kfree(cmgr->free_list);
free_cmgr:
	/* Free command manager itself */
	kfree(cmgr);
}
struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
	struct bnx2fc_cmd *io_req;
	struct list_head *listp;
	struct io_bdt *bd_tbl;
	int index = RESERVE_FREE_LIST_INDEX;
	u32 free_sqes;
	u32 max_sqes;
	u16 xid;

	max_sqes = tgt->max_sqes;
	switch (type) {
	case BNX2FC_TASK_MGMT_CMD:
		max_sqes = BNX2FC_TM_MAX_SQES;
		break;
	case BNX2FC_ELS:
		max_sqes = BNX2FC_ELS_MAX_SQES;
		break;
	default:
		break;
	}

	/*
	 * NOTE: Free list insertions and deletions are protected with
	 * cmgr lock
	 */
	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	free_sqes = atomic_read(&tgt->free_sqes);
	if ((list_empty(&(cmd_mgr->free_list[index]))) ||
	    (tgt->num_active_ios.counter >= max_sqes) ||
	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
		BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
			       "ios(%d):sqes(%d)\n",
			       tgt->num_active_ios.counter, tgt->max_sqes);
		if (list_empty(&(cmd_mgr->free_list[index])))
			printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
		return NULL;
	}

	listp = (struct list_head *)
			cmd_mgr->free_list[index].next;
	list_del_init(listp);
	io_req = (struct bnx2fc_cmd *) listp;
	xid = io_req->xid;
	cmd_mgr->cmds[xid] = io_req;
	atomic_inc(&tgt->num_active_ios);
	atomic_dec(&tgt->free_sqes);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);

	INIT_LIST_HEAD(&io_req->link);

	io_req->port = port;
	io_req->cmd_mgr = cmd_mgr;
	io_req->req_flags = 0;
	io_req->cmd_type = type;

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	bd_tbl->io_req = io_req;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);
	return io_req;
}
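
/*
 * bnx2fc_cmd_alloc - fast-path allocator for SCSI commands.
 *
 * Unlike bnx2fc_elstm_alloc() above, which always draws from the reserve
 * free list, this takes the free list of the current CPU (get_cpu()/put_cpu()
 * bracket the list manipulation) so fast-path allocations avoid contending
 * on a single list.
 */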
struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
	struct bnx2fc_cmd *io_req;
	struct list_head *listp;
	struct io_bdt *bd_tbl;
	u32 free_sqes;
	u32 max_sqes;
	u16 xid;
	int index = get_cpu();

	max_sqes = BNX2FC_SCSI_MAX_SQES;
	/*
	 * NOTE: Free list insertions and deletions are protected with
	 * cmgr lock
	 */
	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	free_sqes = atomic_read(&tgt->free_sqes);
	if ((list_empty(&cmd_mgr->free_list[index])) ||
	    (tgt->num_active_ios.counter >= max_sqes) ||
	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
		put_cpu();
		return NULL;
	}

	listp = (struct list_head *)
		cmd_mgr->free_list[index].next;
	list_del_init(listp);
	io_req = (struct bnx2fc_cmd *) listp;
	xid = io_req->xid;
	cmd_mgr->cmds[xid] = io_req;
	atomic_inc(&tgt->num_active_ios);
	atomic_dec(&tgt->free_sqes);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
	put_cpu();

	INIT_LIST_HEAD(&io_req->link);

	io_req->port = port;
	io_req->cmd_mgr = cmd_mgr;
	io_req->req_flags = 0;

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	bd_tbl->io_req = io_req;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);
	return io_req;
}
void bnx2fc_cmd_release(struct kref *ref)
{
	struct bnx2fc_cmd *io_req = container_of(ref,
						 struct bnx2fc_cmd, refcount);
	struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
	int index;

	if (io_req->cmd_type == BNX2FC_SCSI_CMD)
		index = io_req->xid % num_possible_cpus();
	else
		index = RESERVE_FREE_LIST_INDEX;

	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		bnx2fc_free_mp_resc(io_req);
	cmd_mgr->cmds[io_req->xid] = NULL;
	/* Delete IO from retire queue */
	list_del_init(&io_req->link);
	/* Add it to the free list */
	list_add(&io_req->link,
		 &cmd_mgr->free_list[index]);
	atomic_dec(&io_req->tgt->num_active_ios);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
}
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	size_t sz = sizeof(struct fcoe_bd_ctx);

	/* clear tm flags */
	mp_req->tm_flags = 0;
	if (mp_req->mp_req_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				  mp_req->mp_req_bd,
				  mp_req->mp_req_bd_dma);
		mp_req->mp_req_bd = NULL;
	}
	if (mp_req->mp_resp_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				  mp_req->mp_resp_bd,
				  mp_req->mp_resp_bd_dma);
		mp_req->mp_resp_bd = NULL;
	}
	if (mp_req->req_buf) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  mp_req->req_buf,
				  mp_req->req_buf_dma);
		mp_req->req_buf = NULL;
	}
	if (mp_req->resp_buf) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  mp_req->resp_buf,
				  mp_req->resp_buf_dma);
		mp_req->resp_buf = NULL;
	}
}
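
/*
 * bnx2fc_init_mp_req - set up the middle-path (ELS/TM) buffers for an io_req.
 *
 * Allocates one page each for the request and response payloads plus a
 * single fcoe_bd_ctx descriptor for each, and fills both descriptors so the
 * firmware can DMA the payloads. All allocations are released together by
 * bnx2fc_free_mp_resc() on any failure.
 */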
int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req;
	struct fcoe_bd_ctx *mp_req_bd;
	struct fcoe_bd_ctx *mp_resp_bd;
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	dma_addr_t addr;
	size_t sz;

	mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
	memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));

	mp_req->req_len = sizeof(struct fcp_cmnd);
	io_req->data_xfer_len = mp_req->req_len;
	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
					     &mp_req->req_buf_dma,
					     GFP_ATOMIC);
	if (!mp_req->req_buf) {
		printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}

	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
					      &mp_req->resp_buf_dma,
					      GFP_ATOMIC);
	if (!mp_req->resp_buf) {
		printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	memset(mp_req->req_buf, 0, PAGE_SIZE);
	memset(mp_req->resp_buf, 0, PAGE_SIZE);

	/* Allocate and map mp_req_bd and mp_resp_bd */
	sz = sizeof(struct fcoe_bd_ctx);
	mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
					       &mp_req->mp_req_bd_dma,
					       GFP_ATOMIC);
	if (!mp_req->mp_req_bd) {
		printk(KERN_ERR PFX "unable to alloc MP req bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
						&mp_req->mp_resp_bd_dma,
						GFP_ATOMIC);
	if (!mp_req->mp_resp_bd) {
		printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}

	/* Fill bd table */
	addr = mp_req->req_buf_dma;
	mp_req_bd = mp_req->mp_req_bd;
	mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_req_bd->buf_len = PAGE_SIZE;
	mp_req_bd->flags = 0;

	/*
	 * MP buffer is either a task mgmt command or an ELS.
	 * So the assumption is that it consumes a single bd
	 * entry in the bd table
	 */
	mp_resp_bd = mp_req->mp_resp_bd;
	addr = mp_req->resp_buf_dma;
	mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_resp_bd->buf_len = PAGE_SIZE;
	mp_resp_bd->flags = 0;

	return SUCCESS;
}
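
/*
 * bnx2fc_initiate_tmf - send an FCP task management function and wait.
 *
 * Builds the TM request as a middle-path command (FCP_CMND carried in the
 * MP request buffer), queues it on the target's SQ, and sleeps on tm_done
 * for up to BNX2FC_TM_TIMEOUT seconds. If the TM does not complete in time,
 * a cleanup request is issued for the TM exchange before the final status
 * is decided.
 */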
static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
	struct fc_lport *lport;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct fcoe_port *port;
	struct bnx2fc_interface *interface;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_cmd *io_req;
	struct bnx2fc_mp_req *tm_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct Scsi_Host *host = sc_cmd->device->host;
	struct fc_frame_header *fc_hdr;
	struct fcp_cmnd *fcp_cmnd;
	int task_idx, index;
	int rc = SUCCESS;
	u16 xid;
	u32 sid, did;
	unsigned long start = jiffies;

	lport = shost_priv(host);
	port = lport_priv(lport);
	interface = port->priv;

	if (rport == NULL) {
		printk(KERN_ERR PFX "device_reset: rport is NULL\n");
		rc = FAILED;
		goto tmf_err;
	}

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		return rc;

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "device_reset: link is not ready\n");
		rc = FAILED;
		goto tmf_err;
	}
	/* rport and tgt are allocated together, so tgt should be non-NULL */
	tgt = (struct bnx2fc_rport *)&rp[1];

	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
		printk(KERN_ERR PFX "device_reset: tgt not offloaded\n");
		rc = FAILED;
		goto tmf_err;
	}
retry_tmf:
	io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD);
	if (!io_req) {
		if (time_after(jiffies, start + HZ)) {
			printk(KERN_ERR PFX "tmf: Failed TMF\n");
			rc = FAILED;
			goto tmf_err;
		}
		msleep(20);
		goto retry_tmf;
	}
	/* Initialize rest of io_req fields */
	io_req->sc_cmd = sc_cmd;
	io_req->port = port;
	io_req->tgt = tgt;

	tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);

	rc = bnx2fc_init_mp_req(io_req);
	if (rc == FAILED) {
		printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		goto tmf_err;
	}

	/* Set TM flags */
	io_req->io_req_flags = 0;
	tm_req->tm_flags = tm_flags;

	/* Fill FCP_CMND */
	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
	fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
	memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len);
	fcp_cmnd->fc_dl = 0;

	/* Fill FC header */
	fc_hdr = &(tm_req->req_fc_hdr);
	sid = tgt->sid;
	did = rport->port_id;
	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid,
			 FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);
	/* Obtain exchange id */
	xid = io_req->xid;

	BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid);
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(io_req, task);

	sc_cmd->SCp.ptr = (char *)io_req;

	/* Obtain free SQ entry */
	spin_lock_bh(&tgt->tgt_lock);
	bnx2fc_add_2_sq(tgt, xid);

	/* Enqueue the io_req to active_tm_queue */
	io_req->on_tmf_queue = 1;
	list_add_tail(&io_req->link, &tgt->active_tm_queue);

	init_completion(&io_req->tm_done);
	io_req->wait_for_comp = 1;

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

	rc = wait_for_completion_timeout(&io_req->tm_done,
					 BNX2FC_TM_TIMEOUT * HZ);
	spin_lock_bh(&tgt->tgt_lock);

	io_req->wait_for_comp = 0;
	if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
		set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
		if (io_req->on_tmf_queue) {
			list_del_init(&io_req->link);
			io_req->on_tmf_queue = 0;
		}
		io_req->wait_for_comp = 1;
		bnx2fc_initiate_cleanup(io_req);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = wait_for_completion_timeout(&io_req->tm_done,
						 BNX2FC_FW_TIMEOUT);
		spin_lock_bh(&tgt->tgt_lock);
		io_req->wait_for_comp = 0;
		if (!rc)
			kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}

	spin_unlock_bh(&tgt->tgt_lock);

	if (!rc) {
		BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n");
		rc = FAILED;
	} else {
		BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n");
		rc = SUCCESS;
	}
tmf_err:
	return rc;
}
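
/*
 * bnx2fc_initiate_abts - queue an ABTS for the exchange owned by io_req.
 *
 * Called with tgt_lock held. The ABTS itself uses a temporary middle-path
 * task; the completion CQE is posted against the original exchange, so the
 * timer armed here (2 * R_A_TOV) is set on the original io_req rather than
 * on the ABTS request.
 */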
int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_rport *rport = tgt->rport;
	struct fc_rport_priv *rdata = tgt->rdata;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *abts_io_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct fc_frame_header *fc_hdr;
	struct bnx2fc_mp_req *abts_req;
	int task_idx, index;
	u32 sid, did;
	u16 xid;
	int rc = SUCCESS;
	u32 r_a_tov = rdata->r_a_tov;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");

	port = io_req->port;
	interface = port->priv;
	lport = port->lport;

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
		rc = FAILED;
		goto abts_err;
	}

	if (rport == NULL) {
		printk(KERN_ERR PFX "initiate_abts: rport is NULL\n");
		rc = FAILED;
		goto abts_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
		rc = FAILED;
		goto abts_err;
	}

	abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
	if (!abts_io_req) {
		printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
		rc = FAILED;
		goto abts_err;
	}

	/* Initialize rest of io_req fields */
	abts_io_req->sc_cmd = NULL;
	abts_io_req->port = port;
	abts_io_req->tgt = tgt;
	abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */

	abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
	memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));

	/* Fill FC header */
	fc_hdr = &(abts_req->req_fc_hdr);

	/* Obtain oxid and rxid for the original exchange to be aborted */
	fc_hdr->fh_ox_id = htons(io_req->xid);
	fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id);

	sid = tgt->sid;
	did = rport->port_id;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
			 FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);

	xid = abts_io_req->xid;
	BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(abts_io_req, task);

	/*
	 * ABTS task is a temporary task that will be cleaned up
	 * irrespective of ABTS response. We need to start the timer
	 * for the original exchange, as the CQE is posted for the original
	 * IO request.
	 *
	 * Timer for ABTS is started only when it is originated by a
	 * TM request. For the ABTS issued as part of ULP timeout,
	 * scsi-ml maintains the timers.
	 */

	/* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/
	bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);

abts_err:
	return rc;
}
int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
				enum fc_rctl r_ctl)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *seq_clnp_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	int task_idx, index;
	u16 xid;
	int rc = 0;

	BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n",
		      orig_io_req->xid);
	kref_get(&orig_io_req->refcount);

	port = orig_io_req->port;
	interface = port->priv;
	lport = port->lport;

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n");
		rc = -ENOMEM;
		goto cleanup_err;
	}

	seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
	if (!seq_clnp_req) {
		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
		rc = -ENOMEM;
		kfree(cb_arg);
		goto cleanup_err;
	}
	/* Initialize rest of io_req fields */
	seq_clnp_req->sc_cmd = NULL;
	seq_clnp_req->port = port;
	seq_clnp_req->tgt = tgt;
	seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */

	xid = seq_clnp_req->xid;

	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	cb_arg->aborted_io_req = orig_io_req;
	cb_arg->io_req = seq_clnp_req;
	cb_arg->r_ctl = r_ctl;
	cb_arg->offset = offset;
	seq_clnp_req->cb_arg = cb_arg;

	printk(KERN_ERR PFX "call init_seq_cleanup_task\n");
	bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
cleanup_err:
	return rc;
}
int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *cleanup_io_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int task_idx, index;
	u16 xid, orig_xid;
	int rc = 0;

	/* ASSUMPTION: called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");

	port = io_req->port;
	interface = port->priv;
	lport = port->lport;

	cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
	if (!cleanup_io_req) {
		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
		rc = -1;
		goto cleanup_err;
	}

	/* Initialize rest of io_req fields */
	cleanup_io_req->sc_cmd = NULL;
	cleanup_io_req->port = port;
	cleanup_io_req->tgt = tgt;
	cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */

	xid = cleanup_io_req->xid;

	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	orig_xid = io_req->xid;

	BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid);

	bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);

cleanup_err:
	return rc;
}
/**
 * bnx2fc_eh_target_reset - Reset a target
 *
 * @sc_cmd:	SCSI command
 *
 * Set from SCSI host template to send task mgmt command to the target
 *	and wait for the response
 */
int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
{
	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
}

/**
 * bnx2fc_eh_device_reset - Reset a single LUN
 *
 * @sc_cmd:	SCSI command
 *
 * Set from SCSI host template to send task mgmt command to the target
 *	and wait for the response
 */
int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
}
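
/*
 * bnx2fc_expl_logo - force an explicit logout when abort recovery stalls.
 *
 * Called with tgt_lock held (dropped and re-acquired internally). A cleanup
 * is issued for the stuck io_req, the rport is logged off unless a logout
 * is already in flight, and the function then polls for the session to come
 * back ready, giving up after BNX2FC_RELOGIN_WAIT_CNT polls.
 */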
int bnx2fc_expl_logo(struct fc_lport *lport, struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_rport_priv *rdata = tgt->rdata;
	int logo_issued;
	int rc = SUCCESS;
	int wait_cnt = 0;

	BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
		      tgt->flags);
	logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
				       &tgt->flags);
	io_req->wait_for_comp = 1;
	bnx2fc_initiate_cleanup(io_req);

	spin_unlock_bh(&tgt->tgt_lock);

	wait_for_completion(&io_req->tm_done);

	io_req->wait_for_comp = 0;
	/*
	 * release the reference taken in eh_abort to allow the
	 * target to re-login after flushing IOs
	 */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);

	if (!logo_issued) {
		clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
		mutex_lock(&lport->disc.disc_mutex);
		lport->tt.rport_logoff(rdata);
		mutex_unlock(&lport->disc.disc_mutex);
		do {
			msleep(BNX2FC_RELOGIN_WAIT_TIME);
			if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT) {
				rc = FAILED;
				break;
			}
		} while (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags));
	}
	spin_lock_bh(&tgt->tgt_lock);
	return rc;
}
/**
 * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
 *			SCSI command
 *
 * @sc_cmd:	SCSI_ML command pointer
 *
 * SCSI abort request handler
 */
int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct bnx2fc_cmd *io_req;
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt;
	int rc = FAILED;

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		return rc;

	lport = shost_priv(sc_cmd->device->host);
	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
		printk(KERN_ERR PFX "eh_abort: link not ready\n");
		return rc;
	}

	tgt = (struct bnx2fc_rport *)&rp[1];

	BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");

	spin_lock_bh(&tgt->tgt_lock);
	io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
	if (!io_req) {
		/* Command might have just completed */
		printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}
	BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
		      io_req->refcount.refcount.counter);

	/* Hold IO request across abort processing */
	kref_get(&io_req->refcount);

	BUG_ON(tgt != io_req->tgt);

	/* Remove the io_req from the active_q. */
	/*
	 * Task Mgmt functions (LUN RESET & TGT RESET) will not
	 * issue an ABTS on this particular IO req, as the
	 * io_req is no longer in the active_q.
	 */
	if (tgt->flush_in_prog) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
		       "flush in progress\n", io_req->xid);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}

	if (io_req->on_active_queue == 0) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
		       "not on active_q\n", io_req->xid);
		/*
		 * This condition can happen only due to the FW bug,
		 * where we do not receive cleanup response from
		 * the FW. Handle this case gracefully by erroring
		 * back the IO request to SCSI-ml
		 */
		bnx2fc_scsi_done(io_req, DID_ABORT);

		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}

	/*
	 * Only eh_abort processing will remove the IO from
	 * active_cmd_q before processing the request. this is
	 * done to avoid race conditions between IOs aborted
	 * as part of task management completion and eh_abort
	 * processing
	 */
	list_del_init(&io_req->link);
	io_req->on_active_queue = 0;
	/* Move IO req to retire queue */
	list_add_tail(&io_req->link, &tgt->io_retire_queue);

	init_completion(&io_req->tm_done);

	if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
		       "already in abts processing\n", io_req->xid);
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */
		rc = bnx2fc_expl_logo(lport, io_req);
		goto out;
	}

	/* Cancel the current timer running on this io_req */
	if (cancel_delayed_work(&io_req->timeout_work))
		kref_put(&io_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */
	set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
	io_req->wait_for_comp = 1;
	rc = bnx2fc_initiate_abts(io_req);
	if (rc == FAILED) {
		bnx2fc_initiate_cleanup(io_req);
		spin_unlock_bh(&tgt->tgt_lock);
		wait_for_completion(&io_req->tm_done);
		spin_lock_bh(&tgt->tgt_lock);
		io_req->wait_for_comp = 0;
		goto done;
	}
	spin_unlock_bh(&tgt->tgt_lock);

	wait_for_completion(&io_req->tm_done);

	spin_lock_bh(&tgt->tgt_lock);
	io_req->wait_for_comp = 0;
	if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
			       &io_req->req_flags))) {
		/* Let the scsi-ml try to recover this command */
		printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
		       io_req->xid);
		rc = bnx2fc_expl_logo(lport, io_req);
		goto out;
	} else {
		/*
		 * We come here even when there was a race condition
		 * between timeout and abts completion, and abts
		 * completion happens just in time.
		 */
		BNX2FC_IO_DBG(io_req, "abort succeeded\n");
		rc = SUCCESS;
		bnx2fc_scsi_done(io_req, DID_ABORT);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}
done:
	/* release the reference taken in eh_abort */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
out:
	spin_unlock_bh(&tgt->tgt_lock);
	return rc;
}
void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
				      struct fcoe_task_ctx_entry *task,
				      u8 rx_state)
{
	struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg;
	struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req;
	u32 offset = cb_arg->offset;
	enum fc_rctl r_ctl = cb_arg->r_ctl;
	int rc = 0;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;

	BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x, "
		      "cmd_type = %d\n",
		      seq_clnp_req->xid, seq_clnp_req->cmd_type);

	if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) {
		printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n",
		       seq_clnp_req->xid);
		goto free_cb_arg;
	}

	spin_unlock_bh(&tgt->tgt_lock);
	rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
	spin_lock_bh(&tgt->tgt_lock);

	if (rc)
		printk(KERN_ERR PFX "clnup_compl: Unable to send SRR"
		       " IO will abort\n");
	seq_clnp_req->cb_arg = NULL;
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
free_cb_arg:
	kfree(cb_arg);
	return;
}
void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
				  struct fcoe_task_ctx_entry *task,
				  u8 num_rq)
{
	BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
		      "refcnt = %d, cmd_type = %d\n",
		      io_req->refcount.refcount.counter, io_req->cmd_type);
	bnx2fc_scsi_done(io_req, DID_ERROR);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	if (io_req->wait_for_comp)
		complete(&io_req->tm_done);
}
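
/*
 * bnx2fc_process_abts_compl - handle the CQE posted for a completed ABTS.
 *
 * Runs under tgt_lock from the completion path. On a BA_ACC the io_req is
 * kept and flagged so that an RRQ is sent after R_A_TOV from the timeout
 * handler; on BA_RJT (or an unknown response) only the OX_ID retirement
 * timer is armed. Waiters blocked in eh_abort are woken via tm_done.
 */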
void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
			       struct fcoe_task_ctx_entry *task,
			       u8 num_rq)
{
	u32 r_ctl;
	u32 r_a_tov = FC_DEF_R_A_TOV;
	u8 issue_rrq = 0;
	struct bnx2fc_rport *tgt = io_req->tgt;

	BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x, "
		      "refcnt = %d, cmd_type = %d\n",
		      io_req->xid,
		      io_req->refcount.refcount.counter, io_req->cmd_type);

	if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
			     &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "Timer context finished processing"
			      " this io\n");
		return;
	}

	/* Do not issue RRQ as this IO is already cleaned up */
	if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
			     &io_req->req_flags))
		goto io_compl;

	/*
	 * For ABTS issued due to SCSI eh_abort_handler, timeout
	 * values are maintained by scsi-ml itself. Cancel timeout
	 * in case ABTS issued as part of task management function
	 * or due to FW error.
	 */
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */

	r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl;

	switch (r_ctl) {
	case FC_RCTL_BA_ACC:
		/*
		 * Don't release this cmd yet. It will be released
		 * after we get RRQ response
		 */
		BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
		issue_rrq = 1;
		break;

	case FC_RCTL_BA_RJT:
		BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
		break;
	default:
		printk(KERN_ERR PFX "Unknown ABTS response\n");
		break;
	}

	if (issue_rrq) {
		BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
		set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
	}
	set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
	bnx2fc_cmd_timer_set(io_req, r_a_tov);

io_compl:
	if (io_req->wait_for_comp) {
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags))
			complete(&io_req->tm_done);
	} else {
		/*
		 * We end up here when ABTS is issued as
		 * in asynchronous context, i.e., as part
		 * of task management completion, or
		 * when FW error is received or when the
		 * ABTS is issued when the IO is timed
		 * out.
		 */
		if (io_req->on_active_queue) {
			list_del_init(&io_req->link);
			io_req->on_active_queue = 0;
			/* Move IO req to retire queue */
			list_add_tail(&io_req->link, &tgt->io_retire_queue);
		}
		bnx2fc_scsi_done(io_req, DID_ERROR);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}
}
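
/*
 * bnx2fc_lun_reset_cmpl() and bnx2fc_tgt_reset_cmpl() below run with
 * tgt_lock held from TM completion processing. They walk the target's
 * active_cmd_queue and issue an ABTS for each command covered by the reset
 * (commands on the matching LUN for a LUN reset, every active command for a
 * target reset), skipping any command that already has an ABTS in progress.
 */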
static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_cmd *cmd, *tmp;
	int tm_lun = sc_cmd->device->lun;
	int rc = 0;
	int lun;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
	/*
	 * Walk thru the active_ios queue and ABORT the IO
	 * that matches with the LUN that was reset
	 */
	list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
		BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
		lun = cmd->sc_cmd->device->lun;
		if (lun == tm_lun) {
			/* Initiate ABTS on this cmd */
			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &cmd->req_flags)) {
				/* cancel the IO timeout */
				if (cancel_delayed_work(&io_req->timeout_work))
					kref_put(&io_req->refcount,
						 bnx2fc_cmd_release);
							/* timer hold */
				rc = bnx2fc_initiate_abts(cmd);
				/* abts shouldn't fail in this context */
				WARN_ON(rc != SUCCESS);
			} else
				printk(KERN_ERR PFX "lun_rst: abts already in"
				       " progress for this IO 0x%x\n",
				       cmd->xid);
		}
	}
}
static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_cmd *cmd, *tmp;
	int rc = 0;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");
	/*
	 * Walk thru the active_ios queue and ABORT every IO
	 * on the target that was reset
	 */
	list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
		BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
		/* Initiate ABTS */
		if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
				      &cmd->req_flags)) {
			/* cancel the IO timeout */
			if (cancel_delayed_work(&io_req->timeout_work))
				kref_put(&io_req->refcount,
					 bnx2fc_cmd_release); /* timer hold */
			rc = bnx2fc_initiate_abts(cmd);
			/* abts shouldn't fail in this context */
			WARN_ON(rc != SUCCESS);
		} else
			printk(KERN_ERR PFX "tgt_rst: abts already in progress"
			       " for this IO 0x%x\n", cmd->xid);
	}
}
void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
			     struct fcoe_task_ctx_entry *task, u8 num_rq)
{
	struct bnx2fc_mp_req *tm_req;
	struct fc_frame_header *fc_hdr;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	u64 *hdr;
	u64 *temp_hdr;
	void *rsp_buf;

	/* Called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n");

	if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags)))
		set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags);
	else {
		/* TM has already timed out and we got
		 * delayed completion. Ignore completion
		 * processing.
		 */
		return;
	}

	tm_req = &(io_req->mp_req);
	fc_hdr = &(tm_req->resp_fc_hdr);
	hdr = (u64 *)fc_hdr;
	temp_hdr = (u64 *)
		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	tm_req->resp_len =
		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;

	rsp_buf = tm_req->resp_buf;

	if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
		bnx2fc_parse_fcp_rsp(io_req,
				     (struct fcoe_fcp_rsp_payload *)
				     rsp_buf, num_rq);
		if (io_req->fcp_rsp_code == 0) {
			/* TM successful */
			if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
				bnx2fc_lun_reset_cmpl(io_req);
			else if (tm_req->tm_flags & FCP_TMF_TGT_RESET)
				bnx2fc_tgt_reset_cmpl(io_req);
		}
	} else {
		printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
		       fc_hdr->fh_r_ctl);
	}
	if (!sc_cmd->SCp.ptr) {
		printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
		return;
	}
	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good IO completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			/* Transport status is good, SCSI status not good */
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;
	default:
		BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
			      io_req->fcp_status);
		break;
	}

	sc_cmd = io_req->sc_cmd;
	io_req->sc_cmd = NULL;

	/* check if the io_req exists in tgt's tmf_q */
	if (io_req->on_tmf_queue) {
		list_del_init(&io_req->link);
		io_req->on_tmf_queue = 0;
	} else {
		printk(KERN_ERR PFX "Command not on tmf_queue!\n");
		return;
	}

	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);

	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	if (io_req->wait_for_comp) {
		BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
		complete(&io_req->tm_done);
	}
}
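
/**
 * bnx2fc_split_bd - split one large SG element into multiple BDs
 *
 * @io_req:   IO request owning the BD table
 * @addr:     DMA address of the scatterlist element
 * @sg_len:   length of the scatterlist element in bytes
 * @bd_index: first free slot in the BD table
 *
 * Chops an SG element that exceeds BNX2FC_MAX_BD_LEN into
 * BNX2FC_BD_SPLIT_SZ-sized buffer descriptors plus a remainder BD,
 * and returns the number of BDs written. For example, assuming
 * BNX2FC_BD_SPLIT_SZ is 32 KB, a 100 KB element would be emitted as
 * three 32 KB BDs followed by one 4 KB BD.
 */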
static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
			   int bd_index)
{
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	int frag_size, sg_frags;

	sg_frags = 0;
	while (sg_len) {
		if (sg_len >= BNX2FC_BD_SPLIT_SZ)
			frag_size = BNX2FC_BD_SPLIT_SZ;
		else
			frag_size = sg_len;
		bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff;
		bd[bd_index + sg_frags].buf_addr_hi = addr >> 32;
		bd[bd_index + sg_frags].buf_len = (u16)frag_size;
		bd[bd_index + sg_frags].flags = 0;

		addr += (u64) frag_size;
		sg_frags++;
		sg_len -= frag_size;
	}
	return sg_frags;
}
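
/**
 * bnx2fc_map_sg - DMA-map the scatterlist and build the BD table
 *
 * @io_req: IO request whose scatterlist is to be mapped
 *
 * Maps the command's scatterlist for DMA and converts each element
 * into one or more buffer descriptors, splitting elements larger than
 * BNX2FC_MAX_BD_LEN. Returns the number of BDs used; a mismatch
 * between the mapped byte count and scsi_bufflen() is only logged.
 */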
static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int sg_count = 0;
	int bd_count = 0;
	int sg_frags;
	unsigned int sg_len;
	u64 addr;
	int i;

	sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc),
			      scsi_sg_count(sc), sc->sc_data_direction);
	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = sg_dma_len(sg);
		addr = sg_dma_address(sg);
		if (sg_len > BNX2FC_MAX_BD_LEN) {
			sg_frags = bnx2fc_split_bd(io_req, addr, sg_len,
						   bd_count);
		} else {
			sg_frags = 1;
			bd[bd_count].buf_addr_lo = addr & 0xffffffff;
			bd[bd_count].buf_addr_hi = addr >> 32;
			bd[bd_count].buf_len = (u16)sg_len;
			bd[bd_count].flags = 0;
		}
		bd_count += sg_frags;
		byte_count += sg_len;
	}
	if (byte_count != scsi_bufflen(sc))
		printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, "
			"task_id = 0x%x\n", byte_count, scsi_bufflen(sc),
			io_req->xid);
	return bd_count;
}
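
/**
 * bnx2fc_build_bd_list_from_sg - prepare the BD table for an IO request
 *
 * @io_req: IO request to prepare
 *
 * Builds the buffer descriptor list from the command's scatterlist,
 * or leaves a single zeroed BD for commands with no data. Records the
 * BD count in bd_valid and returns 0 on success or -ENOMEM if the
 * scatterlist could not be mapped.
 */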
static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	int bd_count;

	if (scsi_sg_count(sc)) {
		bd_count = bnx2fc_map_sg(io_req);
		if (bd_count == 0)
			return -ENOMEM;
	} else {
		bd_count = 0;
		bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
		bd[0].buf_len = bd[0].flags = 0;
	}
	io_req->bd_tbl->bd_valid = bd_count;

	return 0;
}
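
/**
 * bnx2fc_unmap_sg_list - undo the DMA mapping of a completed IO
 *
 * @io_req: IO request whose scatterlist should be unmapped
 *
 * Unmaps the command's scatterlist and clears bd_valid so the same
 * mapping is not torn down twice.
 */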
static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;

	if (io_req->bd_tbl->bd_valid && sc) {
		scsi_dma_unmap(sc);
		io_req->bd_tbl->bd_valid = 0;
	}
}
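
/**
 * bnx2fc_build_fcp_cmnd - fill in the FCP_CMND payload for an IO
 *
 * @io_req:   IO request (SCSI command or task management request)
 * @fcp_cmnd: FCP_CMND buffer to populate
 *
 * Zeroes the FCP_CMND and encodes the LUN, data length, CDB, task
 * management flags and read/write flags, then sets the task attribute
 * (simple, ordered or head-of-queue) from the SCSI tag message.
 */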
void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
			   struct fcp_cmnd *fcp_cmnd)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	char tag[2];

	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));

	int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun);

	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
	memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);

	fcp_cmnd->fc_cmdref = 0;
	fcp_cmnd->fc_pri_ta = 0;
	fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
	fcp_cmnd->fc_flags = io_req->io_req_flags;

	if (scsi_populate_tag_msg(sc_cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fc_pri_ta = FCP_PTA_HEADQ;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fc_pri_ta = FCP_PTA_ORDERED;
			break;
		default:
			fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
			break;
		}
	} else {
		fcp_cmnd->fc_pri_ta = 0;
	}
}
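
/**
 * bnx2fc_parse_fcp_rsp - parse the FCP_RSP payload of a completed IO
 *
 * @io_req:  IO request the response belongs to
 * @fcp_rsp: FCP_RSP payload from the task context
 * @num_rq:  number of RQ entries carrying FCP_RSP_INFO and sense data
 *
 * Records the transport status, SCSI status, residual and completion
 * flags on the IO request, then pulls the optional FCP_RSP_INFO
 * (rsp_code, relevant to task management) and sense data from the RQ
 * buffers into the SCSI command's sense buffer.
 */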
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
				 struct fcoe_fcp_rsp_payload *fcp_rsp,
				 u8 num_rq)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct bnx2fc_rport *tgt = io_req->tgt;
	u8 rsp_flags = fcp_rsp->fcp_flags.flags;
	u32 rq_buff_len = 0;
	int i;
	unsigned char *rq_data;
	unsigned char *dummy;
	int fcp_sns_len = 0;
	int fcp_rsp_len = 0;

	io_req->fcp_status = FC_GOOD;
	io_req->fcp_resid = fcp_rsp->fcp_resid;

	io_req->scsi_comp_flags = rsp_flags;
	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
				fcp_rsp->scsi_status_code;

	/* Fetch fcp_rsp_info and fcp_sns_info if available */
	if (num_rq) {
		/*
		 * We do not anticipate num_rq > 1, as the Linux-defined
		 * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of FCP_RSP_INFO;
		 * a single 256-byte RQ buffer is enough to hold this.
		 */

		if (rsp_flags &
		    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) {
			fcp_rsp_len = rq_buff_len
					= fcp_rsp->fcp_rsp_len;
		}

		if (rsp_flags &
		    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) {
			fcp_sns_len = fcp_rsp->fcp_sns_len;
			rq_buff_len += fcp_rsp->fcp_sns_len;
		}

		io_req->fcp_rsp_len = fcp_rsp_len;
		io_req->fcp_sns_len = fcp_sns_len;

		if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
			/* Invalid sense length. */
			printk(KERN_ERR PFX "invalid sns length %d\n",
				rq_buff_len);
			/* reset rq_buff_len */
			rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
		}

		rq_data = bnx2fc_get_next_rqe(tgt, 1);

		if (num_rq > 1) {
			/* We do not need extra sense data */
			for (i = 1; i < num_rq; i++)
				dummy = bnx2fc_get_next_rqe(tgt, 1);
		}

		/* fetch fcp_rsp_code */
		if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
			/* Only for task management function */
			io_req->fcp_rsp_code = rq_data[3];
			printk(KERN_ERR PFX "fcp_rsp_code = %d\n",
				io_req->fcp_rsp_code);
		}

		/* fetch sense data */
		rq_data += fcp_rsp_len;

		if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
			printk(KERN_ERR PFX "Truncating sense buffer\n");
			fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
		}

		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (fcp_sns_len)
			memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);

		/* return RQ entries */
		for (i = 0; i < num_rq; i++)
			bnx2fc_return_rqe(tgt, 1);
	}
}
/**
 * bnx2fc_queuecommand - Queuecommand function of the scsi template
 *
 * @host:	The Scsi_Host the command was issued to
 * @sc_cmd:	struct scsi_cmnd to be executed
 *
 * This is the IO strategy routine, called by SCSI-ML
 **/
int bnx2fc_queuecommand(struct Scsi_Host *host,
			struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lport = shost_priv(host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_cmd *io_req;
	int rc = 0;
	int rval;

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		sc_cmd->result = rval;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	/* rport and tgt are allocated together, so tgt should be non-NULL */
	tgt = (struct bnx2fc_rport *)&rp[1];

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		/*
		 * Session is not offloaded yet. Let SCSI-ml retry
		 * the command.
		 */
		rc = SCSI_MLQUEUE_TARGET_BUSY;
		goto exit_qcmd;
	}

	io_req = bnx2fc_cmd_alloc(tgt);
	if (!io_req) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}
	io_req->sc_cmd = sc_cmd;

	if (bnx2fc_post_io_req(tgt, io_req)) {
		printk(KERN_ERR PFX "Unable to post io_req\n");
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}
exit_qcmd:
	return rc;
}
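
/**
 * bnx2fc_process_scsi_cmd_compl - complete a SCSI command on FCP_RSP
 *
 * @io_req: IO request that completed
 * @task:   firmware task context entry holding the FCP_RSP payload
 * @num_rq: number of RQ entries carrying additional response data
 *
 * Called with tgt_lock held. Cancels the IO timer, parses the FCP_RSP,
 * moves the request to the retire queue, unmaps the scatterlist, sets
 * the SCSI result and hands the command back to the SCSI midlayer.
 */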
void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
				   struct fcoe_task_ctx_entry *task,
				   u8 num_rq)
{
	struct fcoe_fcp_rsp_payload *fcp_rsp;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct scsi_cmnd *sc_cmd;
	struct Scsi_Host *host;

	/* scsi_cmd_cmpl is called with tgt lock held */

	if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
		/* we will not receive ABTS response for this IO */
		BNX2FC_IO_DBG(io_req, "Timer context finished processing "
			      "this scsi cmd\n");
	}

	/* Cancel the timeout_work, as we received IO completion */
	if (cancel_delayed_work(&io_req->timeout_work))
		kref_put(&io_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */

	sc_cmd = io_req->sc_cmd;
	if (sc_cmd == NULL) {
		printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n");
		return;
	}

	/* Fetch fcp_rsp from task context and perform cmd completion */
	fcp_rsp = (struct fcoe_fcp_rsp_payload *)
		   &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);

	/* parse fcp_rsp and obtain sense data from RQ if available */
	bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);

	host = sc_cmd->device->host;
	if (!sc_cmd->SCp.ptr) {
		printk(KERN_ERR PFX "SCp.ptr is NULL\n");
		return;
	}

	if (io_req->on_active_queue) {
		list_del_init(&io_req->link);
		io_req->on_active_queue = 0;
		/* Move IO req to retire queue */
		list_add_tail(&io_req->link, &tgt->io_retire_queue);
	} else {
		/* This should not happen, but could have been pulled
		 * by bnx2fc_flush_active_ios(), or during a race
		 * between command abort and (late) completion.
		 */
		BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
		if (io_req->wait_for_comp)
			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
					       &io_req->req_flags))
				complete(&io_req->tm_done);
	}

	bnx2fc_unmap_sg_list(io_req);
	io_req->sc_cmd = NULL;

	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good IO completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			/* Transport status is good, SCSI status not good */
			BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d"
				      " fcp_resid = 0x%x\n",
				      io_req->cdb_status, io_req->fcp_resid);
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;
	default:
		printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n",
			io_req->fcp_status);
		break;
	}
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
}
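
/**
 * bnx2fc_post_io_req - post a SCSI command to an offloaded session
 *
 * @tgt:    offloaded remote port the command is destined for
 * @io_req: freshly allocated IO request wrapping the scsi_cmnd
 *
 * Sets up the request (data direction, statistics, BD list and task
 * context, where task_idx/index select the task context page and the
 * entry within it), then, under tgt_lock, arms the IO timer if
 * enabled, queues the request on active_cmd_queue, posts it to the
 * send queue and rings the doorbell. Returns 0 on success or -EAGAIN
 * if the BD list cannot be built or the session is flushing or not
 * ready.
 */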
int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
		       struct bnx2fc_cmd *io_req)
{
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct fc_lport *lport = port->lport;
	struct fc_stats *stats;
	int task_idx, index;
	u16 xid;

	/* Initialize rest of io_req fields */
	io_req->cmd_type = BNX2FC_SCSI_CMD;
	io_req->port = port;
	io_req->tgt = tgt;
	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
	sc_cmd->SCp.ptr = (char *)io_req;

	stats = per_cpu_ptr(lport->stats, get_cpu());
	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		io_req->io_req_flags = BNX2FC_READ;
		stats->InputRequests++;
		stats->InputBytes += io_req->data_xfer_len;
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		io_req->io_req_flags = BNX2FC_WRITE;
		stats->OutputRequests++;
		stats->OutputBytes += io_req->data_xfer_len;
	} else {
		io_req->io_req_flags = 0;
		stats->ControlRequests++;
	}
	put_cpu();

	xid = io_req->xid;

	/* Build buffer descriptor list for firmware from sg list */
	if (bnx2fc_build_bd_list_from_sg(io_req)) {
		printk(KERN_ERR PFX "BD list creation failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EAGAIN;
	}

	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_task(io_req, task);

	spin_lock_bh(&tgt->tgt_lock);

	if (tgt->flush_in_prog) {
		printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EAGAIN;
	}

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "Session not ready...post_io\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EAGAIN;
	}

	/* Time IO req */
	if (tgt->io_timeout)
		bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Enqueue the io_req to active_cmd_queue */
	io_req->on_active_queue = 1;
	/* move io_req from pending_queue to active_queue */
	list_add_tail(&io_req->link, &tgt->active_cmd_queue);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);
	return 0;
}