bfa_ioim.c

/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <bfa.h>
#include <cs/bfa_debug.h>
#include <bfa_cb_ioim_macros.h>

BFA_TRC_FILE(HAL, IOIM);

/*
 * forward declarations.
 */
static bfa_boolean_t	bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
static bfa_boolean_t	bfa_ioim_sge_setup(struct bfa_ioim_s *ioim);
static void		bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim);
static bfa_boolean_t	bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
static void		bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
static void		__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
static void		__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
static void		__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void		__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void		__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);

/**
 *  bfa_ioim_sm
 */

/**
 * IO state machine events
 */
enum bfa_ioim_event {
	BFA_IOIM_SM_START = 1,		/*  io start request from host */
	BFA_IOIM_SM_COMP_GOOD = 2,	/*  io good comp, resource free */
	BFA_IOIM_SM_COMP = 3,		/*  io comp, resource is free */
	BFA_IOIM_SM_COMP_UTAG = 4,	/*  io comp, resource is free */
	BFA_IOIM_SM_DONE = 5,		/*  io comp, resource not free */
	BFA_IOIM_SM_FREE = 6,		/*  io resource is freed */
	BFA_IOIM_SM_ABORT = 7,		/*  abort request from scsi stack */
	BFA_IOIM_SM_ABORT_COMP = 8,	/*  abort from f/w */
	BFA_IOIM_SM_ABORT_DONE = 9,	/*  abort completion from f/w */
	BFA_IOIM_SM_QRESUME = 10,	/*  CQ space available to queue IO */
	BFA_IOIM_SM_SGALLOCED = 11,	/*  SG page allocation successful */
	BFA_IOIM_SM_SQRETRY = 12,	/*  sequence recovery retry */
	BFA_IOIM_SM_HCB = 13,		/*  bfa callback complete */
	BFA_IOIM_SM_CLEANUP = 14,	/*  IO cleanup from itnim */
	BFA_IOIM_SM_TMSTART = 15,	/*  IO cleanup from tskim */
	BFA_IOIM_SM_TMDONE = 16,	/*  IO cleanup from tskim */
	BFA_IOIM_SM_HWFAIL = 17,	/*  IOC h/w failure event */
	BFA_IOIM_SM_IOTOV = 18,		/*  ITN offline TOV */
};

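/*
 * Note (sketch, not from this file): the handlers below are plain
 * functions stored in ioim->sm and driven through the generic state
 * machine helpers declared in cs/bfa_sm.h, which conceptually reduce
 * to the following:
 *
 *	bfa_sm_set_state(_sm, _state)  : (_sm)->sm = (bfa_sm_t)(_state)
 *	bfa_sm_send_event(_sm, _event) : (_sm)->sm((_sm), (_event))
 *	bfa_sm_cmp_state(_sm, _state)  : (_sm)->sm == (bfa_sm_t)(_state)
 *
 * so every event lands in the handler for the IO's current state.
 */
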
/*
 * forward declaration of IO state machine
 */
static void	bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
				   enum bfa_ioim_event event);
static void	bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
				    enum bfa_ioim_event event);
static void	bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
				   enum bfa_ioim_event event);
static void	bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
				  enum bfa_ioim_event event);
static void	bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
				    enum bfa_ioim_event event);
static void	bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
				  enum bfa_ioim_event event);
static void	bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
					enum bfa_ioim_event event);
static void	bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
					  enum bfa_ioim_event event);
static void	bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
				enum bfa_ioim_event event);
static void	bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
				     enum bfa_ioim_event event);
static void	bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
				    enum bfa_ioim_event event);

/**
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_START:
		if (!bfa_itnim_is_online(ioim->itnim)) {
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					      &ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
					     __bfa_cb_ioim_pathtov, ioim);
			} else {
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					      &ioim->itnim->pending_q);
			}
			break;
		}

		if (ioim->nsges > BFI_SGE_INLINE) {
			if (!bfa_ioim_sge_setup(ioim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
				return;
			}
		}

		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}

		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_IOTOV:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			     __bfa_cb_ioim_pathtov, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/**
		 * IO in pending queue can get abort requests. Complete abort
		 * requests immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * IO is waiting for SG pages.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_SGALLOCED:
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * IO is active.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			     __bfa_cb_ioim_good_comp, ioim);
		break;

	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			     ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			     ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		ioim->iosp->abort_explicit = BFA_TRUE;
		ioim->io_cbfn = __bfa_cb_ioim_abort;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
				      &ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_CLEANUP:
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
				      &ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * IO is being aborted, waiting for completion from firmware.
 */
static void
bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
				      &ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * IO is being cleaned up (implicit abort), waiting for completion from
 * firmware.
 */
static void
bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		break;

	case BFA_IOIM_SM_ABORT:
		/**
		 * IO is already being aborted implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/**
		 * IO can be in cleanup state already due to TM command.
		 * 2nd cleanup request comes from ITN offline event.
		 */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * IO is waiting for room in request CQ.
 */
static void
bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		bfa_ioim_send_ioreq(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * Active IO is being aborted, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * Active IO is being cleaned up, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/**
		 * IO is already being cleaned up implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * IO bfa callback is pending.
 */
static void
bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		bfa_cb_ioim_resfree(ioim->bfa->bfad);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * IO bfa callback is pending. IO resource cannot be freed.
 */
static void
bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
		break;

	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * IO is completed, waiting for resource free from firmware.
 */
static void
bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		bfa_cb_ioim_resfree(ioim->bfa->bfad);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

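/*
 * Summary of the main transitions implemented by the handlers above
 * (derived from the code; HWFAIL and offline edges omitted for brevity):
 *
 *   uninit --START--> active              (via sgalloc when SG pages needed)
 *   active --COMP_GOOD/COMP--> hcb --HCB--> uninit
 *   active --DONE--> hcb_free --HCB--> resfree --FREE--> uninit
 *   active --ABORT--> abort --ABORT_DONE--> hcb_free
 *   active --CLEANUP--> cleanup           (implicit abort, ITN going offline)
 *   qfull states --QRESUME--> resend the request and re-enter the
 *   originating state
 */
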
/**
 *  bfa_ioim_private
 */

static void
__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
}

static void
__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;
	struct bfi_ioim_rsp_s *m;
	u8 *snsinfo = NULL;
	u8 sns_len = 0;
	s32 residue = 0;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
	if (m->io_status == BFI_IOIM_STS_OK) {
		/**
		 * setup sense information, if present
		 */
		if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION
		    && m->sns_len) {
			sns_len = m->sns_len;
			snsinfo = ioim->iosp->snsinfo;
		}

		/**
		 * setup residue value correctly for normal completions
		 */
		if (m->resid_flags == FCP_RESID_UNDER)
			residue = bfa_os_ntohl(m->residue);
		if (m->resid_flags == FCP_RESID_OVER) {
			residue = bfa_os_ntohl(m->residue);
			residue = -residue;
		}
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
			 m->scsi_status, sns_len, snsinfo, residue);
}

static void
__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
			 0, 0, NULL, 0);
}

static void
__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
			 0, 0, NULL, 0);
}

static void
__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
}

static void
bfa_ioim_sgpg_alloced(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
	bfa_ioim_sgpg_setup(ioim);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
}

/**
 * Send I/O request to firmware.
 */
static bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { 0 };
	struct bfi_sge_s *sge;
	u32 pgdlen = 0;
	u64 addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
			      &ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/**
	 * build i/o request message next
	 */
	m->io_tag = bfa_os_htons(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);

	/**
	 * build inline IO SG element here
	 */
	sge = &m->sges[0];
	if (ioim->nsges) {
		sg = (struct scatterlist *)scsi_sglist(cmnd);
		addr = bfa_os_sgaddr(sg_dma_address(sg));
		sge->sga = *(union bfi_addr_u *) &addr;
		pgdlen = sg_dma_len(sg);
		sge->sg_len = pgdlen;
		sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
			     BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
		bfa_sge_to_be(sge);
		sge++;
	}

	if (ioim->nsges > BFI_SGE_INLINE) {
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/**
	 * set up I/O command parameters
	 */
	bfa_os_assign(m->cmnd, cmnd_z0);
	m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
	m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
	bfa_os_assign(m->cmnd.cdb,
		      *(struct scsi_cdb_s *)bfa_cb_ioim_get_cdb(ioim->dio));
	m->cmnd.fcp_dl = bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));

	/**
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
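		/* fall through - RW requests use the generic opcode below */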
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
	}
	if (itnim->seq_rec ||
	    (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));

#ifdef IOIM_ADVANCED
	m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio);
	m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
	m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);

	/**
	 * Handle large CDB (>16 bytes).
	 */
	m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
				FCP_CMND_CDB_LEN) / sizeof(u32);
	if (m->cmnd.addl_cdb_len) {
		bfa_os_memcpy(&m->cmnd.cdb + 1, (struct scsi_cdb_s *)
			      bfa_cb_ioim_get_cdb(ioim->dio) + 1,
			      m->cmnd.addl_cdb_len * sizeof(u32));
		fcp_cmnd_fcpdl(&m->cmnd) =
			bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));
	}
#endif

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}

/**
 * Setup any additional SG pages needed. Inline SG element is setup
 * at queuing time.
 */
static bfa_boolean_t
bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
{
	u16 nsgpgs;

	bfa_assert(ioim->nsges > BFI_SGE_INLINE);

	/**
	 * allocate SG pages needed
	 */
	nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	if (!nsgpgs)
		return BFA_TRUE;

	if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
	    != BFA_STATUS_OK) {
		bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
		return BFA_FALSE;
	}

	ioim->nsgpgs = nsgpgs;
	bfa_ioim_sgpg_setup(ioim);

	return BFA_TRUE;
}

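/*
 * Note (added for illustration): for requests with more than
 * BFI_SGE_INLINE scatter/gather elements, the remainder are carried in
 * SG pages.  BFA_SGPG_NPAGE() (defined in the bfa headers) converts the
 * SGE count into a page count; bfa_ioim_sgpg_setup() below then fills
 * up to BFI_SGPG_DATA_SGES data elements per page and chains the pages
 * through a trailing BFI_SGE_LINK element.
 */
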
static void
bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
{
	int sgeid, nsges, i;
	struct bfi_sge_s *sge;
	struct bfa_sgpg_s *sgpg;
	u32 pgcumsz;
	u64 addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	sgeid = BFI_SGE_INLINE;
	ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);

	sg = scsi_sglist(cmnd);
	sg = sg_next(sg);

	do {
		sge = sgpg->sgpg->sges;
		nsges = ioim->nsges - sgeid;
		if (nsges > BFI_SGPG_DATA_SGES)
			nsges = BFI_SGPG_DATA_SGES;

		pgcumsz = 0;
		for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
			addr = bfa_os_sgaddr(sg_dma_address(sg));
			sge->sga = *(union bfi_addr_u *) &addr;
			sge->sg_len = sg_dma_len(sg);
			pgcumsz += sge->sg_len;

			/**
			 * set flags
			 */
			if (i < (nsges - 1))
				sge->flags = BFI_SGE_DATA;
			else if (sgeid < (ioim->nsges - 1))
				sge->flags = BFI_SGE_DATA_CPL;
			else
				sge->flags = BFI_SGE_DATA_LAST;
		}

		sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);

		/**
		 * set the link element of each page
		 */
		if (sgeid == ioim->nsges) {
			sge->flags = BFI_SGE_PGDLEN;
			sge->sga.a32.addr_lo = 0;
			sge->sga.a32.addr_hi = 0;
		} else {
			sge->flags = BFI_SGE_LINK;
			sge->sga = sgpg->sgpg_pa;
		}
		sge->sg_len = pgcumsz;
	} while (sgeid < ioim->nsges);
}

/**
 * Send I/O abort request to firmware.
 */
static bfa_boolean_t
bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
{
	struct bfi_ioim_abort_req_s *m;
	enum bfi_ioim_h2i msgop;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m)
		return BFA_FALSE;

	/**
	 * build i/o request message next
	 */
	if (ioim->iosp->abort_explicit)
		msgop = BFI_IOIM_H2I_IOABORT_REQ;
	else
		msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;

	bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
	m->io_tag = bfa_os_htons(ioim->iotag);
	m->abort_tag = ++ioim->abort_tag;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}

/**
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_ioim_qresume(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	bfa_fcpim_stats(ioim->fcpim, qresumes);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
}

static void
bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
{
	/**
	 * Move IO from itnim queue to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);

	if (!ioim->iosp->tskim) {
		if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
			bfa_cb_dequeue(&ioim->hcb_qe);
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
		}
		bfa_itnim_iodone(ioim->itnim);
	} else
		bfa_tskim_iodone(ioim->iosp->tskim);
}

/**
 * Complete an IO whose completion was delayed while its ITN was offline,
 * either on path TOV expiry or after the link comes back.
 */
void
bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
{
	/**
	 * If path tov timer expired, fail back with PATHTOV status - these
	 * IO requests are not normally retried by the IO stack.
	 *
	 * Otherwise the device came back online; fail it with normal failed
	 * status so that the IO stack retries these failed IO requests.
	 */
	if (iotov)
		ioim->io_cbfn = __bfa_cb_ioim_pathtov;
	else
		ioim->io_cbfn = __bfa_cb_ioim_failed;

	bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);

	/**
	 * Move IO to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
}

/**
 *  bfa_ioim_friend
 */

/**
 * Memory allocation and initialization.
 */
void
bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_ioim_s *ioim;
	struct bfa_ioim_sp_s *iosp;
	u16 i;
	u8 *snsinfo;
	u32 snsbufsz;

	/**
	 * claim memory first
	 */
	ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_arr = ioim;
	bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);

	iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_sp_arr = iosp;
	bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);

	/**
	 * Claim DMA memory for per IO sense data.
	 */
	snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
	fcpim->snsbase.pa = bfa_meminfo_dma_phys(minfo);
	bfa_meminfo_dma_phys(minfo) += snsbufsz;

	fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
	bfa_meminfo_dma_virt(minfo) += snsbufsz;
	snsinfo = fcpim->snsbase.kva;
	bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);

	/**
	 * Initialize ioim free queues
	 */
	INIT_LIST_HEAD(&fcpim->ioim_free_q);
	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
	INIT_LIST_HEAD(&fcpim->ioim_comp_q);

	for (i = 0; i < fcpim->num_ioim_reqs;
	     i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
		/*
		 * initialize IOIM
		 */
		bfa_os_memset(ioim, 0, sizeof(struct bfa_ioim_s));
		ioim->iotag = i;
		ioim->bfa = fcpim->bfa;
		ioim->fcpim = fcpim;
		ioim->iosp = iosp;
		iosp->snsinfo = snsinfo;
		INIT_LIST_HEAD(&ioim->sgpg_q);
		bfa_reqq_winit(&ioim->iosp->reqq_wait,
			       bfa_ioim_qresume, ioim);
		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
			       bfa_ioim_sgpg_alloced, ioim);
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);

		list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
	}
}

/**
 * Driver detach time call.
 */
void
bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim)
{
}

void
bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16 iotag;
	enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;

	iotag = bfa_os_ntohs(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	bfa_assert(ioim->iotag == iotag);

	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, rsp->io_status);
	bfa_trc(ioim->bfa, rsp->reuse_io_tag);

	if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
		bfa_os_assign(ioim->iosp->comp_rspmsg, *m);

	switch (rsp->io_status) {
	case BFI_IOIM_STS_OK:
		bfa_fcpim_stats(fcpim, iocomp_ok);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_TIMEDOUT:
	case BFI_IOIM_STS_ABORTED:
		rsp->io_status = BFI_IOIM_STS_ABORTED;
		bfa_fcpim_stats(fcpim, iocomp_aborted);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_PROTO_ERR:
		bfa_fcpim_stats(fcpim, iocom_proto_err);
		bfa_assert(rsp->reuse_io_tag);
		evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_SQER_NEEDED:
		bfa_fcpim_stats(fcpim, iocom_sqer_needed);
		bfa_assert(rsp->reuse_io_tag == 0);
		evt = BFA_IOIM_SM_SQRETRY;
		break;

	case BFI_IOIM_STS_RES_FREE:
		bfa_fcpim_stats(fcpim, iocom_res_free);
		evt = BFA_IOIM_SM_FREE;
		break;

	case BFI_IOIM_STS_HOST_ABORTED:
		bfa_fcpim_stats(fcpim, iocom_hostabrts);
		if (rsp->abort_tag != ioim->abort_tag) {
			bfa_trc(ioim->bfa, rsp->abort_tag);
			bfa_trc(ioim->bfa, ioim->abort_tag);
			return;
		}

		if (rsp->reuse_io_tag)
			evt = BFA_IOIM_SM_ABORT_COMP;
		else
			evt = BFA_IOIM_SM_ABORT_DONE;
		break;

	case BFI_IOIM_STS_UTAG:
		bfa_fcpim_stats(fcpim, iocom_utags);
		evt = BFA_IOIM_SM_COMP_UTAG;
		break;

	default:
		bfa_assert(0);
	}

	bfa_sm_send_event(ioim, evt);
}

void
bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16 iotag;

	iotag = bfa_os_ntohs(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	bfa_assert(ioim->iotag == iotag);

	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
}

/**
 * Called by itnim to clean up IO while going offline.
 */
void
bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_fcpim_stats(ioim->fcpim, io_cleanups);

	ioim->iosp->tskim = NULL;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}

void
bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_fcpim_stats(ioim->fcpim, io_tmaborts);

	ioim->iosp->tskim = tskim;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}

/**
 * IOC failure handling.
 */
void
bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
{
	bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
}

/**
 * IO offline TOV popped. Fail the pending IO.
 */
void
bfa_ioim_tov(struct bfa_ioim_s *ioim)
{
	bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
}

/**
 *  bfa_ioim_api
 */

/**
 * Allocate IOIM resource for initiator mode I/O request.
 */
struct bfa_ioim_s *
bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
	       struct bfa_itnim_s *itnim, u16 nsges)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_ioim_s *ioim;

	/**
	 * allocate IOIM resource
	 */
	bfa_q_deq(&fcpim->ioim_free_q, &ioim);
	if (!ioim) {
		bfa_fcpim_stats(fcpim, no_iotags);
		return NULL;
	}

	ioim->dio = dio;
	ioim->itnim = itnim;
	ioim->nsges = nsges;
	ioim->nsgpgs = 0;

	bfa_stats(fcpim, total_ios);
	bfa_stats(itnim, ios);
	fcpim->ios_active++;

	list_add_tail(&ioim->qe, &itnim->io_q);
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	return ioim;
}

void
bfa_ioim_free(struct bfa_ioim_s *ioim)
{
	struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;

	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));

	bfa_assert_fp(list_empty(&ioim->sgpg_q)
		      || (ioim->nsges > BFI_SGE_INLINE));

	if (ioim->nsgpgs > 0)
		bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);

	bfa_stats(ioim->itnim, io_comps);
	fcpim->ios_active--;

	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
}

void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	/**
	 * Obtain the queue over which this request has to be issued
	 */
	ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
		     bfa_cb_ioim_get_reqq(ioim->dio) :
		     bfa_itnim_get_reqq(ioim);

	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
}

/**
 * Driver I/O abort request.
 */
void
bfa_ioim_abort(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_fcpim_stats(ioim->fcpim, io_aborts);

	bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
}
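
/*
 * Illustrative only (not part of the original file): a minimal sketch
 * of how a caller such as the Linux bfad glue layer might drive this
 * API.  The function names and error convention here are hypothetical;
 * the real callers also hold the per-adapter lock while sending state
 * machine events.
 */
#if 0
static int
example_io_submit(struct bfa_s *bfa, struct bfad_ioim_s *dio,
		  struct bfa_itnim_s *itnim, u16 sg_cnt)
{
	struct bfa_ioim_s *ioim;

	/* reserve an IO tag/resource; NULL means all iotags are in use */
	ioim = bfa_ioim_alloc(bfa, dio, itnim, sg_cnt);
	if (!ioim)
		return -1;	/* caller should retry the request later */

	/* select the request queue and fire BFA_IOIM_SM_START */
	bfa_ioim_start(ioim);
	return 0;
}

static void
example_io_abort(struct bfa_ioim_s *ioim)
{
	/* fires BFA_IOIM_SM_ABORT; completion arrives via the hcb path */
	bfa_ioim_abort(ioim);
}
#endif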