/* bfa_ioim.c */
  1. /*
  2. * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
  3. * All rights reserved
  4. * www.brocade.com
  5. *
  6. * Linux driver for Brocade Fibre Channel Host Bus Adapter.
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License (GPL) Version 2 as
  10. * published by the Free Software Foundation
  11. *
  12. * This program is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * General Public License for more details.
  16. */
  17. #include <bfa.h>
  18. #include <cs/bfa_debug.h>
  19. #include <bfa_cb_ioim_macros.h>
  20. BFA_TRC_FILE(HAL, IOIM);
  21. /*
  22. * forward declarations.
  23. */
  24. static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
  25. static bfa_boolean_t bfa_ioim_sge_setup(struct bfa_ioim_s *ioim);
  26. static void bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim);
  27. static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
  28. static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
  29. static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
  30. static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
  31. static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
  32. static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
  33. static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
  34. /**
  35. * bfa_ioim_sm
  36. */
/**
 * IO state machine events
 */
enum bfa_ioim_event {
	BFA_IOIM_SM_START = 1,		/* io start request from host */
	BFA_IOIM_SM_COMP_GOOD = 2,	/* io good comp, resource free */
	BFA_IOIM_SM_COMP = 3,		/* io comp, resource is free */
	BFA_IOIM_SM_COMP_UTAG = 4,	/* io comp, resource is free */
	BFA_IOIM_SM_DONE = 5,		/* io comp, resource not free */
	BFA_IOIM_SM_FREE = 6,		/* io resource is freed */
	BFA_IOIM_SM_ABORT = 7,		/* abort request from scsi stack */
	BFA_IOIM_SM_ABORT_COMP = 8,	/* abort from f/w */
	BFA_IOIM_SM_ABORT_DONE = 9,	/* abort completion from f/w */
	BFA_IOIM_SM_QRESUME = 10,	/* CQ space available to queue IO */
	BFA_IOIM_SM_SGALLOCED = 11,	/* SG page allocation successful */
	BFA_IOIM_SM_SQRETRY = 12,	/* sequence recovery retry */
	BFA_IOIM_SM_HCB = 13,		/* bfa callback complete */
	BFA_IOIM_SM_CLEANUP = 14,	/* IO cleanup from itnim */
	BFA_IOIM_SM_TMSTART = 15,	/* IO cleanup from tskim */
	BFA_IOIM_SM_TMDONE = 16,	/* IO cleanup from tskim */
	BFA_IOIM_SM_HWFAIL = 17,	/* IOC h/w failure event */
	BFA_IOIM_SM_IOTOV = 18,		/* ITN offline TOV */
};
  60. /*
  61. * forward declaration of IO state machine
  62. */
  63. static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
  64. enum bfa_ioim_event event);
  65. static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
  66. enum bfa_ioim_event event);
  67. static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
  68. enum bfa_ioim_event event);
  69. static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
  70. enum bfa_ioim_event event);
  71. static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
  72. enum bfa_ioim_event event);
  73. static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
  74. enum bfa_ioim_event event);
  75. static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
  76. enum bfa_ioim_event event);
  77. static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
  78. enum bfa_ioim_event event);
  79. static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
  80. enum bfa_ioim_event event);
  81. static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
  82. enum bfa_ioim_event event);
  83. static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
  84. enum bfa_ioim_event event);
  85. /**
  86. * IO is not started (unallocated).
  87. */
  88. static void
  89. bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  90. {
  91. bfa_trc_fp(ioim->bfa, ioim->iotag);
  92. bfa_trc_fp(ioim->bfa, event);
  93. switch (event) {
  94. case BFA_IOIM_SM_START:
  95. if (!bfa_itnim_is_online(ioim->itnim)) {
  96. if (!bfa_itnim_hold_io(ioim->itnim)) {
  97. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  98. list_del(&ioim->qe);
  99. list_add_tail(&ioim->qe,
  100. &ioim->fcpim->ioim_comp_q);
  101. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
  102. __bfa_cb_ioim_pathtov, ioim);
  103. } else {
  104. list_del(&ioim->qe);
  105. list_add_tail(&ioim->qe,
  106. &ioim->itnim->pending_q);
  107. }
  108. break;
  109. }
  110. if (ioim->nsges > BFI_SGE_INLINE) {
  111. if (!bfa_ioim_sge_setup(ioim)) {
  112. bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
  113. return;
  114. }
  115. }
  116. if (!bfa_ioim_send_ioreq(ioim)) {
  117. bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
  118. break;
  119. }
  120. bfa_sm_set_state(ioim, bfa_ioim_sm_active);
  121. break;
  122. case BFA_IOIM_SM_IOTOV:
  123. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  124. list_del(&ioim->qe);
  125. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
  126. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
  127. __bfa_cb_ioim_pathtov, ioim);
  128. break;
  129. case BFA_IOIM_SM_ABORT:
  130. /**
  131. * IO in pending queue can get abort requests. Complete abort
  132. * requests immediately.
  133. */
  134. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  135. bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
  136. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
  137. ioim);
  138. break;
  139. default:
  140. bfa_sm_fault(ioim->bfa, event);
  141. }
  142. }
  143. /**
  144. * IO is waiting for SG pages.
  145. */
  146. static void
  147. bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  148. {
  149. bfa_trc(ioim->bfa, ioim->iotag);
  150. bfa_trc(ioim->bfa, event);
  151. switch (event) {
  152. case BFA_IOIM_SM_SGALLOCED:
  153. if (!bfa_ioim_send_ioreq(ioim)) {
  154. bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
  155. break;
  156. }
  157. bfa_sm_set_state(ioim, bfa_ioim_sm_active);
  158. break;
  159. case BFA_IOIM_SM_CLEANUP:
  160. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  161. bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
  162. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  163. ioim);
  164. bfa_ioim_notify_cleanup(ioim);
  165. break;
  166. case BFA_IOIM_SM_ABORT:
  167. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  168. bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
  169. list_del(&ioim->qe);
  170. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
  171. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
  172. ioim);
  173. break;
  174. case BFA_IOIM_SM_HWFAIL:
  175. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  176. bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
  177. list_del(&ioim->qe);
  178. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
  179. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  180. ioim);
  181. break;
  182. default:
  183. bfa_sm_fault(ioim->bfa, event);
  184. }
  185. }
  186. /**
  187. * IO is active.
  188. */
  189. static void
  190. bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  191. {
  192. bfa_trc_fp(ioim->bfa, ioim->iotag);
  193. bfa_trc_fp(ioim->bfa, event);
  194. switch (event) {
  195. case BFA_IOIM_SM_COMP_GOOD:
  196. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  197. list_del(&ioim->qe);
  198. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
  199. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
  200. __bfa_cb_ioim_good_comp, ioim);
  201. break;
  202. case BFA_IOIM_SM_COMP:
  203. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  204. list_del(&ioim->qe);
  205. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
  206. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
  207. ioim);
  208. break;
  209. case BFA_IOIM_SM_DONE:
  210. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
  211. list_del(&ioim->qe);
  212. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
  213. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
  214. ioim);
  215. break;
  216. case BFA_IOIM_SM_ABORT:
  217. ioim->iosp->abort_explicit = BFA_TRUE;
  218. ioim->io_cbfn = __bfa_cb_ioim_abort;
  219. if (bfa_ioim_send_abort(ioim))
  220. bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
  221. else {
  222. bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
  223. bfa_reqq_wait(ioim->bfa, ioim->reqq,
  224. &ioim->iosp->reqq_wait);
  225. }
  226. break;
  227. case BFA_IOIM_SM_CLEANUP:
  228. ioim->iosp->abort_explicit = BFA_FALSE;
  229. ioim->io_cbfn = __bfa_cb_ioim_failed;
  230. if (bfa_ioim_send_abort(ioim))
  231. bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
  232. else {
  233. bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
  234. bfa_reqq_wait(ioim->bfa, ioim->reqq,
  235. &ioim->iosp->reqq_wait);
  236. }
  237. break;
  238. case BFA_IOIM_SM_HWFAIL:
  239. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  240. list_del(&ioim->qe);
  241. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
  242. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  243. ioim);
  244. break;
  245. default:
  246. bfa_sm_fault(ioim->bfa, event);
  247. }
  248. }
  249. /**
  250. * IO is being aborted, waiting for completion from firmware.
  251. */
  252. static void
  253. bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  254. {
  255. bfa_trc(ioim->bfa, ioim->iotag);
  256. bfa_trc(ioim->bfa, event);
  257. switch (event) {
  258. case BFA_IOIM_SM_COMP_GOOD:
  259. case BFA_IOIM_SM_COMP:
  260. case BFA_IOIM_SM_DONE:
  261. case BFA_IOIM_SM_FREE:
  262. break;
  263. case BFA_IOIM_SM_ABORT_DONE:
  264. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
  265. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
  266. ioim);
  267. break;
  268. case BFA_IOIM_SM_ABORT_COMP:
  269. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  270. list_del(&ioim->qe);
  271. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
  272. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
  273. ioim);
  274. break;
  275. case BFA_IOIM_SM_COMP_UTAG:
  276. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  277. list_del(&ioim->qe);
  278. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
  279. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
  280. ioim);
  281. break;
  282. case BFA_IOIM_SM_CLEANUP:
  283. bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
  284. ioim->iosp->abort_explicit = BFA_FALSE;
  285. if (bfa_ioim_send_abort(ioim))
  286. bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
  287. else {
  288. bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
  289. bfa_reqq_wait(ioim->bfa, ioim->reqq,
  290. &ioim->iosp->reqq_wait);
  291. }
  292. break;
  293. case BFA_IOIM_SM_HWFAIL:
  294. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  295. list_del(&ioim->qe);
  296. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
  297. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  298. ioim);
  299. break;
  300. default:
  301. bfa_sm_fault(ioim->bfa, event);
  302. }
  303. }
  304. /**
  305. * IO is being cleaned up (implicit abort), waiting for completion from
  306. * firmware.
  307. */
  308. static void
  309. bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  310. {
  311. bfa_trc(ioim->bfa, ioim->iotag);
  312. bfa_trc(ioim->bfa, event);
  313. switch (event) {
  314. case BFA_IOIM_SM_COMP_GOOD:
  315. case BFA_IOIM_SM_COMP:
  316. case BFA_IOIM_SM_DONE:
  317. case BFA_IOIM_SM_FREE:
  318. break;
  319. case BFA_IOIM_SM_ABORT:
  320. /**
  321. * IO is already being aborted implicitly
  322. */
  323. ioim->io_cbfn = __bfa_cb_ioim_abort;
  324. break;
  325. case BFA_IOIM_SM_ABORT_DONE:
  326. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
  327. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
  328. bfa_ioim_notify_cleanup(ioim);
  329. break;
  330. case BFA_IOIM_SM_ABORT_COMP:
  331. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  332. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
  333. bfa_ioim_notify_cleanup(ioim);
  334. break;
  335. case BFA_IOIM_SM_COMP_UTAG:
  336. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  337. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
  338. bfa_ioim_notify_cleanup(ioim);
  339. break;
  340. case BFA_IOIM_SM_HWFAIL:
  341. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  342. list_del(&ioim->qe);
  343. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
  344. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  345. ioim);
  346. break;
  347. case BFA_IOIM_SM_CLEANUP:
  348. /**
  349. * IO can be in cleanup state already due to TM command. 2nd cleanup
  350. * request comes from ITN offline event.
  351. */
  352. break;
  353. default:
  354. bfa_sm_fault(ioim->bfa, event);
  355. }
  356. }
  357. /**
  358. * IO is waiting for room in request CQ
  359. */
  360. static void
  361. bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  362. {
  363. bfa_trc(ioim->bfa, ioim->iotag);
  364. bfa_trc(ioim->bfa, event);
  365. switch (event) {
  366. case BFA_IOIM_SM_QRESUME:
  367. bfa_sm_set_state(ioim, bfa_ioim_sm_active);
  368. bfa_ioim_send_ioreq(ioim);
  369. break;
  370. case BFA_IOIM_SM_ABORT:
  371. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  372. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  373. list_del(&ioim->qe);
  374. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
  375. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
  376. ioim);
  377. break;
  378. case BFA_IOIM_SM_CLEANUP:
  379. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  380. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  381. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  382. ioim);
  383. bfa_ioim_notify_cleanup(ioim);
  384. break;
  385. case BFA_IOIM_SM_HWFAIL:
  386. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  387. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  388. list_del(&ioim->qe);
  389. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
  390. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  391. ioim);
  392. break;
  393. default:
  394. bfa_sm_fault(ioim->bfa, event);
  395. }
  396. }
  397. /**
  398. * Active IO is being aborted, waiting for room in request CQ.
  399. */
  400. static void
  401. bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  402. {
  403. bfa_trc(ioim->bfa, ioim->iotag);
  404. bfa_trc(ioim->bfa, event);
  405. switch (event) {
  406. case BFA_IOIM_SM_QRESUME:
  407. bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
  408. bfa_ioim_send_abort(ioim);
  409. break;
  410. case BFA_IOIM_SM_CLEANUP:
  411. bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
  412. ioim->iosp->abort_explicit = BFA_FALSE;
  413. bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
  414. break;
  415. case BFA_IOIM_SM_COMP_GOOD:
  416. case BFA_IOIM_SM_COMP:
  417. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  418. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  419. list_del(&ioim->qe);
  420. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
  421. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
  422. ioim);
  423. break;
  424. case BFA_IOIM_SM_DONE:
  425. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
  426. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  427. list_del(&ioim->qe);
  428. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
  429. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
  430. ioim);
  431. break;
  432. case BFA_IOIM_SM_HWFAIL:
  433. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  434. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  435. list_del(&ioim->qe);
  436. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
  437. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  438. ioim);
  439. break;
  440. default:
  441. bfa_sm_fault(ioim->bfa, event);
  442. }
  443. }
  444. /**
  445. * Active IO is being cleaned up, waiting for room in request CQ.
  446. */
  447. static void
  448. bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  449. {
  450. bfa_trc(ioim->bfa, ioim->iotag);
  451. bfa_trc(ioim->bfa, event);
  452. switch (event) {
  453. case BFA_IOIM_SM_QRESUME:
  454. bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
  455. bfa_ioim_send_abort(ioim);
  456. break;
  457. case BFA_IOIM_SM_ABORT:
  458. /**
  459. * IO is already being cleaned up implicitly
  460. */
  461. ioim->io_cbfn = __bfa_cb_ioim_abort;
  462. break;
  463. case BFA_IOIM_SM_COMP_GOOD:
  464. case BFA_IOIM_SM_COMP:
  465. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  466. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  467. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
  468. bfa_ioim_notify_cleanup(ioim);
  469. break;
  470. case BFA_IOIM_SM_DONE:
  471. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
  472. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  473. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
  474. bfa_ioim_notify_cleanup(ioim);
  475. break;
  476. case BFA_IOIM_SM_HWFAIL:
  477. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  478. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  479. list_del(&ioim->qe);
  480. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
  481. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  482. ioim);
  483. break;
  484. default:
  485. bfa_sm_fault(ioim->bfa, event);
  486. }
  487. }
/**
 * IO bfa callback is pending.
 *
 * When the host callback finishes (BFA_IOIM_SM_HCB) the IO is returned to
 * the free pool and the resource-free hook is notified.
 */
static void
bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		bfa_cb_ioim_resfree(ioim->bfa->bfad);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* callback already queued; just acknowledge the cleanup */
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* nothing left to undo at this point */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
  511. /**
  512. * IO bfa callback is pending. IO resource cannot be freed.
  513. */
  514. static void
  515. bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  516. {
  517. bfa_trc(ioim->bfa, ioim->iotag);
  518. bfa_trc(ioim->bfa, event);
  519. switch (event) {
  520. case BFA_IOIM_SM_HCB:
  521. bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
  522. list_del(&ioim->qe);
  523. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
  524. break;
  525. case BFA_IOIM_SM_FREE:
  526. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  527. break;
  528. case BFA_IOIM_SM_CLEANUP:
  529. bfa_ioim_notify_cleanup(ioim);
  530. break;
  531. case BFA_IOIM_SM_HWFAIL:
  532. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  533. break;
  534. default:
  535. bfa_sm_fault(ioim->bfa, event);
  536. }
  537. }
/**
 * IO is completed, waiting resource free from firmware.
 */
static void
bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* firmware released the IO tag; return IO to the free pool */
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		bfa_cb_ioim_resfree(ioim->bfa->bfad);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* nothing left to undo at this point */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
  561. /**
  562. * bfa_ioim_private
  563. */
  564. static void
  565. __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
  566. {
  567. struct bfa_ioim_s *ioim = cbarg;
  568. if (!complete) {
  569. bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
  570. return;
  571. }
  572. bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
  573. }
  574. static void
  575. __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
  576. {
  577. struct bfa_ioim_s *ioim = cbarg;
  578. struct bfi_ioim_rsp_s *m;
  579. u8 *snsinfo = NULL;
  580. u8 sns_len = 0;
  581. s32 residue = 0;
  582. if (!complete) {
  583. bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
  584. return;
  585. }
  586. m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
  587. if (m->io_status == BFI_IOIM_STS_OK) {
  588. /**
  589. * setup sense information, if present
  590. */
  591. if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION
  592. && m->sns_len) {
  593. sns_len = m->sns_len;
  594. snsinfo = ioim->iosp->snsinfo;
  595. }
  596. /**
  597. * setup residue value correctly for normal completions
  598. */
  599. if (m->resid_flags == FCP_RESID_UNDER)
  600. residue = bfa_os_ntohl(m->residue);
  601. if (m->resid_flags == FCP_RESID_OVER) {
  602. residue = bfa_os_ntohl(m->residue);
  603. residue = -residue;
  604. }
  605. }
  606. bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
  607. m->scsi_status, sns_len, snsinfo, residue);
  608. }
  609. static void
  610. __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
  611. {
  612. struct bfa_ioim_s *ioim = cbarg;
  613. if (!complete) {
  614. bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
  615. return;
  616. }
  617. bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
  618. 0, 0, NULL, 0);
  619. }
  620. static void
  621. __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
  622. {
  623. struct bfa_ioim_s *ioim = cbarg;
  624. if (!complete) {
  625. bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
  626. return;
  627. }
  628. bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
  629. 0, 0, NULL, 0);
  630. }
  631. static void
  632. __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
  633. {
  634. struct bfa_ioim_s *ioim = cbarg;
  635. if (!complete) {
  636. bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
  637. return;
  638. }
  639. bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
  640. }
/**
 * Callback invoked when the deferred SG page allocation completes:
 * take ownership of the allocated pages, fill them in, and resume the
 * IO state machine.
 */
static void
bfa_ioim_sgpg_alloced(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	/* move the pages granted to the wait element onto this IO */
	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
	bfa_ioim_sgpg_setup(ioim);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
}
/**
 * Send I/O request to firmware.
 *
 * Returns BFA_TRUE when the request was queued; BFA_FALSE when the
 * request CQ is full (a reqq wait is armed so the IO is resumed later).
 */
static bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { 0 };
	struct bfi_sge_s *sge;
	u32 pgdlen = 0;
	u64 addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
			      &ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/**
	 * build i/o request message next
	 */
	m->io_tag = bfa_os_htons(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);

	/**
	 * build inline IO SG element here
	 */
	sge = &m->sges[0];
	if (ioim->nsges) {
		/* first data SGE is always carried inline in the message */
		sg = (struct scatterlist *)scsi_sglist(cmnd);
		addr = bfa_os_sgaddr(sg_dma_address(sg));
		sge->sga = *(union bfi_addr_u *) &addr;
		pgdlen = sg_dma_len(sg);
		sge->sg_len = pgdlen;
		/* more SGEs follow in SG pages -> CPL, else this is last */
		sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
			     BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
		bfa_sge_to_be(sge);
		sge++;
	}

	/* second inline SGE: link to SG pages, or a zeroed terminator */
	if (ioim->nsges > BFI_SGE_INLINE) {
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/**
	 * set up I/O command parameters
	 */
	bfa_os_assign(m->cmnd, cmnd_z0);
	m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
	m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
	bfa_os_assign(m->cmnd.cdb,
		      *(struct scsi_cdb_s *)bfa_cb_ioim_get_cdb(ioim->dio));
	m->cmnd.fcp_dl = bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));

	/**
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
		/* fallthrough - bidirectional IO uses the generic class */
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
	}
	/* sequence recovery or unaligned transfer forces the generic class */
	if (itnim->seq_rec ||
	    (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));

#ifdef IOIM_ADVANCED
	m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio);
	m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
	m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);

	/**
	 * Handle large CDB (>16 bytes).
	 */
	m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
				FCP_CMND_CDB_LEN) / sizeof(u32);
	if (m->cmnd.addl_cdb_len) {
		bfa_os_memcpy(&m->cmnd.cdb + 1, (struct scsi_cdb_s *)
			      bfa_cb_ioim_get_cdb(ioim->dio) + 1,
			      m->cmnd.addl_cdb_len * sizeof(u32));
		fcp_cmnd_fcpdl(&m->cmnd) =
			bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));
	}
#endif

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
  756. /**
  757. * Setup any additional SG pages needed.Inline SG element is setup
  758. * at queuing time.
  759. */
  760. static bfa_boolean_t
  761. bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
  762. {
  763. u16 nsgpgs;
  764. bfa_assert(ioim->nsges > BFI_SGE_INLINE);
  765. /**
  766. * allocate SG pages needed
  767. */
  768. nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
  769. if (!nsgpgs)
  770. return BFA_TRUE;
  771. if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
  772. != BFA_STATUS_OK) {
  773. bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
  774. return BFA_FALSE;
  775. }
  776. ioim->nsgpgs = nsgpgs;
  777. bfa_ioim_sgpg_setup(ioim);
  778. return BFA_TRUE;
  779. }
/**
 * Fill the allocated SG pages with the SG elements that did not fit
 * inline in the IO request message, chaining the pages together with
 * link SGEs.
 */
static void
bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
{
	int sgeid, nsges, i;
	struct bfi_sge_s *sge;
	struct bfa_sgpg_s *sgpg;
	u32 pgcumsz;
	u64 addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	/* the first BFI_SGE_INLINE entries went into the request message */
	sgeid = BFI_SGE_INLINE;
	ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);

	/* skip the scatterlist entry consumed by the inline SGE */
	sg = scsi_sglist(cmnd);
	sg = sg_next(sg);

	do {
		sge = sgpg->sgpg->sges;
		nsges = ioim->nsges - sgeid;
		if (nsges > BFI_SGPG_DATA_SGES)
			nsges = BFI_SGPG_DATA_SGES;

		pgcumsz = 0;
		for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
			addr = bfa_os_sgaddr(sg_dma_address(sg));
			sge->sga = *(union bfi_addr_u *) &addr;
			sge->sg_len = sg_dma_len(sg);
			pgcumsz += sge->sg_len;

			/**
			 * set flags
			 */
			if (i < (nsges - 1))
				sge->flags = BFI_SGE_DATA;
			else if (sgeid < (ioim->nsges - 1))
				sge->flags = BFI_SGE_DATA_CPL;
			else
				sge->flags = BFI_SGE_DATA_LAST;
		}

		sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);

		/**
		 * set the link element of each page: a terminator on the
		 * last page, otherwise a link SGE pointing to the next page
		 */
		if (sgeid == ioim->nsges) {
			sge->flags = BFI_SGE_PGDLEN;
			sge->sga.a32.addr_lo = 0;
			sge->sga.a32.addr_hi = 0;
		} else {
			sge->flags = BFI_SGE_LINK;
			sge->sga = sgpg->sgpg_pa;
		}
		sge->sg_len = pgcumsz;
	} while (sgeid < ioim->nsges);
}
  830. /**
  831. * Send I/O abort request to firmware.
  832. */
  833. static bfa_boolean_t
  834. bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
  835. {
  836. struct bfi_ioim_abort_req_s *m;
  837. enum bfi_ioim_h2i msgop;
  838. /**
  839. * check for room in queue to send request now
  840. */
  841. m = bfa_reqq_next(ioim->bfa, ioim->reqq);
  842. if (!m)
  843. return BFA_FALSE;
  844. /**
  845. * build i/o request message next
  846. */
  847. if (ioim->iosp->abort_explicit)
  848. msgop = BFI_IOIM_H2I_IOABORT_REQ;
  849. else
  850. msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
  851. bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
  852. m->io_tag = bfa_os_htons(ioim->iotag);
  853. m->abort_tag = ++ioim->abort_tag;
  854. /**
  855. * queue I/O message to firmware
  856. */
  857. bfa_reqq_produce(ioim->bfa, ioim->reqq);
  858. return BFA_TRUE;
  859. }
  860. /**
  861. * Call to resume any I/O requests waiting for room in request queue.
  862. */
  863. static void
  864. bfa_ioim_qresume(void *cbarg)
  865. {
  866. struct bfa_ioim_s *ioim = cbarg;
  867. bfa_fcpim_stats(ioim->fcpim, qresumes);
  868. bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
  869. }
  870. static void
  871. bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
  872. {
  873. /**
  874. * Move IO from itnim queue to fcpim global queue since itnim will be
  875. * freed.
  876. */
  877. list_del(&ioim->qe);
  878. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
  879. if (!ioim->iosp->tskim) {
  880. if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
  881. bfa_cb_dequeue(&ioim->hcb_qe);
  882. list_del(&ioim->qe);
  883. list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
  884. }
  885. bfa_itnim_iodone(ioim->itnim);
  886. } else
  887. bfa_tskim_iodone(ioim->iosp->tskim);
  888. }
  889. /**
  890. * or after the link comes back.
  891. */
  892. void
  893. bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
  894. {
  895. /**
  896. * If path tov timer expired, failback with PATHTOV status - these
  897. * IO requests are not normally retried by IO stack.
  898. *
  899. * Otherwise device cameback online and fail it with normal failed
  900. * status so that IO stack retries these failed IO requests.
  901. */
  902. if (iotov)
  903. ioim->io_cbfn = __bfa_cb_ioim_pathtov;
  904. else
  905. ioim->io_cbfn = __bfa_cb_ioim_failed;
  906. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
  907. /**
  908. * Move IO to fcpim global queue since itnim will be
  909. * freed.
  910. */
  911. list_del(&ioim->qe);
  912. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
  913. }
  914. /**
  915. * bfa_ioim_friend
  916. */
/**
 * Memory allocation and initialization.
 *
 * Carves the IOIM array, the per-IO shadow (iosp) array and a DMA-able
 * per-IO sense buffer region out of the @minfo arena, then initializes
 * every IOIM and places it on the module free queue.
 */
void
bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_ioim_s *ioim;
	struct bfa_ioim_sp_s *iosp;
	u16 i;
	u8 *snsinfo;
	u32 snsbufsz;

	/**
	 * claim memory first
	 */
	ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_arr = ioim;
	bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);

	iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_sp_arr = iosp;
	bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);

	/**
	 * Claim DMA memory for per IO sense data.
	 */
	snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
	fcpim->snsbase.pa = bfa_meminfo_dma_phys(minfo);
	bfa_meminfo_dma_phys(minfo) += snsbufsz;

	fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
	bfa_meminfo_dma_virt(minfo) += snsbufsz;
	snsinfo = fcpim->snsbase.kva;
	/* Publish the sense buffer base address to the IOC. */
	bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);

	/**
	 * Initialize ioim free queues
	 */
	INIT_LIST_HEAD(&fcpim->ioim_free_q);
	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
	INIT_LIST_HEAD(&fcpim->ioim_comp_q);

	for (i = 0; i < fcpim->num_ioim_reqs;
	     i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
		/*
		 * initialize IOIM
		 */
		bfa_os_memset(ioim, 0, sizeof(struct bfa_ioim_s));
		ioim->iotag = i;	/* tag doubles as index into ioim_arr */
		ioim->bfa = fcpim->bfa;
		ioim->fcpim = fcpim;
		ioim->iosp = iosp;
		iosp->snsinfo = snsinfo;	/* per-IO slice of the sense buffer */
		INIT_LIST_HEAD(&ioim->sgpg_q);
		bfa_reqq_winit(&ioim->iosp->reqq_wait,
			       bfa_ioim_qresume, ioim);
		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
			       bfa_ioim_sgpg_alloced, ioim);
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);

		list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
	}
}
/**
 * Driver detach time call.  All IOIM memory was carved from the
 * bfa_meminfo_s arena in bfa_ioim_attach(), so there is nothing to
 * release here; the empty body satisfies the module detach interface.
 */
void
bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim)
{
}
/**
 * ISR for IOIM response messages from firmware.  Looks up the IO by its
 * tag, maps the firmware completion status to a state-machine event and
 * delivers it.
 */
void
bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16 iotag;
	enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;

	iotag = bfa_os_ntohs(rsp->io_tag);
	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	bfa_assert(ioim->iotag == iotag);

	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, rsp->io_status);
	bfa_trc(ioim->bfa, rsp->reuse_io_tag);

	/* Stash the response for completion processing of active IOs. */
	if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
		bfa_os_assign(ioim->iosp->comp_rspmsg, *m);

	switch (rsp->io_status) {
	case BFI_IOIM_STS_OK:
		bfa_fcpim_stats(fcpim, iocomp_ok);
		/* reuse_io_tag == 0: tag is retired, go through DONE. */
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_TIMEDOUT:
	case BFI_IOIM_STS_ABORTED:
		/* Fold timeouts into the aborted status before completing. */
		rsp->io_status = BFI_IOIM_STS_ABORTED;
		bfa_fcpim_stats(fcpim, iocomp_aborted);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_PROTO_ERR:
		bfa_fcpim_stats(fcpim, iocom_proto_err);
		bfa_assert(rsp->reuse_io_tag);
		evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_SQER_NEEDED:
		/* Firmware requests a sequence-level retry of this IO. */
		bfa_fcpim_stats(fcpim, iocom_sqer_needed);
		bfa_assert(rsp->reuse_io_tag == 0);
		evt = BFA_IOIM_SM_SQRETRY;
		break;

	case BFI_IOIM_STS_RES_FREE:
		bfa_fcpim_stats(fcpim, iocom_res_free);
		evt = BFA_IOIM_SM_FREE;
		break;

	case BFI_IOIM_STS_HOST_ABORTED:
		bfa_fcpim_stats(fcpim, iocom_hostabrts);
		/*
		 * Completion for a stale (superseded) abort attempt:
		 * ignore it and wait for the current abort to complete.
		 */
		if (rsp->abort_tag != ioim->abort_tag) {
			bfa_trc(ioim->bfa, rsp->abort_tag);
			bfa_trc(ioim->bfa, ioim->abort_tag);
			return;
		}

		if (rsp->reuse_io_tag)
			evt = BFA_IOIM_SM_ABORT_COMP;
		else
			evt = BFA_IOIM_SM_ABORT_DONE;
		break;

	case BFI_IOIM_STS_UTAG:
		bfa_fcpim_stats(fcpim, iocom_utags);
		evt = BFA_IOIM_SM_COMP_UTAG;
		break;

	default:
		bfa_assert(0);	/* unknown firmware status */
	}

	bfa_sm_send_event(ioim, evt);
}
  1048. void
  1049. bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
  1050. {
  1051. struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
  1052. struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
  1053. struct bfa_ioim_s *ioim;
  1054. u16 iotag;
  1055. iotag = bfa_os_ntohs(rsp->io_tag);
  1056. ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
  1057. bfa_assert(ioim->iotag == iotag);
  1058. bfa_trc_fp(ioim->bfa, ioim->iotag);
  1059. bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
  1060. }
  1061. /**
  1062. * Called by itnim to clean up IO while going offline.
  1063. */
  1064. void
  1065. bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
  1066. {
  1067. bfa_trc(ioim->bfa, ioim->iotag);
  1068. bfa_fcpim_stats(ioim->fcpim, io_cleanups);
  1069. ioim->iosp->tskim = NULL;
  1070. bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
  1071. }
  1072. void
  1073. bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
  1074. {
  1075. bfa_trc(ioim->bfa, ioim->iotag);
  1076. bfa_fcpim_stats(ioim->fcpim, io_tmaborts);
  1077. ioim->iosp->tskim = tskim;
  1078. bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
  1079. }
/**
 * IOC failure handling.  Fail the IO through its state machine.
 */
void
bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
{
	bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
}
/**
 * IO offline TOV popped. Fail the pending IO.
 */
void
bfa_ioim_tov(struct bfa_ioim_s *ioim)
{
	bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
}
  1096. /**
  1097. * bfa_ioim_api
  1098. */
/**
 * Allocate IOIM resource for initiator mode I/O request.
 *
 * Returns NULL (and bumps the no_iotags stat) when the free list is
 * exhausted; otherwise the IOIM is initialized for @dio and queued on
 * the itnim's active IO list.
 */
struct bfa_ioim_s *
bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
	       struct bfa_itnim_s *itnim, u16 nsges)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_ioim_s *ioim;

	/**
	 * allocate IOIM resource
	 */
	bfa_q_deq(&fcpim->ioim_free_q, &ioim);
	if (!ioim) {
		bfa_fcpim_stats(fcpim, no_iotags);
		return NULL;
	}

	ioim->dio = dio;
	ioim->itnim = itnim;
	ioim->nsges = nsges;
	ioim->nsgpgs = 0;	/* SG pages are allocated later, if needed */

	bfa_stats(fcpim, total_ios);
	bfa_stats(itnim, ios);
	fcpim->ios_active++;

	list_add_tail(&ioim->qe, &itnim->io_q);
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	return ioim;
}
  1127. void
  1128. bfa_ioim_free(struct bfa_ioim_s *ioim)
  1129. {
  1130. struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;
  1131. bfa_trc_fp(ioim->bfa, ioim->iotag);
  1132. bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));
  1133. bfa_assert_fp(list_empty(&ioim->sgpg_q)
  1134. || (ioim->nsges > BFI_SGE_INLINE));
  1135. if (ioim->nsgpgs > 0)
  1136. bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
  1137. bfa_stats(ioim->itnim, io_comps);
  1138. fcpim->ios_active--;
  1139. list_del(&ioim->qe);
  1140. list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
  1141. }
  1142. void
  1143. bfa_ioim_start(struct bfa_ioim_s *ioim)
  1144. {
  1145. bfa_trc_fp(ioim->bfa, ioim->iotag);
  1146. /**
  1147. * Obtain the queue over which this request has to be issued
  1148. */
  1149. ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
  1150. bfa_cb_ioim_get_reqq(ioim->dio) :
  1151. bfa_itnim_get_reqq(ioim);
  1152. bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
  1153. }
  1154. /**
  1155. * Driver I/O abort request.
  1156. */
  1157. void
  1158. bfa_ioim_abort(struct bfa_ioim_s *ioim)
  1159. {
  1160. bfa_trc(ioim->bfa, ioim->iotag);
  1161. bfa_fcpim_stats(ioim->fcpim, io_aborts);
  1162. bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
  1163. }