qla_mr.c

/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <scsi/scsi_tcq.h>
#include <linux/utsname.h>

/* QLAFX00 specific Mailbox implementation functions */

/*
 * qlafx00_mailbox_command
 *	Issue a mailbox command and wait for completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS (command performed successfully)
 *	1 : QLA_FUNCTION_FAILED (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 *
 * Context:
 *	Kernel context.
 */
static int
qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
{
	int rval;
	unsigned long flags = 0;
	device_reg_t __iomem *reg;
	uint8_t abort_active;
	uint8_t io_lock_on;
	uint16_t command = 0;
	uint32_t *iptr;
	uint32_t __iomem *optr;
	uint32_t cnt;
	uint32_t mboxes;
	unsigned long wait_time;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (ha->pdev->error_state > pci_channel_io_frozen) {
		ql_log(ql_log_warn, vha, 0x115c,
		    "error_state is greater than pci_channel_io_frozen, "
		    "exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0x115f,
		    "Device in failed state, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	reg = ha->iobase;
	io_lock_on = base_vha->flags.init_done;

	rval = QLA_SUCCESS;
	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);

	if (ha->flags.pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1175,
		    "Perm failure on EEH timeout MBX, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (ha->flags.isp82xx_fw_hung) {
		/* Setting Link-Down error */
		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
		ql_log(ql_log_warn, vha, 0x1176,
		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
		rval = QLA_FUNCTION_FAILED;
		goto premature_exit;
	}

	/*
	 * Wait for active mailbox commands to finish by waiting at most tov
	 * seconds. This is to serialize actual issuing of mailbox cmds during
	 * non ISP abort time.
	 */
	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
		/* Timeout occurred. Return error. */
		ql_log(ql_log_warn, vha, 0x1177,
		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
		    mcp->mb[0]);
		return QLA_FUNCTION_TIMEOUT;
	}

	ha->flags.mbox_busy = 1;
	/* Save mailbox command for debug */
	ha->mcp32 = mcp;

	ql_dbg(ql_dbg_mbx, vha, 0x1178,
	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Load mailbox registers. */
	optr = (uint32_t __iomem *)&reg->ispfx00.mailbox0;

	iptr = mcp->mb;
	command = mcp->mb[0];
	mboxes = mcp->out_mb;
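	/*
	 * out_mb is a register-selection bitmask: bit N set means mailbox
	 * register N carries outbound data.  The loop below consumes one
	 * bit per register, shifting the mask right each iteration.
	 */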
	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			WRT_REG_DWORD(optr, *iptr);

		mboxes >>= 1;
		optr++;
		iptr++;
	}

	/* Issue set host interrupt command to send cmd out. */
	ha->flags.mbox_int = 0;
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1172,
	    (uint8_t *)mcp->mb, 16);
	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1173,
	    ((uint8_t *)mcp->mb + 0x10), 16);
	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1174,
	    ((uint8_t *)mcp->mb + 0x20), 8);

	/* Unlock mbx registers and wait for interrupt */
	ql_dbg(ql_dbg_mbx, vha, 0x1179,
	    "Going to unlock irq & waiting for interrupts. "
	    "jiffies=%lx.\n", jiffies);

	/* Wait for mbx cmd completion until timeout */
	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

		QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x112c,
		    "Cmd=%x Polling Mode.\n", command);

		QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies + mcp->tov * HZ;	/* wait at most tov secs */
		while (!ha->flags.mbox_int) {
			if (time_after(jiffies, wait_time))
				break;

			/* Check for pending interrupts. */
			qla2x00_poll(ha->rsp_q_map[0]);

			if (!ha->flags.mbox_int &&
			    !(IS_QLA2200(ha) &&
			    command == MBC_LOAD_RISC_RAM_EXTENDED))
				usleep_range(10000, 11000);
		} /* while */
		ql_dbg(ql_dbg_mbx, vha, 0x112d,
		    "Waited %d sec.\n",
		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
	}

	/* Check whether we timed out */
	if (ha->flags.mbox_int) {
		uint32_t *iptr2;

		ql_dbg(ql_dbg_mbx, vha, 0x112e,
		    "Cmd=%x completed.\n", command);

		/* Got interrupt. Clear the flag. */
		ha->flags.mbox_int = 0;
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

		if (ha->mailbox_out32[0] != MBS_COMMAND_COMPLETE)
			rval = QLA_FUNCTION_FAILED;

		/* Load return mailbox registers. */
		iptr2 = mcp->mb;
		iptr = (uint32_t *)&ha->mailbox_out32[0];
		mboxes = mcp->in_mb;
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (mboxes & BIT_0)
				*iptr2 = *iptr;

			mboxes >>= 1;
			iptr2++;
			iptr++;
		}
	} else {
		rval = QLA_FUNCTION_TIMEOUT;
	}

	ha->flags.mbox_busy = 0;

	/* Clean up */
	ha->mcp32 = NULL;

	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
		ql_dbg(ql_dbg_mbx, vha, 0x113a,
		    "checking for additional resp interrupt.\n");

		/* polling mode for non isp_abort commands. */
		qla2x00_poll(ha->rsp_q_map[0]);
	}

	if (rval == QLA_FUNCTION_TIMEOUT &&
	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
		    ha->flags.eeh_busy) {
			/* not in dpc. schedule it for dpc to take over. */
			ql_dbg(ql_dbg_mbx, vha, 0x115d,
			    "Timeout, schedule isp_abort_needed.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {

				ql_log(ql_log_info, base_vha, 0x115e,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
				    "abort.\n", command, mcp->mb[0],
				    ha->flags.eeh_busy);
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else if (!abort_active) {
			/* call abort directly since we are in the DPC thread */
			ql_dbg(ql_dbg_mbx, vha, 0x1160,
			    "Timeout, calling abort_isp.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {

				ql_log(ql_log_info, base_vha, 0x1161,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x. Scheduling ISP abort ",
				    command, mcp->mb[0]);

				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				if (ha->isp_ops->abort_isp(vha)) {
					/* Failed. retry later. */
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				ql_dbg(ql_dbg_mbx, vha, 0x1162,
				    "Finished abort_isp.\n");
			}
		}
	}

premature_exit:
	/* Allow next mbx cmd to come in. */
	complete(&ha->mbx_cmd_comp);

	if (rval) {
		ql_log(ql_log_warn, base_vha, 0x1163,
		    "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, "
		    "mb[3]=%x, cmd=%x ****.\n",
		    mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
	} else {
		ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__);
	}

	return rval;
}
/*
 * qlafx00_driver_shutdown
 *	Indicate a driver shutdown to firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1166,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_MR_DRV_SHUTDOWN;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (tmo)
		mcp->tov = tmo;
	else
		mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qlafx00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1167,
		    "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1168,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qlafx00_get_firmware_state
 *	Get adapter firmware state.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qlafx00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
qlafx00_get_firmware_state(scsi_qla_host_t *vha, uint32_t *states)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1169,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qlafx00_mailbox_command(vha, mcp);

	/* Return firmware states. */
	states[0] = mcp->mb[1];

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x116a,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116b,
		    "Done %s.\n", __func__);
	}
	return rval;
}

/*
 * qlafx00_init_firmware
 *	Initialize adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = Initialization control block pointer.
 *	size = size of initialization control block.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qlafx00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qlafx00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116c,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;

	mcp->mb[1] = 0;
	mcp->mb[2] = MSD(ha->init_cb_dma);
	mcp->mb[3] = LSD(ha->init_cb_dma);

	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qlafx00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x116d,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116e,
		    "Done %s.\n", __func__);
	}
	return rval;
}

/*
 * qlafx00_mbx_reg_test
 */
static int
qlafx00_mbx_reg_test(scsi_qla_host_t *vha)
{
	int rval;
	struct mbx_cmd_32 mc;
	struct mbx_cmd_32 *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
	mcp->mb[1] = 0xAAAA;
	mcp->mb[2] = 0x5555;
	mcp->mb[3] = 0xAA55;
	mcp->mb[4] = 0x55AA;
	mcp->mb[5] = 0xA5A5;
	mcp->mb[6] = 0x5A5A;
	mcp->mb[7] = 0x2525;
	mcp->mb[8] = 0xBBBB;
	mcp->mb[9] = 0x6666;
	mcp->mb[10] = 0xBB66;
	mcp->mb[11] = 0x66BB;
	mcp->mb[12] = 0xB6B6;
	mcp->mb[13] = 0x6B6B;
	mcp->mb[14] = 0x3636;
	mcp->mb[15] = 0xCCCC;

	mcp->out_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->buf_size = 0;
	mcp->flags = MBX_DMA_OUT;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qlafx00_mailbox_command(vha, mcp);
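	/*
	 * On the FX00 the echoed test patterns come back shifted by 16
	 * registers: the values written to mb[1..15] above are expected
	 * in mb[17..31], which is what the checks below verify.
	 */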
	if (rval == QLA_SUCCESS) {
		if (mcp->mb[17] != 0xAAAA || mcp->mb[18] != 0x5555 ||
		    mcp->mb[19] != 0xAA55 || mcp->mb[20] != 0x55AA)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[21] != 0xA5A5 || mcp->mb[22] != 0x5A5A ||
		    mcp->mb[23] != 0x2525 || mcp->mb[24] != 0xBBBB)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[25] != 0x6666 || mcp->mb[26] != 0xBB66 ||
		    mcp->mb[27] != 0x66BB || mcp->mb[28] != 0xB6B6)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[29] != 0x6B6B || mcp->mb[30] != 0x3636 ||
		    mcp->mb[31] != 0xCCCC)
			rval = QLA_FUNCTION_FAILED;
	}

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1170,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1171,
		    "Done %s.\n", __func__);
	}
	return rval;
}

/**
 * qlafx00_pci_config() - Setup ISPFx00 PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qlafx00_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/* PCIe -- adjust Maximum Read Request Size (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
		pcie_set_readrq(ha->pdev, 2048);

	ha->chip_revision = ha->pdev->revision;

	return QLA_SUCCESS;
}

/**
 * qlafx00_soc_cpu_reset() - Perform a warm reset of the iSA (CPUs on the SoC are reset).
 * @ha: HA context
 */
static inline void
qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	int i, core;
	uint32_t cnt;

	/* Set all 4 cores in reset */
	for (i = 0; i < 4; i++) {
		QLAFX00_SET_HBA_SOC_REG(ha,
		    (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01));
	}

	/* Set all 4 core Clock gating control */
	for (i = 0; i < 4; i++) {
		QLAFX00_SET_HBA_SOC_REG(ha,
		    (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101));
	}

	/* Reset all units in Fabric */
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x11F0101));

	/* Reset all interrupt control registers */
	for (i = 0; i < 115; i++) {
		QLAFX00_SET_HBA_SOC_REG(ha,
		    (SOC_INTERRUPT_SOURCE_I_CONTROL_REG + 4*i), (0x0));
	}

	/* Reset Timers control registers. per core */
	for (core = 0; core < 4; core++)
		for (i = 0; i < 8; i++)
			QLAFX00_SET_HBA_SOC_REG(ha,
			    (SOC_CORE_TIMER_REG + 0x100*core + 4*i), (0x0));

	/* Reset per core IRQ ack register */
	for (core = 0; core < 4; core++)
		QLAFX00_SET_HBA_SOC_REG(ha,
		    (SOC_IRQ_ACK_REG + 0x100*core), (0x3FF));

	/* Set Fabric control and config to defaults */
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2));
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3));

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Kick in Fabric units */
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0));

	/* Kick in Core0 to start boot process */
	QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00));

	/* Wait 10 secs for soft-reset to complete. */
	for (cnt = 10; cnt; cnt--) {
		msleep(1000);
		barrier();
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

/**
 * qlafx00_soft_reset() - Soft Reset ISPFx00.
 * @ha: HA context
 */
void
qlafx00_soft_reset(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev) &&
	    ha->flags.pci_channel_io_perm_failure))
		return;

	ha->isp_ops->disable_intrs(ha);
	qlafx00_soc_cpu_reset(vha);
	ha->isp_ops->enable_intrs(ha);
}

/**
 * qlafx00_chip_diag() - Test ISPFx00 for proper operation.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qlafx00_chip_diag(scsi_qla_host_t *vha)
{
	int rval = 0;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;

	rval = qlafx00_mbx_reg_test(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x1165,
		    "Failed mailbox send register test\n");
	} else {
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	}
	return rval;
}

void
qlafx00_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
	struct init_cb_fx *icb;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	icb = (struct init_cb_fx *)ha->init_cb;
	icb->request_q_outpointer = __constant_cpu_to_le16(0);
	icb->response_q_inpointer = __constant_cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	WRT_REG_DWORD(&reg->req_q_in, 0);
	WRT_REG_DWORD(&reg->req_q_out, 0);

	WRT_REG_DWORD(&reg->rsp_q_in, 0);
	WRT_REG_DWORD(&reg->rsp_q_out, 0);

	/* PCI posting */
	RD_REG_DWORD(&reg->rsp_q_out);
}

char *
qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
{
	struct qla_hw_data *ha = vha->hw;
	int pcie_reg;

	pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
	if (pcie_reg) {
		strcpy(str, "PCIe iSA");
		return str;
	}
	return str;
}

char *
qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str)
{
	struct qla_hw_data *ha = vha->hw;

	sprintf(str, "%s", ha->mr.fw_version);
	return str;
}

void
qlafx00_enable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 1;
	QLAFX00_ENABLE_ICNTRL_REG(ha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

void
qlafx00_disable_intrs(struct qla_hw_data *ha)
{
	unsigned long flags = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->interrupts_on = 0;
	QLAFX00_DISABLE_ICNTRL_REG(ha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void
qlafx00_tmf_iocb_timeout(void *data)
{
	srb_t *sp = (srb_t *)data;
	struct srb_iocb *tmf = &sp->u.iocb_cmd;

	tmf->u.tmf.comp_status = cpu_to_le16((uint16_t)CS_TIMEOUT);
	complete(&tmf->u.tmf.comp);
}

static void
qlafx00_tmf_sp_done(void *data, void *ptr, int res)
{
	srb_t *sp = (srb_t *)ptr;
	struct srb_iocb *tmf = &sp->u.iocb_cmd;

	complete(&tmf->u.tmf.comp);
}

static int
qlafx00_async_tm_cmd(fc_port_t *fcport, uint32_t flags,
	uint32_t lun, uint32_t tag)
{
	scsi_qla_host_t *vha = fcport->vha;
	struct srb_iocb *tm_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	tm_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_TM_CMD;
	sp->name = "tmf";
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
	tm_iocb->u.tmf.flags = flags;
	tm_iocb->u.tmf.lun = lun;
	tm_iocb->u.tmf.data = tag;
	sp->done = qlafx00_tmf_sp_done;
	tm_iocb->timeout = qlafx00_tmf_iocb_timeout;
	init_completion(&tm_iocb->u.tmf.comp);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	ql_dbg(ql_dbg_async, vha, 0x507b,
	    "Task management command issued target_id=%x\n",
	    fcport->tgt_id);

	wait_for_completion(&tm_iocb->u.tmf.comp);

	rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
	    QLA_SUCCESS : QLA_FUNCTION_FAILED;

done_free_sp:
	sp->free(vha, sp);
done:
	return rval;
}

int
qlafx00_abort_target(fc_port_t *fcport, unsigned int l, int tag)
{
	return qlafx00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
}

int
qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag)
{
	return qlafx00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
}

int
qlafx00_iospace_config(struct qla_hw_data *ha)
{
	if (pci_request_selected_regions(ha->pdev, ha->bars,
	    QLA2XXX_DRIVER_NAME)) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x014e,
		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Use MMIO operations for all accesses. */
	if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x014f,
		    "Invalid pci I/O region size (%s).\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 0) < BAR0_LEN_FX00) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0127,
		    "Invalid PCI mem BAR0 region size (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->cregbase =
	    ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
	if (!ha->cregbase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x0128,
		    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	if (!(pci_resource_flags(ha->pdev, 2) & IORESOURCE_MEM)) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x0129,
		    "region #2 not an MMIO resource (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}
	if (pci_resource_len(ha->pdev, 2) < BAR2_LEN_FX00) {
		ql_log_pci(ql_log_warn, ha->pdev, 0x012a,
		    "Invalid PCI mem BAR2 region size (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	ha->iobase =
	    ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
	if (!ha->iobase) {
		ql_log_pci(ql_log_fatal, ha->pdev, 0x012b,
		    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Determine queue resources */
	ha->max_req_queues = ha->max_rsp_queues = 1;

	ql_log_pci(ql_log_info, ha->pdev, 0x012c,
	    "Bars 0x%x, iobase0 0x%p, iobase2 0x%p\n",
	    ha->bars, ha->cregbase, ha->iobase);

	return 0;

iospace_error_exit:
	return -ENOMEM;
}

static void
qlafx00_save_queue_ptrs(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	req->length_fx00 = req->length;
	req->ring_fx00 = req->ring;
	req->dma_fx00 = req->dma;
	rsp->length_fx00 = rsp->length;
	rsp->ring_fx00 = rsp->ring;
	rsp->dma_fx00 = rsp->dma;

	ql_dbg(ql_dbg_init, vha, 0x012d,
	    "req: %p, ring_fx00: %p, length_fx00: 0x%x,"
	    "req->dma_fx00: 0x%llx\n", req, req->ring_fx00,
	    req->length_fx00, (u64)req->dma_fx00);

	ql_dbg(ql_dbg_init, vha, 0x012e,
	    "rsp: %p, ring_fx00: %p, length_fx00: 0x%x,"
	    "rsp->dma_fx00: 0x%llx\n", rsp, rsp->ring_fx00,
	    rsp->length_fx00, (u64)rsp->dma_fx00);
}

static int
qlafx00_config_queues(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];
	dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2);
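	/*
	 * On the FX00 the request/response rings live in BAR2 MMIO space at
	 * offsets reported by firmware, so the ring pointers computed below
	 * reference ioremapped device memory rather than host-allocated
	 * coherent DMA buffers.
	 */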
	req->length = ha->req_que_len;
	req->ring = (void *)ha->iobase + ha->req_que_off;
	req->dma = bar2_hdl + ha->req_que_off;
	if ((!req->ring) || (req->length == 0)) {
		ql_log_pci(ql_log_info, ha->pdev, 0x012f,
		    "Unable to allocate memory for req_ring\n");
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0130,
	    "req: %p req_ring pointer %p req len 0x%x "
	    "req off 0x%x\n, req->dma: 0x%llx",
	    req, req->ring, req->length,
	    ha->req_que_off, (u64)req->dma);

	rsp->length = ha->rsp_que_len;
	rsp->ring = (void *)ha->iobase + ha->rsp_que_off;
	rsp->dma = bar2_hdl + ha->rsp_que_off;
	if ((!rsp->ring) || (rsp->length == 0)) {
		ql_log_pci(ql_log_info, ha->pdev, 0x0131,
		    "Unable to allocate memory for rsp_ring\n");
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0132,
	    "rsp: %p rsp_ring pointer %p rsp len 0x%x "
	    "rsp off 0x%x, rsp->dma: 0x%llx\n",
	    rsp, rsp->ring, rsp->length,
	    ha->rsp_que_off, (u64)rsp->dma);

	return QLA_SUCCESS;
}

static int
qlafx00_init_fw_ready(scsi_qla_host_t *vha)
{
	int rval = 0;
	unsigned long wtime;
	uint16_t wait_time;	/* Wait time */
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
	uint32_t aenmbx, aenmbx7 = 0;
	uint32_t state[5];
	bool done = false;

	/* 30 seconds wait - Adjust if required */
	wait_time = 30;

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);
	do {
		aenmbx = RD_REG_DWORD(&reg->aenmailbox0);
		barrier();
		ql_dbg(ql_dbg_mbx, vha, 0x0133,
		    "aenmbx: 0x%x\n", aenmbx);

		switch (aenmbx) {
		case MBA_FW_NOT_STARTED:
		case MBA_FW_STARTING:
			break;

		case MBA_SYSTEM_ERR:
		case MBA_REQ_TRANSFER_ERR:
		case MBA_RSP_TRANSFER_ERR:
		case MBA_FW_INIT_FAILURE:
			qlafx00_soft_reset(vha);
			break;

		case MBA_FW_RESTART_CMPLT:
			/* Set the mbx and rqstq intr code */
			aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
			ha->mbx_intr_code = MSW(aenmbx7);
			ha->rqstq_intr_code = LSW(aenmbx7);
			ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
			ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
			ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
			ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
			WRT_REG_DWORD(&reg->aenmailbox0, 0);
			RD_REG_DWORD_RELAXED(&reg->aenmailbox0);
			ql_dbg(ql_dbg_init, vha, 0x0134,
			    "f/w returned mbx_intr_code: 0x%x, "
			    "rqstq_intr_code: 0x%x\n",
			    ha->mbx_intr_code, ha->rqstq_intr_code);
			QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
			rval = QLA_SUCCESS;
			done = true;
			break;

		default:
			/* The firmware is apparently not ready.  In order to
			 * continue, we might need to issue a Mbox cmd, but the
			 * problem is that the DoorBell vector values that come
			 * with the 8060 AEN are most likely gone by now (and
			 * thus no bell would be rung on the fw side when the
			 * mbox cmd is issued).  We therefore have to grab the
			 * 8060 AEN shadow regs (filled in by FW when the last
			 * 8060 AEN was being posted).
			 * Do the following to determine what is needed in
			 * order to get the FW ready:
			 * 1. reload the 8060 AEN values from the shadow regs
			 * 2. clear int status to get rid of possible pending
			 *    interrupts
			 * 3. issue Get FW State Mbox cmd to determine fw state
			 * Set the mbx and rqstq intr code from Shadow Regs
			 */
			aenmbx7 = RD_REG_DWORD(&reg->initval7);
			ha->mbx_intr_code = MSW(aenmbx7);
			ha->rqstq_intr_code = LSW(aenmbx7);
			ha->req_que_off = RD_REG_DWORD(&reg->initval1);
			ha->rsp_que_off = RD_REG_DWORD(&reg->initval3);
			ha->req_que_len = RD_REG_DWORD(&reg->initval5);
			ha->rsp_que_len = RD_REG_DWORD(&reg->initval6);
			ql_dbg(ql_dbg_init, vha, 0x0135,
			    "f/w returned mbx_intr_code: 0x%x, "
			    "rqstq_intr_code: 0x%x\n",
			    ha->mbx_intr_code, ha->rqstq_intr_code);
			QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);

			/* Get the FW state */
			rval = qlafx00_get_firmware_state(vha, state);
			if (rval != QLA_SUCCESS) {
				/* Retry if timer has not expired */
				break;
			}

			if (state[0] == FSTATE_FX00_CONFIG_WAIT) {
				/* Firmware is waiting to be
				 * initialized by driver
				 */
				rval = QLA_SUCCESS;
				done = true;
				break;
			}

			/* Issue driver shutdown and wait until f/w recovers.
			 * Driver should continue to poll until 8060 AEN is
			 * received indicating firmware recovery.
			 */
			ql_dbg(ql_dbg_init, vha, 0x0136,
			    "Sending Driver shutdown fw_state 0x%x\n",
			    state[0]);

			rval = qlafx00_driver_shutdown(vha, 10);
			if (rval != QLA_SUCCESS) {
				rval = QLA_FUNCTION_FAILED;
				break;
			}
			msleep(500);

			wtime = jiffies + (wait_time * HZ);
			break;
		}

		if (!done) {
			if (time_after_eq(jiffies, wtime)) {
				ql_dbg(ql_dbg_init, vha, 0x0137,
				    "Init f/w failed: aen[7]: 0x%x\n",
				    RD_REG_DWORD(&reg->aenmailbox7));
				rval = QLA_FUNCTION_FAILED;
				done = true;
				break;
			}
			/* Delay for a while */
			msleep(500);
		}
	} while (!done);

	if (rval)
		ql_dbg(ql_dbg_init, vha, 0x0138,
		    "%s **** FAILED ****.\n", __func__);
	else
		ql_dbg(ql_dbg_init, vha, 0x0139,
		    "%s **** SUCCESS ****.\n", __func__);

	return rval;
}

/*
 * qlafx00_fw_ready() - Waits for firmware ready.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int
qlafx00_fw_ready(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long wtime;
	uint16_t wait_time;	/* Wait time if loop is coming ready */
	uint32_t state[5];

	rval = QLA_SUCCESS;

	wait_time = 10;

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);

	/* Wait for ISP to finish init */
	if (!vha->flags.init_done)
		ql_dbg(ql_dbg_init, vha, 0x013a,
		    "Waiting for init to complete...\n");

	do {
		rval = qlafx00_get_firmware_state(vha, state);

		if (rval == QLA_SUCCESS) {
			if (state[0] == FSTATE_FX00_INITIALIZED) {
				ql_dbg(ql_dbg_init, vha, 0x013b,
				    "fw_state=%x\n", state[0]);
				rval = QLA_SUCCESS;
				break;
			}
		}
		rval = QLA_FUNCTION_FAILED;

		if (time_after_eq(jiffies, wtime))
			break;

		/* Delay for a while */
		msleep(500);

		ql_dbg(ql_dbg_init, vha, 0x013c,
		    "fw_state=%x curr time=%lx.\n", state[0], jiffies);
	} while (1);

	if (rval)
		ql_dbg(ql_dbg_init, vha, 0x013d,
		    "Firmware ready **** FAILED ****.\n");
	else
		ql_dbg(ql_dbg_init, vha, 0x013e,
		    "Firmware ready **** SUCCESS ****.\n");

	return rval;
}
static int
qlafx00_find_all_targets(scsi_qla_host_t *vha,
	struct list_head *new_fcports)
{
	int rval;
	uint16_t tgt_id;
	fc_port_t *fcport, *new_fcport;
	int found;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;

	if (!test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
		return QLA_FUNCTION_FAILED;

	if ((atomic_read(&vha->loop_down_timer) ||
	     STATE_TRANSITION(vha))) {
		atomic_set(&vha->loop_down_timer, 0);
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088,
	    "Listing Target bit map...\n");
	ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha,
	    0x2089, (uint8_t *)ha->gid_list, 32);

	/* Allocate temporary rmtport for any new rmtports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL)
		return QLA_MEMORY_ALLOC_FAILED;

	for_each_set_bit(tgt_id, (void *)ha->gid_list,
	    QLAFX00_TGT_NODE_LIST_SIZE) {
		/* Send get target node info */
		new_fcport->tgt_id = tgt_id;
		rval = qlafx00_fx_disc(vha, new_fcport,
		    FXDISC_GET_TGT_NODE_INFO);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x208a,
			    "Target info scan failed -- assuming zero-entry "
			    "result...\n");
			continue;
		}

		/* Locate matching device in database. */
		found = 0;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name,
			    fcport->port_name, WWN_SIZE))
				continue;

			found++;

			/*
			 * If tgt_id is same and state FCS_ONLINE, nothing
			 * changed.
			 */
			if (fcport->tgt_id == new_fcport->tgt_id &&
			    atomic_read(&fcport->state) == FCS_ONLINE)
				break;

			/*
			 * Tgt ID changed or device was marked to be updated.
			 */
			ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x208b,
			    "TGT-ID Change(%s): Present tgt id: "
			    "0x%x state: 0x%x "
			    "wwnn = %llx wwpn = %llx.\n",
			    __func__, fcport->tgt_id,
			    atomic_read(&fcport->state),
			    (unsigned long long)wwn_to_u64(fcport->node_name),
			    (unsigned long long)wwn_to_u64(fcport->port_name));

			ql_log(ql_log_info, vha, 0x208c,
			    "TGT-ID Announce(%s): Discovered tgt "
			    "id 0x%x wwnn = %llx "
			    "wwpn = %llx.\n", __func__, new_fcport->tgt_id,
			    (unsigned long long)
			    wwn_to_u64(new_fcport->node_name),
			    (unsigned long long)
			    wwn_to_u64(new_fcport->port_name));

			if (atomic_read(&fcport->state) != FCS_ONLINE) {
				fcport->old_tgt_id = fcport->tgt_id;
				fcport->tgt_id = new_fcport->tgt_id;
				ql_log(ql_log_info, vha, 0x208d,
				    "TGT-ID: New fcport Added: %p\n", fcport);
				qla2x00_update_fcport(vha, fcport);
			} else {
				ql_log(ql_log_info, vha, 0x208e,
				    " Existing TGT-ID %x did not get "
				    " offline event from firmware.\n",
				    fcport->old_tgt_id);
				qla2x00_mark_device_lost(vha, fcport, 0, 0);
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				kfree(new_fcport);
				return rval;
			}
			break;
		}

		if (found)
			continue;

		/* If device was not in our fcports list, then add it. */
		list_add_tail(&new_fcport->list, new_fcports);

		/* Allocate a new replacement fcport. */
		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (new_fcport == NULL)
			return QLA_MEMORY_ALLOC_FAILED;
	}

	kfree(new_fcport);
	return rval;
}

/*
 * qlafx00_configure_all_targets
 *	Setup target devices with node ID's.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	BIT_0 = error
 */
static int
qlafx00_configure_all_targets(scsi_qla_host_t *vha)
{
	int rval;
	fc_port_t *fcport, *rmptemp;
	LIST_HEAD(new_fcports);

	rval = qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
	    FXDISC_GET_TGT_NODE_LIST);
	if (rval != QLA_SUCCESS) {
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		return rval;
	}

	rval = qlafx00_find_all_targets(vha, &new_fcports);
	if (rval != QLA_SUCCESS) {
		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		return rval;
	}

	/*
	 * Delete all previous devices marked lost.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
			if (fcport->port_type != FCT_INITIATOR)
				qla2x00_mark_device_lost(vha, fcport, 0, 0);
		}
	}

	/*
	 * Add the new devices to our devices list.
	 */
	list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		qla2x00_update_fcport(vha, fcport);
		list_move_tail(&fcport->list, &vha->vp_fcports);
		ql_log(ql_log_info, vha, 0x208f,
		    "Attach new target id 0x%x wwnn = %llx "
		    "wwpn = %llx.\n",
		    fcport->tgt_id,
		    (unsigned long long)wwn_to_u64(fcport->node_name),
		    (unsigned long long)wwn_to_u64(fcport->port_name));
	}

	/* Free all new device structures not processed. */
	list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
		list_del(&fcport->list);
		kfree(fcport);
	}

	return rval;
}

/*
 * qlafx00_configure_devices
 *	Updates Fibre Channel Device Database with what is actually on loop.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success.
 *	1 = error.
 *	2 = database was full and device was not configured.
 */
int
qlafx00_configure_devices(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags, save_flags;

	rval = QLA_SUCCESS;

	save_flags = flags = vha->dpc_flags;

	ql_dbg(ql_dbg_disc, vha, 0x2090,
	    "Configure devices -- dpc flags =0x%lx\n", flags);

	rval = qlafx00_configure_all_targets(vha);

	if (rval == QLA_SUCCESS) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			rval = QLA_FUNCTION_FAILED;
		} else {
			atomic_set(&vha->loop_state, LOOP_READY);
			ql_log(ql_log_info, vha, 0x2091,
			    "Device Ready\n");
		}
	}

	if (rval) {
		ql_dbg(ql_dbg_disc, vha, 0x2092,
		    "%s *** FAILED ***.\n", __func__);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x2093,
		    "%s: exiting normally.\n", __func__);
	}
	return rval;
}
static void
qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	fc_port_t *fcport;

	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	ha->mr.fw_hbt_en = 0;
	clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	vha->qla_stats.total_isp_aborts++;

	ql_log(ql_log_info, vha, 0x013f,
	    "Performing ISP error recovery - ha = %p.\n", ha);

	ha->isp_ops->reset_chip(vha);

	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		atomic_set(&vha->loop_down_timer,
		    QLAFX00_LOOP_DOWN_TIME);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
			    QLAFX00_LOOP_DOWN_TIME);
	}

	/* Clear all async request states across all VPs. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->flags = 0;
		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
	}

	if (!ha->flags.eeh_busy) {
		/* Requeue all commands in outstanding command list. */
		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
	}

	qla2x00_free_irqs(vha);
	set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);

	/* Clear the Interrupts */
	QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);

	ql_log(ql_log_info, vha, 0x0140,
	    "%s Done done - ha=%p.\n", __func__, ha);
}

/**
 * qlafx00_init_response_q_entries() - Initializes response queue entries.
 * @ha: HA context
 *
 * Beginning of request ring has initialization control block already built
 * by nvram config routine.
 */
void
qlafx00_init_response_q_entries(struct rsp_que *rsp)
{
	uint16_t cnt;
	response_t *pkt;

	rsp->ring_ptr = rsp->ring;
	rsp->ring_index = 0;
	rsp->status_srb = NULL;
	pkt = rsp->ring_ptr;
	for (cnt = 0; cnt < rsp->length; cnt++) {
		pkt->signature = RESPONSE_PROCESSED;
		WRT_REG_DWORD((void __iomem *)&pkt->signature,
		    RESPONSE_PROCESSED);
		pkt++;
	}
}

int
qlafx00_rescan_isp(scsi_qla_host_t *vha)
{
	uint32_t status = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
	uint32_t aenmbx7;

	qla2x00_request_irqs(ha, ha->rsp_q_map[0]);

	aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
	ha->mbx_intr_code = MSW(aenmbx7);
	ha->rqstq_intr_code = LSW(aenmbx7);
	ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
	ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
	ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
	ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);

	ql_dbg(ql_dbg_disc, vha, 0x2094,
	    "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x "
	    " Req que offset 0x%x Rsp que offset 0x%x\n",
	    ha->mbx_intr_code, ha->rqstq_intr_code,
	    ha->req_que_off, ha->rsp_que_len);

	/* Clear the Interrupts */
	QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);

	status = qla2x00_init_rings(vha);
	if (!status) {
		vha->flags.online = 1;

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;
		/* Register system information */
		if (qlafx00_fx_disc(vha,
		    &vha->hw->mr.fcport, FXDISC_REG_HOST_INFO))
			ql_dbg(ql_dbg_disc, vha, 0x2095,
			    "failed to register host info\n");
	}
	scsi_unblock_requests(vha->host);
	return status;
}

void
qlafx00_timer_routine(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t fw_heart_beat;
	uint32_t aenmbx0;
	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;

	/* Check firmware health */
  1256. if (ha->mr.fw_hbt_cnt)
  1257. ha->mr.fw_hbt_cnt--;
  1258. else {
  1259. if ((!ha->flags.mr_reset_hdlr_active) &&
  1260. (!test_bit(UNLOADING, &vha->dpc_flags)) &&
  1261. (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
  1262. (ha->mr.fw_hbt_en)) {
  1263. fw_heart_beat = RD_REG_DWORD(&reg->fwheartbeat);
  1264. if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
  1265. ha->mr.old_fw_hbt_cnt = fw_heart_beat;
  1266. ha->mr.fw_hbt_miss_cnt = 0;
  1267. } else {
  1268. ha->mr.fw_hbt_miss_cnt++;
  1269. if (ha->mr.fw_hbt_miss_cnt ==
  1270. QLAFX00_HEARTBEAT_MISS_CNT) {
  1271. set_bit(ISP_ABORT_NEEDED,
  1272. &vha->dpc_flags);
  1273. qla2xxx_wake_dpc(vha);
  1274. ha->mr.fw_hbt_miss_cnt = 0;
  1275. }
  1276. }
  1277. }
  1278. ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
  1279. }
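/*
 * Reset recovery: while FX00_RESET_RECOVERY is set, aenmailbox0 is polled
 * on every tick.  MBA_FW_RESTART_CMPLT schedules a target rescan,
 * MBA_FW_STARTING re-arms heartbeat monitoring, an all-ones value means
 * the PCI window must be reprogrammed, and MBA_FW_POLL_STATE stretches the
 * timeout.  If the mailbox value stops changing before the tick count runs
 * out, an ISP abort is requested instead.
 */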
  1280. if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) {
  1281. /* Reset recovery to be performed in timer routine */
  1282. aenmbx0 = RD_REG_DWORD(&reg->aenmailbox0);
  1283. if (ha->mr.fw_reset_timer_exp) {
  1284. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  1285. qla2xxx_wake_dpc(vha);
  1286. ha->mr.fw_reset_timer_exp = 0;
  1287. } else if (aenmbx0 == MBA_FW_RESTART_CMPLT) {
  1288. /* Wake up DPC to rescan the targets */
  1289. set_bit(FX00_TARGET_SCAN, &vha->dpc_flags);
  1290. clear_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
  1291. qla2xxx_wake_dpc(vha);
  1292. ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
  1293. } else if ((aenmbx0 == MBA_FW_STARTING) &&
  1294. (!ha->mr.fw_hbt_en)) {
  1295. ha->mr.fw_hbt_en = 1;
  1296. } else if (!ha->mr.fw_reset_timer_tick) {
  1297. if (aenmbx0 == ha->mr.old_aenmbx0_state)
  1298. ha->mr.fw_reset_timer_exp = 1;
  1299. ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
  1300. } else if (aenmbx0 == 0xFFFFFFFF) {
  1301. uint32_t data0, data1;
  1302. data0 = QLAFX00_RD_REG(ha,
  1303. QLAFX00_BAR1_BASE_ADDR_REG);
  1304. data1 = QLAFX00_RD_REG(ha,
  1305. QLAFX00_PEX0_WIN0_BASE_ADDR_REG);
  1306. data0 &= 0xffff0000;
  1307. data1 &= 0x0000ffff;
  1308. QLAFX00_WR_REG(ha,
  1309. QLAFX00_PEX0_WIN0_BASE_ADDR_REG,
  1310. (data0 | data1));
  1311. } else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) {
  1312. ha->mr.fw_reset_timer_tick =
  1313. QLAFX00_MAX_RESET_INTERVAL;
  1314. }
  1315. ha->mr.old_aenmbx0_state = aenmbx0;
  1316. ha->mr.fw_reset_timer_tick--;
  1317. }
  1318. }
  1319. /*
1320. * qlafx00_reset_initialize
1321. * Re-initialize the adapter after an ISP device reset.
  1322. *
  1323. * Input:
  1324. * ha = adapter block pointer.
  1325. *
  1326. * Returns:
  1327. * 0 = success
  1328. */
  1329. int
  1330. qlafx00_reset_initialize(scsi_qla_host_t *vha)
  1331. {
  1332. struct qla_hw_data *ha = vha->hw;
  1333. if (vha->device_flags & DFLG_DEV_FAILED) {
  1334. ql_dbg(ql_dbg_init, vha, 0x0142,
  1335. "Device in failed state\n");
  1336. return QLA_SUCCESS;
  1337. }
  1338. ha->flags.mr_reset_hdlr_active = 1;
  1339. if (vha->flags.online) {
  1340. scsi_block_requests(vha->host);
  1341. qlafx00_abort_isp_cleanup(vha);
  1342. }
  1343. ql_log(ql_log_info, vha, 0x0143,
  1344. "(%s): succeeded.\n", __func__);
  1345. ha->flags.mr_reset_hdlr_active = 0;
  1346. return QLA_SUCCESS;
  1347. }
  1348. /*
  1349. * qlafx00_abort_isp
  1350. * Resets ISP and aborts all outstanding commands.
  1351. *
  1352. * Input:
  1353. * ha = adapter block pointer.
  1354. *
  1355. * Returns:
  1356. * 0 = success
  1357. */
  1358. int
  1359. qlafx00_abort_isp(scsi_qla_host_t *vha)
  1360. {
  1361. struct qla_hw_data *ha = vha->hw;
  1362. if (vha->flags.online) {
  1363. if (unlikely(pci_channel_offline(ha->pdev) &&
  1364. ha->flags.pci_channel_io_perm_failure)) {
  1365. clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
  1366. return QLA_SUCCESS;
  1367. }
  1368. scsi_block_requests(vha->host);
  1369. qlafx00_abort_isp_cleanup(vha);
  1370. }
  1371. ql_log(ql_log_info, vha, 0x0145,
  1372. "(%s): succeeded.\n", __func__);
  1373. return QLA_SUCCESS;
  1374. }
  1375. static inline fc_port_t*
  1376. qlafx00_get_fcport(struct scsi_qla_host *vha, int tgt_id)
  1377. {
  1378. fc_port_t *fcport;
  1379. /* Check for matching device in remote port list. */
  1380. fcport = NULL;
  1381. list_for_each_entry(fcport, &vha->vp_fcports, list) {
  1382. if (fcport->tgt_id == tgt_id) {
  1383. ql_dbg(ql_dbg_async, vha, 0x5072,
  1384. "Matching fcport(%p) found with TGT-ID: 0x%x "
  1385. "and Remote TGT_ID: 0x%x\n",
  1386. fcport, fcport->tgt_id, tgt_id);
  1387. break;
  1388. }
  1389. }
  1390. return fcport;
  1391. }
  1392. static void
  1393. qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
  1394. {
  1395. fc_port_t *fcport;
  1396. ql_log(ql_log_info, vha, 0x5073,
  1397. "Detach TGT-ID: 0x%x\n", tgt_id);
  1398. fcport = qlafx00_get_fcport(vha, tgt_id);
  1399. if (!fcport)
  1400. return;
  1401. qla2x00_mark_device_lost(vha, fcport, 0, 0);
  1403. }
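/*
 * qlafx00_process_aen() below decodes the firmware port-update event:
 * mbx[1] == 0 with mbx[2] == 1 reports an online target and triggers a
 * loop resync, while mbx[2] == 2 detaches the target named in mbx[3];
 * mbx[1] == 0xffff carries the same online/offline indication for the
 * local port, where "offline" marks all devices lost and flags
 * DFLG_NO_CABLE.  Link up/down events are forwarded to the FC transport
 * unchanged.
 */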
  1404. int
  1405. qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
  1406. {
  1407. int rval = 0;
  1408. uint32_t aen_code, aen_data;
  1409. aen_code = FCH_EVT_VENDOR_UNIQUE;
  1410. aen_data = evt->u.aenfx.evtcode;
  1411. switch (evt->u.aenfx.evtcode) {
  1412. case QLAFX00_MBA_PORT_UPDATE: /* Port database update */
  1413. if (evt->u.aenfx.mbx[1] == 0) {
  1414. if (evt->u.aenfx.mbx[2] == 1) {
  1415. if (!vha->flags.fw_tgt_reported)
  1416. vha->flags.fw_tgt_reported = 1;
  1417. atomic_set(&vha->loop_down_timer, 0);
  1418. atomic_set(&vha->loop_state, LOOP_UP);
  1419. set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
  1420. qla2xxx_wake_dpc(vha);
  1421. } else if (evt->u.aenfx.mbx[2] == 2) {
  1422. qlafx00_tgt_detach(vha, evt->u.aenfx.mbx[3]);
  1423. }
  1424. } else if (evt->u.aenfx.mbx[1] == 0xffff) {
  1425. if (evt->u.aenfx.mbx[2] == 1) {
  1426. if (!vha->flags.fw_tgt_reported)
  1427. vha->flags.fw_tgt_reported = 1;
  1428. set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
  1429. } else if (evt->u.aenfx.mbx[2] == 2) {
  1430. vha->device_flags |= DFLG_NO_CABLE;
  1431. qla2x00_mark_all_devices_lost(vha, 1);
  1432. }
  1433. }
  1434. break;
  1435. case QLAFX00_MBA_LINK_UP:
  1436. aen_code = FCH_EVT_LINKUP;
  1437. aen_data = 0;
  1438. break;
  1439. case QLAFX00_MBA_LINK_DOWN:
  1440. aen_code = FCH_EVT_LINKDOWN;
  1441. aen_data = 0;
  1442. break;
  1443. }
  1444. fc_host_post_event(vha->host, fc_get_event_number(),
  1445. aen_code, aen_data);
  1446. return rval;
  1447. }
  1448. static void
  1449. qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo)
  1450. {
  1451. u64 port_name = 0, node_name = 0;
  1452. port_name = (unsigned long long)wwn_to_u64(pinfo->port_name);
  1453. node_name = (unsigned long long)wwn_to_u64(pinfo->node_name);
  1454. fc_host_node_name(vha->host) = node_name;
  1455. fc_host_port_name(vha->host) = port_name;
  1456. if (!pinfo->port_type)
  1457. vha->hw->current_topology = ISP_CFG_F;
  1458. if (pinfo->link_status == QLAFX00_LINK_STATUS_UP)
  1459. atomic_set(&vha->loop_state, LOOP_READY);
  1460. else if (pinfo->link_status == QLAFX00_LINK_STATUS_DOWN)
  1461. atomic_set(&vha->loop_state, LOOP_DOWN);
  1462. vha->hw->link_data_rate = (uint16_t)pinfo->link_config;
  1463. }
  1464. static void
  1465. qla2x00_fxdisc_iocb_timeout(void *data)
  1466. {
  1467. srb_t *sp = (srb_t *)data;
  1468. struct srb_iocb *lio = &sp->u.iocb_cmd;
  1469. complete(&lio->u.fxiocb.fxiocb_comp);
  1470. }
  1471. static void
  1472. qla2x00_fxdisc_sp_done(void *data, void *ptr, int res)
  1473. {
  1474. srb_t *sp = (srb_t *)ptr;
  1475. struct srb_iocb *lio = &sp->u.iocb_cmd;
  1476. complete(&lio->u.fxiocb.fxiocb_comp);
  1477. }
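/*
 * qlafx00_fx_disc() below is the synchronous FXDISC helper: it allocates
 * an SRB, sets up request and/or response DMA buffers according to
 * fx_type, issues the IOCB with qla2x00_start_sp() and waits on the
 * completion before copying the firmware's answer into ha->mr or the vha.
 * A typical call, as used in the rescan path above:
 *
 *	qlafx00_fx_disc(vha, &vha->hw->mr.fcport, FXDISC_REG_HOST_INFO);
 */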
  1478. int
  1479. qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
  1480. {
  1481. srb_t *sp;
  1482. struct srb_iocb *fdisc;
  1483. int rval = QLA_FUNCTION_FAILED;
  1484. struct qla_hw_data *ha = vha->hw;
  1485. struct host_system_info *phost_info;
  1486. struct register_host_info *preg_hsi;
  1487. struct new_utsname *p_sysid = NULL;
  1488. struct timeval tv;
  1489. sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
  1490. if (!sp)
  1491. goto done;
  1492. fdisc = &sp->u.iocb_cmd;
  1493. switch (fx_type) {
  1494. case FXDISC_GET_CONFIG_INFO:
  1495. fdisc->u.fxiocb.flags =
  1496. SRB_FXDISC_RESP_DMA_VALID;
  1497. fdisc->u.fxiocb.rsp_len = sizeof(struct config_info_data);
  1498. break;
  1499. case FXDISC_GET_PORT_INFO:
  1500. fdisc->u.fxiocb.flags =
  1501. SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
  1502. fdisc->u.fxiocb.rsp_len = QLAFX00_PORT_DATA_INFO;
  1503. fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->port_id);
  1504. break;
  1505. case FXDISC_GET_TGT_NODE_INFO:
  1506. fdisc->u.fxiocb.flags =
  1507. SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
  1508. fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_INFO;
  1509. fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->tgt_id);
  1510. break;
  1511. case FXDISC_GET_TGT_NODE_LIST:
  1512. fdisc->u.fxiocb.flags =
  1513. SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
  1514. fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_LIST_SIZE;
  1515. break;
  1516. case FXDISC_REG_HOST_INFO:
  1517. fdisc->u.fxiocb.flags = SRB_FXDISC_REQ_DMA_VALID;
  1518. fdisc->u.fxiocb.req_len = sizeof(struct register_host_info);
  1519. p_sysid = utsname();
  1520. if (!p_sysid) {
  1521. ql_log(ql_log_warn, vha, 0x303c,
  1522. "Not able to get the system informtion\n");
  1523. goto done_free_sp;
  1524. }
  1525. break;
  1526. default:
  1527. break;
  1528. }
  1529. if (fdisc->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
  1530. fdisc->u.fxiocb.req_addr = dma_alloc_coherent(&ha->pdev->dev,
  1531. fdisc->u.fxiocb.req_len,
  1532. &fdisc->u.fxiocb.req_dma_handle, GFP_KERNEL);
  1533. if (!fdisc->u.fxiocb.req_addr)
  1534. goto done_free_sp;
  1535. if (fx_type == FXDISC_REG_HOST_INFO) {
  1536. preg_hsi = (struct register_host_info *)
  1537. fdisc->u.fxiocb.req_addr;
  1538. phost_info = &preg_hsi->hsi;
  1539. memset(preg_hsi, 0, sizeof(struct register_host_info));
  1540. phost_info->os_type = OS_TYPE_LINUX;
  1541. strncpy(phost_info->sysname,
  1542. p_sysid->sysname, SYSNAME_LENGTH);
  1543. strncpy(phost_info->nodename,
  1544. p_sysid->nodename, NODENAME_LENGTH);
  1545. strncpy(phost_info->release,
  1546. p_sysid->release, RELEASE_LENGTH);
  1547. strncpy(phost_info->version,
  1548. p_sysid->version, VERSION_LENGTH);
  1549. strncpy(phost_info->machine,
  1550. p_sysid->machine, MACHINE_LENGTH);
  1551. strncpy(phost_info->domainname,
  1552. p_sysid->domainname, DOMNAME_LENGTH);
  1553. strncpy(phost_info->hostdriver,
  1554. QLA2XXX_VERSION, VERSION_LENGTH);
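/*
 * Note: strncpy() does not guarantee NUL termination if a utsname field
 * fills its destination; the host_system_info strings appear to be
 * consumed by the firmware as fixed-length fields, so any truncation here
 * is silent.
 */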
  1555. do_gettimeofday(&tv);
  1556. preg_hsi->utc = (uint64_t)tv.tv_sec;
  1557. ql_dbg(ql_dbg_init, vha, 0x0149,
  1558. "ISP%04X: Host registration with firmware\n",
  1559. ha->pdev->device);
  1560. ql_dbg(ql_dbg_init, vha, 0x014a,
  1561. "os_type = '%d', sysname = '%s', nodname = '%s'\n",
  1562. phost_info->os_type,
  1563. phost_info->sysname,
  1564. phost_info->nodename);
  1565. ql_dbg(ql_dbg_init, vha, 0x014b,
  1566. "release = '%s', version = '%s'\n",
  1567. phost_info->release,
  1568. phost_info->version);
  1569. ql_dbg(ql_dbg_init, vha, 0x014c,
  1570. "machine = '%s' "
  1571. "domainname = '%s', hostdriver = '%s'\n",
  1572. phost_info->machine,
  1573. phost_info->domainname,
  1574. phost_info->hostdriver);
  1575. ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d,
  1576. (uint8_t *)phost_info,
  1577. sizeof(struct host_system_info));
  1578. }
  1579. }
  1580. if (fdisc->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
  1581. fdisc->u.fxiocb.rsp_addr = dma_alloc_coherent(&ha->pdev->dev,
  1582. fdisc->u.fxiocb.rsp_len,
  1583. &fdisc->u.fxiocb.rsp_dma_handle, GFP_KERNEL);
  1584. if (!fdisc->u.fxiocb.rsp_addr)
  1585. goto done_unmap_req;
  1586. }
  1587. sp->type = SRB_FXIOCB_DCMD;
  1588. sp->name = "fxdisc";
  1589. qla2x00_init_timer(sp, FXDISC_TIMEOUT);
  1590. fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
  1591. fdisc->u.fxiocb.req_func_type = cpu_to_le16(fx_type);
  1592. sp->done = qla2x00_fxdisc_sp_done;
  1593. rval = qla2x00_start_sp(sp);
  1594. if (rval != QLA_SUCCESS)
  1595. goto done_unmap_dma;
  1596. wait_for_completion(&fdisc->u.fxiocb.fxiocb_comp);
  1597. if (fx_type == FXDISC_GET_CONFIG_INFO) {
  1598. struct config_info_data *pinfo =
  1599. (struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
  1600. memcpy(&vha->hw->mr.product_name, pinfo->product_name,
  1601. sizeof(vha->hw->mr.product_name));
  1602. memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
  1603. sizeof(vha->hw->mr.symbolic_name));
  1604. memcpy(&vha->hw->mr.serial_num, pinfo->serial_num,
  1605. sizeof(vha->hw->mr.serial_num));
  1606. memcpy(&vha->hw->mr.hw_version, pinfo->hw_version,
  1607. sizeof(vha->hw->mr.hw_version));
  1608. memcpy(&vha->hw->mr.fw_version, pinfo->fw_version,
  1609. sizeof(vha->hw->mr.fw_version));
  1610. strim(vha->hw->mr.fw_version);
  1611. memcpy(&vha->hw->mr.uboot_version, pinfo->uboot_version,
  1612. sizeof(vha->hw->mr.uboot_version));
  1613. memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num,
  1614. sizeof(vha->hw->mr.fru_serial_num));
  1615. } else if (fx_type == FXDISC_GET_PORT_INFO) {
  1616. struct port_info_data *pinfo =
  1617. (struct port_info_data *) fdisc->u.fxiocb.rsp_addr;
  1618. memcpy(vha->node_name, pinfo->node_name, WWN_SIZE);
  1619. memcpy(vha->port_name, pinfo->port_name, WWN_SIZE);
  1620. vha->d_id.b.domain = pinfo->port_id[0];
  1621. vha->d_id.b.area = pinfo->port_id[1];
  1622. vha->d_id.b.al_pa = pinfo->port_id[2];
  1623. qlafx00_update_host_attr(vha, pinfo);
  1624. ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141,
  1625. (uint8_t *)pinfo, 16);
  1626. } else if (fx_type == FXDISC_GET_TGT_NODE_INFO) {
  1627. struct qlafx00_tgt_node_info *pinfo =
  1628. (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
  1629. memcpy(fcport->node_name, pinfo->tgt_node_wwnn, WWN_SIZE);
  1630. memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE);
  1631. fcport->port_type = FCT_TARGET;
  1632. ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144,
  1633. (uint8_t *)pinfo, 16);
  1634. } else if (fx_type == FXDISC_GET_TGT_NODE_LIST) {
  1635. struct qlafx00_tgt_node_info *pinfo =
  1636. (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
  1637. ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146,
  1638. (uint8_t *)pinfo, 16);
  1639. memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
  1640. }
  1641. rval = le32_to_cpu(fdisc->u.fxiocb.result);
  1642. done_unmap_dma:
  1643. if (fdisc->u.fxiocb.rsp_addr)
  1644. dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.rsp_len,
  1645. fdisc->u.fxiocb.rsp_addr, fdisc->u.fxiocb.rsp_dma_handle);
  1646. done_unmap_req:
  1647. if (fdisc->u.fxiocb.req_addr)
  1648. dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len,
  1649. fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle);
  1650. done_free_sp:
  1651. sp->free(vha, sp);
  1652. done:
  1653. return rval;
  1654. }
  1655. static void
  1656. qlafx00_abort_iocb_timeout(void *data)
  1657. {
  1658. srb_t *sp = (srb_t *)data;
  1659. struct srb_iocb *abt = &sp->u.iocb_cmd;
  1660. abt->u.abt.comp_status = cpu_to_le16((uint16_t)CS_TIMEOUT);
  1661. complete(&abt->u.abt.comp);
  1662. }
  1663. static void
  1664. qlafx00_abort_sp_done(void *data, void *ptr, int res)
  1665. {
  1666. srb_t *sp = (srb_t *)ptr;
  1667. struct srb_iocb *abt = &sp->u.iocb_cmd;
  1668. complete(&abt->u.abt.comp);
  1669. }
  1670. static int
  1671. qlafx00_async_abt_cmd(srb_t *cmd_sp)
  1672. {
  1673. scsi_qla_host_t *vha = cmd_sp->fcport->vha;
  1674. fc_port_t *fcport = cmd_sp->fcport;
  1675. struct srb_iocb *abt_iocb;
  1676. srb_t *sp;
  1677. int rval = QLA_FUNCTION_FAILED;
  1678. sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
  1679. if (!sp)
  1680. goto done;
  1681. abt_iocb = &sp->u.iocb_cmd;
  1682. sp->type = SRB_ABT_CMD;
  1683. sp->name = "abort";
  1684. qla2x00_init_timer(sp, FXDISC_TIMEOUT);
  1685. abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
  1686. sp->done = qlafx00_abort_sp_done;
  1687. abt_iocb->timeout = qlafx00_abort_iocb_timeout;
  1688. init_completion(&abt_iocb->u.abt.comp);
  1689. rval = qla2x00_start_sp(sp);
  1690. if (rval != QLA_SUCCESS)
  1691. goto done_free_sp;
  1692. ql_dbg(ql_dbg_async, vha, 0x507c,
  1693. "Abort command issued - hdl=%x, target_id=%x\n",
  1694. cmd_sp->handle, fcport->tgt_id);
  1695. wait_for_completion(&abt_iocb->u.abt.comp);
  1696. rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
  1697. QLA_SUCCESS : QLA_FUNCTION_FAILED;
  1698. done_free_sp:
  1699. sp->free(vha, sp);
  1700. done:
  1701. return rval;
  1702. }
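/*
 * qlafx00_abort_command() below verifies that the command is still present
 * in the request queue's outstanding array (under hardware_lock) before
 * issuing the asynchronous abort IOCB and waiting for its completion.
 */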
  1703. int
  1704. qlafx00_abort_command(srb_t *sp)
  1705. {
  1706. unsigned long flags = 0;
  1707. uint32_t handle;
  1708. fc_port_t *fcport = sp->fcport;
  1709. struct scsi_qla_host *vha = fcport->vha;
  1710. struct qla_hw_data *ha = vha->hw;
  1711. struct req_que *req = vha->req;
  1712. spin_lock_irqsave(&ha->hardware_lock, flags);
  1713. for (handle = 1; handle < DEFAULT_OUTSTANDING_COMMANDS; handle++) {
  1714. if (req->outstanding_cmds[handle] == sp)
  1715. break;
  1716. }
  1717. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  1718. if (handle == DEFAULT_OUTSTANDING_COMMANDS) {
  1719. /* Command not found. */
  1720. return QLA_FUNCTION_FAILED;
  1721. }
  1722. return qlafx00_async_abt_cmd(sp);
  1723. }
  1724. /*
  1725. * qlafx00_initialize_adapter
  1726. * Initialize board.
  1727. *
  1728. * Input:
  1729. * ha = adapter block pointer.
  1730. *
  1731. * Returns:
  1732. * 0 = success
  1733. */
  1734. int
  1735. qlafx00_initialize_adapter(scsi_qla_host_t *vha)
  1736. {
  1737. int rval;
  1738. struct qla_hw_data *ha = vha->hw;
  1739. /* Clear adapter flags. */
  1740. vha->flags.online = 0;
  1741. ha->flags.chip_reset_done = 0;
  1742. vha->flags.reset_active = 0;
  1743. ha->flags.pci_channel_io_perm_failure = 0;
  1744. ha->flags.eeh_busy = 0;
  1745. ha->thermal_support = 0;
  1746. atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
  1747. atomic_set(&vha->loop_state, LOOP_DOWN);
  1748. vha->device_flags = DFLG_NO_CABLE;
  1749. vha->dpc_flags = 0;
  1750. vha->flags.management_server_logged_in = 0;
  1751. vha->marker_needed = 0;
  1752. ha->isp_abort_cnt = 0;
  1753. ha->beacon_blink_led = 0;
  1754. set_bit(0, ha->req_qid_map);
  1755. set_bit(0, ha->rsp_qid_map);
  1756. ql_dbg(ql_dbg_init, vha, 0x0147,
  1757. "Configuring PCI space...\n");
  1758. rval = ha->isp_ops->pci_config(vha);
  1759. if (rval) {
  1760. ql_log(ql_log_warn, vha, 0x0148,
  1761. "Unable to configure PCI space.\n");
  1762. return rval;
  1763. }
  1764. rval = qlafx00_init_fw_ready(vha);
  1765. if (rval != QLA_SUCCESS)
  1766. return rval;
  1767. qlafx00_save_queue_ptrs(vha);
  1768. rval = qlafx00_config_queues(vha);
  1769. if (rval != QLA_SUCCESS)
  1770. return rval;
  1771. /*
  1772. * Allocate the array of outstanding commands
  1773. * now that we know the firmware resources.
  1774. */
  1775. rval = qla2x00_alloc_outstanding_cmds(ha, vha->req);
  1776. if (rval != QLA_SUCCESS)
  1777. return rval;
  1778. rval = qla2x00_init_rings(vha);
  1779. ha->flags.chip_reset_done = 1;
  1780. return rval;
  1781. }
  1782. uint32_t
  1783. qlafx00_fw_state_show(struct device *dev, struct device_attribute *attr,
  1784. char *buf)
  1785. {
  1786. scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  1787. int rval = QLA_FUNCTION_FAILED;
  1788. uint32_t state[1];
  1789. if (qla2x00_reset_active(vha))
  1790. ql_log(ql_log_warn, vha, 0x70ce,
  1791. "ISP reset active.\n");
  1792. else if (!vha->hw->flags.eeh_busy) {
  1793. rval = qlafx00_get_firmware_state(vha, state);
  1794. }
  1795. if (rval != QLA_SUCCESS)
  1796. memset(state, -1, sizeof(state));
  1797. return state[0];
  1798. }
  1799. void
  1800. qlafx00_get_host_speed(struct Scsi_Host *shost)
  1801. {
  1802. struct qla_hw_data *ha = ((struct scsi_qla_host *)
  1803. (shost_priv(shost)))->hw;
  1804. u32 speed = FC_PORTSPEED_UNKNOWN;
  1805. switch (ha->link_data_rate) {
  1806. case QLAFX00_PORT_SPEED_2G:
  1807. speed = FC_PORTSPEED_2GBIT;
  1808. break;
  1809. case QLAFX00_PORT_SPEED_4G:
  1810. speed = FC_PORTSPEED_4GBIT;
  1811. break;
  1812. case QLAFX00_PORT_SPEED_8G:
  1813. speed = FC_PORTSPEED_8GBIT;
  1814. break;
  1815. case QLAFX00_PORT_SPEED_10G:
  1816. speed = FC_PORTSPEED_10GBIT;
  1817. break;
  1818. }
  1819. fc_host_speed(shost) = speed;
  1820. }
  1821. /** QLAFX00 specific ISR implementation functions */
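/*
 * Sense data for an ISPFX00 status entry can exceed what fits in a single
 * IOCB.  qlafx00_handle_sense() copies the first chunk into the midlayer
 * sense buffer and, when the firmware reports more, parks the srb in
 * rsp->status_srb so qlafx00_status_cont_entry() can append the remaining
 * bytes from continuation entries before completing the command.
 */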
  1822. static inline void
  1823. qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
  1824. uint32_t sense_len, struct rsp_que *rsp, int res)
  1825. {
  1826. struct scsi_qla_host *vha = sp->fcport->vha;
  1827. struct scsi_cmnd *cp = GET_CMD_SP(sp);
  1828. uint32_t track_sense_len;
  1829. SET_FW_SENSE_LEN(sp, sense_len);
  1830. if (sense_len >= SCSI_SENSE_BUFFERSIZE)
  1831. sense_len = SCSI_SENSE_BUFFERSIZE;
  1832. SET_CMD_SENSE_LEN(sp, sense_len);
  1833. SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
  1834. track_sense_len = sense_len;
  1835. if (sense_len > par_sense_len)
  1836. sense_len = par_sense_len;
  1837. memcpy(cp->sense_buffer, sense_data, sense_len);
  1838. SET_FW_SENSE_LEN(sp, GET_FW_SENSE_LEN(sp) - sense_len);
  1839. SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
  1840. track_sense_len -= sense_len;
  1841. SET_CMD_SENSE_LEN(sp, track_sense_len);
  1842. ql_dbg(ql_dbg_io, vha, 0x304d,
  1843. "sense_len=0x%x par_sense_len=0x%x track_sense_len=0x%x.\n",
  1844. sense_len, par_sense_len, track_sense_len);
  1845. if (GET_FW_SENSE_LEN(sp) > 0) {
  1846. rsp->status_srb = sp;
  1847. cp->result = res;
  1848. }
  1849. if (sense_len) {
  1850. ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039,
  1851. "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
  1852. sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
  1853. cp);
  1854. ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049,
  1855. cp->sense_buffer, sense_len);
  1856. }
  1857. }
  1858. static void
  1859. qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
  1860. struct tsk_mgmt_entry_fx00 *pkt, srb_t *sp,
  1861. __le16 sstatus, __le16 cpstatus)
  1862. {
  1863. struct srb_iocb *tmf;
  1864. tmf = &sp->u.iocb_cmd;
  1865. if (cpstatus != cpu_to_le16((uint16_t)CS_COMPLETE) ||
  1866. (sstatus & cpu_to_le16((uint16_t)SS_RESPONSE_INFO_LEN_VALID)))
  1867. cpstatus = cpu_to_le16((uint16_t)CS_INCOMPLETE);
  1868. tmf->u.tmf.comp_status = cpstatus;
  1869. sp->done(vha, sp, 0);
  1870. }
  1871. static void
  1872. qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
  1873. struct abort_iocb_entry_fx00 *pkt)
  1874. {
  1875. const char func[] = "ABT_IOCB";
  1876. srb_t *sp;
  1877. struct srb_iocb *abt;
  1878. sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
  1879. if (!sp)
  1880. return;
  1881. abt = &sp->u.iocb_cmd;
  1882. abt->u.abt.comp_status = pkt->tgt_id_sts;
  1883. sp->done(vha, sp, 0);
  1884. }
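/*
 * qlafx00_ioctl_iosb_entry() below completes both flavours of management
 * IOCB: driver-internal SRB_FXIOCB_DCMD requests get their status and
 * returned data word copied back into the srb, while bsg-originated
 * requests have the full qla_mt_iocb_rsp_fx00 status block copied into the
 * job's sense area just past the fc_bsg_reply so user space can inspect it.
 */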
  1885. static void
  1886. qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
  1887. struct ioctl_iocb_entry_fx00 *pkt)
  1888. {
  1889. const char func[] = "IOSB_IOCB";
  1890. srb_t *sp;
  1891. struct fc_bsg_job *bsg_job;
  1892. struct srb_iocb *iocb_job;
  1893. int res;
  1894. struct qla_mt_iocb_rsp_fx00 fstatus;
  1895. uint8_t *fw_sts_ptr;
  1896. sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
  1897. if (!sp)
  1898. return;
  1899. if (sp->type == SRB_FXIOCB_DCMD) {
  1900. iocb_job = &sp->u.iocb_cmd;
  1901. iocb_job->u.fxiocb.seq_number = pkt->seq_no;
  1902. iocb_job->u.fxiocb.fw_flags = pkt->fw_iotcl_flags;
  1903. iocb_job->u.fxiocb.result = pkt->status;
  1904. if (iocb_job->u.fxiocb.flags & SRB_FXDISC_RSP_DWRD_VALID)
  1905. iocb_job->u.fxiocb.req_data =
  1906. pkt->dataword_r;
  1907. } else {
  1908. bsg_job = sp->u.bsg_job;
  1909. memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00));
  1910. fstatus.reserved_1 = pkt->reserved_0;
  1911. fstatus.func_type = pkt->comp_func_num;
  1912. fstatus.ioctl_flags = pkt->fw_iotcl_flags;
  1913. fstatus.ioctl_data = pkt->dataword_r;
  1914. fstatus.adapid = pkt->adapid;
  1915. fstatus.adapid_hi = pkt->adapid_hi;
  1916. fstatus.reserved_2 = pkt->reserved_1;
  1917. fstatus.res_count = pkt->residuallen;
  1918. fstatus.status = pkt->status;
  1919. fstatus.seq_number = pkt->seq_no;
  1920. memcpy(fstatus.reserved_3,
  1921. pkt->reserved_2, 20 * sizeof(uint8_t));
  1922. fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
  1923. sizeof(struct fc_bsg_reply);
  1924. memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
  1925. sizeof(struct qla_mt_iocb_rsp_fx00));
  1926. bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
  1927. sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t);
  1928. ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
  1929. sp->fcport->vha, 0x5080,
  1930. (uint8_t *)pkt, sizeof(struct ioctl_iocb_entry_fx00));
  1931. ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
  1932. sp->fcport->vha, 0x5074,
  1933. (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));
  1934. res = bsg_job->reply->result = DID_OK << 16;
  1935. bsg_job->reply->reply_payload_rcv_len =
  1936. bsg_job->reply_payload.payload_len;
  1937. }
  1938. sp->done(vha, sp, res);
  1939. }
  1940. /**
  1941. * qlafx00_status_entry() - Process a Status IOCB entry.
1942. * @vha: SCSI driver HA context
  1943. * @pkt: Entry pointer
  1944. */
  1945. static void
  1946. qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
  1947. {
  1948. srb_t *sp;
  1949. fc_port_t *fcport;
  1950. struct scsi_cmnd *cp;
  1951. struct sts_entry_fx00 *sts;
  1952. __le16 comp_status;
  1953. __le16 scsi_status;
  1954. uint16_t ox_id;
  1955. __le16 lscsi_status;
  1956. int32_t resid;
  1957. uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
  1958. fw_resid_len;
  1959. uint8_t *rsp_info = NULL, *sense_data = NULL;
  1960. struct qla_hw_data *ha = vha->hw;
  1961. uint32_t hindex, handle;
  1962. uint16_t que;
  1963. struct req_que *req;
  1964. int logit = 1;
  1965. int res = 0;
  1966. sts = (struct sts_entry_fx00 *) pkt;
  1967. comp_status = sts->comp_status;
  1968. scsi_status = sts->scsi_status & cpu_to_le16((uint16_t)SS_MASK);
  1969. hindex = sts->handle;
  1970. handle = LSW(hindex);
  1971. que = MSW(hindex);
  1972. req = ha->req_q_map[que];
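/*
 * The 32-bit completion handle encodes the request queue id in the upper
 * word and the index into that queue's outstanding_cmds[] array in the
 * lower word (see MAKE_HANDLE() in the submission path below).
 */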
  1973. /* Validate handle. */
  1974. if (handle < req->num_outstanding_cmds)
  1975. sp = req->outstanding_cmds[handle];
  1976. else
  1977. sp = NULL;
  1978. if (sp == NULL) {
  1979. ql_dbg(ql_dbg_io, vha, 0x3034,
  1980. "Invalid status handle (0x%x).\n", handle);
  1981. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  1982. qla2xxx_wake_dpc(vha);
  1983. return;
  1984. }
  1985. if (sp->type == SRB_TM_CMD) {
  1986. req->outstanding_cmds[handle] = NULL;
  1987. qlafx00_tm_iocb_entry(vha, req, pkt, sp,
  1988. scsi_status, comp_status);
  1989. return;
  1990. }
  1991. /* Fast path completion. */
  1992. if (comp_status == CS_COMPLETE && scsi_status == 0) {
  1993. qla2x00_do_host_ramp_up(vha);
  1994. qla2x00_process_completed_request(vha, req, handle);
  1995. return;
  1996. }
  1997. req->outstanding_cmds[handle] = NULL;
  1998. cp = GET_CMD_SP(sp);
  1999. if (cp == NULL) {
  2000. ql_dbg(ql_dbg_io, vha, 0x3048,
  2001. "Command already returned (0x%x/%p).\n",
  2002. handle, sp);
  2003. return;
  2004. }
  2005. lscsi_status = scsi_status & cpu_to_le16((uint16_t)STATUS_MASK);
  2006. fcport = sp->fcport;
  2007. ox_id = 0;
  2008. sense_len = par_sense_len = rsp_info_len = resid_len =
  2009. fw_resid_len = 0;
  2010. if (scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID))
  2011. sense_len = sts->sense_len;
  2012. if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER
  2013. | (uint16_t)SS_RESIDUAL_OVER)))
  2014. resid_len = le32_to_cpu(sts->residual_len);
  2015. if (comp_status == cpu_to_le16((uint16_t)CS_DATA_UNDERRUN))
  2016. fw_resid_len = le32_to_cpu(sts->residual_len);
  2017. rsp_info = sense_data = sts->data;
  2018. par_sense_len = sizeof(sts->data);
  2019. /* Check for overrun. */
  2020. if (comp_status == CS_COMPLETE &&
  2021. scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_OVER))
  2022. comp_status = cpu_to_le16((uint16_t)CS_DATA_OVERRUN);
  2023. /*
  2024. * Based on Host and scsi status generate status code for Linux
  2025. */
  2026. switch (le16_to_cpu(comp_status)) {
  2027. case CS_COMPLETE:
  2028. case CS_QUEUE_FULL:
  2029. if (scsi_status == 0) {
  2030. res = DID_OK << 16;
  2031. break;
  2032. }
  2033. if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER
  2034. | (uint16_t)SS_RESIDUAL_OVER))) {
  2035. resid = resid_len;
  2036. scsi_set_resid(cp, resid);
  2037. if (!lscsi_status &&
  2038. ((unsigned)(scsi_bufflen(cp) - resid) <
  2039. cp->underflow)) {
  2040. ql_dbg(ql_dbg_io, fcport->vha, 0x3050,
  2041. "Mid-layer underflow "
  2042. "detected (0x%x of 0x%x bytes).\n",
  2043. resid, scsi_bufflen(cp));
  2044. res = DID_ERROR << 16;
  2045. break;
  2046. }
  2047. }
  2048. res = DID_OK << 16 | le16_to_cpu(lscsi_status);
  2049. if (lscsi_status ==
  2050. cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) {
  2051. ql_dbg(ql_dbg_io, fcport->vha, 0x3051,
  2052. "QUEUE FULL detected.\n");
  2053. break;
  2054. }
  2055. logit = 0;
  2056. if (lscsi_status != cpu_to_le16((uint16_t)SS_CHECK_CONDITION))
  2057. break;
  2058. memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
  2059. if (!(scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)))
  2060. break;
  2061. qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len,
  2062. rsp, res);
  2063. break;
  2064. case CS_DATA_UNDERRUN:
  2065. /* Use F/W calculated residual length. */
  2066. if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
  2067. resid = fw_resid_len;
  2068. else
  2069. resid = resid_len;
  2070. scsi_set_resid(cp, resid);
  2071. if (scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_UNDER)) {
  2072. if ((IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
  2073. && fw_resid_len != resid_len) {
  2074. ql_dbg(ql_dbg_io, fcport->vha, 0x3052,
  2075. "Dropped frame(s) detected "
  2076. "(0x%x of 0x%x bytes).\n",
  2077. resid, scsi_bufflen(cp));
  2078. res = DID_ERROR << 16 |
  2079. le16_to_cpu(lscsi_status);
  2080. goto check_scsi_status;
  2081. }
  2082. if (!lscsi_status &&
  2083. ((unsigned)(scsi_bufflen(cp) - resid) <
  2084. cp->underflow)) {
  2085. ql_dbg(ql_dbg_io, fcport->vha, 0x3053,
  2086. "Mid-layer underflow "
  2087. "detected (0x%x of 0x%x bytes, "
  2088. "cp->underflow: 0x%x).\n",
  2089. resid, scsi_bufflen(cp), cp->underflow);
  2090. res = DID_ERROR << 16;
  2091. break;
  2092. }
  2093. } else if (lscsi_status !=
  2094. cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL) &&
  2095. lscsi_status != cpu_to_le16((uint16_t)SAM_STAT_BUSY)) {
2096. /*
2097. * A SCSI status of TASK SET FULL or BUSY means the
2098. * task did not complete.
2099. */
  2100. ql_dbg(ql_dbg_io, fcport->vha, 0x3054,
  2101. "Dropped frame(s) detected (0x%x "
  2102. "of 0x%x bytes).\n", resid,
  2103. scsi_bufflen(cp));
  2104. res = DID_ERROR << 16 | le16_to_cpu(lscsi_status);
  2105. goto check_scsi_status;
  2106. } else {
  2107. ql_dbg(ql_dbg_io, fcport->vha, 0x3055,
  2108. "scsi_status: 0x%x, lscsi_status: 0x%x\n",
  2109. scsi_status, lscsi_status);
  2110. }
  2111. res = DID_OK << 16 | le16_to_cpu(lscsi_status);
  2112. logit = 0;
  2113. check_scsi_status:
  2114. /*
  2115. * Check to see if SCSI Status is non zero. If so report SCSI
  2116. * Status.
  2117. */
  2118. if (lscsi_status != 0) {
  2119. if (lscsi_status ==
  2120. cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) {
  2121. ql_dbg(ql_dbg_io, fcport->vha, 0x3056,
  2122. "QUEUE FULL detected.\n");
  2123. logit = 1;
  2124. break;
  2125. }
  2126. if (lscsi_status !=
  2127. cpu_to_le16((uint16_t)SS_CHECK_CONDITION))
  2128. break;
  2129. memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
  2130. if (!(scsi_status &
  2131. cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)))
  2132. break;
  2133. qlafx00_handle_sense(sp, sense_data, par_sense_len,
  2134. sense_len, rsp, res);
  2135. }
  2136. break;
  2137. case CS_PORT_LOGGED_OUT:
  2138. case CS_PORT_CONFIG_CHG:
  2139. case CS_PORT_BUSY:
  2140. case CS_INCOMPLETE:
  2141. case CS_PORT_UNAVAILABLE:
  2142. case CS_TIMEOUT:
  2143. case CS_RESET:
  2144. /*
  2145. * We are going to have the fc class block the rport
  2146. * while we try to recover so instruct the mid layer
  2147. * to requeue until the class decides how to handle this.
  2148. */
  2149. res = DID_TRANSPORT_DISRUPTED << 16;
  2150. ql_dbg(ql_dbg_io, fcport->vha, 0x3057,
  2151. "Port down status: port-state=0x%x.\n",
  2152. atomic_read(&fcport->state));
  2153. if (atomic_read(&fcport->state) == FCS_ONLINE)
  2154. qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
  2155. break;
  2156. case CS_ABORTED:
  2157. res = DID_RESET << 16;
  2158. break;
  2159. default:
  2160. res = DID_ERROR << 16;
  2161. break;
  2162. }
  2163. if (logit)
  2164. ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
  2165. "FCP command status: 0x%x-0x%x (0x%x) "
  2166. "nexus=%ld:%d:%d tgt_id: 0x%x lscsi_status: 0x%x"
  2167. "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
  2168. "rsp_info=0x%x resid=0x%x fw_resid=0x%x "
  2169. "sense_len=0x%x, par_sense_len=0x%x, rsp_info_len=0x%x\n",
  2170. comp_status, scsi_status, res, vha->host_no,
  2171. cp->device->id, cp->device->lun, fcport->tgt_id,
  2172. lscsi_status, cp->cmnd[0], cp->cmnd[1], cp->cmnd[2],
  2173. cp->cmnd[3], cp->cmnd[4], cp->cmnd[5], cp->cmnd[6],
  2174. cp->cmnd[7], cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp),
  2175. rsp_info_len, resid_len, fw_resid_len, sense_len,
  2176. par_sense_len, rsp_info_len);
  2177. if (!res)
  2178. qla2x00_do_host_ramp_up(vha);
  2179. if (rsp->status_srb == NULL)
  2180. sp->done(ha, sp, res);
  2181. }
  2182. /**
  2183. * qlafx00_status_cont_entry() - Process a Status Continuations entry.
2184. * @rsp: response queue
  2185. * @pkt: Entry pointer
  2186. *
  2187. * Extended sense data.
  2188. */
  2189. static void
  2190. qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
  2191. {
  2192. uint8_t sense_sz = 0;
  2193. struct qla_hw_data *ha = rsp->hw;
  2194. struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
  2195. srb_t *sp = rsp->status_srb;
  2196. struct scsi_cmnd *cp;
  2197. uint32_t sense_len;
  2198. uint8_t *sense_ptr;
  2199. if (!sp) {
  2200. ql_dbg(ql_dbg_io, vha, 0x3037,
  2201. "no SP, sp = %p\n", sp);
  2202. return;
  2203. }
  2204. if (!GET_FW_SENSE_LEN(sp)) {
  2205. ql_dbg(ql_dbg_io, vha, 0x304b,
  2206. "no fw sense data, sp = %p\n", sp);
  2207. return;
  2208. }
  2209. cp = GET_CMD_SP(sp);
  2210. if (cp == NULL) {
  2211. ql_log(ql_log_warn, vha, 0x303b,
  2212. "cmd is NULL: already returned to OS (sp=%p).\n", sp);
  2213. rsp->status_srb = NULL;
  2214. return;
  2215. }
  2216. if (!GET_CMD_SENSE_LEN(sp)) {
  2217. ql_dbg(ql_dbg_io, vha, 0x304c,
  2218. "no sense data, sp = %p\n", sp);
  2219. } else {
  2220. sense_len = GET_CMD_SENSE_LEN(sp);
  2221. sense_ptr = GET_CMD_SENSE_PTR(sp);
  2222. ql_dbg(ql_dbg_io, vha, 0x304f,
  2223. "sp=%p sense_len=0x%x sense_ptr=%p.\n",
  2224. sp, sense_len, sense_ptr);
  2225. if (sense_len > sizeof(pkt->data))
  2226. sense_sz = sizeof(pkt->data);
  2227. else
  2228. sense_sz = sense_len;
  2229. /* Move sense data. */
  2230. ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e,
  2231. (uint8_t *)pkt, sizeof(sts_cont_entry_t));
  2232. memcpy(sense_ptr, pkt->data, sense_sz);
  2233. ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a,
  2234. sense_ptr, sense_sz);
  2235. sense_len -= sense_sz;
  2236. sense_ptr += sense_sz;
  2237. SET_CMD_SENSE_PTR(sp, sense_ptr);
  2238. SET_CMD_SENSE_LEN(sp, sense_len);
  2239. }
  2240. sense_len = GET_FW_SENSE_LEN(sp);
  2241. sense_len = (sense_len > sizeof(pkt->data)) ?
  2242. (sense_len - sizeof(pkt->data)) : 0;
  2243. SET_FW_SENSE_LEN(sp, sense_len);
  2244. /* Place command on done queue. */
  2245. if (sense_len == 0) {
  2246. rsp->status_srb = NULL;
  2247. sp->done(ha, sp, cp->result);
  2248. }
  2249. }
  2250. /**
  2251. * qlafx00_multistatus_entry() - Process Multi response queue entries.
2252. * @vha: SCSI driver HA context
  2253. */
  2254. static void
  2255. qlafx00_multistatus_entry(struct scsi_qla_host *vha,
  2256. struct rsp_que *rsp, void *pkt)
  2257. {
  2258. srb_t *sp;
  2259. struct multi_sts_entry_fx00 *stsmfx;
  2260. struct qla_hw_data *ha = vha->hw;
  2261. uint32_t handle, hindex, handle_count, i;
  2262. uint16_t que;
  2263. struct req_que *req;
  2264. __le32 *handle_ptr;
  2265. stsmfx = (struct multi_sts_entry_fx00 *) pkt;
  2266. handle_count = stsmfx->handle_count;
  2267. if (handle_count > MAX_HANDLE_COUNT) {
  2268. ql_dbg(ql_dbg_io, vha, 0x3035,
  2269. "Invalid handle count (0x%x).\n", handle_count);
  2270. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  2271. qla2xxx_wake_dpc(vha);
  2272. return;
  2273. }
  2274. handle_ptr = &stsmfx->handles[0];
  2275. for (i = 0; i < handle_count; i++) {
  2276. hindex = le32_to_cpu(*handle_ptr);
  2277. handle = LSW(hindex);
  2278. que = MSW(hindex);
  2279. req = ha->req_q_map[que];
  2280. /* Validate handle. */
  2281. if (handle < req->num_outstanding_cmds)
  2282. sp = req->outstanding_cmds[handle];
  2283. else
  2284. sp = NULL;
  2285. if (sp == NULL) {
  2286. ql_dbg(ql_dbg_io, vha, 0x3044,
  2287. "Invalid status handle (0x%x).\n", handle);
  2288. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  2289. qla2xxx_wake_dpc(vha);
  2290. return;
  2291. }
  2292. qla2x00_process_completed_request(vha, req, handle);
  2293. handle_ptr++;
  2294. }
  2295. }
  2296. /**
  2297. * qlafx00_error_entry() - Process an error entry.
2298. * @vha: SCSI driver HA context
  2299. * @pkt: Entry pointer
  2300. */
  2301. static void
  2302. qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
  2303. struct sts_entry_fx00 *pkt, uint8_t estatus, uint8_t etype)
  2304. {
  2305. srb_t *sp;
  2306. struct qla_hw_data *ha = vha->hw;
  2307. const char func[] = "ERROR-IOCB";
  2308. uint16_t que = MSW(pkt->handle);
  2309. struct req_que *req = NULL;
  2310. int res = DID_ERROR << 16;
  2311. ql_dbg(ql_dbg_async, vha, 0x507f,
  2312. "type of error status in response: 0x%x\n", estatus);
  2313. req = ha->req_q_map[que];
  2314. sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
  2315. if (sp) {
  2316. sp->done(ha, sp, res);
  2317. return;
  2318. }
  2319. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  2320. qla2xxx_wake_dpc(vha);
  2321. }
  2322. /**
  2323. * qlafx00_process_response_queue() - Process response queue entries.
2324. * @vha: SCSI driver HA context
  2325. */
  2326. static void
  2327. qlafx00_process_response_queue(struct scsi_qla_host *vha,
  2328. struct rsp_que *rsp)
  2329. {
  2330. struct sts_entry_fx00 *pkt;
  2331. response_t *lptr;
  2332. if (!vha->flags.online)
  2333. return;
  2334. while (RD_REG_DWORD((void __iomem *)&(rsp->ring_ptr->signature)) !=
  2335. RESPONSE_PROCESSED) {
  2336. lptr = rsp->ring_ptr;
  2337. memcpy_fromio(rsp->rsp_pkt, (void __iomem *)lptr,
  2338. sizeof(rsp->rsp_pkt));
  2339. pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;
  2340. rsp->ring_index++;
  2341. if (rsp->ring_index == rsp->length) {
  2342. rsp->ring_index = 0;
  2343. rsp->ring_ptr = rsp->ring;
  2344. } else {
  2345. rsp->ring_ptr++;
  2346. }
  2347. if (pkt->entry_status != 0 &&
  2348. pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
  2349. qlafx00_error_entry(vha, rsp,
  2350. (struct sts_entry_fx00 *)pkt, pkt->entry_status,
  2351. pkt->entry_type);
2352. goto next_iter;
  2354. }
  2355. switch (pkt->entry_type) {
  2356. case STATUS_TYPE_FX00:
  2357. qlafx00_status_entry(vha, rsp, pkt);
  2358. break;
  2359. case STATUS_CONT_TYPE_FX00:
  2360. qlafx00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
  2361. break;
  2362. case MULTI_STATUS_TYPE_FX00:
  2363. qlafx00_multistatus_entry(vha, rsp, pkt);
  2364. break;
  2365. case ABORT_IOCB_TYPE_FX00:
  2366. qlafx00_abort_iocb_entry(vha, rsp->req,
  2367. (struct abort_iocb_entry_fx00 *)pkt);
  2368. break;
  2369. case IOCTL_IOSB_TYPE_FX00:
  2370. qlafx00_ioctl_iosb_entry(vha, rsp->req,
  2371. (struct ioctl_iocb_entry_fx00 *)pkt);
  2372. break;
  2373. default:
  2374. /* Type Not Supported. */
  2375. ql_dbg(ql_dbg_async, vha, 0x5081,
  2376. "Received unknown response pkt type %x "
  2377. "entry status=%x.\n",
  2378. pkt->entry_type, pkt->entry_status);
  2379. break;
  2380. }
  2381. next_iter:
  2382. WRT_REG_DWORD((void __iomem *)&lptr->signature,
  2383. RESPONSE_PROCESSED);
  2384. wmb();
  2385. }
  2386. /* Adjust ring index */
  2387. WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
  2388. }
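/*
 * qlafx00_async_event() below runs from the interrupt handler with
 * aenmb[0] already latched; it reads the remaining AEN mailboxes needed
 * for the event type and defers the real handling to
 * qlafx00_post_aenfx_work(), which lets qlafx00_process_aen() above run
 * later in process context.
 */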
  2389. /**
2390. * qlafx00_async_event() - Process asynchronous events.
2391. * @vha: SCSI driver HA context
  2392. */
  2393. static void
  2394. qlafx00_async_event(scsi_qla_host_t *vha)
  2395. {
  2396. struct qla_hw_data *ha = vha->hw;
  2397. struct device_reg_fx00 __iomem *reg;
  2398. int data_size = 1;
  2399. reg = &ha->iobase->ispfx00;
  2400. /* Setup to process RIO completion. */
  2401. switch (ha->aenmb[0]) {
  2402. case QLAFX00_MBA_SYSTEM_ERR: /* System Error */
  2403. ql_log(ql_log_warn, vha, 0x5079,
  2404. "ISP System Error - mbx1=%x\n", ha->aenmb[0]);
  2405. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  2406. break;
  2407. case QLAFX00_MBA_SHUTDOWN_RQSTD: /* Shutdown requested */
  2408. ql_dbg(ql_dbg_async, vha, 0x5076,
  2409. "Asynchronous FW shutdown requested.\n");
  2410. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  2411. qla2xxx_wake_dpc(vha);
  2412. break;
  2413. case QLAFX00_MBA_PORT_UPDATE: /* Port database update */
  2414. ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
  2415. ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
  2416. ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
  2417. ql_dbg(ql_dbg_async, vha, 0x5077,
  2418. "Asynchronous port Update received "
  2419. "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
  2420. ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
  2421. data_size = 4;
  2422. break;
  2423. default:
  2424. ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
  2425. ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
  2426. ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
  2427. ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4);
  2428. ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5);
  2429. ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6);
  2430. ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7);
  2431. ql_dbg(ql_dbg_async, vha, 0x5078,
  2432. "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
  2433. ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
  2434. ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]);
  2435. break;
  2436. }
  2437. qlafx00_post_aenfx_work(vha, ha->aenmb[0],
  2438. (uint32_t *)ha->aenmb, data_size);
  2439. }
  2440. /**
2441. * qlafx00_mbx_completion() - Process mailbox command completions.
2442. * @vha: SCSI driver HA context
2443. * @mb0: value read from the mailbox16 register
2444. */
  2446. static void
  2447. qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
  2448. {
  2449. uint16_t cnt;
  2450. uint16_t __iomem *wptr;
  2451. struct qla_hw_data *ha = vha->hw;
  2452. struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
  2453. if (!ha->mcp32)
  2454. ql_dbg(ql_dbg_async, vha, 0x507e, "MBX pointer ERROR.\n");
  2455. /* Load return mailbox registers. */
  2456. ha->flags.mbox_int = 1;
  2457. ha->mailbox_out32[0] = mb0;
  2458. wptr = (uint16_t __iomem *)&reg->mailbox17;
  2459. for (cnt = 1; cnt < ha->mbx_count; cnt++) {
  2460. ha->mailbox_out32[cnt] = RD_REG_WORD(wptr);
  2461. wptr++;
  2462. }
  2463. }
  2464. /**
  2465. * qlafx00_intr_handler() - Process interrupts for the ISPFX00.
2466. * @irq: interrupt number
2467. * @dev_id: pointer to the response queue (struct rsp_que *)
  2468. *
  2469. * Called by system whenever the host adapter generates an interrupt.
  2470. *
  2471. * Returns handled flag.
  2472. */
  2473. irqreturn_t
  2474. qlafx00_intr_handler(int irq, void *dev_id)
  2475. {
  2476. scsi_qla_host_t *vha;
  2477. struct qla_hw_data *ha;
  2478. struct device_reg_fx00 __iomem *reg;
  2479. int status;
  2480. unsigned long iter;
  2481. uint32_t stat;
  2482. uint32_t mb[8];
  2483. struct rsp_que *rsp;
  2484. unsigned long flags;
  2485. uint32_t clr_intr = 0;
  2486. rsp = (struct rsp_que *) dev_id;
  2487. if (!rsp) {
  2488. ql_log(ql_log_info, NULL, 0x507d,
  2489. "%s: NULL response queue pointer.\n", __func__);
  2490. return IRQ_NONE;
  2491. }
  2492. ha = rsp->hw;
  2493. reg = &ha->iobase->ispfx00;
  2494. status = 0;
  2495. if (unlikely(pci_channel_offline(ha->pdev)))
  2496. return IRQ_HANDLED;
  2497. spin_lock_irqsave(&ha->hardware_lock, flags);
  2498. vha = pci_get_drvdata(ha->pdev);
  2499. for (iter = 50; iter--; clr_intr = 0) {
  2500. stat = QLAFX00_RD_INTR_REG(ha);
  2501. if ((stat & QLAFX00_HST_INT_STS_BITS) == 0)
  2502. break;
  2503. switch (stat & QLAFX00_HST_INT_STS_BITS) {
  2504. case QLAFX00_INTR_MB_CMPLT:
  2505. case QLAFX00_INTR_MB_RSP_CMPLT:
  2506. case QLAFX00_INTR_MB_ASYNC_CMPLT:
  2507. case QLAFX00_INTR_ALL_CMPLT:
  2508. mb[0] = RD_REG_WORD(&reg->mailbox16);
  2509. qlafx00_mbx_completion(vha, mb[0]);
  2510. status |= MBX_INTERRUPT;
  2511. clr_intr |= QLAFX00_INTR_MB_CMPLT;
  2512. break;
  2513. case QLAFX00_INTR_ASYNC_CMPLT:
  2514. case QLAFX00_INTR_RSP_ASYNC_CMPLT:
  2515. ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
  2516. qlafx00_async_event(vha);
  2517. clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
  2518. break;
  2519. case QLAFX00_INTR_RSP_CMPLT:
  2520. qlafx00_process_response_queue(vha, rsp);
  2521. clr_intr |= QLAFX00_INTR_RSP_CMPLT;
  2522. break;
  2523. default:
  2524. ql_dbg(ql_dbg_async, vha, 0x507a,
  2525. "Unrecognized interrupt type (%d).\n", stat);
  2526. break;
  2527. }
  2528. QLAFX00_CLR_INTR_REG(ha, clr_intr);
  2529. QLAFX00_RD_INTR_REG(ha);
  2530. }
  2531. qla2x00_handle_mbx_completion(ha, status);
  2532. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  2533. return IRQ_HANDLED;
  2534. }
  2535. /** QLAFX00 specific IOCB implementation functions */
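/*
 * IOCBs for the ISPFX00 are assembled in a local (stack) copy and then
 * copied onto the request ring with memcpy_toio(), since the ring lives in
 * I/O memory on this adapter; qlafx00_prep_cont_type1_iocb() only advances
 * the ring and stamps the continuation type on the local copy.
 */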
  2536. static inline cont_a64_entry_t *
  2537. qlafx00_prep_cont_type1_iocb(struct req_que *req,
  2538. cont_a64_entry_t *lcont_pkt)
  2539. {
  2540. cont_a64_entry_t *cont_pkt;
  2541. /* Adjust ring index. */
  2542. req->ring_index++;
  2543. if (req->ring_index == req->length) {
  2544. req->ring_index = 0;
  2545. req->ring_ptr = req->ring;
  2546. } else {
  2547. req->ring_ptr++;
  2548. }
  2549. cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
  2550. /* Load packet defaults. */
  2551. lcont_pkt->entry_type = CONTINUE_A64_TYPE_FX00;
  2552. return cont_pkt;
  2553. }
  2554. static inline void
  2555. qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
  2556. uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt)
  2557. {
  2558. uint16_t avail_dsds;
  2559. __le32 *cur_dsd;
  2560. scsi_qla_host_t *vha;
  2561. struct scsi_cmnd *cmd;
  2562. struct scatterlist *sg;
  2563. int i, cont;
  2564. struct req_que *req;
  2565. cont_a64_entry_t lcont_pkt;
  2566. cont_a64_entry_t *cont_pkt;
  2567. vha = sp->fcport->vha;
  2568. req = vha->req;
  2569. cmd = GET_CMD_SP(sp);
  2570. cont = 0;
  2571. cont_pkt = NULL;
2572. /* Update entry type to indicate Command Type 7 IOCB */
  2573. lcmd_pkt->entry_type = FX00_COMMAND_TYPE_7;
  2574. /* No data transfer */
  2575. if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
  2576. lcmd_pkt->byte_count = __constant_cpu_to_le32(0);
  2577. return;
  2578. }
  2579. /* Set transfer direction */
  2580. if (cmd->sc_data_direction == DMA_TO_DEVICE) {
  2581. lcmd_pkt->cntrl_flags = TMF_WRITE_DATA;
  2582. vha->qla_stats.output_bytes += scsi_bufflen(cmd);
  2583. } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
  2584. lcmd_pkt->cntrl_flags = TMF_READ_DATA;
  2585. vha->qla_stats.input_bytes += scsi_bufflen(cmd);
  2586. }
2587. /* One DSD is available in the Command Type 7 IOCB */
  2588. avail_dsds = 1;
  2589. cur_dsd = (__le32 *)&lcmd_pkt->dseg_0_address;
  2590. /* Load data segments */
  2591. scsi_for_each_sg(cmd, sg, tot_dsds, i) {
  2592. dma_addr_t sle_dma;
  2593. /* Allocate additional continuation packets? */
  2594. if (avail_dsds == 0) {
  2595. /*
  2596. * Five DSDs are available in the Continuation
  2597. * Type 1 IOCB.
  2598. */
  2599. memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE);
  2600. cont_pkt =
  2601. qlafx00_prep_cont_type1_iocb(req, &lcont_pkt);
  2602. cur_dsd = (__le32 *)lcont_pkt.dseg_0_address;
  2603. avail_dsds = 5;
  2604. cont = 1;
  2605. }
  2606. sle_dma = sg_dma_address(sg);
  2607. *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
  2608. *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
  2609. *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
  2610. avail_dsds--;
  2611. if (avail_dsds == 0 && cont == 1) {
  2612. cont = 0;
  2613. memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
  2614. REQUEST_ENTRY_SIZE);
  2615. }
  2616. }
  2617. if (avail_dsds != 0 && cont == 1) {
  2618. memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
  2619. REQUEST_ENTRY_SIZE);
  2620. }
  2621. }
  2622. /**
  2623. * qlafx00_start_scsi() - Send a SCSI command to the ISP
  2624. * @sp: command to send to the ISP
  2625. *
  2626. * Returns non-zero if a failure occurred, else zero.
  2627. */
  2628. int
  2629. qlafx00_start_scsi(srb_t *sp)
  2630. {
  2631. int ret, nseg;
  2632. unsigned long flags;
  2633. uint32_t index;
  2634. uint32_t handle;
  2635. uint16_t cnt;
  2636. uint16_t req_cnt;
  2637. uint16_t tot_dsds;
  2638. struct req_que *req = NULL;
  2639. struct rsp_que *rsp = NULL;
  2640. struct scsi_cmnd *cmd = GET_CMD_SP(sp);
  2641. struct scsi_qla_host *vha = sp->fcport->vha;
  2642. struct qla_hw_data *ha = vha->hw;
  2643. struct cmd_type_7_fx00 *cmd_pkt;
  2644. struct cmd_type_7_fx00 lcmd_pkt;
  2645. struct scsi_lun llun;
  2646. char tag[2];
  2647. /* Setup device pointers. */
  2648. ret = 0;
  2649. rsp = ha->rsp_q_map[0];
  2650. req = vha->req;
  2651. /* So we know we haven't pci_map'ed anything yet */
  2652. tot_dsds = 0;
2653. /* The ISPFX00 does not require a marker; clear any pending request */
  2654. vha->marker_needed = 0;
  2655. /* Send marker if required */
  2656. if (vha->marker_needed != 0) {
  2657. if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
  2658. QLA_SUCCESS)
  2659. return QLA_FUNCTION_FAILED;
  2660. vha->marker_needed = 0;
  2661. }
  2662. /* Acquire ring specific lock */
  2663. spin_lock_irqsave(&ha->hardware_lock, flags);
  2664. /* Check for room in outstanding command list. */
  2665. handle = req->current_outstanding_cmd;
  2666. for (index = 1; index < req->num_outstanding_cmds; index++) {
  2667. handle++;
  2668. if (handle == req->num_outstanding_cmds)
  2669. handle = 1;
  2670. if (!req->outstanding_cmds[handle])
  2671. break;
  2672. }
  2673. if (index == req->num_outstanding_cmds)
  2674. goto queuing_error;
  2675. /* Map the sg table so we have an accurate count of sg entries needed */
  2676. if (scsi_sg_count(cmd)) {
  2677. nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
  2678. scsi_sg_count(cmd), cmd->sc_data_direction);
  2679. if (unlikely(!nseg))
  2680. goto queuing_error;
  2681. } else
  2682. nseg = 0;
  2683. tot_dsds = nseg;
  2684. req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
  2685. if (req->cnt < (req_cnt + 2)) {
  2686. cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
  2687. if (req->ring_index < cnt)
  2688. req->cnt = cnt - req->ring_index;
  2689. else
  2690. req->cnt = req->length -
  2691. (req->ring_index - cnt);
  2692. if (req->cnt < (req_cnt + 2))
  2693. goto queuing_error;
  2694. }
  2695. /* Build command packet. */
  2696. req->current_outstanding_cmd = handle;
  2697. req->outstanding_cmds[handle] = sp;
  2698. sp->handle = handle;
  2699. cmd->host_scribble = (unsigned char *)(unsigned long)handle;
  2700. req->cnt -= req_cnt;
  2701. cmd_pkt = (struct cmd_type_7_fx00 *)req->ring_ptr;
  2702. memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);
  2703. lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
  2704. lcmd_pkt.handle_hi = 0;
  2705. lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds);
  2706. lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id);
  2707. int_to_scsilun(cmd->device->lun, &llun);
  2708. host_to_adap((uint8_t *)&llun, (uint8_t *)&lcmd_pkt.lun,
  2709. sizeof(lcmd_pkt.lun));
  2710. /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
  2711. if (scsi_populate_tag_msg(cmd, tag)) {
  2712. switch (tag[0]) {
  2713. case HEAD_OF_QUEUE_TAG:
  2714. lcmd_pkt.task = TSK_HEAD_OF_QUEUE;
  2715. break;
  2716. case ORDERED_QUEUE_TAG:
  2717. lcmd_pkt.task = TSK_ORDERED;
  2718. break;
  2719. }
  2720. }
  2721. /* Load SCSI command packet. */
  2722. host_to_adap(cmd->cmnd, lcmd_pkt.fcp_cdb, sizeof(lcmd_pkt.fcp_cdb));
  2723. lcmd_pkt.byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
  2724. /* Build IOCB segments */
  2725. qlafx00_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, &lcmd_pkt);
  2726. /* Set total data segment count. */
  2727. lcmd_pkt.entry_count = (uint8_t)req_cnt;
  2728. /* Specify response queue number where completion should happen */
  2729. lcmd_pkt.entry_status = (uint8_t) rsp->id;
  2730. ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
  2731. (uint8_t *)cmd->cmnd, cmd->cmd_len);
  2732. ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032,
  2733. (uint8_t *)&lcmd_pkt, REQUEST_ENTRY_SIZE);
  2734. memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE);
  2735. wmb();
  2736. /* Adjust ring index. */
  2737. req->ring_index++;
  2738. if (req->ring_index == req->length) {
  2739. req->ring_index = 0;
  2740. req->ring_ptr = req->ring;
  2741. } else
  2742. req->ring_ptr++;
  2743. sp->flags |= SRB_DMA_VALID;
  2744. /* Set chip new ring index. */
  2745. WRT_REG_DWORD(req->req_q_in, req->ring_index);
  2746. QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
  2747. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  2748. return QLA_SUCCESS;
  2749. queuing_error:
  2750. if (tot_dsds)
  2751. scsi_dma_unmap(cmd);
  2752. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  2753. return QLA_FUNCTION_FAILED;
  2754. }
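/*
 * The helpers below build the remaining ISPFX00 request IOCBs (task
 * management, abort and FXDISC) into caller-provided packets.  Each one
 * fills a local structure first and then copies it out, mirroring the
 * SCSI command path above.
 */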
  2755. void
  2756. qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
  2757. {
  2758. struct srb_iocb *fxio = &sp->u.iocb_cmd;
  2759. scsi_qla_host_t *vha = sp->fcport->vha;
  2760. struct req_que *req = vha->req;
  2761. struct tsk_mgmt_entry_fx00 tm_iocb;
  2762. struct scsi_lun llun;
  2763. memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
  2764. tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
  2765. tm_iocb.entry_count = 1;
  2766. tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
  2767. tm_iocb.handle_hi = 0;
  2768. tm_iocb.timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
  2769. tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
  2770. tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
  2771. if (tm_iocb.control_flags == cpu_to_le32((uint32_t)TCF_LUN_RESET)) {
  2772. int_to_scsilun(fxio->u.tmf.lun, &llun);
  2773. host_to_adap((uint8_t *)&llun, (uint8_t *)&tm_iocb.lun,
  2774. sizeof(struct scsi_lun));
  2775. }
  2776. memcpy((void *)ptm_iocb, &tm_iocb,
  2777. sizeof(struct tsk_mgmt_entry_fx00));
  2778. wmb();
  2779. }
  2780. void
  2781. qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
  2782. {
  2783. struct srb_iocb *fxio = &sp->u.iocb_cmd;
  2784. scsi_qla_host_t *vha = sp->fcport->vha;
  2785. struct req_que *req = vha->req;
  2786. struct abort_iocb_entry_fx00 abt_iocb;
  2787. memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
  2788. abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
  2789. abt_iocb.entry_count = 1;
  2790. abt_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
  2791. abt_iocb.abort_handle =
  2792. cpu_to_le32(MAKE_HANDLE(req->id, fxio->u.abt.cmd_hndl));
  2793. abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
  2794. abt_iocb.req_que_no = cpu_to_le16(req->id);
  2795. memcpy((void *)pabt_iocb, &abt_iocb,
  2796. sizeof(struct abort_iocb_entry_fx00));
  2797. wmb();
  2798. }
void
qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
{
	struct srb_iocb *fxio = &sp->u.iocb_cmd;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	struct fc_bsg_job *bsg_job;
	struct fxdisc_entry_fx00 fx_iocb;
	uint8_t entry_cnt = 1;

	memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00));
	fx_iocb.entry_type = FX00_IOCB_TYPE;
	fx_iocb.handle = cpu_to_le32(sp->handle);
	fx_iocb.entry_count = entry_cnt;

	if (sp->type == SRB_FXIOCB_DCMD) {
		fx_iocb.func_num =
		    sp->u.iocb_cmd.u.fxiocb.req_func_type;
		fx_iocb.adapid = fxio->u.fxiocb.adapter_id;
		fx_iocb.adapid_hi = fxio->u.fxiocb.adapter_id_hi;
		fx_iocb.reserved_0 = fxio->u.fxiocb.reserved_0;
		fx_iocb.reserved_1 = fxio->u.fxiocb.reserved_1;
		fx_iocb.dataword_extra = fxio->u.fxiocb.req_data_extra;

		if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
			fx_iocb.req_dsdcnt = cpu_to_le16(1);
			fx_iocb.req_xfrcnt =
			    cpu_to_le16(fxio->u.fxiocb.req_len);
			fx_iocb.dseg_rq_address[0] =
			    cpu_to_le32(LSD(fxio->u.fxiocb.req_dma_handle));
			fx_iocb.dseg_rq_address[1] =
			    cpu_to_le32(MSD(fxio->u.fxiocb.req_dma_handle));
			fx_iocb.dseg_rq_len =
			    cpu_to_le32(fxio->u.fxiocb.req_len);
		}

		if (fxio->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
			fx_iocb.rsp_dsdcnt = cpu_to_le16(1);
			fx_iocb.rsp_xfrcnt =
			    cpu_to_le16(fxio->u.fxiocb.rsp_len);
			fx_iocb.dseg_rsp_address[0] =
			    cpu_to_le32(LSD(fxio->u.fxiocb.rsp_dma_handle));
			fx_iocb.dseg_rsp_address[1] =
			    cpu_to_le32(MSD(fxio->u.fxiocb.rsp_dma_handle));
			fx_iocb.dseg_rsp_len =
			    cpu_to_le32(fxio->u.fxiocb.rsp_len);
		}

		if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DWRD_VALID) {
			fx_iocb.dataword = fxio->u.fxiocb.req_data;
		}
		fx_iocb.flags = fxio->u.fxiocb.flags;
	} else {
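		/*
		 * BSG vendor pass-through request: the FX00 IOCB parameters
		 * follow the vendor command word in the bsg_job request.
		 */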
		struct scatterlist *sg;

		bsg_job = sp->u.bsg_job;
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

		fx_iocb.func_num = piocb_rqst->func_type;
		fx_iocb.adapid = piocb_rqst->adapid;
		fx_iocb.adapid_hi = piocb_rqst->adapid_hi;
		fx_iocb.reserved_0 = piocb_rqst->reserved_0;
		fx_iocb.reserved_1 = piocb_rqst->reserved_1;
		fx_iocb.dataword_extra = piocb_rqst->dataword_extra;
		fx_iocb.dataword = piocb_rqst->dataword;
		fx_iocb.req_xfrcnt = piocb_rqst->req_len;
		fx_iocb.rsp_xfrcnt = piocb_rqst->rsp_len;

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
			int avail_dsds, tot_dsds;
			cont_a64_entry_t lcont_pkt;
			cont_a64_entry_t *cont_pkt = NULL;
			__le32 *cur_dsd;
			int index = 0, cont = 0;

			fx_iocb.req_dsdcnt =
			    cpu_to_le16(bsg_job->request_payload.sg_cnt);
			tot_dsds =
			    bsg_job->request_payload.sg_cnt;
			cur_dsd = (__le32 *)&fx_iocb.dseg_rq_address[0];
			avail_dsds = 1;
			for_each_sg(bsg_job->request_payload.sg_list, sg,
			    tot_dsds, index) {
				dma_addr_t sle_dma;

				/* Allocate additional continuation packets? */
				if (avail_dsds == 0) {
					/*
					 * Five DSDs are available in the Cont.
					 * Type 1 IOCB.
					 */
					memset(&lcont_pkt, 0,
					    REQUEST_ENTRY_SIZE);
					cont_pkt =
					    qlafx00_prep_cont_type1_iocb(
						sp->fcport->vha->req,
						&lcont_pkt);
					cur_dsd = (__le32 *)
					    lcont_pkt.dseg_0_address;
					avail_dsds = 5;
					cont = 1;
					entry_cnt++;
				}

				sle_dma = sg_dma_address(sg);
				*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
				avail_dsds--;

				if (avail_dsds == 0 && cont == 1) {
					cont = 0;
					memcpy_toio(
					    (void __iomem *)cont_pkt,
					    &lcont_pkt, REQUEST_ENTRY_SIZE);
					ql_dump_buffer(
					    ql_dbg_user + ql_dbg_verbose,
					    sp->fcport->vha, 0x3042,
					    (uint8_t *)&lcont_pkt,
					    REQUEST_ENTRY_SIZE);
				}
			}
			if (avail_dsds != 0 && cont == 1) {
				memcpy_toio((void __iomem *)cont_pkt,
				    &lcont_pkt, REQUEST_ENTRY_SIZE);
				ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
				    sp->fcport->vha, 0x3043,
				    (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
			}
		}

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
			int avail_dsds, tot_dsds;
			cont_a64_entry_t lcont_pkt;
			cont_a64_entry_t *cont_pkt = NULL;
			__le32 *cur_dsd;
			int index = 0, cont = 0;

			fx_iocb.rsp_dsdcnt =
			    cpu_to_le16(bsg_job->reply_payload.sg_cnt);
			tot_dsds = bsg_job->reply_payload.sg_cnt;
			cur_dsd = (__le32 *)&fx_iocb.dseg_rsp_address[0];
			avail_dsds = 1;

			for_each_sg(bsg_job->reply_payload.sg_list, sg,
			    tot_dsds, index) {
				dma_addr_t sle_dma;

				/* Allocate additional continuation packets? */
				if (avail_dsds == 0) {
					/*
					 * Five DSDs are available in the Cont.
					 * Type 1 IOCB.
					 */
					memset(&lcont_pkt, 0,
					    REQUEST_ENTRY_SIZE);
					cont_pkt =
					    qlafx00_prep_cont_type1_iocb(
						sp->fcport->vha->req,
						&lcont_pkt);
					cur_dsd = (__le32 *)
					    lcont_pkt.dseg_0_address;
					avail_dsds = 5;
					cont = 1;
					entry_cnt++;
				}

				sle_dma = sg_dma_address(sg);
				*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
				*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
				avail_dsds--;

				if (avail_dsds == 0 && cont == 1) {
					cont = 0;
					memcpy_toio((void __iomem *)cont_pkt,
					    &lcont_pkt,
					    REQUEST_ENTRY_SIZE);
					ql_dump_buffer(
					    ql_dbg_user + ql_dbg_verbose,
					    sp->fcport->vha, 0x3045,
					    (uint8_t *)&lcont_pkt,
					    REQUEST_ENTRY_SIZE);
				}
			}
			if (avail_dsds != 0 && cont == 1) {
				memcpy_toio((void __iomem *)cont_pkt,
				    &lcont_pkt, REQUEST_ENTRY_SIZE);
				ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
				    sp->fcport->vha, 0x3046,
				    (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
			}
		}

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DWRD_VALID)
			fx_iocb.dataword = piocb_rqst->dataword;
		fx_iocb.flags = piocb_rqst->flags;
		fx_iocb.entry_count = entry_cnt;
	}

	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
	    sp->fcport->vha, 0x3047,
	    (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));

	memcpy((void *)pfxiocb, &fx_iocb,
	    sizeof(struct fxdisc_entry_fx00));
	wmb();
}