qla_isr.c

  1. /*
  2. * QLogic Fibre Channel HBA Driver
  3. * Copyright (c) 2003-2013 QLogic Corporation
  4. *
  5. * See LICENSE.qla2xxx for copyright and licensing details.
  6. */
  7. #include "qla_def.h"
  8. #include "qla_target.h"
  9. #include <linux/delay.h>
  10. #include <linux/slab.h>
  11. #include <scsi/scsi_tcq.h>
  12. #include <scsi/scsi_bsg_fc.h>
  13. #include <scsi/scsi_eh.h>
  14. static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
  15. static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
  16. static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
  17. static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
  18. sts_entry_t *);
  19. /**
  20. * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
  21. * @irq: interrupt number
  22. * @dev_id: SCSI driver HA context
  23. *
  24. * Called by system whenever the host adapter generates an interrupt.
  25. *
  26. * Returns handled flag.
  27. */
  28. irqreturn_t
  29. qla2100_intr_handler(int irq, void *dev_id)
  30. {
  31. scsi_qla_host_t *vha;
  32. struct qla_hw_data *ha;
  33. struct device_reg_2xxx __iomem *reg;
  34. int status;
  35. unsigned long iter;
  36. uint16_t hccr;
  37. uint16_t mb[4];
  38. struct rsp_que *rsp;
  39. unsigned long flags;
  40. rsp = (struct rsp_que *) dev_id;
  41. if (!rsp) {
  42. ql_log(ql_log_info, NULL, 0x505d,
  43. "%s: NULL response queue pointer.\n", __func__);
  44. return (IRQ_NONE);
  45. }
  46. ha = rsp->hw;
  47. reg = &ha->iobase->isp;
  48. status = 0;
  49. spin_lock_irqsave(&ha->hardware_lock, flags);
  50. vha = pci_get_drvdata(ha->pdev);
  51. for (iter = 50; iter--; ) {
  52. hccr = RD_REG_WORD(&reg->hccr);
  53. if (hccr & HCCR_RISC_PAUSE) {
  54. if (pci_channel_offline(ha->pdev))
  55. break;
  56. /*
  57. * Issue a "HARD" reset in order for the RISC interrupt
  58. * bit to be cleared. Schedule a big hammer to get
  59. * out of the RISC PAUSED state.
  60. */
  61. WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
  62. RD_REG_WORD(&reg->hccr);
  63. ha->isp_ops->fw_dump(vha, 1);
  64. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  65. break;
  66. } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
  67. break;
  68. if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
  69. WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
  70. RD_REG_WORD(&reg->hccr);
  71. /* Get mailbox data. */
  72. mb[0] = RD_MAILBOX_REG(ha, reg, 0);
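  /*
   * Mailbox register 0 identifies the interrupt source: values in
   * 0x4000-0x7fff are mailbox-command completion statuses, values in
   * 0x8000-0xbfff are asynchronous event codes.
   */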
  73. if (mb[0] > 0x3fff && mb[0] < 0x8000) {
  74. qla2x00_mbx_completion(vha, mb[0]);
  75. status |= MBX_INTERRUPT;
  76. } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
  77. mb[1] = RD_MAILBOX_REG(ha, reg, 1);
  78. mb[2] = RD_MAILBOX_REG(ha, reg, 2);
  79. mb[3] = RD_MAILBOX_REG(ha, reg, 3);
  80. qla2x00_async_event(vha, rsp, mb);
  81. } else {
  82. /*EMPTY*/
  83. ql_dbg(ql_dbg_async, vha, 0x5025,
  84. "Unrecognized interrupt type (%d).\n",
  85. mb[0]);
  86. }
  87. /* Release mailbox registers. */
  88. WRT_REG_WORD(&reg->semaphore, 0);
  89. RD_REG_WORD(&reg->semaphore);
  90. } else {
  91. qla2x00_process_response_queue(rsp);
  92. WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
  93. RD_REG_WORD(&reg->hccr);
  94. }
  95. }
  96. spin_unlock_irqrestore(&ha->hardware_lock, flags);
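  /*
   * If a mailbox command is waiting on this interrupt, record the
   * completion and wake the waiter.
   */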
  97. if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
  98. (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
  99. set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
  100. complete(&ha->mbx_intr_comp);
  101. }
  102. return (IRQ_HANDLED);
  103. }
  104. /**
  105. * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
  106. * @irq: interrupt number
  107. * @dev_id: SCSI driver HA context
  108. *
  109. * Called by system whenever the host adapter generates an interrupt.
  110. *
  111. * Returns handled flag.
  112. */
  113. irqreturn_t
  114. qla2300_intr_handler(int irq, void *dev_id)
  115. {
  116. scsi_qla_host_t *vha;
  117. struct device_reg_2xxx __iomem *reg;
  118. int status;
  119. unsigned long iter;
  120. uint32_t stat;
  121. uint16_t hccr;
  122. uint16_t mb[4];
  123. struct rsp_que *rsp;
  124. struct qla_hw_data *ha;
  125. unsigned long flags;
  126. rsp = (struct rsp_que *) dev_id;
  127. if (!rsp) {
  128. ql_log(ql_log_info, NULL, 0x5058,
  129. "%s: NULL response queue pointer.\n", __func__);
  130. return (IRQ_NONE);
  131. }
  132. ha = rsp->hw;
  133. reg = &ha->iobase->isp;
  134. status = 0;
  135. spin_lock_irqsave(&ha->hardware_lock, flags);
  136. vha = pci_get_drvdata(ha->pdev);
  137. for (iter = 50; iter--; ) {
  138. stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
  139. if (stat & HSR_RISC_PAUSED) {
  140. if (unlikely(pci_channel_offline(ha->pdev)))
  141. break;
  142. hccr = RD_REG_WORD(&reg->hccr);
  143. if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
  144. ql_log(ql_log_warn, vha, 0x5026,
  145. "Parity error -- HCCR=%x, Dumping "
  146. "firmware.\n", hccr);
  147. else
  148. ql_log(ql_log_warn, vha, 0x5027,
  149. "RISC paused -- HCCR=%x, Dumping "
  150. "firmware.\n", hccr);
  151. /*
  152. * Issue a "HARD" reset in order for the RISC
  153. * interrupt bit to be cleared. Schedule a big
  154. * hammer to get out of the RISC PAUSED state.
  155. */
  156. WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
  157. RD_REG_WORD(&reg->hccr);
  158. ha->isp_ops->fw_dump(vha, 1);
  159. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  160. break;
  161. } else if ((stat & HSR_RISC_INT) == 0)
  162. break;
  163. switch (stat & 0xff) {
  164. case 0x1:
  165. case 0x2:
  166. case 0x10:
  167. case 0x11:
  168. qla2x00_mbx_completion(vha, MSW(stat));
  169. status |= MBX_INTERRUPT;
  170. /* Release mailbox registers. */
  171. WRT_REG_WORD(&reg->semaphore, 0);
  172. break;
  173. case 0x12:
  174. mb[0] = MSW(stat);
  175. mb[1] = RD_MAILBOX_REG(ha, reg, 1);
  176. mb[2] = RD_MAILBOX_REG(ha, reg, 2);
  177. mb[3] = RD_MAILBOX_REG(ha, reg, 3);
  178. qla2x00_async_event(vha, rsp, mb);
  179. break;
  180. case 0x13:
  181. qla2x00_process_response_queue(rsp);
  182. break;
  183. case 0x15:
  184. mb[0] = MBA_CMPLT_1_16BIT;
  185. mb[1] = MSW(stat);
  186. qla2x00_async_event(vha, rsp, mb);
  187. break;
  188. case 0x16:
  189. mb[0] = MBA_SCSI_COMPLETION;
  190. mb[1] = MSW(stat);
  191. mb[2] = RD_MAILBOX_REG(ha, reg, 2);
  192. qla2x00_async_event(vha, rsp, mb);
  193. break;
  194. default:
  195. ql_dbg(ql_dbg_async, vha, 0x5028,
  196. "Unrecognized interrupt type (%d).\n", stat & 0xff);
  197. break;
  198. }
  199. WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
  200. RD_REG_WORD_RELAXED(&reg->hccr);
  201. }
  202. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  203. if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
  204. (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
  205. set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
  206. complete(&ha->mbx_intr_comp);
  207. }
  208. return (IRQ_HANDLED);
  209. }
  210. /**
  211. * qla2x00_mbx_completion() - Process mailbox command completions.
  212. * @vha: SCSI driver HA context
  213. * @mb0: Mailbox0 register
  214. */
  215. static void
  216. qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
  217. {
  218. uint16_t cnt;
  219. uint32_t mboxes;
  220. uint16_t __iomem *wptr;
  221. struct qla_hw_data *ha = vha->hw;
  222. struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
  223. /* Read all mbox registers? */
  224. mboxes = (1 << ha->mbx_count) - 1;
  225. if (!ha->mcp)
  226. ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
  227. else
  228. mboxes = ha->mcp->in_mb;
  229. /* Load return mailbox registers. */
  230. ha->flags.mbox_int = 1;
  231. ha->mailbox_out[0] = mb0;
  232. mboxes >>= 1;
  233. wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
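  /*
   * Copy the remaining mailbox registers selected by in_mb into
   * mailbox_out[].  Registers 4 and 5 are read through
   * qla2x00_debounce_register(); on ISP2200 the pointer is re-seeded
   * at register 8, which is mapped separately on that part.
   */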
  234. for (cnt = 1; cnt < ha->mbx_count; cnt++) {
  235. if (IS_QLA2200(ha) && cnt == 8)
  236. wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
  237. if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
  238. ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
  239. else if (mboxes & BIT_0)
  240. ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
  241. wptr++;
  242. mboxes >>= 1;
  243. }
  244. }
  245. static void
  246. qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
  247. {
  248. static char *event[] =
  249. { "Complete", "Request Notification", "Time Extension" };
  250. int rval;
  251. struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
  252. uint16_t __iomem *wptr;
  253. uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
  254. /* Seed data -- mailbox1 -> mailbox7. */
  255. wptr = (uint16_t __iomem *)&reg24->mailbox1;
  256. for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
  257. mb[cnt] = RD_REG_WORD(wptr);
  258. ql_dbg(ql_dbg_async, vha, 0x5021,
  259. "Inter-Driver Communication %s -- "
  260. "%04x %04x %04x %04x %04x %04x %04x.\n",
  261. event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
  262. mb[4], mb[5], mb[6]);
  263. if ((aen == MBA_IDC_COMPLETE && mb[1] >> 15)) {
  264. vha->hw->flags.idc_compl_status = 1;
  265. if (vha->hw->notify_dcbx_comp)
  266. complete(&vha->hw->dcbx_comp);
  267. }
  268. /* Acknowledgement needed? [Notify && non-zero timeout]. */
  269. timeout = (descr >> 8) & 0xf;
  270. if (aen != MBA_IDC_NOTIFY || !timeout)
  271. return;
  272. ql_dbg(ql_dbg_async, vha, 0x5022,
  273. "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
  274. vha->host_no, event[aen & 0xff], timeout);
  275. rval = qla2x00_post_idc_ack_work(vha, mb);
  276. if (rval != QLA_SUCCESS)
  277. ql_log(ql_log_warn, vha, 0x5023,
  278. "IDC failed to post ACK.\n");
  279. }
  280. #define LS_UNKNOWN 2
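  /*
   * Map a firmware port-speed code to a printable string: codes 0-5
   * index link_speeds[] directly, 0x13 maps to the 10 Gb entry, and
   * anything else falls back to "?" (LS_UNKNOWN).  ISP2100/ISP2200
   * adapters always report 1 Gb.
   */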
  281. const char *
  282. qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
  283. {
  284. static const char * const link_speeds[] = {
  285. "1", "2", "?", "4", "8", "16", "10"
  286. };
  287. if (IS_QLA2100(ha) || IS_QLA2200(ha))
  288. return link_speeds[0];
  289. else if (speed == 0x13)
  290. return link_speeds[6];
  291. else if (speed < 6)
  292. return link_speeds[speed];
  293. else
  294. return link_speeds[LS_UNKNOWN];
  295. }
  296. static void
  297. qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
  298. {
  299. struct qla_hw_data *ha = vha->hw;
  300. /*
  301. * 8200 AEN Interpretation:
  302. * mb[0] = AEN code
  303. * mb[1] = AEN Reason code
  304. * mb[2] = LSW of Peg-Halt Status-1 Register
  305. * mb[6] = MSW of Peg-Halt Status-1 Register
  306. * mb[3] = LSW of Peg-Halt Status-2 register
  307. * mb[7] = MSW of Peg-Halt Status-2 register
  308. * mb[4] = IDC Device-State Register value
  309. * mb[5] = IDC Driver-Presence Register value
  310. */
  311. ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
  312. "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
  313. mb[0], mb[1], mb[2], mb[6]);
  314. ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
  315. "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
  316. "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
  317. if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
  318. IDC_HEARTBEAT_FAILURE)) {
  319. ha->flags.nic_core_hung = 1;
  320. ql_log(ql_log_warn, vha, 0x5060,
  321. "83XX: F/W Error Reported: Check if reset required.\n");
  322. if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
  323. uint32_t protocol_engine_id, fw_err_code, err_level;
  324. /*
  325. * IDC_PEG_HALT_STATUS_CHANGE interpretation:
  326. * - PEG-Halt Status-1 Register:
  327. * (LSW = mb[2], MSW = mb[6])
  328. * Bits 0-7 = protocol-engine ID
  329. * Bits 8-28 = f/w error code
  330. * Bits 29-31 = Error-level
  331. * Error-level 0x1 = Non-Fatal error
  332. * Error-level 0x2 = Recoverable Fatal error
  333. * Error-level 0x4 = UnRecoverable Fatal error
  334. * - PEG-Halt Status-2 Register:
  335. * (LSW = mb[3], MSW = mb[7])
  336. */
  337. protocol_engine_id = (mb[2] & 0xff);
  338. fw_err_code = (((mb[2] & 0xff00) >> 8) |
  339. ((mb[6] & 0x1fff) << 8));
  340. err_level = ((mb[6] & 0xe000) >> 13);
  341. ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
  342. "Register: protocol_engine_id=0x%x "
  343. "fw_err_code=0x%x err_level=0x%x.\n",
  344. protocol_engine_id, fw_err_code, err_level);
  345. ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
  346. "Register: 0x%x%x.\n", mb[7], mb[3]);
  347. if (err_level == ERR_LEVEL_NON_FATAL) {
  348. ql_log(ql_log_warn, vha, 0x5063,
  349. "Not a fatal error, f/w has recovered "
  350. "iteself.\n");
  351. } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
  352. ql_log(ql_log_fatal, vha, 0x5064,
  353. "Recoverable Fatal error: Chip reset "
  354. "required.\n");
  355. qla83xx_schedule_work(vha,
  356. QLA83XX_NIC_CORE_RESET);
  357. } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
  358. ql_log(ql_log_fatal, vha, 0x5065,
  359. "Unrecoverable Fatal error: Set FAILED "
  360. "state, reboot required.\n");
  361. qla83xx_schedule_work(vha,
  362. QLA83XX_NIC_CORE_UNRECOVERABLE);
  363. }
  364. }
  365. if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
  366. uint16_t peg_fw_state, nw_interface_link_up;
  367. uint16_t nw_interface_signal_detect, sfp_status;
  368. uint16_t htbt_counter, htbt_monitor_enable;
  369. uint16_t sfp_additional_info, sfp_multirate;
  370. uint16_t sfp_tx_fault, link_speed, dcbx_status;
  371. /*
  372. * IDC_NIC_FW_REPORTED_FAILURE interpretation:
  373. * - PEG-to-FC Status Register:
  374. * (LSW = mb[2], MSW = mb[6])
  375. * Bits 0-7 = Peg-Firmware state
  376. * Bit 8 = N/W Interface Link-up
  377. * Bit 9 = N/W Interface signal detected
  378. * Bits 10-11 = SFP Status
  379. * SFP Status 0x0 = SFP+ transceiver not expected
  380. * SFP Status 0x1 = SFP+ transceiver not present
  381. * SFP Status 0x2 = SFP+ transceiver invalid
  382. * SFP Status 0x3 = SFP+ transceiver present and
  383. * valid
  384. * Bits 12-14 = Heartbeat Counter
  385. * Bit 15 = Heartbeat Monitor Enable
  386. * Bits 16-17 = SFP Additional Info
  387. * SFP info 0x0 = Unrecognized transceiver for
  388. * Ethernet
  389. * SFP info 0x1 = SFP+ brand validation failed
  390. * SFP info 0x2 = SFP+ speed validation failed
  391. * SFP info 0x3 = SFP+ access error
  392. * Bit 18 = SFP Multirate
  393. * Bit 19 = SFP Tx Fault
  394. * Bits 20-22 = Link Speed
  395. * Bits 23-27 = Reserved
  396. * Bits 28-30 = DCBX Status
  397. * DCBX Status 0x0 = DCBX Disabled
  398. * DCBX Status 0x1 = DCBX Enabled
  399. * DCBX Status 0x2 = DCBX Exchange error
  400. * Bit 31 = Reserved
  401. */
  402. peg_fw_state = (mb[2] & 0x00ff);
  403. nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
  404. nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
  405. sfp_status = ((mb[2] & 0x0c00) >> 10);
  406. htbt_counter = ((mb[2] & 0x7000) >> 12);
  407. htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
  408. sfp_additional_info = (mb[6] & 0x0003);
  409. sfp_multirate = ((mb[6] & 0x0004) >> 2);
  410. sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
  411. link_speed = ((mb[6] & 0x0070) >> 4);
  412. dcbx_status = ((mb[6] & 0x7000) >> 12);
  413. ql_log(ql_log_warn, vha, 0x5066,
  414. "Peg-to-Fc Status Register:\n"
  415. "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
  416. "nw_interface_signal_detect=0x%x"
  417. "\nsfp_statis=0x%x.\n ", peg_fw_state,
  418. nw_interface_link_up, nw_interface_signal_detect,
  419. sfp_status);
  420. ql_log(ql_log_warn, vha, 0x5067,
  421. "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
  422. "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ",
  423. htbt_counter, htbt_monitor_enable,
  424. sfp_additional_info, sfp_multirate);
  425. ql_log(ql_log_warn, vha, 0x5068,
  426. "sfp_tx_fault=0x%x, link_state=0x%x, "
  427. "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
  428. dcbx_status);
  429. qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
  430. }
  431. if (mb[1] & IDC_HEARTBEAT_FAILURE) {
  432. ql_log(ql_log_warn, vha, 0x5069,
  433. "Heartbeat Failure encountered, chip reset "
  434. "required.\n");
  435. qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
  436. }
  437. }
  438. if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
  439. ql_log(ql_log_info, vha, 0x506a,
  440. "IDC Device-State changed = 0x%x.\n", mb[4]);
  441. if (ha->flags.nic_core_reset_owner)
  442. return;
  443. qla83xx_schedule_work(vha, MBA_IDC_AEN);
  444. }
  445. }
  446. int
  447. qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
  448. {
  449. struct qla_hw_data *ha = vha->hw;
  450. scsi_qla_host_t *vp;
  451. uint32_t vp_did;
  452. unsigned long flags;
  453. int ret = 0;
  454. if (!ha->num_vhosts)
  455. return ret;
  456. spin_lock_irqsave(&ha->vport_slock, flags);
  457. list_for_each_entry(vp, &ha->vp_list, list) {
  458. vp_did = vp->d_id.b24;
  459. if (vp_did == rscn_entry) {
  460. ret = 1;
  461. break;
  462. }
  463. }
  464. spin_unlock_irqrestore(&ha->vport_slock, flags);
  465. return ret;
  466. }
  467. /**
  468. * qla2x00_async_event() - Process asynchronous events.
  469. * @vha: SCSI driver HA context
  470. * @mb: Mailbox registers (0 - 3)
  471. */
  472. void
  473. qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
  474. {
  475. uint16_t handle_cnt;
  476. uint16_t cnt, mbx;
  477. uint32_t handles[5];
  478. struct qla_hw_data *ha = vha->hw;
  479. struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
  480. struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
  481. struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
  482. uint32_t rscn_entry, host_pid;
  483. unsigned long flags;
  484. /* Setup to process RIO completion. */
  485. handle_cnt = 0;
  486. if (IS_CNA_CAPABLE(ha))
  487. goto skip_rio;
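  /*
   * RIO (Reduced Interrupt Operation) completions pack up to five I/O
   * handles into the mailbox registers; unpack them into handles[] and
   * normalize the event code to MBA_SCSI_COMPLETION for the handling
   * below.
   */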
  488. switch (mb[0]) {
  489. case MBA_SCSI_COMPLETION:
  490. handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
  491. handle_cnt = 1;
  492. break;
  493. case MBA_CMPLT_1_16BIT:
  494. handles[0] = mb[1];
  495. handle_cnt = 1;
  496. mb[0] = MBA_SCSI_COMPLETION;
  497. break;
  498. case MBA_CMPLT_2_16BIT:
  499. handles[0] = mb[1];
  500. handles[1] = mb[2];
  501. handle_cnt = 2;
  502. mb[0] = MBA_SCSI_COMPLETION;
  503. break;
  504. case MBA_CMPLT_3_16BIT:
  505. handles[0] = mb[1];
  506. handles[1] = mb[2];
  507. handles[2] = mb[3];
  508. handle_cnt = 3;
  509. mb[0] = MBA_SCSI_COMPLETION;
  510. break;
  511. case MBA_CMPLT_4_16BIT:
  512. handles[0] = mb[1];
  513. handles[1] = mb[2];
  514. handles[2] = mb[3];
  515. handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
  516. handle_cnt = 4;
  517. mb[0] = MBA_SCSI_COMPLETION;
  518. break;
  519. case MBA_CMPLT_5_16BIT:
  520. handles[0] = mb[1];
  521. handles[1] = mb[2];
  522. handles[2] = mb[3];
  523. handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
  524. handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
  525. handle_cnt = 5;
  526. mb[0] = MBA_SCSI_COMPLETION;
  527. break;
  528. case MBA_CMPLT_2_32BIT:
  529. handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
  530. handles[1] = le32_to_cpu(
  531. ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
  532. RD_MAILBOX_REG(ha, reg, 6));
  533. handle_cnt = 2;
  534. mb[0] = MBA_SCSI_COMPLETION;
  535. break;
  536. default:
  537. break;
  538. }
  539. skip_rio:
  540. switch (mb[0]) {
  541. case MBA_SCSI_COMPLETION: /* Fast Post */
  542. if (!vha->flags.online)
  543. break;
  544. for (cnt = 0; cnt < handle_cnt; cnt++)
  545. qla2x00_process_completed_request(vha, rsp->req,
  546. handles[cnt]);
  547. break;
  548. case MBA_RESET: /* Reset */
  549. ql_dbg(ql_dbg_async, vha, 0x5002,
  550. "Asynchronous RESET.\n");
  551. set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
  552. break;
  553. case MBA_SYSTEM_ERR: /* System Error */
  554. mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
  555. RD_REG_WORD(&reg24->mailbox7) : 0;
  556. ql_log(ql_log_warn, vha, 0x5003,
  557. "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
  558. "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
  559. ha->isp_ops->fw_dump(vha, 1);
  560. if (IS_FWI2_CAPABLE(ha)) {
  561. if (mb[1] == 0 && mb[2] == 0) {
  562. ql_log(ql_log_fatal, vha, 0x5004,
  563. "Unrecoverable Hardware Error: adapter "
  564. "marked OFFLINE!\n");
  565. vha->flags.online = 0;
  566. vha->device_flags |= DFLG_DEV_FAILED;
  567. } else {
  568. /* Check to see if MPI timeout occurred */
  569. if ((mbx & MBX_3) && (ha->flags.port0))
  570. set_bit(MPI_RESET_NEEDED,
  571. &vha->dpc_flags);
  572. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  573. }
  574. } else if (mb[1] == 0) {
  575. ql_log(ql_log_fatal, vha, 0x5005,
  576. "Unrecoverable Hardware Error: adapter marked "
  577. "OFFLINE!\n");
  578. vha->flags.online = 0;
  579. vha->device_flags |= DFLG_DEV_FAILED;
  580. } else
  581. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  582. break;
  583. case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
  584. ql_log(ql_log_warn, vha, 0x5006,
  585. "ISP Request Transfer Error (%x).\n", mb[1]);
  586. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  587. break;
  588. case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
  589. ql_log(ql_log_warn, vha, 0x5007,
  590. "ISP Response Transfer Error.\n");
  591. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  592. break;
  593. case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
  594. ql_dbg(ql_dbg_async, vha, 0x5008,
  595. "Asynchronous WAKEUP_THRES.\n");
  596. break;
  597. case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
  598. ql_dbg(ql_dbg_async, vha, 0x5009,
  599. "LIP occurred (%x).\n", mb[1]);
  600. if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
  601. atomic_set(&vha->loop_state, LOOP_DOWN);
  602. atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
  603. qla2x00_mark_all_devices_lost(vha, 1);
  604. }
  605. if (vha->vp_idx) {
  606. atomic_set(&vha->vp_state, VP_FAILED);
  607. fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
  608. }
  609. set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
  610. set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
  611. vha->flags.management_server_logged_in = 0;
  612. qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
  613. break;
  614. case MBA_LOOP_UP: /* Loop Up Event */
  615. if (IS_QLA2100(ha) || IS_QLA2200(ha))
  616. ha->link_data_rate = PORT_SPEED_1GB;
  617. else
  618. ha->link_data_rate = mb[1];
  619. ql_dbg(ql_dbg_async, vha, 0x500a,
  620. "LOOP UP detected (%s Gbps).\n",
  621. qla2x00_get_link_speed_str(ha, ha->link_data_rate));
  622. vha->flags.management_server_logged_in = 0;
  623. qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
  624. break;
  625. case MBA_LOOP_DOWN: /* Loop Down Event */
  626. mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
  627. ? RD_REG_WORD(&reg24->mailbox4) : 0;
  628. mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
  629. ql_dbg(ql_dbg_async, vha, 0x500b,
  630. "LOOP DOWN detected (%x %x %x %x).\n",
  631. mb[1], mb[2], mb[3], mbx);
  632. if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
  633. atomic_set(&vha->loop_state, LOOP_DOWN);
  634. atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
  635. vha->device_flags |= DFLG_NO_CABLE;
  636. qla2x00_mark_all_devices_lost(vha, 1);
  637. }
  638. if (vha->vp_idx) {
  639. atomic_set(&vha->vp_state, VP_FAILED);
  640. fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
  641. }
  642. vha->flags.management_server_logged_in = 0;
  643. ha->link_data_rate = PORT_SPEED_UNKNOWN;
  644. qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
  645. break;
  646. case MBA_LIP_RESET: /* LIP reset occurred */
  647. ql_dbg(ql_dbg_async, vha, 0x500c,
  648. "LIP reset occurred (%x).\n", mb[1]);
  649. if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
  650. atomic_set(&vha->loop_state, LOOP_DOWN);
  651. atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
  652. qla2x00_mark_all_devices_lost(vha, 1);
  653. }
  654. if (vha->vp_idx) {
  655. atomic_set(&vha->vp_state, VP_FAILED);
  656. fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
  657. }
  658. set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
  659. ha->operating_mode = LOOP;
  660. vha->flags.management_server_logged_in = 0;
  661. qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
  662. break;
  663. /* case MBA_DCBX_COMPLETE: */
  664. case MBA_POINT_TO_POINT: /* Point-to-Point */
  665. if (IS_QLA2100(ha))
  666. break;
  667. if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
  668. ql_dbg(ql_dbg_async, vha, 0x500d,
  669. "DCBX Completed -- %04x %04x %04x.\n",
  670. mb[1], mb[2], mb[3]);
  671. if (ha->notify_dcbx_comp)
  672. complete(&ha->dcbx_comp);
  673. } else
  674. ql_dbg(ql_dbg_async, vha, 0x500e,
  675. "Asynchronous P2P MODE received.\n");
  676. /*
  677. * Until there's a transition from loop down to loop up, treat
  678. * this as loop down only.
  679. */
  680. if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
  681. atomic_set(&vha->loop_state, LOOP_DOWN);
  682. if (!atomic_read(&vha->loop_down_timer))
  683. atomic_set(&vha->loop_down_timer,
  684. LOOP_DOWN_TIME);
  685. qla2x00_mark_all_devices_lost(vha, 1);
  686. }
  687. if (vha->vp_idx) {
  688. atomic_set(&vha->vp_state, VP_FAILED);
  689. fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
  690. }
  691. if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
  692. set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
  693. set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
  694. set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
  695. ha->flags.gpsc_supported = 1;
  696. vha->flags.management_server_logged_in = 0;
  697. break;
  698. case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
  699. if (IS_QLA2100(ha))
  700. break;
  701. ql_dbg(ql_dbg_async, vha, 0x500f,
  702. "Configuration change detected: value=%x.\n", mb[1]);
  703. if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
  704. atomic_set(&vha->loop_state, LOOP_DOWN);
  705. if (!atomic_read(&vha->loop_down_timer))
  706. atomic_set(&vha->loop_down_timer,
  707. LOOP_DOWN_TIME);
  708. qla2x00_mark_all_devices_lost(vha, 1);
  709. }
  710. if (vha->vp_idx) {
  711. atomic_set(&vha->vp_state, VP_FAILED);
  712. fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
  713. }
  714. set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
  715. set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
  716. break;
  717. case MBA_PORT_UPDATE: /* Port database update */
  718. /*
  719. * Handle only global and vn-port update events
  720. *
  721. * Relevant inputs:
  722. * mb[1] = N_Port handle of changed port
  723. * OR 0xffff for global event
  724. * mb[2] = New login state
  725. * 7 = Port logged out
  726. * mb[3] = LSB is vp_idx, 0xff = all vps
  727. *
  728. * Skip processing if:
  729. * Event is global, vp_idx is NOT all vps,
  730. * vp_idx does not match
  731. * Event is not global, vp_idx does not match
  732. */
  733. if (IS_QLA2XXX_MIDTYPE(ha) &&
  734. ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
  735. (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
  736. break;
  737. /* Global event -- port logout or port unavailable. */
  738. if (mb[1] == 0xffff && mb[2] == 0x7) {
  739. ql_dbg(ql_dbg_async, vha, 0x5010,
  740. "Port unavailable %04x %04x %04x.\n",
  741. mb[1], mb[2], mb[3]);
  742. ql_log(ql_log_warn, vha, 0x505e,
  743. "Link is offline.\n");
  744. if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
  745. atomic_set(&vha->loop_state, LOOP_DOWN);
  746. atomic_set(&vha->loop_down_timer,
  747. LOOP_DOWN_TIME);
  748. vha->device_flags |= DFLG_NO_CABLE;
  749. qla2x00_mark_all_devices_lost(vha, 1);
  750. }
  751. if (vha->vp_idx) {
  752. atomic_set(&vha->vp_state, VP_FAILED);
  753. fc_vport_set_state(vha->fc_vport,
  754. FC_VPORT_FAILED);
  755. qla2x00_mark_all_devices_lost(vha, 1);
  756. }
  757. vha->flags.management_server_logged_in = 0;
  758. ha->link_data_rate = PORT_SPEED_UNKNOWN;
  759. break;
  760. }
  761. /*
  762. * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
  763. * event etc. earlier indicating loop is down) then process
  764. * it. Otherwise ignore it and Wait for RSCN to come in.
  765. */
  766. atomic_set(&vha->loop_down_timer, 0);
  767. if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) {
  768. ql_dbg(ql_dbg_async, vha, 0x5011,
  769. "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
  770. mb[1], mb[2], mb[3]);
  771. qlt_async_event(mb[0], vha, mb);
  772. break;
  773. }
  774. ql_dbg(ql_dbg_async, vha, 0x5012,
  775. "Port database changed %04x %04x %04x.\n",
  776. mb[1], mb[2], mb[3]);
  777. ql_log(ql_log_warn, vha, 0x505f,
  778. "Link is operational (%s Gbps).\n",
  779. qla2x00_get_link_speed_str(ha, ha->link_data_rate));
  780. /*
  781. * Mark all devices as missing so we will login again.
  782. */
  783. atomic_set(&vha->loop_state, LOOP_UP);
  784. qla2x00_mark_all_devices_lost(vha, 1);
  785. if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
  786. set_bit(SCR_PENDING, &vha->dpc_flags);
  787. set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
  788. set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
  789. qlt_async_event(mb[0], vha, mb);
  790. break;
  791. case MBA_RSCN_UPDATE: /* State Change Registration */
  792. /* Check if the Vport has issued a SCR */
  793. if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
  794. break;
  795. /* Only handle SCNs for our Vport index. */
  796. if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
  797. break;
  798. ql_dbg(ql_dbg_async, vha, 0x5013,
  799. "RSCN database changed -- %04x %04x %04x.\n",
  800. mb[1], mb[2], mb[3]);
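  /*
   * Rebuild the affected 24-bit port ID from the RSCN payload (domain
   * from mb[1], area/AL_PA from mb[2]) and drop the event if it names
   * this host's own port ID.
   */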
  801. rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
  802. host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
  803. | vha->d_id.b.al_pa;
  804. if (rscn_entry == host_pid) {
  805. ql_dbg(ql_dbg_async, vha, 0x5014,
  806. "Ignoring RSCN update to local host "
  807. "port ID (%06x).\n", host_pid);
  808. break;
  809. }
  810. /* Ignore reserved bits from RSCN-payload. */
  811. rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
  812. /* Skip RSCNs for virtual ports on the same physical port */
  813. if (qla2x00_is_a_vp_did(vha, rscn_entry))
  814. break;
  815. atomic_set(&vha->loop_down_timer, 0);
  816. vha->flags.management_server_logged_in = 0;
  817. set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
  818. set_bit(RSCN_UPDATE, &vha->dpc_flags);
  819. qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
  820. break;
  821. /* case MBA_RIO_RESPONSE: */
  822. case MBA_ZIO_RESPONSE:
  823. ql_dbg(ql_dbg_async, vha, 0x5015,
  824. "[R|Z]IO update completion.\n");
  825. if (IS_FWI2_CAPABLE(ha))
  826. qla24xx_process_response_queue(vha, rsp);
  827. else
  828. qla2x00_process_response_queue(rsp);
  829. break;
  830. case MBA_DISCARD_RND_FRAME:
  831. ql_dbg(ql_dbg_async, vha, 0x5016,
  832. "Discard RND Frame -- %04x %04x %04x.\n",
  833. mb[1], mb[2], mb[3]);
  834. break;
  835. case MBA_TRACE_NOTIFICATION:
  836. ql_dbg(ql_dbg_async, vha, 0x5017,
  837. "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
  838. break;
  839. case MBA_ISP84XX_ALERT:
  840. ql_dbg(ql_dbg_async, vha, 0x5018,
  841. "ISP84XX Alert Notification -- %04x %04x %04x.\n",
  842. mb[1], mb[2], mb[3]);
  843. spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
  844. switch (mb[1]) {
  845. case A84_PANIC_RECOVERY:
  846. ql_log(ql_log_info, vha, 0x5019,
  847. "Alert 84XX: panic recovery %04x %04x.\n",
  848. mb[2], mb[3]);
  849. break;
  850. case A84_OP_LOGIN_COMPLETE:
  851. ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
  852. ql_log(ql_log_info, vha, 0x501a,
  853. "Alert 84XX: firmware version %x.\n",
  854. ha->cs84xx->op_fw_version);
  855. break;
  856. case A84_DIAG_LOGIN_COMPLETE:
  857. ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
  858. ql_log(ql_log_info, vha, 0x501b,
  859. "Alert 84XX: diagnostic firmware version %x.\n",
  860. ha->cs84xx->diag_fw_version);
  861. break;
  862. case A84_GOLD_LOGIN_COMPLETE:
  863. ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
  864. ha->cs84xx->fw_update = 1;
  865. ql_log(ql_log_info, vha, 0x501c,
  866. "Alert 84XX: gold firmware version %x.\n",
  867. ha->cs84xx->gold_fw_version);
  868. break;
  869. default:
  870. ql_log(ql_log_warn, vha, 0x501d,
  871. "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
  872. mb[1], mb[2], mb[3]);
  873. }
  874. spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
  875. break;
  876. case MBA_DCBX_START:
  877. ql_dbg(ql_dbg_async, vha, 0x501e,
  878. "DCBX Started -- %04x %04x %04x.\n",
  879. mb[1], mb[2], mb[3]);
  880. break;
  881. case MBA_DCBX_PARAM_UPDATE:
  882. ql_dbg(ql_dbg_async, vha, 0x501f,
  883. "DCBX Parameters Updated -- %04x %04x %04x.\n",
  884. mb[1], mb[2], mb[3]);
  885. break;
  886. case MBA_FCF_CONF_ERR:
  887. ql_dbg(ql_dbg_async, vha, 0x5020,
  888. "FCF Configuration Error -- %04x %04x %04x.\n",
  889. mb[1], mb[2], mb[3]);
  890. break;
  891. case MBA_IDC_NOTIFY:
  892. if (IS_QLA8031(vha->hw)) {
  893. mb[4] = RD_REG_WORD(&reg24->mailbox4);
  894. if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
  895. (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
  896. (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
  897. set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
  898. /*
  899. * Extend loop down timer since port is active.
  900. */
  901. if (atomic_read(&vha->loop_state) == LOOP_DOWN)
  902. atomic_set(&vha->loop_down_timer,
  903. LOOP_DOWN_TIME);
  904. qla2xxx_wake_dpc(vha);
  905. }
  906. }
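  /* Fallthru */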
  907. case MBA_IDC_COMPLETE:
  908. if (ha->notify_lb_portup_comp)
  909. complete(&ha->lb_portup_comp);
  910. /* Fallthru */
  911. case MBA_IDC_TIME_EXT:
  912. if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
  913. qla81xx_idc_event(vha, mb[0], mb[1]);
  914. break;
  915. case MBA_IDC_AEN:
  916. mb[4] = RD_REG_WORD(&reg24->mailbox4);
  917. mb[5] = RD_REG_WORD(&reg24->mailbox5);
  918. mb[6] = RD_REG_WORD(&reg24->mailbox6);
  919. mb[7] = RD_REG_WORD(&reg24->mailbox7);
  920. qla83xx_handle_8200_aen(vha, mb);
  921. break;
  922. default:
  923. ql_dbg(ql_dbg_async, vha, 0x5057,
  924. "Unknown AEN:%04x %04x %04x %04x\n",
  925. mb[0], mb[1], mb[2], mb[3]);
  926. }
  927. qlt_async_event(mb[0], vha, mb);
  928. if (!vha->vp_idx && ha->num_vhosts)
  929. qla2x00_alert_all_vps(rsp, mb);
  930. }
  931. /**
  932. * qla2x00_process_completed_request() - Process a Fast Post response.
  933. * @vha: SCSI driver HA context
  934. * @index: SRB index
  935. */
  936. void
  937. qla2x00_process_completed_request(struct scsi_qla_host *vha,
  938. struct req_que *req, uint32_t index)
  939. {
  940. srb_t *sp;
  941. struct qla_hw_data *ha = vha->hw;
  942. /* Validate handle. */
  943. if (index >= req->num_outstanding_cmds) {
  944. ql_log(ql_log_warn, vha, 0x3014,
  945. "Invalid SCSI command index (%x).\n", index);
  946. if (IS_QLA82XX(ha))
  947. set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
  948. else
  949. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  950. return;
  951. }
  952. sp = req->outstanding_cmds[index];
  953. if (sp) {
  954. /* Free outstanding command slot. */
  955. req->outstanding_cmds[index] = NULL;
  956. /* Save ISP completion status */
  957. sp->done(ha, sp, DID_OK << 16);
  958. } else {
  959. ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
  960. if (IS_QLA82XX(ha))
  961. set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
  962. else
  963. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  964. }
  965. }
  966. srb_t *
  967. qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
  968. struct req_que *req, void *iocb)
  969. {
  970. struct qla_hw_data *ha = vha->hw;
  971. sts_entry_t *pkt = iocb;
  972. srb_t *sp = NULL;
  973. uint16_t index;
  974. index = LSW(pkt->handle);
  975. if (index >= req->num_outstanding_cmds) {
  976. ql_log(ql_log_warn, vha, 0x5031,
  977. "Invalid command index (%x).\n", index);
  978. if (IS_QLA82XX(ha))
  979. set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
  980. else
  981. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  982. goto done;
  983. }
  984. sp = req->outstanding_cmds[index];
  985. if (!sp) {
  986. ql_log(ql_log_warn, vha, 0x5032,
  987. "Invalid completion handle (%x) -- timed-out.\n", index);
  988. return sp;
  989. }
  990. if (sp->handle != index) {
  991. ql_log(ql_log_warn, vha, 0x5033,
  992. "SRB handle (%x) mismatch %x.\n", sp->handle, index);
  993. return NULL;
  994. }
  995. req->outstanding_cmds[index] = NULL;
  996. done:
  997. return sp;
  998. }
  999. static void
  1000. qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
  1001. struct mbx_entry *mbx)
  1002. {
  1003. const char func[] = "MBX-IOCB";
  1004. const char *type;
  1005. fc_port_t *fcport;
  1006. srb_t *sp;
  1007. struct srb_iocb *lio;
  1008. uint16_t *data;
  1009. uint16_t status;
  1010. sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
  1011. if (!sp)
  1012. return;
  1013. lio = &sp->u.iocb_cmd;
  1014. type = sp->name;
  1015. fcport = sp->fcport;
  1016. data = lio->u.logio.data;
  1017. data[0] = MBS_COMMAND_ERROR;
  1018. data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
  1019. QLA_LOGIO_LOGIN_RETRIED : 0;
  1020. if (mbx->entry_status) {
  1021. ql_dbg(ql_dbg_async, vha, 0x5043,
  1022. "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
  1023. "entry-status=%x status=%x state-flag=%x "
  1024. "status-flags=%x.\n", type, sp->handle,
  1025. fcport->d_id.b.domain, fcport->d_id.b.area,
  1026. fcport->d_id.b.al_pa, mbx->entry_status,
  1027. le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
  1028. le16_to_cpu(mbx->status_flags));
  1029. ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
  1030. (uint8_t *)mbx, sizeof(*mbx));
  1031. goto logio_done;
  1032. }
  1033. status = le16_to_cpu(mbx->status);
  1034. if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
  1035. le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
  1036. status = 0;
  1037. if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
  1038. ql_dbg(ql_dbg_async, vha, 0x5045,
  1039. "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
  1040. type, sp->handle, fcport->d_id.b.domain,
  1041. fcport->d_id.b.area, fcport->d_id.b.al_pa,
  1042. le16_to_cpu(mbx->mb1));
  1043. data[0] = MBS_COMMAND_COMPLETE;
  1044. if (sp->type == SRB_LOGIN_CMD) {
  1045. fcport->port_type = FCT_TARGET;
  1046. if (le16_to_cpu(mbx->mb1) & BIT_0)
  1047. fcport->port_type = FCT_INITIATOR;
  1048. else if (le16_to_cpu(mbx->mb1) & BIT_1)
  1049. fcport->flags |= FCF_FCP2_DEVICE;
  1050. }
  1051. goto logio_done;
  1052. }
  1053. data[0] = le16_to_cpu(mbx->mb0);
  1054. switch (data[0]) {
  1055. case MBS_PORT_ID_USED:
  1056. data[1] = le16_to_cpu(mbx->mb1);
  1057. break;
  1058. case MBS_LOOP_ID_USED:
  1059. break;
  1060. default:
  1061. data[0] = MBS_COMMAND_ERROR;
  1062. break;
  1063. }
  1064. ql_log(ql_log_warn, vha, 0x5046,
  1065. "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
  1066. "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
  1067. fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
  1068. status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
  1069. le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
  1070. le16_to_cpu(mbx->mb7));
  1071. logio_done:
  1072. sp->done(vha, sp, 0);
  1073. }
  1074. static void
  1075. qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
  1076. sts_entry_t *pkt, int iocb_type)
  1077. {
  1078. const char func[] = "CT_IOCB";
  1079. const char *type;
  1080. srb_t *sp;
  1081. struct fc_bsg_job *bsg_job;
  1082. uint16_t comp_status;
  1083. int res;
  1084. sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
  1085. if (!sp)
  1086. return;
  1087. bsg_job = sp->u.bsg_job;
  1088. type = "ct pass-through";
  1089. comp_status = le16_to_cpu(pkt->comp_status);
  1090. /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
  1091. * fc payload to the caller
  1092. */
  1093. bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
  1094. bsg_job->reply_len = sizeof(struct fc_bsg_reply);
  1095. if (comp_status != CS_COMPLETE) {
  1096. if (comp_status == CS_DATA_UNDERRUN) {
  1097. res = DID_OK << 16;
  1098. bsg_job->reply->reply_payload_rcv_len =
  1099. le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
  1100. ql_log(ql_log_warn, vha, 0x5048,
  1101. "CT pass-through-%s error "
  1102. "comp_status-status=0x%x total_byte = 0x%x.\n",
  1103. type, comp_status,
  1104. bsg_job->reply->reply_payload_rcv_len);
  1105. } else {
  1106. ql_log(ql_log_warn, vha, 0x5049,
  1107. "CT pass-through-%s error "
  1108. "comp_status-status=0x%x.\n", type, comp_status);
  1109. res = DID_ERROR << 16;
  1110. bsg_job->reply->reply_payload_rcv_len = 0;
  1111. }
  1112. ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
  1113. (uint8_t *)pkt, sizeof(*pkt));
  1114. } else {
  1115. res = DID_OK << 16;
  1116. bsg_job->reply->reply_payload_rcv_len =
  1117. bsg_job->reply_payload.payload_len;
  1118. bsg_job->reply_len = 0;
  1119. }
  1120. sp->done(vha, sp, res);
  1121. }
  1122. static void
  1123. qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
  1124. struct sts_entry_24xx *pkt, int iocb_type)
  1125. {
  1126. const char func[] = "ELS_CT_IOCB";
  1127. const char *type;
  1128. srb_t *sp;
  1129. struct fc_bsg_job *bsg_job;
  1130. uint16_t comp_status;
  1131. uint32_t fw_status[3];
  1132. uint8_t* fw_sts_ptr;
  1133. int res;
  1134. sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
  1135. if (!sp)
  1136. return;
  1137. bsg_job = sp->u.bsg_job;
  1138. type = NULL;
  1139. switch (sp->type) {
  1140. case SRB_ELS_CMD_RPT:
  1141. case SRB_ELS_CMD_HST:
  1142. type = "els";
  1143. break;
  1144. case SRB_CT_CMD:
  1145. type = "ct pass-through";
  1146. break;
  1147. default:
  1148. ql_dbg(ql_dbg_user, vha, 0x503e,
  1149. "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
  1150. return;
  1151. }
  1152. comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
  1153. fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
  1154. fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
  1155. /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
  1156. * fc payload to the caller
  1157. */
  1158. bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
  1159. bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
  1160. if (comp_status != CS_COMPLETE) {
  1161. if (comp_status == CS_DATA_UNDERRUN) {
  1162. res = DID_OK << 16;
  1163. bsg_job->reply->reply_payload_rcv_len =
  1164. le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
  1165. ql_dbg(ql_dbg_user, vha, 0x503f,
  1166. "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
  1167. "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
  1168. type, sp->handle, comp_status, fw_status[1], fw_status[2],
  1169. le16_to_cpu(((struct els_sts_entry_24xx *)
  1170. pkt)->total_byte_count));
  1171. fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
  1172. memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
  1173. }
  1174. else {
  1175. ql_dbg(ql_dbg_user, vha, 0x5040,
  1176. "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
  1177. "error subcode 1=0x%x error subcode 2=0x%x.\n",
  1178. type, sp->handle, comp_status,
  1179. le16_to_cpu(((struct els_sts_entry_24xx *)
  1180. pkt)->error_subcode_1),
  1181. le16_to_cpu(((struct els_sts_entry_24xx *)
  1182. pkt)->error_subcode_2));
  1183. res = DID_ERROR << 16;
  1184. bsg_job->reply->reply_payload_rcv_len = 0;
  1185. fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
  1186. memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
  1187. }
  1188. ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
  1189. (uint8_t *)pkt, sizeof(*pkt));
  1190. }
  1191. else {
  1192. res = DID_OK << 16;
  1193. bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
  1194. bsg_job->reply_len = 0;
  1195. }
  1196. sp->done(vha, sp, res);
  1197. }
  1198. static void
  1199. qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
  1200. struct logio_entry_24xx *logio)
  1201. {
  1202. const char func[] = "LOGIO-IOCB";
  1203. const char *type;
  1204. fc_port_t *fcport;
  1205. srb_t *sp;
  1206. struct srb_iocb *lio;
  1207. uint16_t *data;
  1208. uint32_t iop[2];
  1209. sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
  1210. if (!sp)
  1211. return;
  1212. lio = &sp->u.iocb_cmd;
  1213. type = sp->name;
  1214. fcport = sp->fcport;
  1215. data = lio->u.logio.data;
  1216. data[0] = MBS_COMMAND_ERROR;
  1217. data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
  1218. QLA_LOGIO_LOGIN_RETRIED : 0;
  1219. if (logio->entry_status) {
  1220. ql_log(ql_log_warn, fcport->vha, 0x5034,
  1221. "Async-%s error entry - hdl=%x"
  1222. "portid=%02x%02x%02x entry-status=%x.\n",
  1223. type, sp->handle, fcport->d_id.b.domain,
  1224. fcport->d_id.b.area, fcport->d_id.b.al_pa,
  1225. logio->entry_status);
  1226. ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
  1227. (uint8_t *)logio, sizeof(*logio));
  1228. goto logio_done;
  1229. }
  1230. if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
  1231. ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
  1232. "Async-%s complete - hdl=%x portid=%02x%02x%02x "
  1233. "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
  1234. fcport->d_id.b.area, fcport->d_id.b.al_pa,
  1235. le32_to_cpu(logio->io_parameter[0]));
  1236. data[0] = MBS_COMMAND_COMPLETE;
  1237. if (sp->type != SRB_LOGIN_CMD)
  1238. goto logio_done;
  1239. iop[0] = le32_to_cpu(logio->io_parameter[0]);
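  /*
   * Decode the login status from io_parameter[0]: BIT_4 marks a target
   * port (BIT_8 additionally flags an FCP-2 device), BIT_5 an
   * initiator, and BIT_7 confirmed-completion support; parameters 7-10
   * report class 2/3 service support.
   */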
  1240. if (iop[0] & BIT_4) {
  1241. fcport->port_type = FCT_TARGET;
  1242. if (iop[0] & BIT_8)
  1243. fcport->flags |= FCF_FCP2_DEVICE;
  1244. } else if (iop[0] & BIT_5)
  1245. fcport->port_type = FCT_INITIATOR;
  1246. if (iop[0] & BIT_7)
  1247. fcport->flags |= FCF_CONF_COMP_SUPPORTED;
  1248. if (logio->io_parameter[7] || logio->io_parameter[8])
  1249. fcport->supported_classes |= FC_COS_CLASS2;
  1250. if (logio->io_parameter[9] || logio->io_parameter[10])
  1251. fcport->supported_classes |= FC_COS_CLASS3;
  1252. goto logio_done;
  1253. }
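/* Command did not complete cleanly: translate the firmware's I/O parameter subcode into a mailbox-style status for the caller. */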
  1254. iop[0] = le32_to_cpu(logio->io_parameter[0]);
  1255. iop[1] = le32_to_cpu(logio->io_parameter[1]);
  1256. switch (iop[0]) {
  1257. case LSC_SCODE_PORTID_USED:
  1258. data[0] = MBS_PORT_ID_USED;
  1259. data[1] = LSW(iop[1]);
  1260. break;
  1261. case LSC_SCODE_NPORT_USED:
  1262. data[0] = MBS_LOOP_ID_USED;
  1263. break;
  1264. default:
  1265. data[0] = MBS_COMMAND_ERROR;
  1266. break;
  1267. }
  1268. ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
  1269. "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
  1270. "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
  1271. fcport->d_id.b.area, fcport->d_id.b.al_pa,
  1272. le16_to_cpu(logio->comp_status),
  1273. le32_to_cpu(logio->io_parameter[0]),
  1274. le32_to_cpu(logio->io_parameter[1]));
  1275. logio_done:
  1276. sp->done(vha, sp, 0);
  1277. }
  1278. static void
  1279. qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
  1280. struct tsk_mgmt_entry *tsk)
  1281. {
  1282. const char func[] = "TMF-IOCB";
  1283. const char *type;
  1284. fc_port_t *fcport;
  1285. srb_t *sp;
  1286. struct srb_iocb *iocb;
  1287. struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
  1288. int error = 1;
  1289. sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
  1290. if (!sp)
  1291. return;
  1292. iocb = &sp->u.iocb_cmd;
  1293. type = sp->name;
  1294. fcport = sp->fcport;
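/* Validate the status IOCB step by step; any failed check below marks the TMF as errored and dumps the entry for debugging. */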
  1295. if (sts->entry_status) {
  1296. ql_log(ql_log_warn, fcport->vha, 0x5038,
  1297. "Async-%s error - hdl=%x entry-status(%x).\n",
  1298. type, sp->handle, sts->entry_status);
  1299. } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
  1300. ql_log(ql_log_warn, fcport->vha, 0x5039,
  1301. "Async-%s error - hdl=%x completion status(%x).\n",
  1302. type, sp->handle, sts->comp_status);
  1303. } else if (!(le16_to_cpu(sts->scsi_status) &
  1304. SS_RESPONSE_INFO_LEN_VALID)) {
  1305. ql_log(ql_log_warn, fcport->vha, 0x503a,
  1306. "Async-%s error - hdl=%x no response info(%x).\n",
  1307. type, sp->handle, sts->scsi_status);
  1308. } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
  1309. ql_log(ql_log_warn, fcport->vha, 0x503b,
  1310. "Async-%s error - hdl=%x not enough response(%d).\n",
  1311. type, sp->handle, sts->rsp_data_len);
  1312. } else if (sts->data[3]) {
  1313. ql_log(ql_log_warn, fcport->vha, 0x503c,
  1314. "Async-%s error - hdl=%x response(%x).\n",
  1315. type, sp->handle, sts->data[3]);
  1316. } else {
  1317. error = 0;
  1318. }
  1319. if (error) {
  1320. iocb->u.tmf.data = error;
  1321. ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
  1322. (uint8_t *)sts, sizeof(*sts));
  1323. }
  1324. sp->done(vha, sp, 0);
  1325. }
  1326. /**
  1327. * qla2x00_process_response_queue() - Process response queue entries.
1328. * @rsp: response queue
  1329. */
  1330. void
  1331. qla2x00_process_response_queue(struct rsp_que *rsp)
  1332. {
  1333. struct scsi_qla_host *vha;
  1334. struct qla_hw_data *ha = rsp->hw;
  1335. struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
  1336. sts_entry_t *pkt;
  1337. uint16_t handle_cnt;
  1338. uint16_t cnt;
  1339. vha = pci_get_drvdata(ha->pdev);
  1340. if (!vha->flags.online)
  1341. return;
  1342. while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
  1343. pkt = (sts_entry_t *)rsp->ring_ptr;
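/* Advance the response ring index, wrapping to the start of the ring when the end is reached. */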
  1344. rsp->ring_index++;
  1345. if (rsp->ring_index == rsp->length) {
  1346. rsp->ring_index = 0;
  1347. rsp->ring_ptr = rsp->ring;
  1348. } else {
  1349. rsp->ring_ptr++;
  1350. }
  1351. if (pkt->entry_status != 0) {
  1352. qla2x00_error_entry(vha, rsp, pkt);
  1353. ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
  1354. wmb();
  1355. continue;
  1356. }
  1357. switch (pkt->entry_type) {
  1358. case STATUS_TYPE:
  1359. qla2x00_status_entry(vha, rsp, pkt);
  1360. break;
  1361. case STATUS_TYPE_21:
  1362. handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
  1363. for (cnt = 0; cnt < handle_cnt; cnt++) {
  1364. qla2x00_process_completed_request(vha, rsp->req,
  1365. ((sts21_entry_t *)pkt)->handle[cnt]);
  1366. }
  1367. break;
  1368. case STATUS_TYPE_22:
  1369. handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
  1370. for (cnt = 0; cnt < handle_cnt; cnt++) {
  1371. qla2x00_process_completed_request(vha, rsp->req,
  1372. ((sts22_entry_t *)pkt)->handle[cnt]);
  1373. }
  1374. break;
  1375. case STATUS_CONT_TYPE:
  1376. qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
  1377. break;
  1378. case MBX_IOCB_TYPE:
  1379. qla2x00_mbx_iocb_entry(vha, rsp->req,
  1380. (struct mbx_entry *)pkt);
  1381. break;
  1382. case CT_IOCB_TYPE:
  1383. qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
  1384. break;
  1385. default:
  1386. /* Type Not Supported. */
  1387. ql_log(ql_log_warn, vha, 0x504a,
  1388. "Received unknown response pkt type %x "
  1389. "entry status=%x.\n",
  1390. pkt->entry_type, pkt->entry_status);
  1391. break;
  1392. }
  1393. ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
  1394. wmb();
  1395. }
  1396. /* Adjust ring index */
  1397. WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
  1398. }
  1399. static inline void
  1400. qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
  1401. uint32_t sense_len, struct rsp_que *rsp, int res)
  1402. {
  1403. struct scsi_qla_host *vha = sp->fcport->vha;
  1404. struct scsi_cmnd *cp = GET_CMD_SP(sp);
  1405. uint32_t track_sense_len;
  1406. if (sense_len >= SCSI_SENSE_BUFFERSIZE)
  1407. sense_len = SCSI_SENSE_BUFFERSIZE;
  1408. SET_CMD_SENSE_LEN(sp, sense_len);
  1409. SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
  1410. track_sense_len = sense_len;
  1411. if (sense_len > par_sense_len)
  1412. sense_len = par_sense_len;
  1413. memcpy(cp->sense_buffer, sense_data, sense_len);
  1414. SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
  1415. track_sense_len -= sense_len;
  1416. SET_CMD_SENSE_LEN(sp, track_sense_len);
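/* Sense data that did not fit in this entry arrives in status continuation IOCBs; remember the SRB so they can append the remainder. */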
  1417. if (track_sense_len != 0) {
  1418. rsp->status_srb = sp;
  1419. cp->result = res;
  1420. }
  1421. if (sense_len) {
  1422. ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
  1423. "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
  1424. sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
  1425. cp);
  1426. ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
  1427. cp->sense_buffer, sense_len);
  1428. }
  1429. }
  1430. struct scsi_dif_tuple {
  1431. __be16 guard; /* Checksum */
  1432. __be16 app_tag; /* APPL identifier */
  1433. __be32 ref_tag; /* Target LBA or indirect LBA */
  1434. };
  1435. /*
  1436. * Checks the guard or meta-data for the type of error
  1437. * detected by the HBA. In case of errors, we set the
  1438. * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
1439. * to indicate to the kernel that the HBA detected an error.
  1440. */
  1441. static inline int
  1442. qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
  1443. {
  1444. struct scsi_qla_host *vha = sp->fcport->vha;
  1445. struct scsi_cmnd *cmd = GET_CMD_SP(sp);
  1446. uint8_t *ap = &sts24->data[12];
  1447. uint8_t *ep = &sts24->data[20];
  1448. uint32_t e_ref_tag, a_ref_tag;
  1449. uint16_t e_app_tag, a_app_tag;
  1450. uint16_t e_guard, a_guard;
  1451. /*
  1452. * swab32 of the "data" field in the beginning of qla2x00_status_entry()
  1453. * would make guard field appear at offset 2
  1454. */
  1455. a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
  1456. a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
  1457. a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
  1458. e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
  1459. e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
  1460. e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
  1461. ql_dbg(ql_dbg_io, vha, 0x3023,
  1462. "iocb(s) %p Returned STATUS.\n", sts24);
  1463. ql_dbg(ql_dbg_io, vha, 0x3024,
  1464. "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
  1465. " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
  1466. " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
  1467. cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
  1468. a_app_tag, e_app_tag, a_guard, e_guard);
  1469. /*
  1470. * Ignore sector if:
  1471. * For type 3: ref & app tag is all 'f's
  1472. * For type 0,1,2: app tag is all 'f's
  1473. */
  1474. if ((a_app_tag == 0xffff) &&
  1475. ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
  1476. (a_ref_tag == 0xffffffff))) {
  1477. uint32_t blocks_done, resid;
  1478. sector_t lba_s = scsi_get_lba(cmd);
  1479. /* 2TB boundary case covered automatically with this */
  1480. blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
  1481. resid = scsi_bufflen(cmd) - (blocks_done *
  1482. cmd->device->sector_size);
  1483. scsi_set_resid(cmd, resid);
  1484. cmd->result = DID_OK << 16;
  1485. /* Update protection tag */
  1486. if (scsi_prot_sg_count(cmd)) {
  1487. uint32_t i, j = 0, k = 0, num_ent;
  1488. struct scatterlist *sg;
  1489. struct sd_dif_tuple *spt;
  1490. /* Patch the corresponding protection tags */
  1491. scsi_for_each_prot_sg(cmd, sg,
  1492. scsi_prot_sg_count(cmd), i) {
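/* Each protection-information tuple is 8 bytes (guard, app tag, ref tag), hence the division by 8. */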
  1493. num_ent = sg_dma_len(sg) / 8;
  1494. if (k + num_ent < blocks_done) {
  1495. k += num_ent;
  1496. continue;
  1497. }
  1498. j = blocks_done - k - 1;
  1499. k = blocks_done;
  1500. break;
  1501. }
  1502. if (k != blocks_done) {
  1503. ql_log(ql_log_warn, vha, 0x302f,
  1504. "unexpected tag values tag:lba=%x:%llx)\n",
  1505. e_ref_tag, (unsigned long long)lba_s);
  1506. return 1;
  1507. }
  1508. spt = page_address(sg_page(sg)) + sg->offset;
  1509. spt += j;
  1510. spt->app_tag = 0xffff;
  1511. if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
  1512. spt->ref_tag = 0xffffffff;
  1513. }
  1514. return 0;
  1515. }
  1516. /* check guard */
  1517. if (e_guard != a_guard) {
  1518. scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
  1519. 0x10, 0x1);
  1520. set_driver_byte(cmd, DRIVER_SENSE);
  1521. set_host_byte(cmd, DID_ABORT);
  1522. cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
  1523. return 1;
  1524. }
  1525. /* check ref tag */
  1526. if (e_ref_tag != a_ref_tag) {
  1527. scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
  1528. 0x10, 0x3);
  1529. set_driver_byte(cmd, DRIVER_SENSE);
  1530. set_host_byte(cmd, DID_ABORT);
  1531. cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
  1532. return 1;
  1533. }
  1534. /* check appl tag */
  1535. if (e_app_tag != a_app_tag) {
  1536. scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
  1537. 0x10, 0x2);
  1538. set_driver_byte(cmd, DRIVER_SENSE);
  1539. set_host_byte(cmd, DID_ABORT);
  1540. cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
  1541. return 1;
  1542. }
  1543. return 1;
  1544. }
  1545. static void
  1546. qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
  1547. struct req_que *req, uint32_t index)
  1548. {
  1549. struct qla_hw_data *ha = vha->hw;
  1550. srb_t *sp;
  1551. uint16_t comp_status;
  1552. uint16_t scsi_status;
  1553. uint16_t thread_id;
  1554. uint32_t rval = EXT_STATUS_OK;
  1555. struct fc_bsg_job *bsg_job = NULL;
  1556. sts_entry_t *sts;
  1557. struct sts_entry_24xx *sts24;
  1558. sts = (sts_entry_t *) pkt;
  1559. sts24 = (struct sts_entry_24xx *) pkt;
  1560. /* Validate handle. */
  1561. if (index >= req->num_outstanding_cmds) {
  1562. ql_log(ql_log_warn, vha, 0x70af,
  1563. "Invalid SCSI completion handle 0x%x.\n", index);
  1564. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  1565. return;
  1566. }
  1567. sp = req->outstanding_cmds[index];
  1568. if (sp) {
  1569. /* Free outstanding command slot. */
  1570. req->outstanding_cmds[index] = NULL;
  1571. bsg_job = sp->u.bsg_job;
  1572. } else {
  1573. ql_log(ql_log_warn, vha, 0x70b0,
  1574. "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
  1575. req->id, index);
  1576. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  1577. return;
  1578. }
  1579. if (IS_FWI2_CAPABLE(ha)) {
  1580. comp_status = le16_to_cpu(sts24->comp_status);
  1581. scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
  1582. } else {
  1583. comp_status = le16_to_cpu(sts->comp_status);
  1584. scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
  1585. }
  1586. thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
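/* Map the firmware completion status onto the extended status codes returned in the vendor-specific BSG reply. */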
  1587. switch (comp_status) {
  1588. case CS_COMPLETE:
  1589. if (scsi_status == 0) {
  1590. bsg_job->reply->reply_payload_rcv_len =
  1591. bsg_job->reply_payload.payload_len;
  1592. rval = EXT_STATUS_OK;
  1593. }
  1594. goto done;
  1595. case CS_DATA_OVERRUN:
  1596. ql_dbg(ql_dbg_user, vha, 0x70b1,
  1597. "Command completed with date overrun thread_id=%d\n",
  1598. thread_id);
  1599. rval = EXT_STATUS_DATA_OVERRUN;
  1600. break;
  1601. case CS_DATA_UNDERRUN:
  1602. ql_dbg(ql_dbg_user, vha, 0x70b2,
  1603. "Command completed with date underrun thread_id=%d\n",
  1604. thread_id);
  1605. rval = EXT_STATUS_DATA_UNDERRUN;
  1606. break;
  1607. case CS_BIDIR_RD_OVERRUN:
  1608. ql_dbg(ql_dbg_user, vha, 0x70b3,
  1609. "Command completed with read data overrun thread_id=%d\n",
  1610. thread_id);
  1611. rval = EXT_STATUS_DATA_OVERRUN;
  1612. break;
  1613. case CS_BIDIR_RD_WR_OVERRUN:
  1614. ql_dbg(ql_dbg_user, vha, 0x70b4,
  1615. "Command completed with read and write data overrun "
  1616. "thread_id=%d\n", thread_id);
  1617. rval = EXT_STATUS_DATA_OVERRUN;
  1618. break;
  1619. case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
  1620. ql_dbg(ql_dbg_user, vha, 0x70b5,
  1621. "Command completed with read data over and write data "
  1622. "underrun thread_id=%d\n", thread_id);
  1623. rval = EXT_STATUS_DATA_OVERRUN;
  1624. break;
  1625. case CS_BIDIR_RD_UNDERRUN:
  1626. ql_dbg(ql_dbg_user, vha, 0x70b6,
  1627. "Command completed with read data data underrun "
  1628. "thread_id=%d\n", thread_id);
  1629. rval = EXT_STATUS_DATA_UNDERRUN;
  1630. break;
  1631. case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
  1632. ql_dbg(ql_dbg_user, vha, 0x70b7,
  1633. "Command completed with read data under and write data "
  1634. "overrun thread_id=%d\n", thread_id);
  1635. rval = EXT_STATUS_DATA_UNDERRUN;
  1636. break;
  1637. case CS_BIDIR_RD_WR_UNDERRUN:
  1638. ql_dbg(ql_dbg_user, vha, 0x70b8,
  1639. "Command completed with read and write data underrun "
  1640. "thread_id=%d\n", thread_id);
  1641. rval = EXT_STATUS_DATA_UNDERRUN;
  1642. break;
  1643. case CS_BIDIR_DMA:
  1644. ql_dbg(ql_dbg_user, vha, 0x70b9,
  1645. "Command completed with data DMA error thread_id=%d\n",
  1646. thread_id);
  1647. rval = EXT_STATUS_DMA_ERR;
  1648. break;
  1649. case CS_TIMEOUT:
  1650. ql_dbg(ql_dbg_user, vha, 0x70ba,
  1651. "Command completed with timeout thread_id=%d\n",
  1652. thread_id);
  1653. rval = EXT_STATUS_TIMEOUT;
  1654. break;
  1655. default:
  1656. ql_dbg(ql_dbg_user, vha, 0x70bb,
  1657. "Command completed with completion status=0x%x "
  1658. "thread_id=%d\n", comp_status, thread_id);
  1659. rval = EXT_STATUS_ERR;
  1660. break;
  1661. }
  1662. bsg_job->reply->reply_payload_rcv_len = 0;
  1663. done:
  1664. /* Return the vendor specific reply to API */
  1665. bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
  1666. bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1667. /* Always return DID_OK; bsg will send the vendor-specific response
1668. * in this case only. */
1669. sp->done(vha, sp, (DID_OK << 16));
  1670. }
  1671. /**
  1672. * qla2x00_status_entry() - Process a Status IOCB entry.
1673. * @vha: SCSI driver HA context
  1674. * @pkt: Entry pointer
  1675. */
  1676. static void
  1677. qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
  1678. {
  1679. srb_t *sp;
  1680. fc_port_t *fcport;
  1681. struct scsi_cmnd *cp;
  1682. sts_entry_t *sts;
  1683. struct sts_entry_24xx *sts24;
  1684. uint16_t comp_status;
  1685. uint16_t scsi_status;
  1686. uint16_t ox_id;
  1687. uint8_t lscsi_status;
  1688. int32_t resid;
  1689. uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
  1690. fw_resid_len;
  1691. uint8_t *rsp_info, *sense_data;
  1692. struct qla_hw_data *ha = vha->hw;
  1693. uint32_t handle;
  1694. uint16_t que;
  1695. struct req_que *req;
  1696. int logit = 1;
  1697. int res = 0;
  1698. uint16_t state_flags = 0;
  1699. sts = (sts_entry_t *) pkt;
  1700. sts24 = (struct sts_entry_24xx *) pkt;
  1701. if (IS_FWI2_CAPABLE(ha)) {
  1702. comp_status = le16_to_cpu(sts24->comp_status);
  1703. scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
  1704. state_flags = le16_to_cpu(sts24->state_flags);
  1705. } else {
  1706. comp_status = le16_to_cpu(sts->comp_status);
  1707. scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
  1708. }
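/* The handle packs the request-queue number in the upper word and the index into that queue's outstanding-command array in the lower word. */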
  1709. handle = (uint32_t) LSW(sts->handle);
  1710. que = MSW(sts->handle);
  1711. req = ha->req_q_map[que];
  1712. /* Validate handle. */
  1713. if (handle < req->num_outstanding_cmds)
  1714. sp = req->outstanding_cmds[handle];
  1715. else
  1716. sp = NULL;
  1717. if (sp == NULL) {
  1718. ql_dbg(ql_dbg_io, vha, 0x3017,
  1719. "Invalid status handle (0x%x).\n", sts->handle);
  1720. if (IS_QLA82XX(ha))
  1721. set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
  1722. else
  1723. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  1724. qla2xxx_wake_dpc(vha);
  1725. return;
  1726. }
  1727. if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
  1728. qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
  1729. return;
  1730. }
  1731. /* Fast path completion. */
  1732. if (comp_status == CS_COMPLETE && scsi_status == 0) {
  1733. qla2x00_do_host_ramp_up(vha);
  1734. qla2x00_process_completed_request(vha, req, handle);
  1735. return;
  1736. }
  1737. req->outstanding_cmds[handle] = NULL;
  1738. cp = GET_CMD_SP(sp);
  1739. if (cp == NULL) {
  1740. ql_dbg(ql_dbg_io, vha, 0x3018,
  1741. "Command already returned (0x%x/%p).\n",
  1742. sts->handle, sp);
  1743. return;
  1744. }
  1745. lscsi_status = scsi_status & STATUS_MASK;
  1746. fcport = sp->fcport;
  1747. ox_id = 0;
  1748. sense_len = par_sense_len = rsp_info_len = resid_len =
  1749. fw_resid_len = 0;
  1750. if (IS_FWI2_CAPABLE(ha)) {
  1751. if (scsi_status & SS_SENSE_LEN_VALID)
  1752. sense_len = le32_to_cpu(sts24->sense_len);
  1753. if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
  1754. rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
  1755. if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
  1756. resid_len = le32_to_cpu(sts24->rsp_residual_count);
  1757. if (comp_status == CS_DATA_UNDERRUN)
  1758. fw_resid_len = le32_to_cpu(sts24->residual_len);
  1759. rsp_info = sts24->data;
  1760. sense_data = sts24->data;
  1761. host_to_fcp_swap(sts24->data, sizeof(sts24->data));
  1762. ox_id = le16_to_cpu(sts24->ox_id);
  1763. par_sense_len = sizeof(sts24->data);
  1764. } else {
  1765. if (scsi_status & SS_SENSE_LEN_VALID)
  1766. sense_len = le16_to_cpu(sts->req_sense_length);
  1767. if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
  1768. rsp_info_len = le16_to_cpu(sts->rsp_info_len);
  1769. resid_len = le32_to_cpu(sts->residual_length);
  1770. rsp_info = sts->rsp_info;
  1771. sense_data = sts->req_sense_data;
  1772. par_sense_len = sizeof(sts->req_sense_data);
  1773. }
  1774. /* Check for any FCP transport errors. */
  1775. if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
  1776. /* Sense data lies beyond any FCP RESPONSE data. */
  1777. if (IS_FWI2_CAPABLE(ha)) {
  1778. sense_data += rsp_info_len;
  1779. par_sense_len -= rsp_info_len;
  1780. }
  1781. if (rsp_info_len > 3 && rsp_info[3]) {
  1782. ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
  1783. "FCP I/O protocol failure (0x%x/0x%x).\n",
  1784. rsp_info_len, rsp_info[3]);
  1785. res = DID_BUS_BUSY << 16;
  1786. goto out;
  1787. }
  1788. }
  1789. /* Check for overrun. */
  1790. if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
  1791. scsi_status & SS_RESIDUAL_OVER)
  1792. comp_status = CS_DATA_OVERRUN;
  1793. /*
  1794. * Based on Host and scsi status generate status code for Linux
  1795. */
  1796. switch (comp_status) {
  1797. case CS_COMPLETE:
  1798. case CS_QUEUE_FULL:
  1799. if (scsi_status == 0) {
  1800. res = DID_OK << 16;
  1801. break;
  1802. }
  1803. if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
  1804. resid = resid_len;
  1805. scsi_set_resid(cp, resid);
  1806. if (!lscsi_status &&
  1807. ((unsigned)(scsi_bufflen(cp) - resid) <
  1808. cp->underflow)) {
  1809. ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
  1810. "Mid-layer underflow "
  1811. "detected (0x%x of 0x%x bytes).\n",
  1812. resid, scsi_bufflen(cp));
  1813. res = DID_ERROR << 16;
  1814. break;
  1815. }
  1816. }
  1817. res = DID_OK << 16 | lscsi_status;
  1818. if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
  1819. ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
  1820. "QUEUE FULL detected.\n");
  1821. break;
  1822. }
  1823. logit = 0;
  1824. if (lscsi_status != SS_CHECK_CONDITION)
  1825. break;
  1826. memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
  1827. if (!(scsi_status & SS_SENSE_LEN_VALID))
  1828. break;
  1829. qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
  1830. rsp, res);
  1831. break;
  1832. case CS_DATA_UNDERRUN:
  1833. /* Use F/W calculated residual length. */
  1834. resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
  1835. scsi_set_resid(cp, resid);
  1836. if (scsi_status & SS_RESIDUAL_UNDER) {
  1837. if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
  1838. ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
  1839. "Dropped frame(s) detected "
  1840. "(0x%x of 0x%x bytes).\n",
  1841. resid, scsi_bufflen(cp));
  1842. res = DID_ERROR << 16 | lscsi_status;
  1843. goto check_scsi_status;
  1844. }
  1845. if (!lscsi_status &&
  1846. ((unsigned)(scsi_bufflen(cp) - resid) <
  1847. cp->underflow)) {
  1848. ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
  1849. "Mid-layer underflow "
  1850. "detected (0x%x of 0x%x bytes).\n",
  1851. resid, scsi_bufflen(cp));
  1852. res = DID_ERROR << 16;
  1853. break;
  1854. }
  1855. } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
  1856. lscsi_status != SAM_STAT_BUSY) {
  1857. /*
1858. * A SCSI status of TASK SET FULL or BUSY is treated as the
1859. * task not having completed.
  1860. */
  1861. ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
  1862. "Dropped frame(s) detected (0x%x "
  1863. "of 0x%x bytes).\n", resid,
  1864. scsi_bufflen(cp));
  1865. res = DID_ERROR << 16 | lscsi_status;
  1866. goto check_scsi_status;
  1867. } else {
  1868. ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
  1869. "scsi_status: 0x%x, lscsi_status: 0x%x\n",
  1870. scsi_status, lscsi_status);
  1871. }
  1872. res = DID_OK << 16 | lscsi_status;
  1873. logit = 0;
  1874. check_scsi_status:
  1875. /*
  1876. * Check to see if SCSI Status is non zero. If so report SCSI
  1877. * Status.
  1878. */
  1879. if (lscsi_status != 0) {
  1880. if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
  1881. ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
  1882. "QUEUE FULL detected.\n");
  1883. logit = 1;
  1884. break;
  1885. }
  1886. if (lscsi_status != SS_CHECK_CONDITION)
  1887. break;
  1888. memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
  1889. if (!(scsi_status & SS_SENSE_LEN_VALID))
  1890. break;
  1891. qla2x00_handle_sense(sp, sense_data, par_sense_len,
  1892. sense_len, rsp, res);
  1893. }
  1894. break;
  1895. case CS_PORT_LOGGED_OUT:
  1896. case CS_PORT_CONFIG_CHG:
  1897. case CS_PORT_BUSY:
  1898. case CS_INCOMPLETE:
  1899. case CS_PORT_UNAVAILABLE:
  1900. case CS_TIMEOUT:
  1901. case CS_RESET:
  1902. /*
  1903. * We are going to have the fc class block the rport
  1904. * while we try to recover so instruct the mid layer
  1905. * to requeue until the class decides how to handle this.
  1906. */
  1907. res = DID_TRANSPORT_DISRUPTED << 16;
  1908. if (comp_status == CS_TIMEOUT) {
  1909. if (IS_FWI2_CAPABLE(ha))
  1910. break;
  1911. else if ((le16_to_cpu(sts->status_flags) &
  1912. SF_LOGOUT_SENT) == 0)
  1913. break;
  1914. }
  1915. ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
  1916. "Port down status: port-state=0x%x.\n",
  1917. atomic_read(&fcport->state));
  1918. if (atomic_read(&fcport->state) == FCS_ONLINE)
  1919. qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
  1920. break;
  1921. case CS_ABORTED:
  1922. res = DID_RESET << 16;
  1923. break;
  1924. case CS_DIF_ERROR:
  1925. logit = qla2x00_handle_dif_error(sp, sts24);
  1926. res = cp->result;
  1927. break;
  1928. case CS_TRANSPORT:
  1929. res = DID_ERROR << 16;
  1930. if (!IS_PI_SPLIT_DET_CAPABLE(ha))
  1931. break;
  1932. if (state_flags & BIT_4)
  1933. scmd_printk(KERN_WARNING, cp,
  1934. "Unsupported device '%s' found.\n",
  1935. cp->device->vendor);
  1936. break;
  1937. default:
  1938. res = DID_ERROR << 16;
  1939. break;
  1940. }
  1941. out:
  1942. if (logit)
  1943. ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
  1944. "FCP command status: 0x%x-0x%x (0x%x) "
  1945. "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
  1946. "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
  1947. "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
  1948. comp_status, scsi_status, res, vha->host_no,
  1949. cp->device->id, cp->device->lun, fcport->d_id.b.domain,
  1950. fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
  1951. cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
  1952. cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
  1953. cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
  1954. resid_len, fw_resid_len);
  1955. if (!res)
  1956. qla2x00_do_host_ramp_up(vha);
  1957. if (rsp->status_srb == NULL)
  1958. sp->done(ha, sp, res);
  1959. }
  1960. /**
1961. * qla2x00_status_cont_entry() - Process a Status Continuation entry.
1962. * @rsp: response queue
  1963. * @pkt: Entry pointer
  1964. *
  1965. * Extended sense data.
  1966. */
  1967. static void
  1968. qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
  1969. {
  1970. uint8_t sense_sz = 0;
  1971. struct qla_hw_data *ha = rsp->hw;
  1972. struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
  1973. srb_t *sp = rsp->status_srb;
  1974. struct scsi_cmnd *cp;
  1975. uint32_t sense_len;
  1976. uint8_t *sense_ptr;
  1977. if (!sp || !GET_CMD_SENSE_LEN(sp))
  1978. return;
  1979. sense_len = GET_CMD_SENSE_LEN(sp);
  1980. sense_ptr = GET_CMD_SENSE_PTR(sp);
  1981. cp = GET_CMD_SP(sp);
  1982. if (cp == NULL) {
  1983. ql_log(ql_log_warn, vha, 0x3025,
  1984. "cmd is NULL: already returned to OS (sp=%p).\n", sp);
  1985. rsp->status_srb = NULL;
  1986. return;
  1987. }
  1988. if (sense_len > sizeof(pkt->data))
  1989. sense_sz = sizeof(pkt->data);
  1990. else
  1991. sense_sz = sense_len;
  1992. /* Move sense data. */
  1993. if (IS_FWI2_CAPABLE(ha))
  1994. host_to_fcp_swap(pkt->data, sizeof(pkt->data));
  1995. memcpy(sense_ptr, pkt->data, sense_sz);
  1996. ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
  1997. sense_ptr, sense_sz);
  1998. sense_len -= sense_sz;
  1999. sense_ptr += sense_sz;
  2000. SET_CMD_SENSE_PTR(sp, sense_ptr);
  2001. SET_CMD_SENSE_LEN(sp, sense_len);
  2002. /* Place command on done queue. */
  2003. if (sense_len == 0) {
  2004. rsp->status_srb = NULL;
  2005. sp->done(ha, sp, cp->result);
  2006. }
  2007. }
  2008. /**
  2009. * qla2x00_error_entry() - Process an error entry.
2010. * @vha: SCSI driver HA context
  2011. * @pkt: Entry pointer
  2012. */
  2013. static void
  2014. qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
  2015. {
  2016. srb_t *sp;
  2017. struct qla_hw_data *ha = vha->hw;
  2018. const char func[] = "ERROR-IOCB";
  2019. uint16_t que = MSW(pkt->handle);
  2020. struct req_que *req = NULL;
  2021. int res = DID_ERROR << 16;
  2022. ql_dbg(ql_dbg_async, vha, 0x502a,
  2023. "type of error status in response: 0x%x\n", pkt->entry_status);
  2024. if (que >= ha->max_req_queues || !ha->req_q_map[que])
  2025. goto fatal;
  2026. req = ha->req_q_map[que];
  2027. if (pkt->entry_status & RF_BUSY)
  2028. res = DID_BUS_BUSY << 16;
  2029. sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
  2030. if (sp) {
  2031. sp->done(ha, sp, res);
  2032. return;
  2033. }
  2034. fatal:
  2035. ql_log(ql_log_warn, vha, 0x5030,
  2036. "Error entry - invalid handle/queue.\n");
  2037. if (IS_QLA82XX(ha))
  2038. set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
  2039. else
  2040. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  2041. qla2xxx_wake_dpc(vha);
  2042. }
  2043. /**
  2044. * qla24xx_mbx_completion() - Process mailbox command completions.
2045. * @vha: SCSI driver HA context
  2046. * @mb0: Mailbox0 register
  2047. */
  2048. static void
  2049. qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
  2050. {
  2051. uint16_t cnt;
  2052. uint32_t mboxes;
  2053. uint16_t __iomem *wptr;
  2054. struct qla_hw_data *ha = vha->hw;
  2055. struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
  2056. /* Read all mbox registers? */
  2057. mboxes = (1 << ha->mbx_count) - 1;
  2058. if (!ha->mcp)
  2059. ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
  2060. else
  2061. mboxes = ha->mcp->in_mb;
  2062. /* Load return mailbox registers. */
  2063. ha->flags.mbox_int = 1;
  2064. ha->mailbox_out[0] = mb0;
  2065. mboxes >>= 1;
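/* mailbox0 was delivered with the interrupt status; copy the remaining registers only when the caller requested them via in_mb. */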
  2066. wptr = (uint16_t __iomem *)&reg->mailbox1;
  2067. for (cnt = 1; cnt < ha->mbx_count; cnt++) {
  2068. if (mboxes & BIT_0)
  2069. ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
  2070. mboxes >>= 1;
  2071. wptr++;
  2072. }
  2073. }
  2074. /**
  2075. * qla24xx_process_response_queue() - Process response queue entries.
2076. * @vha: SCSI driver HA context
  2077. */
  2078. void qla24xx_process_response_queue(struct scsi_qla_host *vha,
  2079. struct rsp_que *rsp)
  2080. {
  2081. struct sts_entry_24xx *pkt;
  2082. struct qla_hw_data *ha = vha->hw;
  2083. if (!vha->flags.online)
  2084. return;
  2085. while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
  2086. pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
  2087. rsp->ring_index++;
  2088. if (rsp->ring_index == rsp->length) {
  2089. rsp->ring_index = 0;
  2090. rsp->ring_ptr = rsp->ring;
  2091. } else {
  2092. rsp->ring_ptr++;
  2093. }
  2094. if (pkt->entry_status != 0) {
  2095. qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
  2096. (void)qlt_24xx_process_response_error(vha, pkt);
  2097. ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
  2098. wmb();
  2099. continue;
  2100. }
  2101. switch (pkt->entry_type) {
  2102. case STATUS_TYPE:
  2103. qla2x00_status_entry(vha, rsp, pkt);
  2104. break;
  2105. case STATUS_CONT_TYPE:
  2106. qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
  2107. break;
  2108. case VP_RPT_ID_IOCB_TYPE:
  2109. qla24xx_report_id_acquisition(vha,
  2110. (struct vp_rpt_id_entry_24xx *)pkt);
  2111. break;
  2112. case LOGINOUT_PORT_IOCB_TYPE:
  2113. qla24xx_logio_entry(vha, rsp->req,
  2114. (struct logio_entry_24xx *)pkt);
  2115. break;
  2116. case TSK_MGMT_IOCB_TYPE:
  2117. qla24xx_tm_iocb_entry(vha, rsp->req,
  2118. (struct tsk_mgmt_entry *)pkt);
  2119. break;
  2120. case CT_IOCB_TYPE:
  2121. qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
  2122. break;
  2123. case ELS_IOCB_TYPE:
  2124. qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
  2125. break;
  2126. case ABTS_RECV_24XX:
  2127. /* ensure that the ATIO queue is empty */
  2128. qlt_24xx_process_atio_queue(vha);
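/* Fall through: the ABTS entry itself is then handed to the target-mode code below. */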
  2129. case ABTS_RESP_24XX:
  2130. case CTIO_TYPE7:
  2131. case NOTIFY_ACK_TYPE:
  2132. qlt_response_pkt_all_vps(vha, (response_t *)pkt);
  2133. break;
  2134. case MARKER_TYPE:
2135. /* Do nothing in this case; this check only keeps marker
2136. * entries from falling into the default case.
  2137. */
  2138. break;
  2139. default:
  2140. /* Type Not Supported. */
  2141. ql_dbg(ql_dbg_async, vha, 0x5042,
  2142. "Received unknown response pkt type %x "
  2143. "entry status=%x.\n",
  2144. pkt->entry_type, pkt->entry_status);
  2145. break;
  2146. }
  2147. ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
  2148. wmb();
  2149. }
  2150. /* Adjust ring index */
  2151. if (IS_QLA82XX(ha)) {
  2152. struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
  2153. WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
  2154. } else
  2155. WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
  2156. }
  2157. static void
  2158. qla2xxx_check_risc_status(scsi_qla_host_t *vha)
  2159. {
  2160. int rval;
  2161. uint32_t cnt;
  2162. struct qla_hw_data *ha = vha->hw;
  2163. struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
  2164. if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
  2165. return;
  2166. rval = QLA_SUCCESS;
  2167. WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
  2168. RD_REG_DWORD(&reg->iobase_addr);
  2169. WRT_REG_DWORD(&reg->iobase_window, 0x0001);
  2170. for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
  2171. rval == QLA_SUCCESS; cnt--) {
  2172. if (cnt) {
  2173. WRT_REG_DWORD(&reg->iobase_window, 0x0001);
  2174. udelay(10);
  2175. } else
  2176. rval = QLA_FUNCTION_TIMEOUT;
  2177. }
  2178. if (rval == QLA_SUCCESS)
  2179. goto next_test;
  2180. WRT_REG_DWORD(&reg->iobase_window, 0x0003);
  2181. for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
  2182. rval == QLA_SUCCESS; cnt--) {
  2183. if (cnt) {
  2184. WRT_REG_DWORD(&reg->iobase_window, 0x0003);
  2185. udelay(10);
  2186. } else
  2187. rval = QLA_FUNCTION_TIMEOUT;
  2188. }
  2189. if (rval != QLA_SUCCESS)
  2190. goto done;
  2191. next_test:
  2192. if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
  2193. ql_log(ql_log_info, vha, 0x504c,
  2194. "Additional code -- 0x55AA.\n");
  2195. done:
  2196. WRT_REG_DWORD(&reg->iobase_window, 0x0000);
  2197. RD_REG_DWORD(&reg->iobase_window);
  2198. }
  2199. /**
  2200. * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
2201. * @irq: interrupt number
  2202. * @dev_id: SCSI driver HA context
  2203. *
  2204. * Called by system whenever the host adapter generates an interrupt.
  2205. *
  2206. * Returns handled flag.
  2207. */
  2208. irqreturn_t
  2209. qla24xx_intr_handler(int irq, void *dev_id)
  2210. {
  2211. scsi_qla_host_t *vha;
  2212. struct qla_hw_data *ha;
  2213. struct device_reg_24xx __iomem *reg;
  2214. int status;
  2215. unsigned long iter;
  2216. uint32_t stat;
  2217. uint32_t hccr;
  2218. uint16_t mb[8];
  2219. struct rsp_que *rsp;
  2220. unsigned long flags;
  2221. rsp = (struct rsp_que *) dev_id;
  2222. if (!rsp) {
  2223. ql_log(ql_log_info, NULL, 0x5059,
  2224. "%s: NULL response queue pointer.\n", __func__);
  2225. return IRQ_NONE;
  2226. }
  2227. ha = rsp->hw;
  2228. reg = &ha->iobase->isp24;
  2229. status = 0;
  2230. if (unlikely(pci_channel_offline(ha->pdev)))
  2231. return IRQ_HANDLED;
  2232. spin_lock_irqsave(&ha->hardware_lock, flags);
  2233. vha = pci_get_drvdata(ha->pdev);
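/* Service at most a fixed number of events per interrupt, presumably to bound the time spent in the handler with the hardware lock held. */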
  2234. for (iter = 50; iter--; ) {
  2235. stat = RD_REG_DWORD(&reg->host_status);
  2236. if (stat & HSRX_RISC_PAUSED) {
  2237. if (unlikely(pci_channel_offline(ha->pdev)))
  2238. break;
  2239. hccr = RD_REG_DWORD(&reg->hccr);
  2240. ql_log(ql_log_warn, vha, 0x504b,
  2241. "RISC paused -- HCCR=%x, Dumping firmware.\n",
  2242. hccr);
  2243. qla2xxx_check_risc_status(vha);
  2244. ha->isp_ops->fw_dump(vha, 1);
  2245. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  2246. break;
  2247. } else if ((stat & HSRX_RISC_INT) == 0)
  2248. break;
  2249. switch (stat & 0xff) {
  2250. case INTR_ROM_MB_SUCCESS:
  2251. case INTR_ROM_MB_FAILED:
  2252. case INTR_MB_SUCCESS:
  2253. case INTR_MB_FAILED:
  2254. qla24xx_mbx_completion(vha, MSW(stat));
  2255. status |= MBX_INTERRUPT;
  2256. break;
  2257. case INTR_ASYNC_EVENT:
  2258. mb[0] = MSW(stat);
  2259. mb[1] = RD_REG_WORD(&reg->mailbox1);
  2260. mb[2] = RD_REG_WORD(&reg->mailbox2);
  2261. mb[3] = RD_REG_WORD(&reg->mailbox3);
  2262. qla2x00_async_event(vha, rsp, mb);
  2263. break;
  2264. case INTR_RSP_QUE_UPDATE:
  2265. case INTR_RSP_QUE_UPDATE_83XX:
  2266. qla24xx_process_response_queue(vha, rsp);
  2267. break;
  2268. case INTR_ATIO_QUE_UPDATE:
  2269. qlt_24xx_process_atio_queue(vha);
  2270. break;
  2271. case INTR_ATIO_RSP_QUE_UPDATE:
  2272. qlt_24xx_process_atio_queue(vha);
  2273. qla24xx_process_response_queue(vha, rsp);
  2274. break;
  2275. default:
  2276. ql_dbg(ql_dbg_async, vha, 0x504f,
  2277. "Unrecognized interrupt type (%d).\n", stat * 0xff);
  2278. break;
  2279. }
  2280. WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
  2281. RD_REG_DWORD_RELAXED(&reg->hccr);
  2282. if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
  2283. ndelay(3500);
  2284. }
  2285. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  2286. if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
  2287. (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
  2288. set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
  2289. complete(&ha->mbx_intr_comp);
  2290. }
  2291. return IRQ_HANDLED;
  2292. }
  2293. static irqreturn_t
  2294. qla24xx_msix_rsp_q(int irq, void *dev_id)
  2295. {
  2296. struct qla_hw_data *ha;
  2297. struct rsp_que *rsp;
  2298. struct device_reg_24xx __iomem *reg;
  2299. struct scsi_qla_host *vha;
  2300. unsigned long flags;
  2301. rsp = (struct rsp_que *) dev_id;
  2302. if (!rsp) {
  2303. ql_log(ql_log_info, NULL, 0x505a,
  2304. "%s: NULL response queue pointer.\n", __func__);
  2305. return IRQ_NONE;
  2306. }
  2307. ha = rsp->hw;
  2308. reg = &ha->iobase->isp24;
  2309. spin_lock_irqsave(&ha->hardware_lock, flags);
  2310. vha = pci_get_drvdata(ha->pdev);
  2311. qla24xx_process_response_queue(vha, rsp);
  2312. if (!ha->flags.disable_msix_handshake) {
  2313. WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
  2314. RD_REG_DWORD_RELAXED(&reg->hccr);
  2315. }
  2316. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  2317. return IRQ_HANDLED;
  2318. }
  2319. static irqreturn_t
  2320. qla25xx_msix_rsp_q(int irq, void *dev_id)
  2321. {
  2322. struct qla_hw_data *ha;
  2323. struct rsp_que *rsp;
  2324. struct device_reg_24xx __iomem *reg;
  2325. unsigned long flags;
  2326. rsp = (struct rsp_que *) dev_id;
  2327. if (!rsp) {
  2328. ql_log(ql_log_info, NULL, 0x505b,
  2329. "%s: NULL response queue pointer.\n", __func__);
  2330. return IRQ_NONE;
  2331. }
  2332. ha = rsp->hw;
  2333. /* Clear the interrupt, if enabled, for this response queue */
  2334. if (!ha->flags.disable_msix_handshake) {
  2335. reg = &ha->iobase->isp24;
  2336. spin_lock_irqsave(&ha->hardware_lock, flags);
  2337. WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
  2338. RD_REG_DWORD_RELAXED(&reg->hccr);
  2339. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  2340. }
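/* Defer the actual response-queue processing to the per-queue work item, queued on the CPU corresponding to the queue id. */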
  2341. queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
  2342. return IRQ_HANDLED;
  2343. }
  2344. static irqreturn_t
  2345. qla24xx_msix_default(int irq, void *dev_id)
  2346. {
  2347. scsi_qla_host_t *vha;
  2348. struct qla_hw_data *ha;
  2349. struct rsp_que *rsp;
  2350. struct device_reg_24xx __iomem *reg;
  2351. int status;
  2352. uint32_t stat;
  2353. uint32_t hccr;
  2354. uint16_t mb[8];
  2355. unsigned long flags;
  2356. rsp = (struct rsp_que *) dev_id;
  2357. if (!rsp) {
  2358. ql_log(ql_log_info, NULL, 0x505c,
  2359. "%s: NULL response queue pointer.\n", __func__);
  2360. return IRQ_NONE;
  2361. }
  2362. ha = rsp->hw;
  2363. reg = &ha->iobase->isp24;
  2364. status = 0;
  2365. spin_lock_irqsave(&ha->hardware_lock, flags);
  2366. vha = pci_get_drvdata(ha->pdev);
  2367. do {
  2368. stat = RD_REG_DWORD(&reg->host_status);
  2369. if (stat & HSRX_RISC_PAUSED) {
  2370. if (unlikely(pci_channel_offline(ha->pdev)))
  2371. break;
  2372. hccr = RD_REG_DWORD(&reg->hccr);
  2373. ql_log(ql_log_info, vha, 0x5050,
  2374. "RISC paused -- HCCR=%x, Dumping firmware.\n",
  2375. hccr);
  2376. qla2xxx_check_risc_status(vha);
  2377. ha->isp_ops->fw_dump(vha, 1);
  2378. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  2379. break;
  2380. } else if ((stat & HSRX_RISC_INT) == 0)
  2381. break;
  2382. switch (stat & 0xff) {
  2383. case INTR_ROM_MB_SUCCESS:
  2384. case INTR_ROM_MB_FAILED:
  2385. case INTR_MB_SUCCESS:
  2386. case INTR_MB_FAILED:
  2387. qla24xx_mbx_completion(vha, MSW(stat));
  2388. status |= MBX_INTERRUPT;
  2389. break;
  2390. case INTR_ASYNC_EVENT:
  2391. mb[0] = MSW(stat);
  2392. mb[1] = RD_REG_WORD(&reg->mailbox1);
  2393. mb[2] = RD_REG_WORD(&reg->mailbox2);
  2394. mb[3] = RD_REG_WORD(&reg->mailbox3);
  2395. qla2x00_async_event(vha, rsp, mb);
  2396. break;
  2397. case INTR_RSP_QUE_UPDATE:
  2398. case INTR_RSP_QUE_UPDATE_83XX:
  2399. qla24xx_process_response_queue(vha, rsp);
  2400. break;
  2401. case INTR_ATIO_QUE_UPDATE:
  2402. qlt_24xx_process_atio_queue(vha);
  2403. break;
  2404. case INTR_ATIO_RSP_QUE_UPDATE:
  2405. qlt_24xx_process_atio_queue(vha);
  2406. qla24xx_process_response_queue(vha, rsp);
  2407. break;
  2408. default:
  2409. ql_dbg(ql_dbg_async, vha, 0x5051,
  2410. "Unrecognized interrupt type (%d).\n", stat & 0xff);
  2411. break;
  2412. }
  2413. WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
  2414. } while (0);
  2415. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  2416. if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
  2417. (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
  2418. set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
  2419. complete(&ha->mbx_intr_comp);
  2420. }
  2421. return IRQ_HANDLED;
  2422. }
  2423. /* Interrupt handling helpers. */
  2424. struct qla_init_msix_entry {
  2425. const char *name;
  2426. irq_handler_t handler;
  2427. };
  2428. static struct qla_init_msix_entry msix_entries[3] = {
  2429. { "qla2xxx (default)", qla24xx_msix_default },
  2430. { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
  2431. { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
  2432. };
  2433. static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
  2434. { "qla2xxx (default)", qla82xx_msix_default },
  2435. { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
  2436. };
  2437. static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
  2438. { "qla2xxx (default)", qla24xx_msix_default },
  2439. { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
  2440. { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
  2441. };
  2442. static void
  2443. qla24xx_disable_msix(struct qla_hw_data *ha)
  2444. {
  2445. int i;
  2446. struct qla_msix_entry *qentry;
  2447. scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
  2448. for (i = 0; i < ha->msix_count; i++) {
  2449. qentry = &ha->msix_entries[i];
  2450. if (qentry->have_irq)
  2451. free_irq(qentry->vector, qentry->rsp);
  2452. }
  2453. pci_disable_msix(ha->pdev);
  2454. kfree(ha->msix_entries);
  2455. ha->msix_entries = NULL;
  2456. ha->flags.msix_enabled = 0;
  2457. ql_dbg(ql_dbg_init, vha, 0x0042,
  2458. "Disabled the MSI.\n");
  2459. }
  2460. static int
  2461. qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
  2462. {
  2463. #define MIN_MSIX_COUNT 2
  2464. int i, ret;
  2465. struct msix_entry *entries;
  2466. struct qla_msix_entry *qentry;
  2467. scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
  2468. entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
  2469. GFP_KERNEL);
  2470. if (!entries) {
  2471. ql_log(ql_log_warn, vha, 0x00bc,
  2472. "Failed to allocate memory for msix_entry.\n");
  2473. return -ENOMEM;
  2474. }
  2475. for (i = 0; i < ha->msix_count; i++)
  2476. entries[i].entry = i;
  2477. ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
  2478. if (ret) {
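/* With this kernel's pci_enable_msix() API, a positive return is the number of vectors the system could allocate; retry with that smaller count as long as it is at least MIN_MSIX_COUNT. */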
  2479. if (ret < MIN_MSIX_COUNT)
  2480. goto msix_failed;
  2481. ql_log(ql_log_warn, vha, 0x00c6,
  2482. "MSI-X: Failed to enable support "
  2483. "-- %d/%d\n Retry with %d vectors.\n",
  2484. ha->msix_count, ret, ret);
  2485. ha->msix_count = ret;
  2486. ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
  2487. if (ret) {
  2488. msix_failed:
  2489. ql_log(ql_log_fatal, vha, 0x00c7,
  2490. "MSI-X: Failed to enable support, "
  2491. "giving up -- %d/%d.\n",
  2492. ha->msix_count, ret);
  2493. goto msix_out;
  2494. }
  2495. ha->max_rsp_queues = ha->msix_count - 1;
  2496. }
  2497. ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
  2498. ha->msix_count, GFP_KERNEL);
  2499. if (!ha->msix_entries) {
  2500. ql_log(ql_log_fatal, vha, 0x00c8,
  2501. "Failed to allocate memory for ha->msix_entries.\n");
  2502. ret = -ENOMEM;
  2503. goto msix_out;
  2504. }
  2505. ha->flags.msix_enabled = 1;
  2506. for (i = 0; i < ha->msix_count; i++) {
  2507. qentry = &ha->msix_entries[i];
  2508. qentry->vector = entries[i].vector;
  2509. qentry->entry = entries[i].entry;
  2510. qentry->have_irq = 0;
  2511. qentry->rsp = NULL;
  2512. }
  2513. /* Enable MSI-X vectors for the base queue */
  2514. for (i = 0; i < ha->msix_count; i++) {
  2515. qentry = &ha->msix_entries[i];
  2516. if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
  2517. ret = request_irq(qentry->vector,
  2518. qla83xx_msix_entries[i].handler,
  2519. 0, qla83xx_msix_entries[i].name, rsp);
  2520. } else if (IS_QLA82XX(ha)) {
  2521. ret = request_irq(qentry->vector,
  2522. qla82xx_msix_entries[i].handler,
  2523. 0, qla82xx_msix_entries[i].name, rsp);
  2524. } else {
  2525. ret = request_irq(qentry->vector,
  2526. msix_entries[i].handler,
  2527. 0, msix_entries[i].name, rsp);
  2528. }
  2529. if (ret) {
  2530. ql_log(ql_log_fatal, vha, 0x00cb,
  2531. "MSI-X: unable to register handler -- %x/%d.\n",
  2532. qentry->vector, ret);
  2533. qla24xx_disable_msix(ha);
  2534. ha->mqenable = 0;
  2535. goto msix_out;
  2536. }
  2537. qentry->have_irq = 1;
  2538. qentry->rsp = rsp;
  2539. rsp->msix = qentry;
  2540. }
  2541. /* Enable MSI-X vector for response queue update for queue 0 */
  2542. if (IS_QLA83XX(ha)) {
  2543. if (ha->msixbase && ha->mqiobase &&
  2544. (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
  2545. ha->mqenable = 1;
  2546. } else
  2547. if (ha->mqiobase
  2548. && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
  2549. ha->mqenable = 1;
  2550. ql_dbg(ql_dbg_multiq, vha, 0xc005,
  2551. "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
  2552. ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
  2553. ql_dbg(ql_dbg_init, vha, 0x0055,
  2554. "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
  2555. ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
  2556. msix_out:
  2557. kfree(entries);
  2558. return ret;
  2559. }
  2560. int
  2561. qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
  2562. {
  2563. int ret;
  2564. device_reg_t __iomem *reg = ha->iobase;
  2565. scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
  2566. /* If possible, enable MSI-X. */
  2567. if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
  2568. !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha))
  2569. goto skip_msi;
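/* ISP2432 cards with these HP subsystem IDs are treated as not supporting MSI-X/MSI; fall straight through to the INTx path. */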
  2570. if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
  2571. (ha->pdev->subsystem_device == 0x7040 ||
  2572. ha->pdev->subsystem_device == 0x7041 ||
  2573. ha->pdev->subsystem_device == 0x1705)) {
  2574. ql_log(ql_log_warn, vha, 0x0034,
  2575. "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
  2576. ha->pdev->subsystem_vendor,
  2577. ha->pdev->subsystem_device);
  2578. goto skip_msi;
  2579. }
  2580. if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
  2581. ql_log(ql_log_warn, vha, 0x0035,
  2582. "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
  2583. ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
  2584. goto skip_msix;
  2585. }
  2586. ret = qla24xx_enable_msix(ha, rsp);
  2587. if (!ret) {
  2588. ql_dbg(ql_dbg_init, vha, 0x0036,
  2589. "MSI-X: Enabled (0x%X, 0x%X).\n",
  2590. ha->chip_revision, ha->fw_attributes);
  2591. goto clear_risc_ints;
  2592. }
  2593. ql_log(ql_log_info, vha, 0x0037,
  2594. "MSI-X Falling back-to MSI mode -%d.\n", ret);
  2595. skip_msix:
  2596. if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
  2597. !IS_QLA8001(ha) && !IS_QLA82XX(ha) && !IS_QLAFX00(ha))
  2598. goto skip_msi;
  2599. ret = pci_enable_msi(ha->pdev);
  2600. if (!ret) {
  2601. ql_dbg(ql_dbg_init, vha, 0x0038,
  2602. "MSI: Enabled.\n");
  2603. ha->flags.msi_enabled = 1;
  2604. } else
  2605. ql_log(ql_log_warn, vha, 0x0039,
  2606. "MSI-X; Falling back-to INTa mode -- %d.\n", ret);
  2607. /* Skip INTx on ISP82xx. */
  2608. if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
  2609. return QLA_FUNCTION_FAILED;
  2610. skip_msi:
  2611. ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
  2612. ha->flags.msi_enabled ? 0 : IRQF_SHARED,
  2613. QLA2XXX_DRIVER_NAME, rsp);
  2614. if (ret) {
  2615. ql_log(ql_log_warn, vha, 0x003a,
  2616. "Failed to reserve interrupt %d already in use.\n",
  2617. ha->pdev->irq);
  2618. goto fail;
  2619. } else if (!ha->flags.msi_enabled) {
  2620. ql_dbg(ql_dbg_init, vha, 0x0125,
  2621. "INTa mode: Enabled.\n");
  2622. ha->flags.mr_intr_valid = 1;
  2623. }
  2624. clear_risc_ints:
  2625. spin_lock_irq(&ha->hardware_lock);
  2626. if (!IS_FWI2_CAPABLE(ha))
  2627. WRT_REG_WORD(&reg->isp.semaphore, 0);
  2628. spin_unlock_irq(&ha->hardware_lock);
  2629. fail:
  2630. return ret;
  2631. }
  2632. void
  2633. qla2x00_free_irqs(scsi_qla_host_t *vha)
  2634. {
  2635. struct qla_hw_data *ha = vha->hw;
  2636. struct rsp_que *rsp;
  2637. /*
  2638. * We need to check that ha->rsp_q_map is valid in case we are called
  2639. * from a probe failure context.
  2640. */
  2641. if (!ha->rsp_q_map || !ha->rsp_q_map[0])
  2642. return;
  2643. rsp = ha->rsp_q_map[0];
  2644. if (ha->flags.msix_enabled)
  2645. qla24xx_disable_msix(ha);
  2646. else if (ha->flags.msi_enabled) {
  2647. free_irq(ha->pdev->irq, rsp);
  2648. pci_disable_msi(ha->pdev);
  2649. } else
  2650. free_irq(ha->pdev->irq, rsp);
  2651. }
  2652. int qla25xx_request_irq(struct rsp_que *rsp)
  2653. {
  2654. struct qla_hw_data *ha = rsp->hw;
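/* Entry 2 of msix_entries is the multiqueue response-queue handler (qla25xx_msix_rsp_q). */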
  2655. struct qla_init_msix_entry *intr = &msix_entries[2];
  2656. struct qla_msix_entry *msix = rsp->msix;
  2657. scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
  2658. int ret;
  2659. ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
  2660. if (ret) {
  2661. ql_log(ql_log_fatal, vha, 0x00e6,
  2662. "MSI-X: Unable to register handler -- %x/%d.\n",
  2663. msix->vector, ret);
  2664. return ret;
  2665. }
  2666. msix->have_irq = 1;
  2667. msix->rsp = rsp;
  2668. return ret;
  2669. }