/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
	struct req_que *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);

/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long iter;
	uint16_t hccr;
	uint16_t mb[4];
	struct rsp_que *rsp;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
		    "%s(): NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared. Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
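			/*
			 * Mailbox 0 in the 0x4000-0x7fff range reports a
			 * mailbox command completion status; 0x8000-0xbfff
			 * identifies an asynchronous event.
			 */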
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				/*EMPTY*/
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}

/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long iter;
	uint32_t stat;
	uint16_t hccr;
	uint16_t mb[4];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
		    "%s(): NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_WORD(&reg->hccr);
			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared. Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;
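
		/*
		 * The low byte of host_status identifies the interrupt
		 * source; the upper 16 bits carry mailbox 0 (or a fast-post
		 * handle), so most events are decoded without further
		 * register reads.
		 */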
		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}

/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t cnt;
	uint32_t mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
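	/*
	 * 'mboxes' is the caller's in_mb bitmask; it is shifted right after
	 * each register so that BIT_0 always refers to the register indexed
	 * by 'cnt' in the loop below.
	 */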
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
		mboxes >>= 1;
	}
}

static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	wptr = (uint16_t __iomem *)&reg24->mailbox1;
	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);

	/* Acknowledgement needed? [Notify && non-zero timeout]. */
	timeout = (descr >> 8) & 0xf;
	if (aen != MBA_IDC_NOTIFY || !timeout)
		return;

	ql_dbg(ql_dbg_async, vha, 0x5022,
	    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
	    vha->host_no, event[aen & 0xff], timeout);

	rval = qla2x00_post_idc_ack_work(vha, mb);
	if (rval != QLA_SUCCESS)
		ql_log(ql_log_warn, vha, 0x5023,
		    "IDC failed to post ACK.\n");
}

/**
 * qla2x00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
#define LS_UNKNOWN 2
	static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
	char *link_speed;
	uint16_t handle_cnt;
	uint16_t cnt, mbx;
	uint32_t handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t rscn_entry, host_pid;
	uint8_t rscn_queue_index;
	unsigned long flags;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
	if (IS_QLA8XXX_TYPE(ha))
		goto skip_rio;
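
	/*
	 * RIO (reduced interrupt operation) events pack up to five command
	 * handles into the mailbox registers, as 16-bit or 32-bit values.
	 * Normalize them into handles[] and treat the event as a plain
	 * MBA_SCSI_COMPLETION below.
	 */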
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
			    handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0;
		ql_log(ql_log_warn, vha, 0x5003,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);

		ha->isp_ops->fw_dump(vha, 1);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->flags.port0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES.\n");
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
			link_speed = link_speeds[0];
			ha->link_data_rate = PORT_SPEED_1GB;
		} else {
			link_speed = link_speeds[LS_UNKNOWN];
			if (mb[1] < 5)
				link_speed = link_speeds[mb[1]];
			else if (mb[1] == 0x13)
				link_speed = link_speeds[5];
			ha->link_data_rate = mb[1];
		}

		ql_dbg(ql_dbg_async, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n", link_speed);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
		mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
		ql_dbg(ql_dbg_async, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;

	/* case MBA_DCBX_COMPLETE: */
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		if (IS_QLA2100(ha))
			break;

		if (IS_QLA8XXX_TYPE(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp)
				complete(&ha->dcbx_comp);
		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		ha->flags.gpsc_supported = 1;
		vha->flags.management_server_logged_in = 0;
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 *         OR 0xffff for global event
		 * mb[2] = New login state
		 *         7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 *   Event is global, vp_idx is NOT all vps,
		 *   vp_idx does not match
		 *   Event is not global, vp_idx does not match
		 */
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
		    (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		/* Global event -- port logout or port unavailable. */
		if (mb[1] == 0xffff && mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port unavailable %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);

			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it. Otherwise ignore it and Wait for RSCN to come in.
		 */
		atomic_set(&vha->loop_down_timer, 0);
		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);
			break;
		}

		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&vha->loop_state, LOOP_UP);

		qla2x00_mark_all_devices_lost(vha, 1);

		vha->flags.rscn_queue_overflow = 1;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
			break;
		/* Only handle SCNs for our Vport index. */
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
			break;

		ql_dbg(ql_dbg_async, vha, 0x5013,
		    "RSCN database changed -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
		    | vha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			ql_dbg(ql_dbg_async, vha, 0x5014,
			    "Ignoring RSCN update to local host "
			    "port ID (%06x).\n", host_pid);
			break;
		}

		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
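
		/*
		 * Queue the RSCN in the ring buffer; if advancing the
		 * in-pointer would collide with the out-pointer the ring is
		 * full, so just flag an overflow instead.
		 */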
		rscn_queue_index = vha->rscn_in_ptr + 1;
		if (rscn_queue_index == MAX_RSCN_COUNT)
			rscn_queue_index = 0;
		if (rscn_queue_index != vha->rscn_out_ptr) {
			vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
			vha->rscn_in_ptr = rscn_queue_index;
		} else {
			vha->flags.rscn_queue_overflow = 1;
		}

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(RSCN_UPDATE, &vha->dpc_flags);
		qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		break;

	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;

	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;

	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;

	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_IDC_COMPLETE:
	case MBA_IDC_NOTIFY:
	case MBA_IDC_TIME_EXT:
		qla81xx_idc_event(vha, mb[0], mb[1]);
		break;
	}

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}

/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @index: SRB index
 */
static void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
    struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->cmd->result = DID_OK << 16;
		qla2x00_sp_compl(ha, sp);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}

static srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	index = LSW(pkt->handle);
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "Invalid command index (%x).\n", index);
		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "Invalid completion handle (%x) -- timed-out.\n", index);
		return sp;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
		return NULL;
	}

	req->outstanding_cmds[index] = NULL;

done:
	return sp;
}

static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	struct srb_ctx *ctx;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	ctx = sp->ctx;
	lio = ctx->u.iocb_cmd;
	type = ctx->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    (uint8_t *)mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	if (status == 0x30 && ctx->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (ctx->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	lio->done(sp);
}

static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	struct srb_ctx *sp_bsg;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	sp_bsg = sp->ctx;
	bsg_job = sp_bsg->u.bsg_job;

	type = NULL;
	switch (sp_bsg->type) {
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	default:
		ql_log(ql_log_warn, vha, 0x5047,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
		return;
	}

	comp_status = le16_to_cpu(pkt->comp_status);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			bsg_job->reply->result = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

			ql_log(ql_log_warn, vha, 0x5048,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x total_byte = 0x%x.\n",
			    type, comp_status,
			    bsg_job->reply->reply_payload_rcv_len);
		} else {
			ql_log(ql_log_warn, vha, 0x5049,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x.\n", type, comp_status);
			bsg_job->reply->result = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
		}
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		bsg_job->reply->result = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (sp_bsg->type == SRB_ELS_CMD_HST || sp_bsg->type == SRB_CT_CMD)
		kfree(sp->fcport);

	kfree(sp->ctx);
	mempool_free(sp, ha->srb_mempool);
	bsg_job->job_done(bsg_job);
}

static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	struct srb_ctx *sp_bsg;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	uint32_t fw_status[3];
	uint8_t *fw_sts_ptr;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;
	sp_bsg = sp->ctx;
	bsg_job = sp_bsg->u.bsg_job;

	type = NULL;
	switch (sp_bsg->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	default:
		ql_log(ql_log_warn, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			bsg_job->reply->result = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

			ql_log(ql_log_info, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->total_byte_count));
			fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
			    sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		} else {
			ql_log(ql_log_info, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_2));
			bsg_job->reply->result = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
			fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
			    sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		}
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5056,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		bsg_job->reply->result = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if ((sp_bsg->type == SRB_ELS_CMD_HST) ||
	    (sp_bsg->type == SRB_CT_CMD))
		kfree(sp->fcport);

	kfree(sp->ctx);
	mempool_free(sp, ha->srb_mempool);
	bsg_job->job_done(bsg_job);
}

static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	struct srb_ctx *ctx;
	uint16_t *data;
	uint32_t iop[2];

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	ctx = sp->ctx;
	lio = ctx->u.iocb_cmd;
	type = ctx->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, vha, 0x5034,
		    "Async-%s error entry - hdl=%x "
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    (uint8_t *)logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5036,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x "
		    "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le32_to_cpu(logio->io_parameter[0]));

		data[0] = MBS_COMMAND_COMPLETE;
		if (ctx->type != SRB_LOGIN_CMD)
			goto logio_done;
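
		/*
		 * On a successful login, io_parameter[0] reports the peer's
		 * capabilities: BIT_4 = target role (BIT_8 = FCP-2 device),
		 * BIT_5 = initiator role.
		 */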
		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_dbg(ql_dbg_async, vha, 0x5037,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
	    "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
	    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	lio->done(sp);
}

static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct tsk_mgmt_entry *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct srb_ctx *ctx;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	int error = 1;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	ctx = sp->ctx;
	iocb = ctx->u.iocb_cmd;
	type = ctx->name;
	fcport = sp->fcport;

	if (sts->entry_status) {
		ql_log(ql_log_warn, vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
	} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, sts->comp_status);
	} else if (!(le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		ql_log(ql_log_warn, vha, 0x503a,
		    "Async-%s error - hdl=%x no response info(%x).\n",
		    type, sp->handle, sts->scsi_status);
	} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
		ql_log(ql_log_warn, vha, 0x503b,
		    "Async-%s error - hdl=%x not enough response(%d).\n",
		    type, sp->handle, sts->rsp_data_len);
	} else if (sts->data[3]) {
		ql_log(ql_log_warn, vha, 0x503c,
		    "Async-%s error - hdl=%x response(%x).\n",
		    type, sp->handle, sts->data[3]);
	} else {
		error = 0;
	}

	if (error) {
		iocb->u.tmf.data = error;
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
		    (uint8_t *)sts, sizeof(*sts));
	}

	iocb->done(sp);
}

/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t *pkt;
	uint16_t handle_cnt;
	uint16_t cnt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;
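
	/*
	 * Walk the ring until the next entry still carries the
	 * RESPONSE_PROCESSED signature; every entry handled below is
	 * re-stamped with that signature so it is not processed twice.
	 */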
	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		default:
			/* Type Not Supported. */
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}

static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
    uint32_t sense_len, struct rsp_que *rsp)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cp = sp->cmd;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	sp->request_sense_length = sense_len;
	sp->request_sense_ptr = cp->sense_buffer;
	if (sp->request_sense_length > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	sp->request_sense_ptr += sense_len;
	sp->request_sense_length -= sense_len;
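
	/*
	 * More sense data than fits in this IOCB: park the SRB on the
	 * response queue so the remaining bytes can be appended from the
	 * status continuation entries that follow.
	 */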
	if (sp->request_sense_length != 0)
		rsp->status_srb = sp;

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
		    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}

struct scsi_dif_tuple {
	__be16 guard;	/* Checksum */
	__be16 app_tag;	/* APPL identifier */
	__be32 ref_tag;	/* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected error.
 */
  1256. static inline int
  1257. qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
  1258. {
  1259. struct scsi_qla_host *vha = sp->fcport->vha;
  1260. struct scsi_cmnd *cmd = sp->cmd;
  1261. uint8_t *ap = &sts24->data[12];
  1262. uint8_t *ep = &sts24->data[20];
  1263. uint32_t e_ref_tag, a_ref_tag;
  1264. uint16_t e_app_tag, a_app_tag;
  1265. uint16_t e_guard, a_guard;
  1266. /*
  1267. * swab32 of the "data" field in the beginning of qla2x00_status_entry()
  1268. * would make guard field appear at offset 2
  1269. */
  1270. a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
  1271. a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
  1272. a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
  1273. e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
  1274. e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
  1275. e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
  1276. ql_dbg(ql_dbg_io, vha, 0x3023,
  1277. "iocb(s) %p Returned STATUS.\n", sts24);
  1278. ql_dbg(ql_dbg_io, vha, 0x3024,
  1279. "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
  1280. " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
  1281. " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
  1282. cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
  1283. a_app_tag, e_app_tag, a_guard, e_guard);
  1284. /*
  1285. * Ignore sector if:
  1286. * For type 3: ref & app tag is all 'f's
  1287. * For type 0,1,2: app tag is all 'f's
  1288. */
  1289. if ((a_app_tag == 0xffff) &&
  1290. ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
  1291. (a_ref_tag == 0xffffffff))) {
  1292. uint32_t blocks_done, resid;
  1293. sector_t lba_s = scsi_get_lba(cmd);
  1294. /* 2TB boundary case covered automatically with this */
  1295. blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
  1296. resid = scsi_bufflen(cmd) - (blocks_done *
  1297. cmd->device->sector_size);
  1298. scsi_set_resid(cmd, resid);
  1299. cmd->result = DID_OK << 16;
  1300. /* Update protection tag */
  1301. if (scsi_prot_sg_count(cmd)) {
  1302. uint32_t i, j = 0, k = 0, num_ent;
  1303. struct scatterlist *sg;
  1304. struct sd_dif_tuple *spt;
  1305. /* Patch the corresponding protection tags */
  1306. scsi_for_each_prot_sg(cmd, sg,
  1307. scsi_prot_sg_count(cmd), i) {
  1308. num_ent = sg_dma_len(sg) / 8;
  1309. if (k + num_ent < blocks_done) {
  1310. k += num_ent;
  1311. continue;
  1312. }
  1313. j = blocks_done - k - 1;
  1314. k = blocks_done;
  1315. break;
  1316. }
  1317. if (k != blocks_done) {
  1318. ql_log(ql_log_warn, vha, 0x302f,
  1319. "unexpected tag values tag:lba=%x:%llx)\n",
  1320. e_ref_tag, (unsigned long long)lba_s);
  1321. return 1;
  1322. }
  1323. spt = page_address(sg_page(sg)) + sg->offset;
  1324. spt += j;
  1325. spt->app_tag = 0xffff;
  1326. if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
  1327. spt->ref_tag = 0xffffffff;
  1328. }
  1329. return 0;
  1330. }
  1331. /* check guard */
  1332. if (e_guard != a_guard) {
  1333. scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
  1334. 0x10, 0x1);
  1335. set_driver_byte(cmd, DRIVER_SENSE);
  1336. set_host_byte(cmd, DID_ABORT);
  1337. cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
  1338. return 1;
  1339. }
  1340. /* check ref tag */
  1341. if (e_ref_tag != a_ref_tag) {
  1342. scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
  1343. 0x10, 0x3);
  1344. set_driver_byte(cmd, DRIVER_SENSE);
  1345. set_host_byte(cmd, DID_ABORT);
  1346. cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
  1347. return 1;
  1348. }
  1349. /* check appl tag */
  1350. if (e_app_tag != a_app_tag) {
  1351. scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
  1352. 0x10, 0x2);
  1353. set_driver_byte(cmd, DRIVER_SENSE);
  1354. set_host_byte(cmd, DID_ABORT);
  1355. cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
  1356. return 1;
  1357. }
  1358. return 1;
  1359. }
  1360. /**
  1361. * qla2x00_status_entry() - Process a Status IOCB entry.
1362. * @vha: SCSI driver HA context
  1363. * @pkt: Entry pointer
  1364. */
  1365. static void
  1366. qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
  1367. {
  1368. srb_t *sp;
  1369. fc_port_t *fcport;
  1370. struct scsi_cmnd *cp;
  1371. sts_entry_t *sts;
  1372. struct sts_entry_24xx *sts24;
  1373. uint16_t comp_status;
  1374. uint16_t scsi_status;
  1375. uint16_t ox_id;
  1376. uint8_t lscsi_status;
  1377. int32_t resid;
  1378. uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
  1379. fw_resid_len;
  1380. uint8_t *rsp_info, *sense_data;
  1381. struct qla_hw_data *ha = vha->hw;
  1382. uint32_t handle;
  1383. uint16_t que;
  1384. struct req_que *req;
  1385. int logit = 1;
  1386. sts = (sts_entry_t *) pkt;
  1387. sts24 = (struct sts_entry_24xx *) pkt;
  1388. if (IS_FWI2_CAPABLE(ha)) {
  1389. comp_status = le16_to_cpu(sts24->comp_status);
  1390. scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
  1391. } else {
  1392. comp_status = le16_to_cpu(sts->comp_status);
  1393. scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
  1394. }
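/*
 * The completion handle packs the request queue number in its upper
 * 16 bits and the outstanding_cmds[] index in its lower 16 bits.
 */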
  1395. handle = (uint32_t) LSW(sts->handle);
  1396. que = MSW(sts->handle);
  1397. req = ha->req_q_map[que];
  1398. /* Fast path completion. */
  1399. if (comp_status == CS_COMPLETE && scsi_status == 0) {
  1400. qla2x00_process_completed_request(vha, req, handle);
  1401. return;
  1402. }
  1403. /* Validate handle. */
  1404. if (handle < MAX_OUTSTANDING_COMMANDS) {
  1405. sp = req->outstanding_cmds[handle];
  1406. req->outstanding_cmds[handle] = NULL;
  1407. } else
  1408. sp = NULL;
  1409. if (sp == NULL) {
  1410. ql_dbg(ql_dbg_io, vha, 0x3017,
  1411. "Invalid status handle (0x%x).\n", sts->handle);
  1412. if (IS_QLA82XX(ha))
  1413. set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
  1414. else
  1415. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  1416. qla2xxx_wake_dpc(vha);
  1417. return;
  1418. }
  1419. cp = sp->cmd;
  1420. if (cp == NULL) {
  1421. ql_dbg(ql_dbg_io, vha, 0x3018,
  1422. "Command already returned (0x%x/%p).\n",
  1423. sts->handle, sp);
  1424. return;
  1425. }
  1426. lscsi_status = scsi_status & STATUS_MASK;
  1427. fcport = sp->fcport;
  1428. ox_id = 0;
  1429. sense_len = par_sense_len = rsp_info_len = resid_len =
  1430. fw_resid_len = 0;
  1431. if (IS_FWI2_CAPABLE(ha)) {
  1432. if (scsi_status & SS_SENSE_LEN_VALID)
  1433. sense_len = le32_to_cpu(sts24->sense_len);
  1434. if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
  1435. rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
  1436. if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
  1437. resid_len = le32_to_cpu(sts24->rsp_residual_count);
  1438. if (comp_status == CS_DATA_UNDERRUN)
  1439. fw_resid_len = le32_to_cpu(sts24->residual_len);
  1440. rsp_info = sts24->data;
  1441. sense_data = sts24->data;
  1442. host_to_fcp_swap(sts24->data, sizeof(sts24->data));
  1443. ox_id = le16_to_cpu(sts24->ox_id);
  1444. par_sense_len = sizeof(sts24->data);
  1445. } else {
  1446. if (scsi_status & SS_SENSE_LEN_VALID)
  1447. sense_len = le16_to_cpu(sts->req_sense_length);
  1448. if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
  1449. rsp_info_len = le16_to_cpu(sts->rsp_info_len);
  1450. resid_len = le32_to_cpu(sts->residual_length);
  1451. rsp_info = sts->rsp_info;
  1452. sense_data = sts->req_sense_data;
  1453. par_sense_len = sizeof(sts->req_sense_data);
  1454. }
  1455. /* Check for any FCP transport errors. */
  1456. if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
  1457. /* Sense data lies beyond any FCP RESPONSE data. */
  1458. if (IS_FWI2_CAPABLE(ha)) {
  1459. sense_data += rsp_info_len;
  1460. par_sense_len -= rsp_info_len;
  1461. }
  1462. if (rsp_info_len > 3 && rsp_info[3]) {
  1463. ql_dbg(ql_dbg_io, vha, 0x3019,
  1464. "FCP I/O protocol failure (0x%x/0x%x).\n",
  1465. rsp_info_len, rsp_info[3]);
  1466. cp->result = DID_BUS_BUSY << 16;
  1467. goto out;
  1468. }
  1469. }
  1470. /* Check for overrun. */
  1471. if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
  1472. scsi_status & SS_RESIDUAL_OVER)
  1473. comp_status = CS_DATA_OVERRUN;
  1474. /*
  1475. * Based on Host and scsi status generate status code for Linux
  1476. */
  1477. switch (comp_status) {
  1478. case CS_COMPLETE:
  1479. case CS_QUEUE_FULL:
  1480. if (scsi_status == 0) {
  1481. cp->result = DID_OK << 16;
  1482. break;
  1483. }
  1484. if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
  1485. resid = resid_len;
  1486. scsi_set_resid(cp, resid);
  1487. if (!lscsi_status &&
  1488. ((unsigned)(scsi_bufflen(cp) - resid) <
  1489. cp->underflow)) {
  1490. ql_dbg(ql_dbg_io, vha, 0x301a,
  1491. "Mid-layer underflow "
  1492. "detected (0x%x of 0x%x bytes).\n",
  1493. resid, scsi_bufflen(cp));
  1494. cp->result = DID_ERROR << 16;
  1495. break;
  1496. }
  1497. }
  1498. cp->result = DID_OK << 16 | lscsi_status;
  1499. if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
  1500. ql_dbg(ql_dbg_io, vha, 0x301b,
  1501. "QUEUE FULL detected.\n");
  1502. break;
  1503. }
  1504. logit = 0;
  1505. if (lscsi_status != SS_CHECK_CONDITION)
  1506. break;
  1507. memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
  1508. if (!(scsi_status & SS_SENSE_LEN_VALID))
  1509. break;
  1510. qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
  1511. rsp);
  1512. break;
  1513. case CS_DATA_UNDERRUN:
  1514. /* Use F/W calculated residual length. */
  1515. resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
  1516. scsi_set_resid(cp, resid);
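/*
 * If the firmware-computed residual disagrees with the residual the
 * target reported in the FCP response, frames were dropped on the
 * wire; fail the command instead of trusting either count.
 */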
  1517. if (scsi_status & SS_RESIDUAL_UNDER) {
  1518. if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
  1519. ql_dbg(ql_dbg_io, vha, 0x301d,
  1520. "Dropped frame(s) detected "
  1521. "(0x%x of 0x%x bytes).\n",
  1522. resid, scsi_bufflen(cp));
  1523. cp->result = DID_ERROR << 16 | lscsi_status;
  1524. goto check_scsi_status;
  1525. }
  1526. if (!lscsi_status &&
  1527. ((unsigned)(scsi_bufflen(cp) - resid) <
  1528. cp->underflow)) {
  1529. ql_dbg(ql_dbg_io, vha, 0x301e,
  1530. "Mid-layer underflow "
  1531. "detected (0x%x of 0x%x bytes).\n",
  1532. resid, scsi_bufflen(cp));
  1533. cp->result = DID_ERROR << 16;
  1534. break;
  1535. }
  1536. } else {
  1537. ql_dbg(ql_dbg_io, vha, 0x301f,
  1538. "Dropped frame(s) detected (0x%x "
  1539. "of 0x%x bytes).\n", resid, scsi_bufflen(cp));
  1540. cp->result = DID_ERROR << 16 | lscsi_status;
  1541. goto check_scsi_status;
  1542. }
  1543. cp->result = DID_OK << 16 | lscsi_status;
  1544. logit = 0;
  1545. check_scsi_status:
  1546. /*
  1547. * Check to see if SCSI Status is non zero. If so report SCSI
  1548. * Status.
  1549. */
  1550. if (lscsi_status != 0) {
  1551. if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
  1552. ql_dbg(ql_dbg_io, vha, 0x3020,
  1553. "QUEUE FULL detected.\n");
  1554. logit = 1;
  1555. break;
  1556. }
  1557. if (lscsi_status != SS_CHECK_CONDITION)
  1558. break;
  1559. memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
  1560. if (!(scsi_status & SS_SENSE_LEN_VALID))
  1561. break;
  1562. qla2x00_handle_sense(sp, sense_data, par_sense_len,
  1563. sense_len, rsp);
  1564. }
  1565. break;
  1566. case CS_PORT_LOGGED_OUT:
  1567. case CS_PORT_CONFIG_CHG:
  1568. case CS_PORT_BUSY:
  1569. case CS_INCOMPLETE:
  1570. case CS_PORT_UNAVAILABLE:
  1571. case CS_TIMEOUT:
  1572. case CS_RESET:
  1573. /*
  1574. * We are going to have the fc class block the rport
  1575. * while we try to recover so instruct the mid layer
  1576. * to requeue until the class decides how to handle this.
  1577. */
  1578. cp->result = DID_TRANSPORT_DISRUPTED << 16;
  1579. if (comp_status == CS_TIMEOUT) {
  1580. if (IS_FWI2_CAPABLE(ha))
  1581. break;
  1582. else if ((le16_to_cpu(sts->status_flags) &
  1583. SF_LOGOUT_SENT) == 0)
  1584. break;
  1585. }
  1586. ql_dbg(ql_dbg_io, vha, 0x3021,
  1587. "Port down status: port-state=0x%x.\n",
  1588. atomic_read(&fcport->state));
  1589. if (atomic_read(&fcport->state) == FCS_ONLINE)
  1590. qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
  1591. break;
  1592. case CS_ABORTED:
  1593. cp->result = DID_RESET << 16;
  1594. break;
  1595. case CS_DIF_ERROR:
  1596. logit = qla2x00_handle_dif_error(sp, sts24);
  1597. break;
  1598. default:
  1599. cp->result = DID_ERROR << 16;
  1600. break;
  1601. }
  1602. out:
  1603. if (logit)
  1604. ql_dbg(ql_dbg_io, vha, 0x3022,
  1605. "FCP command status: 0x%x-0x%x (0x%x) "
  1606. "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
  1607. "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
  1608. "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
  1609. comp_status, scsi_status, cp->result, vha->host_no,
  1610. cp->device->id, cp->device->lun, fcport->d_id.b.domain,
  1611. fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
  1612. cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
  1613. cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
  1614. cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
  1615. resid_len, fw_resid_len);
  1616. if (rsp->status_srb == NULL)
  1617. qla2x00_sp_compl(ha, sp);
  1618. }
  1619. /**
  1620. * qla2x00_status_cont_entry() - Process a Status Continuations entry.
1621. * @rsp: response queue
  1622. * @pkt: Entry pointer
  1623. *
  1624. * Extended sense data.
  1625. */
  1626. static void
  1627. qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
  1628. {
  1629. uint8_t sense_sz = 0;
  1630. struct qla_hw_data *ha = rsp->hw;
  1631. struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
  1632. srb_t *sp = rsp->status_srb;
  1633. struct scsi_cmnd *cp;
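/*
 * rsp->status_srb was parked by qla2x00_handle_sense() because the
 * sense data did not fit in the original status IOCB; copy the next
 * chunk here and complete the command once the full length arrives.
 */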
  1634. if (sp != NULL && sp->request_sense_length != 0) {
  1635. cp = sp->cmd;
  1636. if (cp == NULL) {
  1637. ql_log(ql_log_warn, vha, 0x3025,
  1638. "cmd is NULL: already returned to OS (sp=%p).\n",
  1639. sp);
  1640. rsp->status_srb = NULL;
  1641. return;
  1642. }
  1643. if (sp->request_sense_length > sizeof(pkt->data)) {
  1644. sense_sz = sizeof(pkt->data);
  1645. } else {
  1646. sense_sz = sp->request_sense_length;
  1647. }
  1648. /* Move sense data. */
  1649. if (IS_FWI2_CAPABLE(ha))
  1650. host_to_fcp_swap(pkt->data, sizeof(pkt->data));
  1651. memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
  1652. ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
  1653. sp->request_sense_ptr, sense_sz);
  1654. sp->request_sense_ptr += sense_sz;
  1655. sp->request_sense_length -= sense_sz;
  1656. /* Place command on done queue. */
  1657. if (sp->request_sense_length == 0) {
  1658. rsp->status_srb = NULL;
  1659. qla2x00_sp_compl(ha, sp);
  1660. }
  1661. }
  1662. }
  1663. static int
  1664. qla2x00_free_sp_ctx(scsi_qla_host_t *vha, srb_t *sp)
  1665. {
  1666. struct qla_hw_data *ha = vha->hw;
  1667. struct srb_ctx *ctx;
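/*
 * Returns 1 when the srb has no ctx (a normal SCSI command) and must
 * still be completed by the caller; ctx-based IOCBs (login, logout,
 * TM, ADISC, ELS/CT pass-through) are finished here and return 0.
 */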
  1668. if (!sp->ctx)
  1669. return 1;
  1670. ctx = sp->ctx;
  1671. if (ctx->type == SRB_LOGIN_CMD ||
  1672. ctx->type == SRB_LOGOUT_CMD ||
  1673. ctx->type == SRB_TM_CMD) {
  1674. ctx->u.iocb_cmd->done(sp);
  1675. return 0;
  1676. } else if (ctx->type == SRB_ADISC_CMD) {
  1677. ctx->u.iocb_cmd->free(sp);
  1678. return 0;
  1679. } else {
  1680. struct fc_bsg_job *bsg_job;
  1681. bsg_job = ctx->u.bsg_job;
  1682. if (ctx->type == SRB_ELS_CMD_HST ||
  1683. ctx->type == SRB_CT_CMD)
  1684. kfree(sp->fcport);
  1685. bsg_job->reply->reply_data.ctels_reply.status =
  1686. FC_CTELS_STATUS_OK;
  1687. bsg_job->reply->result = DID_ERROR << 16;
  1688. bsg_job->reply->reply_payload_rcv_len = 0;
  1689. kfree(sp->ctx);
  1690. mempool_free(sp, ha->srb_mempool);
  1691. bsg_job->job_done(bsg_job);
  1692. return 0;
  1693. }
  1694. return 1;
  1695. }
  1696. /**
  1697. * qla2x00_error_entry() - Process an error entry.
1698. * @vha: SCSI driver HA context
  1699. * @pkt: Entry pointer
  1700. */
  1701. static void
  1702. qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
  1703. {
  1704. srb_t *sp;
  1705. struct qla_hw_data *ha = vha->hw;
  1706. const char func[] = "ERROR-IOCB";
  1707. uint16_t que = MSW(pkt->handle);
  1708. struct req_que *req = ha->req_q_map[que];
  1709. if (pkt->entry_status & RF_INV_E_ORDER)
  1710. ql_dbg(ql_dbg_async, vha, 0x502a,
  1711. "Invalid Entry Order.\n");
  1712. else if (pkt->entry_status & RF_INV_E_COUNT)
  1713. ql_dbg(ql_dbg_async, vha, 0x502b,
  1714. "Invalid Entry Count.\n");
  1715. else if (pkt->entry_status & RF_INV_E_PARAM)
  1716. ql_dbg(ql_dbg_async, vha, 0x502c,
  1717. "Invalid Entry Parameter.\n");
  1718. else if (pkt->entry_status & RF_INV_E_TYPE)
  1719. ql_dbg(ql_dbg_async, vha, 0x502d,
  1720. "Invalid Entry Type.\n");
  1721. else if (pkt->entry_status & RF_BUSY)
  1722. ql_dbg(ql_dbg_async, vha, 0x502e,
  1723. "Busy.\n");
  1724. else
  1725. ql_dbg(ql_dbg_async, vha, 0x502f,
  1726. "UNKNOWN flag error.\n");
  1727. sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
  1728. if (sp) {
  1729. if (qla2x00_free_sp_ctx(vha, sp)) {
  1730. if (pkt->entry_status &
  1731. (RF_INV_E_ORDER | RF_INV_E_COUNT |
  1732. RF_INV_E_PARAM | RF_INV_E_TYPE)) {
  1733. sp->cmd->result = DID_ERROR << 16;
  1734. } else if (pkt->entry_status & RF_BUSY) {
  1735. sp->cmd->result = DID_BUS_BUSY << 16;
  1736. } else {
  1737. sp->cmd->result = DID_ERROR << 16;
  1738. }
  1739. qla2x00_sp_compl(ha, sp);
  1740. }
  1741. } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
  1742. COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
  1743. || pkt->entry_type == COMMAND_TYPE_6) {
  1744. ql_log(ql_log_warn, vha, 0x5030,
  1745. "Error entry - invalid handle.\n");
  1746. if (IS_QLA82XX(ha))
  1747. set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
  1748. else
  1749. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  1750. qla2xxx_wake_dpc(vha);
  1751. }
  1752. }
  1753. /**
  1754. * qla24xx_mbx_completion() - Process mailbox command completions.
1755. * @vha: SCSI driver HA context
  1756. * @mb0: Mailbox0 register
  1757. */
  1758. static void
  1759. qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
  1760. {
  1761. uint16_t cnt;
  1762. uint32_t mboxes;
  1763. uint16_t __iomem *wptr;
  1764. struct qla_hw_data *ha = vha->hw;
  1765. struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
  1766. /* Read all mbox registers? */
  1767. mboxes = (1 << ha->mbx_count) - 1;
  1768. if (!ha->mcp)
1769. ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
  1770. else
  1771. mboxes = ha->mcp->in_mb;
  1772. /* Load return mailbox registers. */
  1773. ha->flags.mbox_int = 1;
  1774. ha->mailbox_out[0] = mb0;
  1775. mboxes >>= 1;
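/*
 * mailbox0 already arrived in the upper half of the host_status word,
 * so copying starts at mailbox1; each set bit in 'mboxes' selects a
 * mailbox register worth reading back.
 */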
  1776. wptr = (uint16_t __iomem *)&reg->mailbox1;
  1777. for (cnt = 1; cnt < ha->mbx_count; cnt++) {
  1778. if (mboxes & BIT_0)
  1779. ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
  1780. mboxes >>= 1;
  1781. wptr++;
  1782. }
  1783. }
  1784. /**
  1785. * qla24xx_process_response_queue() - Process response queue entries.
1786. * @vha: SCSI driver HA context
  1787. */
  1788. void qla24xx_process_response_queue(struct scsi_qla_host *vha,
  1789. struct rsp_que *rsp)
  1790. {
  1791. struct sts_entry_24xx *pkt;
  1792. struct qla_hw_data *ha = vha->hw;
  1793. if (!vha->flags.online)
  1794. return;
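/*
 * Consume entries until one still marked RESPONSE_PROCESSED is found;
 * the firmware overwrites that signature when it posts a new entry and
 * the driver restores it below once the entry has been handled.
 */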
  1795. while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
  1796. pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
  1797. rsp->ring_index++;
  1798. if (rsp->ring_index == rsp->length) {
  1799. rsp->ring_index = 0;
  1800. rsp->ring_ptr = rsp->ring;
  1801. } else {
  1802. rsp->ring_ptr++;
  1803. }
  1804. if (pkt->entry_status != 0) {
  1805. qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
  1806. ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
  1807. wmb();
  1808. continue;
  1809. }
  1810. switch (pkt->entry_type) {
  1811. case STATUS_TYPE:
  1812. qla2x00_status_entry(vha, rsp, pkt);
  1813. break;
  1814. case STATUS_CONT_TYPE:
  1815. qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
  1816. break;
  1817. case VP_RPT_ID_IOCB_TYPE:
  1818. qla24xx_report_id_acquisition(vha,
  1819. (struct vp_rpt_id_entry_24xx *)pkt);
  1820. break;
  1821. case LOGINOUT_PORT_IOCB_TYPE:
  1822. qla24xx_logio_entry(vha, rsp->req,
  1823. (struct logio_entry_24xx *)pkt);
  1824. break;
  1825. case TSK_MGMT_IOCB_TYPE:
  1826. qla24xx_tm_iocb_entry(vha, rsp->req,
  1827. (struct tsk_mgmt_entry *)pkt);
  1828. break;
  1829. case CT_IOCB_TYPE:
  1830. qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
  1831. break;
  1832. case ELS_IOCB_TYPE:
  1833. qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
  1834. break;
  1835. case MARKER_TYPE:
  1836. /* Do nothing in this case, this check is to prevent it
  1837. * from falling into default case
  1838. */
  1839. break;
  1840. default:
  1841. /* Type Not Supported. */
  1842. ql_dbg(ql_dbg_async, vha, 0x5042,
  1843. "Received unknown response pkt type %x "
  1844. "entry status=%x.\n",
  1845. pkt->entry_type, pkt->entry_status);
  1846. break;
  1847. }
  1848. ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
  1849. wmb();
  1850. }
  1851. /* Adjust ring index */
  1852. if (IS_QLA82XX(ha)) {
  1853. struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
  1854. WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
  1855. } else
  1856. WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
  1857. }
  1858. static void
  1859. qla2xxx_check_risc_status(scsi_qla_host_t *vha)
  1860. {
  1861. int rval;
  1862. uint32_t cnt;
  1863. struct qla_hw_data *ha = vha->hw;
  1864. struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
  1865. if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
  1866. return;
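/*
 * Select register window 0x7C00 through iobase_addr, handshake on
 * BIT_0 of iobase_window, and log the additional 0x55AA code when
 * BIT_3 of iobase_c8 is set.
 */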
  1867. rval = QLA_SUCCESS;
  1868. WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
  1869. RD_REG_DWORD(&reg->iobase_addr);
  1870. WRT_REG_DWORD(&reg->iobase_window, 0x0001);
  1871. for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
  1872. rval == QLA_SUCCESS; cnt--) {
  1873. if (cnt) {
  1874. WRT_REG_DWORD(&reg->iobase_window, 0x0001);
  1875. udelay(10);
  1876. } else
  1877. rval = QLA_FUNCTION_TIMEOUT;
  1878. }
  1879. if (rval == QLA_SUCCESS)
  1880. goto next_test;
  1881. WRT_REG_DWORD(&reg->iobase_window, 0x0003);
  1882. for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
  1883. rval == QLA_SUCCESS; cnt--) {
  1884. if (cnt) {
  1885. WRT_REG_DWORD(&reg->iobase_window, 0x0003);
  1886. udelay(10);
  1887. } else
  1888. rval = QLA_FUNCTION_TIMEOUT;
  1889. }
  1890. if (rval != QLA_SUCCESS)
  1891. goto done;
  1892. next_test:
  1893. if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
  1894. ql_log(ql_log_info, vha, 0x504c,
  1895. "Additional code -- 0x55AA.\n");
  1896. done:
  1897. WRT_REG_DWORD(&reg->iobase_window, 0x0000);
  1898. RD_REG_DWORD(&reg->iobase_window);
  1899. }
  1900. /**
1901. * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
  1902. * @irq:
  1903. * @dev_id: SCSI driver HA context
  1904. *
  1905. * Called by system whenever the host adapter generates an interrupt.
  1906. *
  1907. * Returns handled flag.
  1908. */
  1909. irqreturn_t
  1910. qla24xx_intr_handler(int irq, void *dev_id)
  1911. {
  1912. scsi_qla_host_t *vha;
  1913. struct qla_hw_data *ha;
  1914. struct device_reg_24xx __iomem *reg;
  1915. int status;
  1916. unsigned long iter;
  1917. uint32_t stat;
  1918. uint32_t hccr;
  1919. uint16_t mb[4];
  1920. struct rsp_que *rsp;
  1921. unsigned long flags;
  1922. rsp = (struct rsp_que *) dev_id;
  1923. if (!rsp) {
  1924. printk(KERN_INFO
  1925. "%s(): NULL response queue pointer.\n", __func__);
  1926. return IRQ_NONE;
  1927. }
  1928. ha = rsp->hw;
  1929. reg = &ha->iobase->isp24;
  1930. status = 0;
  1931. if (unlikely(pci_channel_offline(ha->pdev)))
  1932. return IRQ_HANDLED;
  1933. spin_lock_irqsave(&ha->hardware_lock, flags);
  1934. vha = pci_get_drvdata(ha->pdev);
  1935. for (iter = 50; iter--; ) {
  1936. stat = RD_REG_DWORD(&reg->host_status);
  1937. if (stat & HSRX_RISC_PAUSED) {
  1938. if (unlikely(pci_channel_offline(ha->pdev)))
  1939. break;
  1940. hccr = RD_REG_DWORD(&reg->hccr);
  1941. ql_log(ql_log_warn, vha, 0x504b,
  1942. "RISC paused -- HCCR=%x, Dumping firmware.\n",
  1943. hccr);
  1944. qla2xxx_check_risc_status(vha);
  1945. ha->isp_ops->fw_dump(vha, 1);
  1946. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  1947. break;
  1948. } else if ((stat & HSRX_RISC_INT) == 0)
  1949. break;
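/*
 * The low byte of host_status identifies the interrupt source:
 * 0x1/0x2/0x10/0x11 are mailbox command completions, 0x12 is an
 * asynchronous event (payload in the mailbox registers), and
 * 0x13/0x14 indicate response queue updates.
 */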
  1950. switch (stat & 0xff) {
  1951. case 0x1:
  1952. case 0x2:
  1953. case 0x10:
  1954. case 0x11:
  1955. qla24xx_mbx_completion(vha, MSW(stat));
  1956. status |= MBX_INTERRUPT;
  1957. break;
  1958. case 0x12:
  1959. mb[0] = MSW(stat);
  1960. mb[1] = RD_REG_WORD(&reg->mailbox1);
  1961. mb[2] = RD_REG_WORD(&reg->mailbox2);
  1962. mb[3] = RD_REG_WORD(&reg->mailbox3);
  1963. qla2x00_async_event(vha, rsp, mb);
  1964. break;
  1965. case 0x13:
  1966. case 0x14:
  1967. qla24xx_process_response_queue(vha, rsp);
  1968. break;
  1969. default:
  1970. ql_dbg(ql_dbg_async, vha, 0x504f,
  1971. "Unrecognized interrupt type (%d).\n", stat * 0xff);
  1972. break;
  1973. }
  1974. WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
  1975. RD_REG_DWORD_RELAXED(&reg->hccr);
  1976. }
  1977. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  1978. if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
  1979. (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
  1980. set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
  1981. complete(&ha->mbx_intr_comp);
  1982. }
  1983. return IRQ_HANDLED;
  1984. }
  1985. static irqreturn_t
  1986. qla24xx_msix_rsp_q(int irq, void *dev_id)
  1987. {
  1988. struct qla_hw_data *ha;
  1989. struct rsp_que *rsp;
  1990. struct device_reg_24xx __iomem *reg;
  1991. struct scsi_qla_host *vha;
  1992. unsigned long flags;
  1993. rsp = (struct rsp_que *) dev_id;
  1994. if (!rsp) {
  1995. printk(KERN_INFO
  1996. "%s(): NULL response queue pointer.\n", __func__);
  1997. return IRQ_NONE;
  1998. }
  1999. ha = rsp->hw;
  2000. reg = &ha->iobase->isp24;
  2001. spin_lock_irqsave(&ha->hardware_lock, flags);
  2002. vha = pci_get_drvdata(ha->pdev);
  2003. qla24xx_process_response_queue(vha, rsp);
  2004. if (!ha->flags.disable_msix_handshake) {
  2005. WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
  2006. RD_REG_DWORD_RELAXED(&reg->hccr);
  2007. }
  2008. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  2009. return IRQ_HANDLED;
  2010. }
  2011. static irqreturn_t
  2012. qla25xx_msix_rsp_q(int irq, void *dev_id)
  2013. {
  2014. struct qla_hw_data *ha;
  2015. struct rsp_que *rsp;
  2016. struct device_reg_24xx __iomem *reg;
  2017. unsigned long flags;
  2018. rsp = (struct rsp_que *) dev_id;
  2019. if (!rsp) {
  2020. printk(KERN_INFO
  2021. "%s(): NULL response queue pointer.\n", __func__);
  2022. return IRQ_NONE;
  2023. }
  2024. ha = rsp->hw;
  2025. /* Clear the interrupt, if enabled, for this response queue */
  2026. if (!ha->flags.disable_msix_handshake) {
  2027. reg = &ha->iobase->isp24;
  2028. spin_lock_irqsave(&ha->hardware_lock, flags);
  2029. WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
  2030. RD_REG_DWORD_RELAXED(&reg->hccr);
  2031. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  2032. }
  2033. queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
  2034. return IRQ_HANDLED;
  2035. }
  2036. static irqreturn_t
  2037. qla24xx_msix_default(int irq, void *dev_id)
  2038. {
  2039. scsi_qla_host_t *vha;
  2040. struct qla_hw_data *ha;
  2041. struct rsp_que *rsp;
  2042. struct device_reg_24xx __iomem *reg;
  2043. int status;
  2044. uint32_t stat;
  2045. uint32_t hccr;
  2046. uint16_t mb[4];
  2047. unsigned long flags;
  2048. rsp = (struct rsp_que *) dev_id;
  2049. if (!rsp) {
  2050. printk(KERN_INFO
  2051. "%s(): NULL response queue pointer.\n", __func__);
  2052. return IRQ_NONE;
  2053. }
  2054. ha = rsp->hw;
  2055. reg = &ha->iobase->isp24;
  2056. status = 0;
  2057. spin_lock_irqsave(&ha->hardware_lock, flags);
  2058. vha = pci_get_drvdata(ha->pdev);
  2059. do {
  2060. stat = RD_REG_DWORD(&reg->host_status);
  2061. if (stat & HSRX_RISC_PAUSED) {
  2062. if (unlikely(pci_channel_offline(ha->pdev)))
  2063. break;
  2064. hccr = RD_REG_DWORD(&reg->hccr);
  2065. ql_log(ql_log_info, vha, 0x5050,
  2066. "RISC paused -- HCCR=%x, Dumping firmware.\n",
  2067. hccr);
  2068. qla2xxx_check_risc_status(vha);
  2069. ha->isp_ops->fw_dump(vha, 1);
  2070. set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
  2071. break;
  2072. } else if ((stat & HSRX_RISC_INT) == 0)
  2073. break;
  2074. switch (stat & 0xff) {
  2075. case 0x1:
  2076. case 0x2:
  2077. case 0x10:
  2078. case 0x11:
  2079. qla24xx_mbx_completion(vha, MSW(stat));
  2080. status |= MBX_INTERRUPT;
  2081. break;
  2082. case 0x12:
  2083. mb[0] = MSW(stat);
  2084. mb[1] = RD_REG_WORD(&reg->mailbox1);
  2085. mb[2] = RD_REG_WORD(&reg->mailbox2);
  2086. mb[3] = RD_REG_WORD(&reg->mailbox3);
  2087. qla2x00_async_event(vha, rsp, mb);
  2088. break;
  2089. case 0x13:
  2090. case 0x14:
  2091. qla24xx_process_response_queue(vha, rsp);
  2092. break;
  2093. default:
  2094. ql_dbg(ql_dbg_async, vha, 0x5051,
  2095. "Unrecognized interrupt type (%d).\n", stat & 0xff);
  2096. break;
  2097. }
  2098. WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
  2099. } while (0);
  2100. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  2101. if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
  2102. (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
  2103. set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
  2104. complete(&ha->mbx_intr_comp);
  2105. }
  2106. return IRQ_HANDLED;
  2107. }
  2108. /* Interrupt handling helpers. */
  2109. struct qla_init_msix_entry {
  2110. const char *name;
  2111. irq_handler_t handler;
  2112. };
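/*
 * Vector 0 ("default") services mailbox completions, async events and
 * the base response queue; vector 1 ("rsp_q") services only the base
 * response queue; the third slot ("multiq") is registered by
 * qla25xx_request_irq() for additional response queues.
 */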
  2113. static struct qla_init_msix_entry msix_entries[3] = {
  2114. { "qla2xxx (default)", qla24xx_msix_default },
  2115. { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
  2116. { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
  2117. };
  2118. static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
  2119. { "qla2xxx (default)", qla82xx_msix_default },
  2120. { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
  2121. };
  2122. static void
  2123. qla24xx_disable_msix(struct qla_hw_data *ha)
  2124. {
  2125. int i;
  2126. struct qla_msix_entry *qentry;
  2127. scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
  2128. for (i = 0; i < ha->msix_count; i++) {
  2129. qentry = &ha->msix_entries[i];
  2130. if (qentry->have_irq)
  2131. free_irq(qentry->vector, qentry->rsp);
  2132. }
  2133. pci_disable_msix(ha->pdev);
  2134. kfree(ha->msix_entries);
  2135. ha->msix_entries = NULL;
  2136. ha->flags.msix_enabled = 0;
  2137. ql_dbg(ql_dbg_init, vha, 0x0042,
  2138. "Disabled the MSI.\n");
  2139. }
  2140. static int
  2141. qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
  2142. {
  2143. #define MIN_MSIX_COUNT 2
  2144. int i, ret;
  2145. struct msix_entry *entries;
  2146. struct qla_msix_entry *qentry;
  2147. scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
  2148. entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
  2149. GFP_KERNEL);
  2150. if (!entries) {
  2151. ql_log(ql_log_warn, vha, 0x00bc,
  2152. "Failed to allocate memory for msix_entry.\n");
  2153. return -ENOMEM;
  2154. }
  2155. for (i = 0; i < ha->msix_count; i++)
  2156. entries[i].entry = i;
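/*
 * With the legacy pci_enable_msix() interface a positive return value
 * is the number of vectors the platform could allocate; retry with
 * that smaller count unless it falls below MIN_MSIX_COUNT.
 */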
  2157. ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
  2158. if (ret) {
  2159. if (ret < MIN_MSIX_COUNT)
  2160. goto msix_failed;
  2161. ql_log(ql_log_warn, vha, 0x00c6,
  2162. "MSI-X: Failed to enable support "
  2163. "-- %d/%d\n Retry with %d vectors.\n",
  2164. ha->msix_count, ret, ret);
  2165. ha->msix_count = ret;
  2166. ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
  2167. if (ret) {
  2168. msix_failed:
  2169. ql_log(ql_log_fatal, vha, 0x00c7,
  2170. "MSI-X: Failed to enable support, "
  2171. "giving up -- %d/%d.\n",
  2172. ha->msix_count, ret);
  2173. goto msix_out;
  2174. }
  2175. ha->max_rsp_queues = ha->msix_count - 1;
  2176. }
  2177. ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
  2178. ha->msix_count, GFP_KERNEL);
  2179. if (!ha->msix_entries) {
  2180. ql_log(ql_log_fatal, vha, 0x00c8,
  2181. "Failed to allocate memory for ha->msix_entries.\n");
  2182. ret = -ENOMEM;
  2183. goto msix_out;
  2184. }
  2185. ha->flags.msix_enabled = 1;
  2186. for (i = 0; i < ha->msix_count; i++) {
  2187. qentry = &ha->msix_entries[i];
  2188. qentry->vector = entries[i].vector;
  2189. qentry->entry = entries[i].entry;
  2190. qentry->have_irq = 0;
  2191. qentry->rsp = NULL;
  2192. }
  2193. /* Enable MSI-X vectors for the base queue */
  2194. for (i = 0; i < 2; i++) {
  2195. qentry = &ha->msix_entries[i];
  2196. if (IS_QLA82XX(ha)) {
  2197. ret = request_irq(qentry->vector,
  2198. qla82xx_msix_entries[i].handler,
  2199. 0, qla82xx_msix_entries[i].name, rsp);
  2200. } else {
  2201. ret = request_irq(qentry->vector,
  2202. msix_entries[i].handler,
  2203. 0, msix_entries[i].name, rsp);
  2204. }
  2205. if (ret) {
  2206. ql_log(ql_log_fatal, vha, 0x00cb,
  2207. "MSI-X: unable to register handler -- %x/%d.\n",
  2208. qentry->vector, ret);
  2209. qla24xx_disable_msix(ha);
  2210. ha->mqenable = 0;
  2211. goto msix_out;
  2212. }
  2213. qentry->have_irq = 1;
  2214. qentry->rsp = rsp;
  2215. rsp->msix = qentry;
  2216. }
  2217. /* Enable MSI-X vector for response queue update for queue 0 */
  2218. if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
  2219. ha->mqenable = 1;
  2220. ql_dbg(ql_dbg_multiq, vha, 0xc005,
  2221. "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
  2222. ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
  2223. ql_dbg(ql_dbg_init, vha, 0x0055,
  2224. "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
  2225. ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
  2226. msix_out:
  2227. kfree(entries);
  2228. return ret;
  2229. }
  2230. int
  2231. qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
  2232. {
  2233. int ret;
  2234. device_reg_t __iomem *reg = ha->iobase;
  2235. scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
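/*
 * Interrupt setup tries MSI-X first, falls back to MSI, and finally
 * to a (possibly shared) legacy INTx line.
 */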
  2236. /* If possible, enable MSI-X. */
  2237. if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
  2238. !IS_QLA8432(ha) && !IS_QLA8XXX_TYPE(ha))
  2239. goto skip_msi;
  2240. if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
  2241. (ha->pdev->subsystem_device == 0x7040 ||
  2242. ha->pdev->subsystem_device == 0x7041 ||
  2243. ha->pdev->subsystem_device == 0x1705)) {
  2244. ql_log(ql_log_warn, vha, 0x0034,
  2245. "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
  2246. ha->pdev->subsystem_vendor,
  2247. ha->pdev->subsystem_device);
  2248. goto skip_msi;
  2249. }
  2250. if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
  2251. ql_log(ql_log_warn, vha, 0x0035,
  2252. "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
  2253. ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
  2254. goto skip_msix;
  2255. }
  2256. ret = qla24xx_enable_msix(ha, rsp);
  2257. if (!ret) {
  2258. ql_dbg(ql_dbg_init, vha, 0x0036,
  2259. "MSI-X: Enabled (0x%X, 0x%X).\n",
  2260. ha->chip_revision, ha->fw_attributes);
  2261. goto clear_risc_ints;
  2262. }
  2263. ql_log(ql_log_info, vha, 0x0037,
  2264. "MSI-X Falling back-to MSI mode -%d.\n", ret);
  2265. skip_msix:
  2266. if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
  2267. !IS_QLA8001(ha))
  2268. goto skip_msi;
  2269. ret = pci_enable_msi(ha->pdev);
  2270. if (!ret) {
  2271. ql_dbg(ql_dbg_init, vha, 0x0038,
  2272. "MSI: Enabled.\n");
  2273. ha->flags.msi_enabled = 1;
  2274. } else
  2275. ql_log(ql_log_warn, vha, 0x0039,
  2276. "MSI-X; Falling back-to INTa mode -- %d.\n", ret);
  2277. skip_msi:
  2278. ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
  2279. ha->flags.msi_enabled ? 0 : IRQF_SHARED,
  2280. QLA2XXX_DRIVER_NAME, rsp);
  2281. if (ret) {
  2282. ql_log(ql_log_warn, vha, 0x003a,
  2283. "Failed to reserve interrupt %d already in use.\n",
  2284. ha->pdev->irq);
  2285. goto fail;
  2286. }
  2287. clear_risc_ints:
  2288. /*
  2289. * FIXME: Noted that 8014s were being dropped during NK testing.
  2290. * Timing deltas during MSI-X/INTa transitions?
  2291. */
  2292. if (IS_QLA81XX(ha) || IS_QLA82XX(ha))
  2293. goto fail;
  2294. spin_lock_irq(&ha->hardware_lock);
  2295. if (IS_FWI2_CAPABLE(ha)) {
  2296. WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
  2297. WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
  2298. } else {
  2299. WRT_REG_WORD(&reg->isp.semaphore, 0);
  2300. WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
  2301. WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
  2302. }
  2303. spin_unlock_irq(&ha->hardware_lock);
  2304. fail:
  2305. return ret;
  2306. }
  2307. void
  2308. qla2x00_free_irqs(scsi_qla_host_t *vha)
  2309. {
  2310. struct qla_hw_data *ha = vha->hw;
  2311. struct rsp_que *rsp = ha->rsp_q_map[0];
  2312. if (ha->flags.msix_enabled)
  2313. qla24xx_disable_msix(ha);
  2314. else if (ha->flags.msi_enabled) {
  2315. free_irq(ha->pdev->irq, rsp);
  2316. pci_disable_msi(ha->pdev);
  2317. } else
  2318. free_irq(ha->pdev->irq, rsp);
  2319. }
  2320. int qla25xx_request_irq(struct rsp_que *rsp)
  2321. {
  2322. struct qla_hw_data *ha = rsp->hw;
  2323. struct qla_init_msix_entry *intr = &msix_entries[2];
  2324. struct qla_msix_entry *msix = rsp->msix;
  2325. scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
  2326. int ret;
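/*
 * Additional response queues register the third msix_entries slot
 * ("multiq", qla25xx_msix_rsp_q) on their own MSI-X vector.
 */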
  2327. ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
  2328. if (ret) {
  2329. ql_log(ql_log_fatal, vha, 0x00e6,
  2330. "MSI-X: Unable to register handler -- %x/%d.\n",
  2331. msix->vector, ret);
  2332. return ret;
  2333. }
  2334. msix->have_irq = 1;
  2335. msix->rsp = rsp;
  2336. return ret;
  2337. }