/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2005 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <scsi/scsi_tcq.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_async_event(scsi_qla_host_t *, uint16_t *);
static void qla2x00_process_completed_request(struct scsi_qla_host *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, void *);
static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *);
static void qla2x00_ms_entry(scsi_qla_host_t *, ms_iocb_entry_t *);
static void qla24xx_ms_entry(scsi_qla_host_t *, struct ct_entry_24xx *);

/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *ha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long flags;
	unsigned long iter;
	uint16_t mb[4];

	ha = (scsi_qla_host_t *) dev_id;
	if (!ha) {
		printk(KERN_INFO
		    "%s(): NULL host pointer\n", __func__);
		return (IRQ_NONE);
	}

	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (iter = 50; iter--; ) {
		if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;
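
		/*
		 * Semaphore bit 0 set: the RISC has posted mailbox data
		 * (a mailbox command completion or an asynchronous event).
		 * Clear: response queue entries are pending instead.
		 */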
		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(ha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(ha, mb);
			} else {
				/*EMPTY*/
				DEBUG2(printk("scsi(%ld): Unrecognized "
				    "interrupt type (%d).\n",
				    ha->host_no, mb[0]));
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(ha);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
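
	/*
	 * If a mailbox command is waiting for this interrupt, flag the
	 * completion and wake the waiter sleeping on mbx_intr_sem.
	 */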
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		spin_lock_irqsave(&ha->mbx_reg_lock, flags);
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		up(&ha->mbx_intr_sem);
		spin_unlock_irqrestore(&ha->mbx_reg_lock, flags);
	}

	return (IRQ_HANDLED);
}

/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *ha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long flags;
	unsigned long iter;
	uint32_t stat;
	uint16_t hccr;
	uint16_t mb[4];

	ha = (scsi_qla_host_t *) dev_id;
	if (!ha) {
		printk(KERN_INFO
		    "%s(): NULL host pointer\n", __func__);
		return (IRQ_NONE);
	}

	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED) {
			hccr = RD_REG_WORD(&reg->hccr);
			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				qla_printk(KERN_INFO, ha,
				    "Parity error -- HCCR=%x.\n", hccr);
			else
				qla_printk(KERN_INFO, ha,
				    "RISC paused -- HCCR=%x.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;
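
		/*
		 * The low byte of host_status identifies the interrupt
		 * source; for mailbox and asynchronous-event interrupts the
		 * upper word (MSW(stat)) carries the mailbox 0 value.
		 */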
		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(ha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(ha, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(ha);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(ha, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(ha, mb);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
			    "(%d).\n",
			    ha->host_no, stat & 0xff));
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		spin_lock_irqsave(&ha->mbx_reg_lock, flags);
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		up(&ha->mbx_intr_sem);
		spin_unlock_irqrestore(&ha->mbx_reg_lock, flags);
	}

	return (IRQ_HANDLED);
}

/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
{
	uint16_t cnt;
	uint16_t __iomem *wptr;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
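
	/*
	 * Mailboxes 4 and 5 are read through the debounce helper to guard
	 * against transient values; on the ISP2200 the pointer is re-seeded
	 * at cnt == 8 because its upper mailbox registers are not contiguous
	 * with mailbox 1.
	 */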
	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if (cnt == 4 || cnt == 5)
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
	}

	if (ha->mcp) {
		DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
		    __func__, ha->host_no, ha->mcp->mb[0]));
	} else {
		DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
		    __func__, ha->host_no));
	}
}

/**
 * qla2x00_async_event() - Process asynchronous events.
 * @ha: SCSI driver HA context
 * @mb: Mailbox registers (0 - 3)
 */
static void
qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
{
#define LS_UNKNOWN	2
	static char *link_speeds[5] = { "1", "2", "?", "4", "10" };
	char *link_speed;
	uint16_t handle_cnt;
	uint16_t cnt;
	uint32_t handles[5];
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t rscn_entry, host_pid;
	uint8_t rscn_queue_index;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
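
	/*
	 * Decode RIO completions: the 16-bit variants pack one to five SRB
	 * handles into the mailboxes, and every variant is normalized to
	 * MBA_SCSI_COMPLETION for the event switch below.
	 */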
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}

	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!ha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(ha, handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", ha->host_no));

		set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mb[1] = RD_MAILBOX_REG(ha, reg, 1);
		mb[2] = RD_MAILBOX_REG(ha, reg, 2);
		mb[3] = RD_MAILBOX_REG(ha, reg, 3);

		qla_printk(KERN_INFO, ha,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
		    mb[1], mb[2], mb[3]);

		ha->isp_ops.fw_dump(ha, 1);

		if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				qla_printk(KERN_ERR, ha,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				ha->flags.online = 0;
			} else
				set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		} else if (mb[1] == 0) {
			qla_printk(KERN_INFO, ha,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			ha->flags.online = 0;
		} else
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
		    ha->host_no));
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no,
		    mb[1]));
		qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);

		ha->flags.management_server_logged_in = 0;
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
			link_speed = link_speeds[0];
			ha->link_data_rate = PORT_SPEED_1GB;
		} else {
			link_speed = link_speeds[LS_UNKNOWN];
			if (mb[1] < 5)
				link_speed = link_speeds[mb[1]];
			ha->link_data_rate = mb[1];
		}

		DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
		    ha->host_no, link_speed));
		qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
		    link_speed);

		ha->flags.management_server_logged_in = 0;
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN (%x).\n",
		    ha->host_no, mb[1]));
		qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x).\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
			ha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		ha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		if (ql2xfdmienable)
			set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
		    ha->host_no, mb[1]));
		qla_printk(KERN_INFO, ha,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);

		ha->operating_mode = LOOP;
		ha->flags.management_server_logged_in = 0;
		break;

	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		if (IS_QLA2100(ha))
			break;

		DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
		    ha->host_no));

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			if (!atomic_read(&ha->loop_down_timer))
				atomic_set(&ha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) {
			set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
		}
		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);

		ha->flags.gpsc_supported = 1;
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
		    "received.\n",
		    ha->host_no));
		qla_printk(KERN_INFO, ha,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			if (!atomic_read(&ha->loop_down_timer))
				atomic_set(&ha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and wait for RSCN to come in.
		 */
		atomic_set(&ha->loop_down_timer, 0);
		if (atomic_read(&ha->loop_state) != LOOP_DOWN &&
		    atomic_read(&ha->loop_state) != LOOP_DEAD) {
			DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
			    "ignored %04x/%04x/%04x.\n", ha->host_no, mb[1],
			    mb[2], mb[3]));
			break;
		}

		DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
		    ha->host_no));
		DEBUG(printk(KERN_INFO
		    "scsi(%ld): Port database changed %04x %04x %04x.\n",
		    ha->host_no, mb[1], mb[2], mb[3]));

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&ha->loop_state, LOOP_UP);

		qla2x00_mark_all_devices_lost(ha, 1);

		ha->flags.rscn_queue_overflow = 1;

		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
		DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
		    ha->host_no));
		DEBUG(printk(KERN_INFO
		    "scsi(%ld): RSCN database changed -- %04x %04x.\n",
		    ha->host_no, mb[1], mb[2]));

		rscn_entry = (mb[1] << 16) | mb[2];
		host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) |
		    ha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			DEBUG(printk(KERN_INFO
			    "scsi(%ld): Ignoring RSCN update to local host "
			    "port ID (%06x)\n",
			    ha->host_no, host_pid));
			break;
		}
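
		/*
		 * Queue the RSCN entry in the driver's circular RSCN queue;
		 * if the queue is full, flag an overflow instead.
		 */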
		rscn_queue_index = ha->rscn_in_ptr + 1;
		if (rscn_queue_index == MAX_RSCN_COUNT)
			rscn_queue_index = 0;
		if (rscn_queue_index != ha->rscn_out_ptr) {
			ha->rscn_queue[ha->rscn_in_ptr] = rscn_entry;
			ha->rscn_in_ptr = rscn_queue_index;
		} else {
			ha->flags.rscn_queue_overflow = 1;
		}

		atomic_set(&ha->loop_state, LOOP_UPDATE);
		atomic_set(&ha->loop_down_timer, 0);
		ha->flags.management_server_logged_in = 0;

		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(RSCN_UPDATE, &ha->dpc_flags);
		break;

	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n",
		    ha->host_no));
		DEBUG(printk(KERN_INFO
		    "scsi(%ld): [R|Z]IO update completion.\n",
		    ha->host_no));

		if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
			qla24xx_process_response_queue(ha);
		else
			qla2x00_process_response_queue(ha);
		break;

	case MBA_DISCARD_RND_FRAME:
		DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
		    "%04x.\n", ha->host_no, mb[1], mb[2], mb[3]));
		break;

	case MBA_TRACE_NOTIFICATION:
		DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
		    ha->host_no, mb[1], mb[2]));
		break;
	}
}

static void
qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
{
	fc_port_t *fcport = data;

	if (fcport->ha->max_q_depth <= sdev->queue_depth)
		return;

	if (sdev->ordered_tags)
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
		    sdev->queue_depth + 1);
	else
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
		    sdev->queue_depth + 1);

	fcport->last_ramp_up = jiffies;

	DEBUG2(qla_printk(KERN_INFO, fcport->ha,
	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
	    fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
	    sdev->queue_depth));
}

static void
qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
{
	fc_port_t *fcport = data;

	if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
		return;

	DEBUG2(qla_printk(KERN_INFO, fcport->ha,
	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
	    fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
	    sdev->queue_depth));
}

static inline void
qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp)
{
	fc_port_t *fcport;
	struct scsi_device *sdev;

	sdev = sp->cmd->device;
	if (sdev->queue_depth >= ha->max_q_depth)
		return;

	fcport = sp->fcport;
	if (time_before(jiffies,
	    fcport->last_ramp_up + ql2xqfullrampup * HZ))
		return;
	if (time_before(jiffies,
	    fcport->last_queue_full + ql2xqfullrampup * HZ))
		return;
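
	/*
	 * Temporarily drop the hardware lock around the per-device
	 * queue-depth adjustment; it is re-acquired before returning
	 * to the caller.
	 */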
	spin_unlock_irq(&ha->hardware_lock);
	starget_for_each_device(sdev->sdev_target, fcport,
	    qla2x00_adjust_sdev_qdepth_up);
	spin_lock_irq(&ha->hardware_lock);
}

/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 * @index: SRB index
 */
static void
qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
{
	srb_t *sp;

	/* Validate handle. */
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
		    ha->host_no, index));
		qla_printk(KERN_WARNING, ha,
		    "Invalid SCSI completion handle %d.\n", index);

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		return;
	}

	sp = ha->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		ha->outstanding_cmds[index] = NULL;

		CMD_COMPL_STATUS(sp->cmd) = 0L;
		CMD_SCSI_STATUS(sp->cmd) = 0L;

		/* Save ISP completion status */
		sp->cmd->result = DID_OK << 16;

		qla2x00_ramp_up_queue_depth(ha, sp);
		qla2x00_sp_compl(ha, sp);
	} else {
		DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha,
		    "Invalid ISP SCSI completion handle\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
	}
}

/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla2x00_process_response_queue(struct scsi_qla_host *ha)
{
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t *pkt;
	uint16_t handle_cnt;
	uint16_t cnt;

	if (!ha->flags.online)
		return;
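
	/*
	 * Walk the response ring; each consumed entry is stamped
	 * RESPONSE_PROCESSED, so the loop stops when it reaches an entry
	 * the firmware has not yet replaced.
	 */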
	while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)ha->response_ring_ptr;

		ha->rsp_ring_index++;
		if (ha->rsp_ring_index == ha->response_q_length) {
			ha->rsp_ring_index = 0;
			ha->response_ring_ptr = ha->response_ring;
		} else {
			ha->response_ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			DEBUG3(printk(KERN_INFO
			    "scsi(%ld): Process error entry.\n", ha->host_no));

			qla2x00_error_entry(ha, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(ha, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(ha,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(ha,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
			break;
		case MS_IOCB_TYPE:
			qla2x00_ms_entry(ha, (ms_iocb_entry_t *)pkt);
			break;
		default:
			/* Type Not Supported. */
			DEBUG4(printk(KERN_WARNING
			    "scsi(%ld): Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    ha->host_no, pkt->entry_type, pkt->entry_status));
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), ha->rsp_ring_index);
}

/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
{
	srb_t *sp;
	fc_port_t *fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	uint16_t comp_status;
	uint16_t scsi_status;
	uint8_t lscsi_status;
	int32_t resid;
	uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
	uint8_t *rsp_info, *sense_data;

	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;
	if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_process_completed_request(ha, sts->handle);
		return;
	}

	/* Validate handle. */
	if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
		sp = ha->outstanding_cmds[sts->handle];
		ha->outstanding_cmds[sts->handle] = NULL;
	} else
		sp = NULL;

	if (sp == NULL) {
		DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		qla2xxx_wake_dpc(ha);
		return;
	}
	cp = sp->cmd;
	if (cp == NULL) {
		DEBUG2(printk("scsi(%ld): Command already returned back to OS "
		    "pkt->handle=%d sp=%p.\n", ha->host_no, sts->handle, sp));
		qla_printk(KERN_WARNING, ha,
		    "Command is NULL: already returned to OS (sp=%p)\n", sp);

		return;
	}

	lscsi_status = scsi_status & STATUS_MASK;
	CMD_ENTRY_STATUS(cp) = sts->entry_status;
	CMD_COMPL_STATUS(cp) = comp_status;
	CMD_SCSI_STATUS(cp) = scsi_status;

	fcport = sp->fcport;

	sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
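
	/*
	 * ISP24xx/ISP54xx status IOCBs return FCP response info and sense
	 * data in a shared data[] area that is byte-swapped with
	 * host_to_fcp_swap(); earlier ISPs use dedicated fields.
	 */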
	if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
		sense_len = le32_to_cpu(sts24->sense_len);
		rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		resid_len = le32_to_cpu(sts24->rsp_residual_count);
		fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
	} else {
		sense_len = le16_to_cpu(sts->req_sense_length);
		rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
	}

	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* Sense data lies beyond any FCP RESPONSE data. */
		if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
			sense_data += rsp_info_len;
		if (rsp_info_len > 3 && rsp_info[3]) {
			DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
			    "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
			    "retrying command\n", ha->host_no,
			    cp->device->channel, cp->device->id,
			    cp->device->lun, rsp_info_len, rsp_info[0],
			    rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
			    rsp_info[5], rsp_info[6], rsp_info[7]));

			cp->result = DID_BUS_BUSY << 16;
			qla2x00_sp_compl(ha, sp);
			return;
		}
	}

	/*
	 * Based on Host and scsi status generate status code for Linux
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			cp->result = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			cp->resid = resid;
			CMD_RESID_LEN(cp) = resid;

			if (!lscsi_status &&
			    ((unsigned)(cp->request_bufflen - resid) <
			    cp->underflow)) {
				qla_printk(KERN_INFO, ha,
				    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
				    "detected (%x of %x bytes)...returning "
				    "error status.\n", ha->host_no,
				    cp->device->channel, cp->device->id,
				    cp->device->lun, resid,
				    cp->request_bufflen);

				cp->result = DID_ERROR << 16;
				break;
			}
		}
		cp->result = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			DEBUG2(printk(KERN_INFO
			    "scsi(%ld): QUEUE FULL status detected "
			    "0x%x-0x%x.\n", ha->host_no, comp_status,
			    scsi_status));

			/* Adjust queue depth for all luns on the port. */
			fcport->last_queue_full = jiffies;
			spin_unlock_irq(&ha->hardware_lock);
			starget_for_each_device(cp->device->sdev_target,
			    fcport, qla2x00_adjust_sdev_qdepth_down);
			spin_lock_irq(&ha->hardware_lock);
			break;
		}
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		/* Copy Sense Data into sense buffer. */
		memset(cp->sense_buffer, 0, sizeof(cp->sense_buffer));

		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		if (sense_len >= sizeof(cp->sense_buffer))
			sense_len = sizeof(cp->sense_buffer);

		CMD_ACTUAL_SNSLEN(cp) = sense_len;
		sp->request_sense_length = sense_len;
		sp->request_sense_ptr = cp->sense_buffer;

		if (sp->request_sense_length > 32)
			sense_len = 32;

		memcpy(cp->sense_buffer, sense_data, sense_len);

		sp->request_sense_ptr += sense_len;
		sp->request_sense_length -= sense_len;
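
		/*
		 * If more sense data remains than fits in this IOCB, stash
		 * the SRB so Status Continuation entries can append the
		 * rest (see qla2x00_status_cont_entry()).
		 */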
		if (sp->request_sense_length != 0)
			ha->status_srb = sp;

		DEBUG5(printk("%s(): Check condition Sense data, "
		    "scsi(%ld:%d:%d:%d) cmd=%p pid=%ld\n", __func__,
		    ha->host_no, cp->device->channel, cp->device->id,
		    cp->device->lun, cp, cp->serial_number));
		if (sense_len)
			DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
			    CMD_ACTUAL_SNSLEN(cp)));
		break;

	case CS_DATA_UNDERRUN:
		resid = resid_len;

		/* Use F/W calculated residual length. */
		if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
			resid = fw_resid_len;

		if (scsi_status & SS_RESIDUAL_UNDER) {
			cp->resid = resid;
			CMD_RESID_LEN(cp) = resid;
		} else {
			DEBUG2(printk(KERN_INFO
			    "scsi(%ld:%d:%d) UNDERRUN status detected "
			    "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
			    "os_underflow=0x%x\n", ha->host_no,
			    cp->device->id, cp->device->lun, comp_status,
			    scsi_status, resid_len, resid, cp->cmnd[0],
			    cp->underflow));
		}

		/*
		 * Check to see if SCSI Status is non zero.  If so report SCSI
		 * Status.
		 */
		if (lscsi_status != 0) {
			cp->result = DID_OK << 16 | lscsi_status;

			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			/* Copy Sense Data into sense buffer */
			memset(cp->sense_buffer, 0, sizeof(cp->sense_buffer));

			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			if (sense_len >= sizeof(cp->sense_buffer))
				sense_len = sizeof(cp->sense_buffer);

			CMD_ACTUAL_SNSLEN(cp) = sense_len;
			sp->request_sense_length = sense_len;
			sp->request_sense_ptr = cp->sense_buffer;

			if (sp->request_sense_length > 32)
				sense_len = 32;

			memcpy(cp->sense_buffer, sense_data, sense_len);

			sp->request_sense_ptr += sense_len;
			sp->request_sense_length -= sense_len;
			if (sp->request_sense_length != 0)
				ha->status_srb = sp;

			DEBUG5(printk("%s(): Check condition Sense data, "
			    "scsi(%ld:%d:%d:%d) cmd=%p pid=%ld\n",
			    __func__, ha->host_no, cp->device->channel,
			    cp->device->id, cp->device->lun, cp,
			    cp->serial_number));
			if (sense_len)
				DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
				    CMD_ACTUAL_SNSLEN(cp)));
		} else {
			/*
			 * If RISC reports underrun and target does not report
			 * it then we must have a lost frame, so tell upper
			 * layer to retry it by reporting a bus busy.
			 */
			if (!(scsi_status & SS_RESIDUAL_UNDER)) {
				DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
				    "frame(s) detected (%x of %x bytes)..."
				    "retrying command.\n", ha->host_no,
				    cp->device->channel, cp->device->id,
				    cp->device->lun, resid,
				    cp->request_bufflen));

				cp->result = DID_BUS_BUSY << 16;
				break;
			}

			/* Handle mid-layer underflow */
			if ((unsigned)(cp->request_bufflen - resid) <
			    cp->underflow) {
				qla_printk(KERN_INFO, ha,
				    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
				    "detected (%x of %x bytes)...returning "
				    "error status.\n", ha->host_no,
				    cp->device->channel, cp->device->id,
				    cp->device->lun, resid,
				    cp->request_bufflen);

				cp->result = DID_ERROR << 16;
				break;
			}

			/* Everybody online, looking good... */
			cp->result = DID_OK << 16;
		}
		break;

	case CS_DATA_OVERRUN:
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
		    ha->host_no, cp->device->id, cp->device->lun, comp_status,
		    scsi_status));
		DEBUG2(printk(KERN_INFO
		    "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
		    cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
		    cp->cmnd[4], cp->cmnd[5]));
		DEBUG2(printk(KERN_INFO
		    "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
		    "status!\n",
		    cp->serial_number, cp->request_bufflen, resid_len));

		cp->result = DID_ERROR << 16;
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
		/*
		 * If the port is in Target Down state, return all IOs for this
		 * Target with DID_NO_CONNECT ELSE Queue the IOs in the
		 * retry_queue.
		 */
		DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
		    "pid=%ld, compl status=0x%x, port state=0x%x\n",
		    ha->host_no, cp->device->id, cp->device->lun,
		    cp->serial_number, comp_status,
		    atomic_read(&fcport->state)));

		cp->result = DID_BUS_BUSY << 16;
		if (atomic_read(&fcport->state) == FCS_ONLINE) {
			qla2x00_mark_device_lost(ha, fcport, 1, 1);
		}
		break;

	case CS_RESET:
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
		    ha->host_no, comp_status, scsi_status));

		cp->result = DID_RESET << 16;
		break;

	case CS_ABORTED:
		/*
		 * hv2.19.12 - DID_ABORT does not retry the request if we
		 * aborted this request then abort otherwise it must be a
		 * reset.
		 */
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
		    ha->host_no, comp_status, scsi_status));

		cp->result = DID_RESET << 16;
		break;

	case CS_TIMEOUT:
		cp->result = DID_BUS_BUSY << 16;

		if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
			DEBUG2(printk(KERN_INFO
			    "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
			    "0x%x-0x%x\n", ha->host_no, cp->device->channel,
			    cp->device->id, cp->device->lun, comp_status,
			    scsi_status));
			break;
		}
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
		    "sflags=%x.\n", ha->host_no, cp->device->channel,
		    cp->device->id, cp->device->lun, comp_status, scsi_status,
		    le16_to_cpu(sts->status_flags)));

		/* Check to see if logout occurred. */
		if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
			qla2x00_mark_device_lost(ha, fcport, 1, 1);
		break;

	default:
		DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
		    "0x%x-0x%x.\n", ha->host_no, comp_status, scsi_status));
		qla_printk(KERN_INFO, ha,
		    "Unknown status detected 0x%x-0x%x.\n",
		    comp_status, scsi_status);

		cp->result = DID_ERROR << 16;
		break;
	}

	/* Place command on done queue. */
	if (ha->status_srb == NULL)
		qla2x00_sp_compl(ha, sp);
}

/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
{
	uint8_t sense_sz = 0;
	srb_t *sp = ha->status_srb;
	struct scsi_cmnd *cp;
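
	/* Append this continuation's sense bytes to the pending SRB. */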
	if (sp != NULL && sp->request_sense_length != 0) {
		cp = sp->cmd;
		if (cp == NULL) {
			DEBUG2(printk("%s(): Cmd already returned back to OS "
			    "sp=%p.\n", __func__, sp));
			qla_printk(KERN_INFO, ha,
			    "cmd is NULL: already returned to OS (sp=%p)\n",
			    sp);

			ha->status_srb = NULL;
			return;
		}

		if (sp->request_sense_length > sizeof(pkt->data)) {
			sense_sz = sizeof(pkt->data);
		} else {
			sense_sz = sp->request_sense_length;
		}

		/* Move sense data. */
		if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
			host_to_fcp_swap(pkt->data, sizeof(pkt->data));
		memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
		DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));

		sp->request_sense_ptr += sense_sz;
		sp->request_sense_length -= sense_sz;

		/* Place command on done queue. */
		if (sp->request_sense_length == 0) {
			ha->status_srb = NULL;
			qla2x00_sp_compl(ha, sp);
		}
	}
}

/**
 * qla2x00_error_entry() - Process an error entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
{
	srb_t *sp;
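
	/* With level-2 debugging built in, log which error flag fired. */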
#if defined(QL_DEBUG_LEVEL_2)
	if (pkt->entry_status & RF_INV_E_ORDER)
		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
	else if (pkt->entry_status & RF_INV_E_COUNT)
		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
	else if (pkt->entry_status & RF_INV_E_PARAM)
		qla_printk(KERN_ERR, ha,
		    "%s: Invalid Entry Parameter\n", __func__);
	else if (pkt->entry_status & RF_INV_E_TYPE)
		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
	else if (pkt->entry_status & RF_BUSY)
		qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
	else
		qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
#endif

	/* Validate handle. */
	if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[pkt->handle];
	else
		sp = NULL;

	if (sp) {
		/* Free outstanding command slot. */
		ha->outstanding_cmds[pkt->handle] = NULL;

		/* Bad payload or header */
		if (pkt->entry_status &
		    (RF_INV_E_ORDER | RF_INV_E_COUNT |
		    RF_INV_E_PARAM | RF_INV_E_TYPE)) {
			sp->cmd->result = DID_ERROR << 16;
		} else if (pkt->entry_status & RF_BUSY) {
			sp->cmd->result = DID_BUS_BUSY << 16;
		} else {
			sp->cmd->result = DID_ERROR << 16;
		}
		qla2x00_sp_compl(ha, sp);

	} else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
	    COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
		DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha,
		    "Error entry - invalid handle\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		qla2xxx_wake_dpc(ha);
	}
}

/**
 * qla2x00_ms_entry() - Process a Management Server entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_ms_entry(scsi_qla_host_t *ha, ms_iocb_entry_t *pkt)
{
	srb_t *sp;

	DEBUG3(printk("%s(%ld): pkt=%p pkthandle=%d.\n",
	    __func__, ha->host_no, pkt, pkt->handle1));

	/* Validate handle. */
	if (pkt->handle1 < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[pkt->handle1];
	else
		sp = NULL;

	if (sp == NULL) {
		DEBUG2(printk("scsi(%ld): MS entry - invalid handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "MS entry - invalid handle\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		return;
	}

	CMD_COMPL_STATUS(sp->cmd) = le16_to_cpu(pkt->status);
	CMD_ENTRY_STATUS(sp->cmd) = pkt->entry_status;

	/* Free outstanding command slot. */
	ha->outstanding_cmds[pkt->handle1] = NULL;

	qla2x00_sp_compl(ha, sp);
}

/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
{
	uint16_t cnt;
	uint16_t __iomem *wptr;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
		wptr++;
	}

	if (ha->mcp) {
		DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
		    __func__, ha->host_no, ha->mcp->mb[0]));
	} else {
		DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
		    __func__, ha->host_no));
	}
}

/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla24xx_process_response_queue(struct scsi_qla_host *ha)
{
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	struct sts_entry_24xx *pkt;

	if (!ha->flags.online)
		return;

	while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)ha->response_ring_ptr;

		ha->rsp_ring_index++;
		if (ha->rsp_ring_index == ha->response_q_length) {
			ha->rsp_ring_index = 0;
			ha->response_ring_ptr = ha->response_ring;
		} else {
			ha->response_ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			DEBUG3(printk(KERN_INFO
			    "scsi(%ld): Process error entry.\n", ha->host_no));

			qla2x00_error_entry(ha, (sts_entry_t *) pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(ha, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
			break;
		case MS_IOCB_TYPE:
			qla24xx_ms_entry(ha, (struct ct_entry_24xx *)pkt);
			break;
		default:
			/* Type Not Supported. */
			DEBUG4(printk(KERN_WARNING
			    "scsi(%ld): Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    ha->host_no, pkt->entry_type, pkt->entry_status));
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(&reg->rsp_q_out, ha->rsp_ring_index);
}

/**
 * qla24xx_intr_handler() - Process interrupts for the ISP24xx and ISP54xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *ha;
	struct device_reg_24xx __iomem *reg;
	int status;
	unsigned long flags;
	unsigned long iter;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[4];

	ha = (scsi_qla_host_t *) dev_id;
	if (!ha) {
		printk(KERN_INFO
		    "%s(): NULL host pointer\n", __func__);
		return IRQ_NONE;
	}

	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			hccr = RD_REG_DWORD(&reg->hccr);

			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
			    "Dumping firmware!\n", hccr);
			qla24xx_fw_dump(ha, 1);

			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla24xx_mbx_completion(ha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(ha, mb);
			break;
		case 0x13:
			qla24xx_process_response_queue(ha);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
			    "(%d).\n",
			    ha->host_no, stat & 0xff));
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		spin_lock_irqsave(&ha->mbx_reg_lock, flags);
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		up(&ha->mbx_intr_sem);
		spin_unlock_irqrestore(&ha->mbx_reg_lock, flags);
	}

	return IRQ_HANDLED;
}

/**
 * qla24xx_ms_entry() - Process a Management Server entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla24xx_ms_entry(scsi_qla_host_t *ha, struct ct_entry_24xx *pkt)
{
	srb_t *sp;

	DEBUG3(printk("%s(%ld): pkt=%p pkthandle=%d.\n",
	    __func__, ha->host_no, pkt, pkt->handle));
	DEBUG9(printk("%s: ct pkt dump:\n", __func__));
	DEBUG9(qla2x00_dump_buffer((void *)pkt, sizeof(struct ct_entry_24xx)));

	/* Validate handle. */
	if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[pkt->handle];
	else
		sp = NULL;

	if (sp == NULL) {
		DEBUG2(printk("scsi(%ld): MS entry - invalid handle\n",
		    ha->host_no));
		DEBUG10(printk("scsi(%ld): MS entry - invalid handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "MS entry - invalid handle %d\n",
		    pkt->handle);

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		return;
	}

	CMD_COMPL_STATUS(sp->cmd) = le16_to_cpu(pkt->comp_status);
	CMD_ENTRY_STATUS(sp->cmd) = pkt->entry_status;

	/* Free outstanding command slot. */
	ha->outstanding_cmds[pkt->handle] = NULL;

	qla2x00_sp_compl(ha, sp);
}