/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2005 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <scsi/scsi_tcq.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_async_event(scsi_qla_host_t *, uint16_t *);
static void qla2x00_process_completed_request(struct scsi_qla_host *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, void *);
static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *);
static void qla2x00_ms_entry(scsi_qla_host_t *, ms_iocb_entry_t *);
static void qla24xx_ms_entry(scsi_qla_host_t *, struct ct_entry_24xx *);

/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *ha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long flags;
	unsigned long iter;
	uint16_t mb[4];

	ha = (scsi_qla_host_t *) dev_id;
	if (!ha) {
		printk(KERN_INFO
		    "%s(): NULL host pointer\n", __func__);
		return (IRQ_NONE);
	}

	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (iter = 50; iter--; ) {
		if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
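			/*
			 * Mailbox value ranges tested below: 0x4000-0x7fff is
			 * a mailbox command completion, 0x8000-0xbfff an
			 * asynchronous event; anything else is unexpected.
			 */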
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(ha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(ha, mb);
			} else {
				/*EMPTY*/
				DEBUG2(printk("scsi(%ld): Unrecognized "
				    "interrupt type (%d).\n",
				    ha->host_no, mb[0]));
			}

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(ha);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		spin_lock_irqsave(&ha->mbx_reg_lock, flags);
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		up(&ha->mbx_intr_sem);
		spin_unlock_irqrestore(&ha->mbx_reg_lock, flags);
	}

	return (IRQ_HANDLED);
}

/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *ha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long flags;
	unsigned long iter;
	uint32_t stat;
	uint16_t hccr;
	uint16_t mb[4];

	ha = (scsi_qla_host_t *) dev_id;
	if (!ha) {
		printk(KERN_INFO
		    "%s(): NULL host pointer\n", __func__);
		return (IRQ_NONE);
	}

	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED) {
			hccr = RD_REG_WORD(&reg->hccr);
			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				qla_printk(KERN_INFO, ha,
				    "Parity error -- HCCR=%x.\n", hccr);
			else
				qla_printk(KERN_INFO, ha,
				    "RISC paused -- HCCR=%x.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;
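
		/*
		 * The low byte of host_status selects the interrupt source;
		 * the upper word carries mailbox 0 (or a fast-post handle)
		 * for the cases decoded below.
		 */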
		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(ha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(ha, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(ha);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(ha, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(ha, mb);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
			    "(%d).\n",
			    ha->host_no, stat & 0xff));
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		spin_lock_irqsave(&ha->mbx_reg_lock, flags);
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		up(&ha->mbx_intr_sem);
		spin_unlock_irqrestore(&ha->mbx_reg_lock, flags);
	}

	return (IRQ_HANDLED);
}

/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
{
	uint16_t cnt;
	uint16_t __iomem *wptr;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
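
	/*
	 * Mailbox 8 and above live at a different offset on the ISP2200;
	 * registers 4 and 5 are read through the debounce helper, the rest
	 * with a plain register read.
	 */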
	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if (cnt == 4 || cnt == 5)
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
	}

	if (ha->mcp) {
		DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
		    __func__, ha->host_no, ha->mcp->mb[0]));
	} else {
		DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
		    __func__, ha->host_no));
	}
}

/**
 * qla2x00_async_event() - Process asynchronous events.
 * @ha: SCSI driver HA context
 * @mb: Mailbox registers (0 - 3)
 */
static void
qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
{
#define LS_UNKNOWN 2
	static char *link_speeds[5] = { "1", "2", "?", "4", "10" };
	char *link_speed;
	uint16_t handle_cnt;
	uint16_t cnt;
	uint32_t handles[5];
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t rscn_entry, host_pid;
	uint8_t rscn_queue_index;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
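
	/*
	 * The first switch collapses the various RIO/fast-post completion
	 * formats into a common MBA_SCSI_COMPLETION with the outstanding
	 * command handles gathered in handles[]/handle_cnt.
	 */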
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}

	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!ha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(ha, handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", ha->host_no));

		set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mb[1] = RD_MAILBOX_REG(ha, reg, 1);
		mb[2] = RD_MAILBOX_REG(ha, reg, 2);
		mb[3] = RD_MAILBOX_REG(ha, reg, 3);

		qla_printk(KERN_INFO, ha,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
		    mb[1], mb[2], mb[3]);

		ha->isp_ops.fw_dump(ha, 1);

		if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				qla_printk(KERN_ERR, ha,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				ha->flags.online = 0;
			} else
				set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		} else if (mb[1] == 0) {
			qla_printk(KERN_INFO, ha,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			ha->flags.online = 0;
		} else
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
		    ha->host_no));
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no,
		    mb[1]));
		qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);

		ha->flags.management_server_logged_in = 0;
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
			link_speed = link_speeds[0];
			ha->link_data_rate = PORT_SPEED_1GB;
		} else {
			link_speed = link_speeds[LS_UNKNOWN];
			if (mb[1] < 5)
				link_speed = link_speeds[mb[1]];
			ha->link_data_rate = mb[1];
		}

		DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
		    ha->host_no, link_speed));
		qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
		    link_speed);

		ha->flags.management_server_logged_in = 0;
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN (%x).\n",
		    ha->host_no, mb[1]));
		qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x).\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
			ha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		ha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		if (ql2xfdmienable)
			set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
		    ha->host_no, mb[1]));
		qla_printk(KERN_INFO, ha,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);

		ha->operating_mode = LOOP;
		ha->flags.management_server_logged_in = 0;
		break;

	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		if (IS_QLA2100(ha))
			break;

		DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
		    ha->host_no));

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			if (!atomic_read(&ha->loop_down_timer))
				atomic_set(&ha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) {
			set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
		}
		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
		    "received.\n",
		    ha->host_no));
		qla_printk(KERN_INFO, ha,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			if (!atomic_read(&ha->loop_down_timer))
				atomic_set(&ha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and wait for RSCN to come in.
		 */
		atomic_set(&ha->loop_down_timer, 0);
		if (atomic_read(&ha->loop_state) != LOOP_DOWN &&
		    atomic_read(&ha->loop_state) != LOOP_DEAD) {
			DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
			    "ignored %04x/%04x/%04x.\n", ha->host_no, mb[1],
			    mb[2], mb[3]));
			break;
		}

		DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
		    ha->host_no));
		DEBUG(printk(KERN_INFO
		    "scsi(%ld): Port database changed %04x %04x %04x.\n",
		    ha->host_no, mb[1], mb[2], mb[3]));

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&ha->loop_state, LOOP_UP);

		qla2x00_mark_all_devices_lost(ha, 1);

		ha->flags.rscn_queue_overflow = 1;

		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
		DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
		    ha->host_no));
		DEBUG(printk(KERN_INFO
		    "scsi(%ld): RSCN database changed -- %04x %04x.\n",
		    ha->host_no, mb[1], mb[2]));

		rscn_entry = (mb[1] << 16) | mb[2];
		host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) |
		    ha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			DEBUG(printk(KERN_INFO
			    "scsi(%ld): Ignoring RSCN update to local host "
			    "port ID (%06x)\n",
			    ha->host_no, host_pid));
			break;
		}
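
		/*
		 * Queue the RSCN entry; if the circular queue is already
		 * full, only record the overflow in rscn_queue_overflow.
		 */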
		rscn_queue_index = ha->rscn_in_ptr + 1;
		if (rscn_queue_index == MAX_RSCN_COUNT)
			rscn_queue_index = 0;
		if (rscn_queue_index != ha->rscn_out_ptr) {
			ha->rscn_queue[ha->rscn_in_ptr] = rscn_entry;
			ha->rscn_in_ptr = rscn_queue_index;
		} else {
			ha->flags.rscn_queue_overflow = 1;
		}

		atomic_set(&ha->loop_state, LOOP_UPDATE);
		atomic_set(&ha->loop_down_timer, 0);
		ha->flags.management_server_logged_in = 0;

		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(RSCN_UPDATE, &ha->dpc_flags);
		break;

	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n",
		    ha->host_no));
		DEBUG(printk(KERN_INFO
		    "scsi(%ld): [R|Z]IO update completion.\n",
		    ha->host_no));

		if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
			qla24xx_process_response_queue(ha);
		else
			qla2x00_process_response_queue(ha);
		break;

	case MBA_DISCARD_RND_FRAME:
		DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
		    "%04x.\n", ha->host_no, mb[1], mb[2], mb[3]));
		break;

	case MBA_TRACE_NOTIFICATION:
		DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
		    ha->host_no, mb[1], mb[2]));
		break;
	}
}

static void
qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
{
	fc_port_t *fcport = data;

	if (fcport->ha->max_q_depth <= sdev->queue_depth)
		return;

	if (sdev->ordered_tags)
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
		    sdev->queue_depth + 1);
	else
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
		    sdev->queue_depth + 1);

	fcport->last_ramp_up = jiffies;

	DEBUG2(qla_printk(KERN_INFO, fcport->ha,
	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
	    fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
	    sdev->queue_depth));
}

static void
qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
{
	fc_port_t *fcport = data;

	if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
		return;

	DEBUG2(qla_printk(KERN_INFO, fcport->ha,
	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
	    fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
	    sdev->queue_depth));
}

static inline void
qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp)
{
	fc_port_t *fcport;
	struct scsi_device *sdev;

	sdev = sp->cmd->device;
	if (sdev->queue_depth >= ha->max_q_depth)
		return;

	fcport = sp->fcport;
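
	/*
	 * Only ramp up if at least ql2xqfullrampup seconds have passed since
	 * the last ramp-up and since the last QUEUE FULL seen on this port.
	 */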
	if (time_before(jiffies,
	    fcport->last_ramp_up + ql2xqfullrampup * HZ))
		return;
	if (time_before(jiffies,
	    fcport->last_queue_full + ql2xqfullrampup * HZ))
		return;

	starget_for_each_device(sdev->sdev_target, fcport,
	    qla2x00_adjust_sdev_qdepth_up);
}

/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 * @index: SRB index
 */
static void
qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
{
	srb_t *sp;

	/* Validate handle. */
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
		    ha->host_no, index));
		qla_printk(KERN_WARNING, ha,
		    "Invalid SCSI completion handle %d.\n", index);

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		return;
	}

	sp = ha->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		ha->outstanding_cmds[index] = NULL;

		CMD_COMPL_STATUS(sp->cmd) = 0L;
		CMD_SCSI_STATUS(sp->cmd) = 0L;

		/* Save ISP completion status */
		sp->cmd->result = DID_OK << 16;

		qla2x00_ramp_up_queue_depth(ha, sp);
		qla2x00_sp_compl(ha, sp);
	} else {
		DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha,
		    "Invalid ISP SCSI completion handle\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
	}
}

/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla2x00_process_response_queue(struct scsi_qla_host *ha)
{
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t *pkt;
	uint16_t handle_cnt;
	uint16_t cnt;

	if (!ha->flags.online)
		return;
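
	/*
	 * Walk the response ring until an entry still carries the
	 * RESPONSE_PROCESSED signature, then publish the new out-pointer
	 * to the ISP below.
	 */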
	while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)ha->response_ring_ptr;

		ha->rsp_ring_index++;
		if (ha->rsp_ring_index == ha->response_q_length) {
			ha->rsp_ring_index = 0;
			ha->response_ring_ptr = ha->response_ring;
		} else {
			ha->response_ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			DEBUG3(printk(KERN_INFO
			    "scsi(%ld): Process error entry.\n", ha->host_no));

			qla2x00_error_entry(ha, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(ha, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(ha,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(ha,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
			break;
		case MS_IOCB_TYPE:
			qla2x00_ms_entry(ha, (ms_iocb_entry_t *)pkt);
			break;
		default:
			/* Type Not Supported. */
			DEBUG4(printk(KERN_WARNING
			    "scsi(%ld): Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    ha->host_no, pkt->entry_type, pkt->entry_status));
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), ha->rsp_ring_index);
}

/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
{
	srb_t *sp;
	fc_port_t *fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	uint16_t comp_status;
	uint16_t scsi_status;
	uint8_t lscsi_status;
	int32_t resid;
	uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
	uint8_t *rsp_info, *sense_data;

	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;
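
	/*
	 * The same response entry is overlaid with both the 2xxx and 24xx
	 * status layouts; the ISP type decides which view is valid.
	 */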
	if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_process_completed_request(ha, sts->handle);

		return;
	}

	/* Validate handle. */
	if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
		sp = ha->outstanding_cmds[sts->handle];
		ha->outstanding_cmds[sts->handle] = NULL;
	} else
		sp = NULL;

	if (sp == NULL) {
		DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		qla2xxx_wake_dpc(ha);
		return;
	}
	cp = sp->cmd;
	if (cp == NULL) {
		DEBUG2(printk("scsi(%ld): Command already returned back to OS "
		    "pkt->handle=%d sp=%p.\n", ha->host_no, sts->handle, sp));
		qla_printk(KERN_WARNING, ha,
		    "Command is NULL: already returned to OS (sp=%p)\n", sp);

		return;
	}

	lscsi_status = scsi_status & STATUS_MASK;
	CMD_ENTRY_STATUS(cp) = sts->entry_status;
	CMD_COMPL_STATUS(cp) = comp_status;
	CMD_SCSI_STATUS(cp) = scsi_status;

	fcport = sp->fcport;

	sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
	if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
		sense_len = le32_to_cpu(sts24->sense_len);
		rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		resid_len = le32_to_cpu(sts24->rsp_residual_count);
		fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
	} else {
		sense_len = le16_to_cpu(sts->req_sense_length);
		rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
	}

	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* Sense data lies beyond any FCP RESPONSE data. */
		if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
			sense_data += rsp_info_len;
		if (rsp_info_len > 3 && rsp_info[3]) {
			DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
			    "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
			    "retrying command\n", ha->host_no,
			    cp->device->channel, cp->device->id,
			    cp->device->lun, rsp_info_len, rsp_info[0],
			    rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
			    rsp_info[5], rsp_info[6], rsp_info[7]));

			cp->result = DID_BUS_BUSY << 16;
			qla2x00_sp_compl(ha, sp);
			return;
		}
	}

	/*
	 * Based on Host and scsi status generate status code for Linux
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			cp->result = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			cp->resid = resid;
			CMD_RESID_LEN(cp) = resid;

			if (!lscsi_status &&
			    ((unsigned)(cp->request_bufflen - resid) <
			     cp->underflow)) {
				qla_printk(KERN_INFO, ha,
				    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
				    "detected (%x of %x bytes)...returning "
				    "error status.\n", ha->host_no,
				    cp->device->channel, cp->device->id,
				    cp->device->lun, resid,
				    cp->request_bufflen);

				cp->result = DID_ERROR << 16;
				break;
			}
		}
		cp->result = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			DEBUG2(printk(KERN_INFO
			    "scsi(%ld): QUEUE FULL status detected "
			    "0x%x-0x%x.\n", ha->host_no, comp_status,
			    scsi_status));

			/* Adjust queue depth for all luns on the port. */
			fcport->last_queue_full = jiffies;
			starget_for_each_device(cp->device->sdev_target,
			    fcport, qla2x00_adjust_sdev_qdepth_down);
			break;
		}
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		/* Copy Sense Data into sense buffer. */
		memset(cp->sense_buffer, 0, sizeof(cp->sense_buffer));

		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		if (sense_len >= sizeof(cp->sense_buffer))
			sense_len = sizeof(cp->sense_buffer);

		CMD_ACTUAL_SNSLEN(cp) = sense_len;
		sp->request_sense_length = sense_len;
		sp->request_sense_ptr = cp->sense_buffer;
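
		/*
		 * Only the first 32 bytes of sense data fit in the status
		 * IOCB; any remainder arrives via status continuation
		 * entries, tracked through ha->status_srb.
		 */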
		if (sp->request_sense_length > 32)
			sense_len = 32;

		memcpy(cp->sense_buffer, sense_data, sense_len);

		sp->request_sense_ptr += sense_len;
		sp->request_sense_length -= sense_len;
		if (sp->request_sense_length != 0)
			ha->status_srb = sp;

		DEBUG5(printk("%s(): Check condition Sense data, "
		    "scsi(%ld:%d:%d:%d) cmd=%p pid=%ld\n", __func__,
		    ha->host_no, cp->device->channel, cp->device->id,
		    cp->device->lun, cp, cp->serial_number));
		if (sense_len)
			DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
			    CMD_ACTUAL_SNSLEN(cp)));
		break;

	case CS_DATA_UNDERRUN:
		resid = resid_len;

		/* Use F/W calculated residual length. */
		if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
			resid = fw_resid_len;

		if (scsi_status & SS_RESIDUAL_UNDER) {
			cp->resid = resid;
			CMD_RESID_LEN(cp) = resid;
		} else {
			DEBUG2(printk(KERN_INFO
			    "scsi(%ld:%d:%d) UNDERRUN status detected "
			    "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
			    "os_underflow=0x%x\n", ha->host_no,
			    cp->device->id, cp->device->lun, comp_status,
			    scsi_status, resid_len, resid, cp->cmnd[0],
			    cp->underflow));
		}

		/*
		 * Check to see if SCSI Status is non zero. If so report SCSI
		 * Status.
		 */
		if (lscsi_status != 0) {
			cp->result = DID_OK << 16 | lscsi_status;

			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			/* Copy Sense Data into sense buffer */
			memset(cp->sense_buffer, 0, sizeof(cp->sense_buffer));

			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			if (sense_len >= sizeof(cp->sense_buffer))
				sense_len = sizeof(cp->sense_buffer);

			CMD_ACTUAL_SNSLEN(cp) = sense_len;
			sp->request_sense_length = sense_len;
			sp->request_sense_ptr = cp->sense_buffer;

			if (sp->request_sense_length > 32)
				sense_len = 32;

			memcpy(cp->sense_buffer, sense_data, sense_len);

			sp->request_sense_ptr += sense_len;
			sp->request_sense_length -= sense_len;
			if (sp->request_sense_length != 0)
				ha->status_srb = sp;

			DEBUG5(printk("%s(): Check condition Sense data, "
			    "scsi(%ld:%d:%d:%d) cmd=%p pid=%ld\n",
			    __func__, ha->host_no, cp->device->channel,
			    cp->device->id, cp->device->lun, cp,
			    cp->serial_number));

			if (sense_len)
				DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
				    CMD_ACTUAL_SNSLEN(cp)));
		} else {
			/*
			 * If RISC reports underrun and target does not report
			 * it then we must have a lost frame, so tell upper
			 * layer to retry it by reporting a bus busy.
			 */
			if (!(scsi_status & SS_RESIDUAL_UNDER)) {
				DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
				    "frame(s) detected (%x of %x bytes)..."
				    "retrying command.\n", ha->host_no,
				    cp->device->channel, cp->device->id,
				    cp->device->lun, resid,
				    cp->request_bufflen));

				cp->result = DID_BUS_BUSY << 16;
				break;
			}

			/* Handle mid-layer underflow */
			if ((unsigned)(cp->request_bufflen - resid) <
			    cp->underflow) {
				qla_printk(KERN_INFO, ha,
				    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
				    "detected (%x of %x bytes)...returning "
				    "error status.\n", ha->host_no,
				    cp->device->channel, cp->device->id,
				    cp->device->lun, resid,
				    cp->request_bufflen);

				cp->result = DID_ERROR << 16;
				break;
			}

			/* Everybody online, looking good... */
			cp->result = DID_OK << 16;
		}
		break;

	case CS_DATA_OVERRUN:
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
		    ha->host_no, cp->device->id, cp->device->lun, comp_status,
		    scsi_status));
		DEBUG2(printk(KERN_INFO
		    "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
		    cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
		    cp->cmnd[4], cp->cmnd[5]));
		DEBUG2(printk(KERN_INFO
		    "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
		    "status!\n",
		    cp->serial_number, cp->request_bufflen, resid_len));

		cp->result = DID_ERROR << 16;
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
		/*
		 * If the port is in Target Down state, return all IOs for this
		 * Target with DID_NO_CONNECT ELSE Queue the IOs in the
		 * retry_queue.
		 */
		DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
		    "pid=%ld, compl status=0x%x, port state=0x%x\n",
		    ha->host_no, cp->device->id, cp->device->lun,
		    cp->serial_number, comp_status,
		    atomic_read(&fcport->state)));

		cp->result = DID_BUS_BUSY << 16;
		if (atomic_read(&fcport->state) == FCS_ONLINE) {
			qla2x00_mark_device_lost(ha, fcport, 1, 1);
		}
		break;

	case CS_RESET:
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
		    ha->host_no, comp_status, scsi_status));

		cp->result = DID_RESET << 16;
		break;

	case CS_ABORTED:
		/*
		 * hv2.19.12 - DID_ABORT does not retry the request if we
		 * aborted this request then abort otherwise it must be a
		 * reset.
		 */
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
		    ha->host_no, comp_status, scsi_status));

		cp->result = DID_RESET << 16;
		break;

	case CS_TIMEOUT:
		cp->result = DID_BUS_BUSY << 16;

		if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
			DEBUG2(printk(KERN_INFO
			    "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
			    "0x%x-0x%x\n", ha->host_no, cp->device->channel,
			    cp->device->id, cp->device->lun, comp_status,
			    scsi_status));
			break;
		}
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
		    "sflags=%x.\n", ha->host_no, cp->device->channel,
		    cp->device->id, cp->device->lun, comp_status, scsi_status,
		    le16_to_cpu(sts->status_flags)));

		/* Check to see if logout occurred. */
		if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
			qla2x00_mark_device_lost(ha, fcport, 1, 1);
		break;

	default:
		DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
		    "0x%x-0x%x.\n", ha->host_no, comp_status, scsi_status));
		qla_printk(KERN_INFO, ha,
		    "Unknown status detected 0x%x-0x%x.\n",
		    comp_status, scsi_status);

		cp->result = DID_ERROR << 16;
		break;
	}

	/* Place command on done queue. */
	if (ha->status_srb == NULL)
		qla2x00_sp_compl(ha, sp);
}

/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
{
	uint8_t sense_sz = 0;
	srb_t *sp = ha->status_srb;
	struct scsi_cmnd *cp;

	if (sp != NULL && sp->request_sense_length != 0) {
		cp = sp->cmd;
		if (cp == NULL) {
			DEBUG2(printk("%s(): Cmd already returned back to OS "
			    "sp=%p.\n", __func__, sp));
			qla_printk(KERN_INFO, ha,
			    "cmd is NULL: already returned to OS (sp=%p)\n",
			    sp);

			ha->status_srb = NULL;
			return;
		}

		if (sp->request_sense_length > sizeof(pkt->data)) {
			sense_sz = sizeof(pkt->data);
		} else {
			sense_sz = sp->request_sense_length;
		}

		/* Move sense data. */
		if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
			host_to_fcp_swap(pkt->data, sizeof(pkt->data));
		memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
		DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));

		sp->request_sense_ptr += sense_sz;
		sp->request_sense_length -= sense_sz;

		/* Place command on done queue. */
		if (sp->request_sense_length == 0) {
			ha->status_srb = NULL;
			qla2x00_sp_compl(ha, sp);
		}
	}
}

/**
 * qla2x00_error_entry() - Process an error entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
{
	srb_t *sp;

#if defined(QL_DEBUG_LEVEL_2)
	if (pkt->entry_status & RF_INV_E_ORDER)
		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
	else if (pkt->entry_status & RF_INV_E_COUNT)
		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
	else if (pkt->entry_status & RF_INV_E_PARAM)
		qla_printk(KERN_ERR, ha,
		    "%s: Invalid Entry Parameter\n", __func__);
	else if (pkt->entry_status & RF_INV_E_TYPE)
		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
	else if (pkt->entry_status & RF_BUSY)
		qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
	else
		qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
#endif

	/* Validate handle. */
	if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[pkt->handle];
	else
		sp = NULL;

	if (sp) {
		/* Free outstanding command slot. */
		ha->outstanding_cmds[pkt->handle] = NULL;

		/* Bad payload or header */
		if (pkt->entry_status &
		    (RF_INV_E_ORDER | RF_INV_E_COUNT |
		     RF_INV_E_PARAM | RF_INV_E_TYPE)) {
			sp->cmd->result = DID_ERROR << 16;
		} else if (pkt->entry_status & RF_BUSY) {
			sp->cmd->result = DID_BUS_BUSY << 16;
		} else {
			sp->cmd->result = DID_ERROR << 16;
		}
		qla2x00_sp_compl(ha, sp);

	} else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
	    COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
		DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha,
		    "Error entry - invalid handle\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		qla2xxx_wake_dpc(ha);
	}
}

/**
 * qla2x00_ms_entry() - Process a Management Server entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_ms_entry(scsi_qla_host_t *ha, ms_iocb_entry_t *pkt)
{
	srb_t *sp;

	DEBUG3(printk("%s(%ld): pkt=%p pkthandle=%d.\n",
	    __func__, ha->host_no, pkt, pkt->handle1));

	/* Validate handle. */
	if (pkt->handle1 < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[pkt->handle1];
	else
		sp = NULL;

	if (sp == NULL) {
		DEBUG2(printk("scsi(%ld): MS entry - invalid handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "MS entry - invalid handle\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		return;
	}

	CMD_COMPL_STATUS(sp->cmd) = le16_to_cpu(pkt->status);
	CMD_ENTRY_STATUS(sp->cmd) = pkt->entry_status;

	/* Free outstanding command slot. */
	ha->outstanding_cmds[pkt->handle1] = NULL;

	qla2x00_sp_compl(ha, sp);
}

/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
{
	uint16_t cnt;
	uint16_t __iomem *wptr;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
		wptr++;
	}

	if (ha->mcp) {
		DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
		    __func__, ha->host_no, ha->mcp->mb[0]));
	} else {
		DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
		    __func__, ha->host_no));
	}
}

/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla24xx_process_response_queue(struct scsi_qla_host *ha)
{
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	struct sts_entry_24xx *pkt;

	if (!ha->flags.online)
		return;

	while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)ha->response_ring_ptr;

		ha->rsp_ring_index++;
		if (ha->rsp_ring_index == ha->response_q_length) {
			ha->rsp_ring_index = 0;
			ha->response_ring_ptr = ha->response_ring;
		} else {
			ha->response_ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			DEBUG3(printk(KERN_INFO
			    "scsi(%ld): Process error entry.\n", ha->host_no));

			qla2x00_error_entry(ha, (sts_entry_t *) pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(ha, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
			break;
		case MS_IOCB_TYPE:
			qla24xx_ms_entry(ha, (struct ct_entry_24xx *)pkt);
			break;
		default:
			/* Type Not Supported. */
			DEBUG4(printk(KERN_WARNING
			    "scsi(%ld): Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    ha->host_no, pkt->entry_type, pkt->entry_status));
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(&reg->rsp_q_out, ha->rsp_ring_index);
}

/**
 * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *ha;
	struct device_reg_24xx __iomem *reg;
	int status;
	unsigned long flags;
	unsigned long iter;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[4];

	ha = (scsi_qla_host_t *) dev_id;
	if (!ha) {
		printk(KERN_INFO
		    "%s(): NULL host pointer\n", __func__);
		return IRQ_NONE;
	}

	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			hccr = RD_REG_DWORD(&reg->hccr);

			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
			    "Dumping firmware!\n", hccr);
			qla24xx_fw_dump(ha, 1);

			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla24xx_mbx_completion(ha, MSW(stat));
			status |= MBX_INTERRUPT;
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(ha, mb);
			break;
		case 0x13:
			qla24xx_process_response_queue(ha);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
			    "(%d).\n",
			    ha->host_no, stat & 0xff));
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		spin_lock_irqsave(&ha->mbx_reg_lock, flags);
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		up(&ha->mbx_intr_sem);
		spin_unlock_irqrestore(&ha->mbx_reg_lock, flags);
	}

	return IRQ_HANDLED;
}

/**
 * qla24xx_ms_entry() - Process a Management Server entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla24xx_ms_entry(scsi_qla_host_t *ha, struct ct_entry_24xx *pkt)
{
	srb_t *sp;

	DEBUG3(printk("%s(%ld): pkt=%p pkthandle=%d.\n",
	    __func__, ha->host_no, pkt, pkt->handle));

	DEBUG9(printk("%s: ct pkt dump:\n", __func__));
	DEBUG9(qla2x00_dump_buffer((void *)pkt, sizeof(struct ct_entry_24xx)));

	/* Validate handle. */
	if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[pkt->handle];
	else
		sp = NULL;

	if (sp == NULL) {
		DEBUG2(printk("scsi(%ld): MS entry - invalid handle\n",
		    ha->host_no));
		DEBUG10(printk("scsi(%ld): MS entry - invalid handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "MS entry - invalid handle %d\n",
		    pkt->handle);

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		return;
	}

	CMD_COMPL_STATUS(sp->cmd) = le16_to_cpu(pkt->comp_status);
	CMD_ENTRY_STATUS(sp->cmd) = pkt->entry_status;

	/* Free outstanding command slot. */
	ha->outstanding_cmds[pkt->handle] = NULL;

	qla2x00_sp_compl(ha, sp);
}