ql4_isr.c

/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"

/**
 * qla4xxx_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 * @index: SRB index
 **/
static void qla4xxx_process_completed_request(struct scsi_qla_host *ha,
                                              uint32_t index)
{
        struct srb *srb;

        srb = qla4xxx_del_from_active_array(ha, index);
        if (srb) {
                /* Save ISP completion status */
                srb->cmd->result = DID_OK << 16;
                qla4xxx_srb_compl(ha, srb);
        } else {
                DEBUG2(printk("scsi%ld: Invalid ISP SCSI completion handle = "
                              "%d\n", ha->host_no, index));
                set_bit(DPC_RESET_HA, &ha->dpc_flags);
        }
}
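
/*
 * A "fast post" completion carries only the command handle: the SRB is
 * looked up, completed with DID_OK, and no status parsing is done.
 * Commands that end with a non-zero SCSI status, or with a completion
 * status other than SCS_COMPLETE, take the full status-entry path in
 * qla4xxx_status_entry() below.
 */
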
/**
 * qla4xxx_status_entry - processes status IOCBs
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 **/
static void qla4xxx_status_entry(struct scsi_qla_host *ha,
                                 struct status_entry *sts_entry)
{
        uint8_t scsi_status;
        struct scsi_cmnd *cmd;
        struct srb *srb;
        struct ddb_entry *ddb_entry;
        uint32_t residual;
        uint16_t sensebytecnt;

        if (sts_entry->completionStatus == SCS_COMPLETE &&
            sts_entry->scsiStatus == 0) {
                qla4xxx_process_completed_request(ha,
                                                  le32_to_cpu(sts_entry->
                                                              handle));
                return;
        }

        srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
        if (!srb) {
                /* FIXMEdg: Don't we need to reset ISP in this case??? */
                DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
                              "handle 0x%x, sp=%p. This cmd may have already "
                              "been completed.\n", ha->host_no, __func__,
                              le32_to_cpu(sts_entry->handle), srb));
                return;
        }

        cmd = srb->cmd;
        if (cmd == NULL) {
                DEBUG2(printk("scsi%ld: %s: Command already returned back to "
                              "OS pkt->handle=%d srb=%p srb->state:%d\n",
                              ha->host_no, __func__, sts_entry->handle,
                              srb, srb->state));
                dev_warn(&ha->pdev->dev, "Command is NULL:"
                         " already returned to OS (srb=%p)\n", srb);
                return;
        }

        ddb_entry = srb->ddb;
        if (ddb_entry == NULL) {
                cmd->result = DID_NO_CONNECT << 16;
                goto status_entry_exit;
        }

        residual = le32_to_cpu(sts_entry->residualByteCnt);

        /* Translate ISP error to a Linux SCSI error. */
        scsi_status = sts_entry->scsiStatus;
        switch (sts_entry->completionStatus) {
        case SCS_COMPLETE:
                if (scsi_status == 0) {
                        cmd->result = DID_OK << 16;
                        break;
                }

                if (sts_entry->iscsiFlags &
                    (ISCSI_FLAG_RESIDUAL_OVER | ISCSI_FLAG_RESIDUAL_UNDER))
                        cmd->resid = residual;

                cmd->result = DID_OK << 16 | scsi_status;

                if (scsi_status != SCSI_CHECK_CONDITION)
                        break;

                /* Copy Sense Data into sense buffer. */
                memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));

                sensebytecnt = le16_to_cpu(sts_entry->senseDataByteCnt);
                if (sensebytecnt == 0)
                        break;

                memcpy(cmd->sense_buffer, sts_entry->senseData,
                       min(sensebytecnt,
                           (uint16_t) sizeof(cmd->sense_buffer)));

                DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
                              "ASC/ASCQ = %02x/%02x\n", ha->host_no,
                              cmd->device->channel, cmd->device->id,
                              cmd->device->lun, __func__,
                              sts_entry->senseData[2] & 0x0f,
                              sts_entry->senseData[12],
                              sts_entry->senseData[13]));

                srb->flags |= SRB_GOT_SENSE;
                break;

        case SCS_INCOMPLETE:
                /* Always set the status to DID_ERROR, since
                 * all conditions result in that status anyway */
                cmd->result = DID_ERROR << 16;
                break;

        case SCS_RESET_OCCURRED:
                DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Device RESET occurred\n",
                              ha->host_no, cmd->device->channel,
                              cmd->device->id, cmd->device->lun, __func__));

                cmd->result = DID_RESET << 16;
                break;

        case SCS_ABORTED:
                DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Abort occurred\n",
                              ha->host_no, cmd->device->channel,
                              cmd->device->id, cmd->device->lun, __func__));

                cmd->result = DID_RESET << 16;
                break;

        case SCS_TIMEOUT:
                DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: Timeout\n",
                              ha->host_no, cmd->device->channel,
                              cmd->device->id, cmd->device->lun));

                cmd->result = DID_BUS_BUSY << 16;

                /*
                 * Mark device missing so that we won't continue to send
                 * I/O to this device.  We should get a ddb state change
                 * AEN soon.
                 */
                if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
                        qla4xxx_mark_device_missing(ha, ddb_entry);
                break;

        case SCS_DATA_UNDERRUN:
        case SCS_DATA_OVERRUN:
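                /*
                 * Three sub-cases are handled here: a reported overrun is
                 * always treated as an error; if the target returned a SCSI
                 * status it takes precedence over the underrun; otherwise
                 * the residual is compared with cmd->underflow to choose
                 * between DID_OK, DID_BUS_BUSY (suspected lost frame) and
                 * DID_ERROR.
                 */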
                if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
                        DEBUG2(printk("scsi%ld:%d:%d:%d: %s: " "Data overrun, "
                                      "residual = 0x%x\n", ha->host_no,
                                      cmd->device->channel, cmd->device->id,
                                      cmd->device->lun, __func__, residual));

                        cmd->result = DID_ERROR << 16;
                        break;
                }

                if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
                        /*
                         * Firmware detected a SCSI transport underrun
                         * condition
                         */
                        cmd->resid = residual;
                        DEBUG2(printk("scsi%ld:%d:%d:%d: %s: UNDERRUN status "
                                      "detected, xferlen = 0x%x, residual = "
                                      "0x%x\n",
                                      ha->host_no, cmd->device->channel,
                                      cmd->device->id,
                                      cmd->device->lun, __func__,
                                      cmd->request_bufflen,
                                      residual));
                }

                /*
                 * If there is scsi_status, it takes precedence over the
                 * underflow condition.
                 */
                if (scsi_status != 0) {
                        cmd->result = DID_OK << 16 | scsi_status;

                        if (scsi_status != SCSI_CHECK_CONDITION)
                                break;

                        /* Copy Sense Data into sense buffer. */
                        memset(cmd->sense_buffer, 0,
                               sizeof(cmd->sense_buffer));

                        sensebytecnt =
                                le16_to_cpu(sts_entry->senseDataByteCnt);
                        if (sensebytecnt == 0)
                                break;

                        memcpy(cmd->sense_buffer, sts_entry->senseData,
                               min(sensebytecnt,
                                   (uint16_t) sizeof(cmd->sense_buffer)));

                        DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
                                      "ASC/ASCQ = %02x/%02x\n", ha->host_no,
                                      cmd->device->channel, cmd->device->id,
                                      cmd->device->lun, __func__,
                                      sts_entry->senseData[2] & 0x0f,
                                      sts_entry->senseData[12],
                                      sts_entry->senseData[13]));
                } else {
                        /*
                         * If RISC reports underrun and target does not
                         * report it then we must have a lost frame, so
                         * tell upper layer to retry it by reporting a
                         * bus busy.
                         */
                        if ((sts_entry->iscsiFlags &
                             ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
                                cmd->result = DID_BUS_BUSY << 16;
                        } else if ((cmd->request_bufflen - residual) <
                                   cmd->underflow) {
                                /*
                                 * Handle mid-layer underflow???
                                 *
                                 * For kernels less than 2.4, the driver must
                                 * return an error if an underflow is detected.
                                 * For kernels equal-to and above 2.4, the
                                 * mid-layer will apparently handle the
                                 * underflow by detecting the residual count --
                                 * unfortunately, we do not see where this is
                                 * actually being done.  In the interim, we
                                 * will return DID_ERROR.
                                 */
                                DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
                                              "Mid-layer Data underrun, "
                                              "xferlen = 0x%x, "
                                              "residual = 0x%x\n", ha->host_no,
                                              cmd->device->channel,
                                              cmd->device->id,
                                              cmd->device->lun, __func__,
                                              cmd->request_bufflen, residual));

                                cmd->result = DID_ERROR << 16;
                        } else {
                                cmd->result = DID_OK << 16;
                        }
                }
                break;

        case SCS_DEVICE_LOGGED_OUT:
        case SCS_DEVICE_UNAVAILABLE:
                /*
                 * Mark device missing so that we won't continue to
                 * send I/O to this device.  We should get a ddb
                 * state change AEN soon.
                 */
                if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
                        qla4xxx_mark_device_missing(ha, ddb_entry);

                cmd->result = DID_BUS_BUSY << 16;
                break;

        case SCS_QUEUE_FULL:
                /*
                 * SCSI Mid-Layer handles device queue full
                 */
                cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
                DEBUG2(printk("scsi%ld:%d:%d: %s: QUEUE FULL detected "
                              "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
                              " iResp=%02x\n", ha->host_no, cmd->device->id,
                              cmd->device->lun, __func__,
                              sts_entry->completionStatus,
                              sts_entry->scsiStatus, sts_entry->state_flags,
                              sts_entry->iscsiFlags,
                              sts_entry->iscsiResponse));
                break;

        default:
                cmd->result = DID_ERROR << 16;
                break;
        }

status_entry_exit:

        /* complete the request */
        srb->cc_stat = sts_entry->completionStatus;
        qla4xxx_srb_compl(ha, srb);
}

/**
 * qla4xxx_process_response_queue - process response queue completions
 * @ha: Pointer to host adapter structure.
 *
 * This routine processes response queue completions in interrupt context.
 * Hardware_lock locked upon entry
 **/
static void qla4xxx_process_response_queue(struct scsi_qla_host * ha)
{
        uint32_t count = 0;
        struct srb *srb = NULL;
        struct status_entry *sts_entry;

        /* Process all responses from response queue */
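        /*
         * response_out is the driver's consumer index into the
         * RESPONSE_QUEUE_DEPTH-entry response ring; rsp_q_in is read from
         * the shadow register area the firmware keeps updated in host
         * memory, so new completions can be detected without an MMIO read.
         */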
        while ((ha->response_in =
                (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in)) !=
               ha->response_out) {
                sts_entry = (struct status_entry *) ha->response_ptr;
                count++;

                /* Advance pointers for next entry */
                if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
                        ha->response_out = 0;
                        ha->response_ptr = ha->response_ring;
                } else {
                        ha->response_out++;
                        ha->response_ptr++;
                }

                /* process entry */
                switch (sts_entry->hdr.entryType) {
                case ET_STATUS:
                        /*
                         * Common status - Single completion posted in single
                         * IOSB.
                         */
                        qla4xxx_status_entry(ha, sts_entry);
                        break;

                case ET_PASSTHRU_STATUS:
                        break;

                case ET_STATUS_CONTINUATION:
                        /* Just throw away the status continuation entries */
                        DEBUG2(printk("scsi%ld: %s: Status Continuation entry "
                                      "- ignoring\n", ha->host_no, __func__));
                        break;

                case ET_COMMAND:
                        /* ISP device queue is full.  Command not
                         * accepted by ISP.  Queue command for
                         * later */
                        srb = qla4xxx_del_from_active_array(ha,
                                                            le32_to_cpu(sts_entry->
                                                                        handle));
                        if (srb == NULL)
                                goto exit_prq_invalid_handle;

                        DEBUG2(printk("scsi%ld: %s: FW device queue full, "
                                      "srb %p\n", ha->host_no, __func__, srb));

                        /* Retry normally by sending it back with
                         * DID_BUS_BUSY */
                        srb->cmd->result = DID_BUS_BUSY << 16;
                        qla4xxx_srb_compl(ha, srb);
                        break;

                case ET_CONTINUE:
                        /* Just throw away the continuation entries */
                        DEBUG2(printk("scsi%ld: %s: Continuation entry - "
                                      "ignoring\n", ha->host_no, __func__));
                        break;

                default:
                        /*
                         * Invalid entry in response queue, reset RISC
                         * firmware.
                         */
                        DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
                                      "response queue\n", ha->host_no,
                                      __func__,
                                      sts_entry->hdr.entryType));
                        goto exit_prq_error;
                }
        }

        /*
         * Done with responses, update the ISP.  For QLA4010, this also
         * clears the interrupt.
         */
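        /*
         * The read back is assumed to flush the posted PCI write so the
         * ISP sees the new out-pointer before the handler returns.
         */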
        writel(ha->response_out, &ha->reg->rsp_q_out);
        readl(&ha->reg->rsp_q_out);

        return;

exit_prq_invalid_handle:
        DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
                      ha->host_no, __func__, srb, sts_entry->hdr.entryType,
                      sts_entry->completionStatus));

exit_prq_error:
        writel(ha->response_out, &ha->reg->rsp_q_out);
        readl(&ha->reg->rsp_q_out);

        set_bit(DPC_RESET_HA, &ha->dpc_flags);
}

/**
 * qla4xxx_isr_decode_mailbox - decodes mailbox status
 * @ha: Pointer to host adapter structure.
 * @mbox_status: Mailbox status.
 *
 * This routine decodes the mailbox status during the ISR.
 * Hardware_lock locked upon entry.  Runs in interrupt context.
 **/
static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
                                       uint32_t mbox_status)
{
        int i;
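
        /*
         * The high nibble of the mailbox status selects the path taken
         * below: completion-type statuses are copied back to the waiting
         * mailbox command, asynchronous event statuses are decoded as
         * AENs, and anything else is only logged.
         */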
        if ((mbox_status == MBOX_STS_BUSY) ||
            (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
            (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
                ha->mbox_status[0] = mbox_status;

                if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
                        /*
                         * Copy all mailbox registers to a temporary
                         * location and set mailbox command done flag
                         */
                        for (i = 1; i < ha->mbox_status_count; i++)
                                ha->mbox_status[i] =
                                        readl(&ha->reg->mailbox[i]);

                        set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
                        wake_up(&ha->mailbox_wait_queue);
                }
        } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
                /* Immediately process the AENs that don't require much work.
                 * Only queue the database_changed AENs */
                switch (mbox_status) {
                case MBOX_ASTS_SYSTEM_ERROR:
                        /* Log Mailbox registers */
                        if (ql4xdontresethba) {
                                DEBUG2(printk("%s: Don't reset HBA\n",
                                              __func__));
                        } else {
                                set_bit(AF_GET_CRASH_RECORD, &ha->flags);
                                set_bit(DPC_RESET_HA, &ha->dpc_flags);
                        }
                        break;

                case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
                case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
                case MBOX_ASTS_NVRAM_INVALID:
                case MBOX_ASTS_IP_ADDRESS_CHANGED:
                case MBOX_ASTS_DHCP_LEASE_EXPIRED:
                        DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
                                      "Reset HA\n", ha->host_no, mbox_status));
                        set_bit(DPC_RESET_HA, &ha->dpc_flags);
                        break;

                case MBOX_ASTS_LINK_UP:
                        DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK UP\n",
                                      ha->host_no, mbox_status));
                        set_bit(AF_LINK_UP, &ha->flags);
                        break;

                case MBOX_ASTS_LINK_DOWN:
                        DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK DOWN\n",
                                      ha->host_no, mbox_status));
                        clear_bit(AF_LINK_UP, &ha->flags);
                        break;

                case MBOX_ASTS_HEARTBEAT:
                        ha->seconds_since_last_heartbeat = 0;
                        break;

                case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
                        DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
                                      "ACQUIRED\n", ha->host_no, mbox_status));
                        set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
                        break;

                case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
                case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target mode only */
                case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED:  /* Connection mode */
                case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
                case MBOX_ASTS_SUBNET_STATE_CHANGE:
                        /* No action */
                        DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
                                      mbox_status));
                        break;

                case MBOX_ASTS_MAC_ADDRESS_CHANGED:
                case MBOX_ASTS_DNS:
                        /* No action */
                        DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
                                      "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
                                      ha->host_no, mbox_status,
                                      readl(&ha->reg->mailbox[1]),
                                      readl(&ha->reg->mailbox[2])));
                        break;

                case MBOX_ASTS_SELF_TEST_FAILED:
                case MBOX_ASTS_LOGIN_FAILED:
                        /* No action */
                        DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
                                      "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
                                      ha->host_no, mbox_status,
                                      readl(&ha->reg->mailbox[1]),
                                      readl(&ha->reg->mailbox[2]),
                                      readl(&ha->reg->mailbox[3])));
                        break;

                case MBOX_ASTS_DATABASE_CHANGED:
                        /* Queue AEN information and process it in the DPC
                         * routine */
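                        /*
                         * aen_in is the producer index and aen_out the
                         * consumer index of the MAX_AEN_ENTRIES-deep AEN
                         * queue; aen_q_count tracks the free slots and is
                         * incremented again when qla4xxx_process_aen()
                         * drains an entry.
                         */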
                        if (ha->aen_q_count > 0) {
                                /* advance pointer */
                                if (ha->aen_in == (MAX_AEN_ENTRIES - 1))
                                        ha->aen_in = 0;
                                else
                                        ha->aen_in++;

                                /* decrement available counter */
                                ha->aen_q_count--;

                                for (i = 1; i < MBOX_AEN_REG_COUNT; i++)
                                        ha->aen_q[ha->aen_in].mbox_sts[i] =
                                                readl(&ha->reg->mailbox[i]);

                                ha->aen_q[ha->aen_in].mbox_sts[0] = mbox_status;

                                /* print debug message */
                                DEBUG2(printk("scsi%ld: AEN[%d] %04x queued"
                                              " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n",
                                              ha->host_no, ha->aen_in,
                                              mbox_status,
                                              ha->aen_q[ha->aen_in].mbox_sts[1],
                                              ha->aen_q[ha->aen_in].mbox_sts[2],
                                              ha->aen_q[ha->aen_in].mbox_sts[3],
                                              ha->aen_q[ha->aen_in].mbox_sts[4]));

                                /* The DPC routine will process the aen */
                                set_bit(DPC_AEN, &ha->dpc_flags);
                        } else {
                                DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
                                              "overflowed! AEN LOST!!\n",
                                              ha->host_no, __func__,
                                              mbox_status));

                                DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
                                              ha->host_no));

                                for (i = 0; i < MAX_AEN_ENTRIES; i++) {
                                        DEBUG2(printk("AEN[%d] %04x %04x %04x "
                                                      "%04x\n", i,
                                                      ha->aen_q[i].mbox_sts[0],
                                                      ha->aen_q[i].mbox_sts[1],
                                                      ha->aen_q[i].mbox_sts[2],
                                                      ha->aen_q[i].mbox_sts[3]));
                                }
                        }
                        break;

                default:
                        DEBUG2(printk(KERN_WARNING
                                      "scsi%ld: AEN %04x UNKNOWN\n",
                                      ha->host_no, mbox_status));
                        break;
                }
        } else {
                DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n",
                              ha->host_no, mbox_status));

                ha->mbox_status[0] = mbox_status;
        }
}

/**
 * qla4xxx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: interrupt status to service.
 *
 * This is the main interrupt service routine.
 * hardware_lock locked upon entry.  Runs in interrupt context.
 **/
void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
                                       uint32_t intr_status)
{
        /* Process response queue interrupt. */
        if (intr_status & CSR_SCSI_COMPLETION_INTR)
                qla4xxx_process_response_queue(ha);

        /* Process mailbox/asynch event interrupt.*/
        if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
                qla4xxx_isr_decode_mailbox(ha,
                                           readl(&ha->reg->mailbox[0]));

                /* Clear Mailbox Interrupt */
                writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
                       &ha->reg->ctrl_status);
                readl(&ha->reg->ctrl_status);
        }
}

/**
 * qla4xxx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 **/
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
{
        struct scsi_qla_host *ha;
        uint32_t intr_status;
        unsigned long flags = 0;
        uint8_t reqs_count = 0;

        ha = (struct scsi_qla_host *) dev_id;
        if (!ha) {
                DEBUG2(printk(KERN_INFO
                              "qla4xxx: Interrupt with NULL host ptr\n"));
                return IRQ_NONE;
        }

        spin_lock_irqsave(&ha->hardware_lock, flags);

        /*
         * Repeatedly service interrupts up to a maximum of
         * MAX_REQS_SERVICED_PER_INTR
         */
        while (1) {
                /*
                 * Read interrupt status
                 */
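                /*
                 * Checking the shadow copy of rsp_q_in first lets pending
                 * completions be detected from host memory; the MMIO read
                 * of ctrl_status is only done when the shadow indices match.
                 */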
                if (le32_to_cpu(ha->shadow_regs->rsp_q_in) !=
                    ha->response_out)
                        intr_status = CSR_SCSI_COMPLETION_INTR;
                else
                        intr_status = readl(&ha->reg->ctrl_status);

                if ((intr_status &
                     (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) ==
                    0) {
                        if (reqs_count == 0)
                                ha->spurious_int_count++;
                        break;
                }

                if (intr_status & CSR_FATAL_ERROR) {
                        DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
                                      "Status 0x%04x\n", ha->host_no,
                                      readl(isp_port_error_status(ha))));

                        /* Issue Soft Reset to clear this error condition.
                         * This will prevent the RISC from repeatedly
                         * interrupting the driver; thus, allowing the DPC to
                         * get scheduled to continue error recovery.
                         * NOTE: Disabling RISC interrupts does not work in
                         * this case, as CSR_FATAL_ERROR overrides
                         * CSR_SCSI_INTR_ENABLE */
                        if ((readl(&ha->reg->ctrl_status) &
                             CSR_SCSI_RESET_INTR) == 0) {
                                writel(set_rmask(CSR_SOFT_RESET),
                                       &ha->reg->ctrl_status);
                                readl(&ha->reg->ctrl_status);
                        }

                        writel(set_rmask(CSR_FATAL_ERROR),
                               &ha->reg->ctrl_status);
                        readl(&ha->reg->ctrl_status);

                        __qla4xxx_disable_intrs(ha);

                        set_bit(DPC_RESET_HA, &ha->dpc_flags);

                        break;
                } else if (intr_status & CSR_SCSI_RESET_INTR) {
                        clear_bit(AF_ONLINE, &ha->flags);
                        __qla4xxx_disable_intrs(ha);

                        writel(set_rmask(CSR_SCSI_RESET_INTR),
                               &ha->reg->ctrl_status);
                        readl(&ha->reg->ctrl_status);

                        set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);

                        break;
                } else if (intr_status & INTR_PENDING) {
                        qla4xxx_interrupt_service_routine(ha, intr_status);
                        ha->total_io_count++;
                        if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
                                break;

                        intr_status = 0;
                }
        }

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return IRQ_HANDLED;
}

/**
 * qla4xxx_process_aen - processes AENs generated by firmware
 * @ha: pointer to host adapter structure.
 * @process_aen: type of AENs to process
 *
 * Processes specific types of Asynchronous Events generated by firmware.
 * The type of AENs to process is specified by process_aen and can be
 *      PROCESS_ALL_AENS         0
 *      FLUSH_DDB_CHANGED_AENS   1
 *      RELOGIN_DDB_CHANGED_AENS 2
 **/
void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
{
        uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
        struct aen *aen;
        int i;
        unsigned long flags;

        spin_lock_irqsave(&ha->hardware_lock, flags);
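
        /*
         * The hardware_lock protects the AEN queue and its indices; it is
         * dropped while each dequeued AEN is handled below and re-taken
         * before the loop condition is evaluated again.
         */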
        while (ha->aen_out != ha->aen_in) {
                /* Advance pointers for next entry */
                if (ha->aen_out == (MAX_AEN_ENTRIES - 1))
                        ha->aen_out = 0;
                else
                        ha->aen_out++;

                ha->aen_q_count++;
                aen = &ha->aen_q[ha->aen_out];

                /* copy aen information to local structure */
                for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
                        mbox_sts[i] = aen->mbox_sts[i];

                spin_unlock_irqrestore(&ha->hardware_lock, flags);

                DEBUG(printk("scsi%ld: AEN[%d] %04x, index [%d] state=%04x "
                             "mod=%x conerr=%08x\n", ha->host_no, ha->aen_out,
                             mbox_sts[0], mbox_sts[2], mbox_sts[3],
                             mbox_sts[1], mbox_sts[4]));

                switch (mbox_sts[0]) {
                case MBOX_ASTS_DATABASE_CHANGED:
                        if (process_aen == FLUSH_DDB_CHANGED_AENS) {
                                DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
                                              "[%d] state=%04x FLUSHED!\n",
                                              ha->host_no, ha->aen_out,
                                              mbox_sts[0], mbox_sts[2],
                                              mbox_sts[3]));
                                break;
                        } else if (process_aen == RELOGIN_DDB_CHANGED_AENS) {
                                /* for use during init time, we only want to
                                 * relogin non-active ddbs */
                                struct ddb_entry *ddb_entry;

                                /* FIXME: name length? */
                                ddb_entry =
                                        qla4xxx_lookup_ddb_by_fw_index(ha,
                                                                       mbox_sts[2]);
                                if (!ddb_entry)
                                        break;

                                ddb_entry->dev_scan_wait_to_complete_relogin =
                                        0;
                                ddb_entry->dev_scan_wait_to_start_relogin =
                                        jiffies +
                                        ((ddb_entry->default_time2wait +
                                          4) * HZ);

                                DEBUG2(printk("scsi%ld: ddb index [%d] initiate"
                                              " RELOGIN after %d seconds\n",
                                              ha->host_no,
                                              ddb_entry->fw_ddb_index,
                                              ddb_entry->default_time2wait +
                                              4));
                                break;
                        }

                        if (mbox_sts[1] == 0) {         /* Global DB change. */
                                qla4xxx_reinitialize_ddb_list(ha);
                        } else if (mbox_sts[1] == 1) {  /* Specific device. */
                                qla4xxx_process_ddb_changed(ha, mbox_sts[2],
                                                            mbox_sts[3]);
                        }
                        break;
                }

                spin_lock_irqsave(&ha->hardware_lock, flags);
        }

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
}