/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"
/**
 * qla4xxx_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 * @index: SRB index
 **/
static void qla4xxx_process_completed_request(struct scsi_qla_host *ha,
                                              uint32_t index)
{
        struct srb *srb;

        srb = qla4xxx_del_from_active_array(ha, index);
        if (srb) {
                /* Save ISP completion status */
                srb->cmd->result = DID_OK << 16;
                qla4xxx_srb_compl(ha, srb);
        } else {
                DEBUG2(printk("scsi%ld: Invalid ISP SCSI completion handle = "
                              "%d\n", ha->host_no, index));
                set_bit(DPC_RESET_HA, &ha->dpc_flags);
        }
}
/**
 * qla4xxx_status_entry - processes status IOCBs
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 **/
static void qla4xxx_status_entry(struct scsi_qla_host *ha,
                                 struct status_entry *sts_entry)
{
        uint8_t scsi_status;
        struct scsi_cmnd *cmd;
        struct srb *srb;
        struct ddb_entry *ddb_entry;
        uint32_t residual;
        uint16_t sensebytecnt;

        if (sts_entry->completionStatus == SCS_COMPLETE &&
            sts_entry->scsiStatus == 0) {
                qla4xxx_process_completed_request(ha,
                                                  le32_to_cpu(sts_entry->
                                                              handle));
                return;
        }

        srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
        if (!srb) {
                /* FIXMEdg: Don't we need to reset ISP in this case??? */
                DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
                              "handle 0x%x, sp=%p. This cmd may have already "
                              "been completed.\n", ha->host_no, __func__,
                              le32_to_cpu(sts_entry->handle), srb));
                return;
        }

        cmd = srb->cmd;
        if (cmd == NULL) {
                DEBUG2(printk("scsi%ld: %s: Command already returned back to "
                              "OS pkt->handle=%d srb=%p srb->state:%d\n",
                              ha->host_no, __func__, sts_entry->handle,
                              srb, srb->state));
                dev_warn(&ha->pdev->dev, "Command is NULL:"
                         " already returned to OS (srb=%p)\n", srb);
                return;
        }

        ddb_entry = srb->ddb;
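        /* No target (ddb) is associated with this command; fail it back
         * to the midlayer with DID_NO_CONNECT. */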
        if (ddb_entry == NULL) {
                cmd->result = DID_NO_CONNECT << 16;
                goto status_entry_exit;
        }

        residual = le32_to_cpu(sts_entry->residualByteCnt);

        /* Translate ISP error to a Linux SCSI error. */
        scsi_status = sts_entry->scsiStatus;
        switch (sts_entry->completionStatus) {
        case SCS_COMPLETE:
                if (scsi_status == 0) {
                        cmd->result = DID_OK << 16;
                        break;
                }

                if (sts_entry->iscsiFlags &
                    (ISCSI_FLAG_RESIDUAL_OVER|ISCSI_FLAG_RESIDUAL_UNDER))
                        cmd->resid = residual;

                cmd->result = DID_OK << 16 | scsi_status;

                if (scsi_status != SCSI_CHECK_CONDITION)
                        break;

                /* Copy Sense Data into sense buffer. */
                memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));

                sensebytecnt = le16_to_cpu(sts_entry->senseDataByteCnt);
                if (sensebytecnt == 0)
                        break;

                memcpy(cmd->sense_buffer, sts_entry->senseData,
                       min(sensebytecnt,
                           (uint16_t) sizeof(cmd->sense_buffer)));

                DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
                              "ASC/ASCQ = %02x/%02x\n", ha->host_no,
                              cmd->device->channel, cmd->device->id,
                              cmd->device->lun, __func__,
                              sts_entry->senseData[2] & 0x0f,
                              sts_entry->senseData[12],
                              sts_entry->senseData[13]));

                srb->flags |= SRB_GOT_SENSE;
                break;
        case SCS_INCOMPLETE:
                /* Always set the status to DID_ERROR, since
                 * all conditions result in that status anyway */
                cmd->result = DID_ERROR << 16;
                break;

        case SCS_RESET_OCCURRED:
                DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Device RESET occurred\n",
                              ha->host_no, cmd->device->channel,
                              cmd->device->id, cmd->device->lun, __func__));

                cmd->result = DID_RESET << 16;
                break;

        case SCS_ABORTED:
                DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Abort occurred\n",
                              ha->host_no, cmd->device->channel,
                              cmd->device->id, cmd->device->lun, __func__));

                cmd->result = DID_RESET << 16;
                break;

        case SCS_TIMEOUT:
                DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: Timeout\n",
                              ha->host_no, cmd->device->channel,
                              cmd->device->id, cmd->device->lun));

                cmd->result = DID_BUS_BUSY << 16;

                /*
                 * Mark device missing so that we won't continue to send
                 * I/O to this device.  We should get a ddb state change
                 * AEN soon.
                 */
                if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
                        qla4xxx_mark_device_missing(ha, ddb_entry);
                break;

        case SCS_DATA_UNDERRUN:
        case SCS_DATA_OVERRUN:
                if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
                        DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Data overrun, "
                                      "residual = 0x%x\n", ha->host_no,
                                      cmd->device->channel, cmd->device->id,
                                      cmd->device->lun, __func__, residual));

                        cmd->result = DID_ERROR << 16;
                        break;
                }

                if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
                        /*
                         * Firmware detected a SCSI transport underrun
                         * condition
                         */
                        cmd->resid = residual;
                        DEBUG2(printk("scsi%ld:%d:%d:%d: %s: UNDERRUN status "
                                      "detected, xferlen = 0x%x, residual = "
                                      "0x%x\n",
                                      ha->host_no, cmd->device->channel,
                                      cmd->device->id,
                                      cmd->device->lun, __func__,
                                      cmd->request_bufflen,
                                      residual));
                }
                /*
                 * If there is scsi_status, it takes precedence over
                 * underflow condition.
                 */
                if (scsi_status != 0) {
                        cmd->result = DID_OK << 16 | scsi_status;

                        if (scsi_status != SCSI_CHECK_CONDITION)
                                break;

                        /* Copy Sense Data into sense buffer. */
                        memset(cmd->sense_buffer, 0,
                               sizeof(cmd->sense_buffer));

                        sensebytecnt =
                                le16_to_cpu(sts_entry->senseDataByteCnt);
                        if (sensebytecnt == 0)
                                break;

                        memcpy(cmd->sense_buffer, sts_entry->senseData,
                               min(sensebytecnt,
                                   (uint16_t) sizeof(cmd->sense_buffer)));

                        DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
                                      "ASC/ASCQ = %02x/%02x\n", ha->host_no,
                                      cmd->device->channel, cmd->device->id,
                                      cmd->device->lun, __func__,
                                      sts_entry->senseData[2] & 0x0f,
                                      sts_entry->senseData[12],
                                      sts_entry->senseData[13]));
                } else {
                        /*
                         * If RISC reports underrun and target does not
                         * report it then we must have a lost frame, so
                         * tell upper layer to retry it by reporting a
                         * bus busy.
                         */
                        if ((sts_entry->iscsiFlags &
                             ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
                                cmd->result = DID_BUS_BUSY << 16;
                        } else if ((cmd->request_bufflen - residual) <
                                   cmd->underflow) {
                                /*
                                 * Handle mid-layer underflow???
                                 *
                                 * For kernels less than 2.4, the driver must
                                 * return an error if an underflow is detected.
                                 * For kernels equal-to and above 2.4, the
                                 * mid-layer will apparently handle the
                                 * underflow by detecting the residual count --
                                 * unfortunately, we do not see where this is
                                 * actually being done.  In the interim, we
                                 * will return DID_ERROR.
                                 */
                                DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
                                              "Mid-layer Data underrun, "
                                              "xferlen = 0x%x, "
                                              "residual = 0x%x\n", ha->host_no,
                                              cmd->device->channel,
                                              cmd->device->id,
                                              cmd->device->lun, __func__,
                                              cmd->request_bufflen, residual));

                                cmd->result = DID_ERROR << 16;
                        } else {
                                cmd->result = DID_OK << 16;
                        }
                }
                break;
        case SCS_DEVICE_LOGGED_OUT:
        case SCS_DEVICE_UNAVAILABLE:
                /*
                 * Mark device missing so that we won't continue to
                 * send I/O to this device.  We should get a ddb
                 * state change AEN soon.
                 */
                if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
                        qla4xxx_mark_device_missing(ha, ddb_entry);

                cmd->result = DID_BUS_BUSY << 16;
                break;

        case SCS_QUEUE_FULL:
                /*
                 * SCSI Mid-Layer handles device queue full
                 */
                cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
                DEBUG2(printk("scsi%ld:%d:%d: %s: QUEUE FULL detected "
                              "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
                              " iResp=%02x\n", ha->host_no, cmd->device->id,
                              cmd->device->lun, __func__,
                              sts_entry->completionStatus,
                              sts_entry->scsiStatus, sts_entry->state_flags,
                              sts_entry->iscsiFlags,
                              sts_entry->iscsiResponse));
                break;

        default:
                cmd->result = DID_ERROR << 16;
                break;
        }

status_entry_exit:

        /* complete the request */
        srb->cc_stat = sts_entry->completionStatus;
        qla4xxx_srb_compl(ha, srb);
}
/**
 * qla4xxx_process_response_queue - process response queue completions
 * @ha: Pointer to host adapter structure.
 *
 * This routine processes response queue completions in interrupt context.
 * Hardware_lock locked upon entry
 **/
static void qla4xxx_process_response_queue(struct scsi_qla_host * ha)
{
        uint32_t count = 0;
        struct srb *srb = NULL;
        struct status_entry *sts_entry;

        /* Process all responses from response queue */
        while ((ha->response_in =
                (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in)) !=
               ha->response_out) {
                sts_entry = (struct status_entry *) ha->response_ptr;
                count++;

                /* Advance pointers for next entry */
                if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
                        ha->response_out = 0;
                        ha->response_ptr = ha->response_ring;
                } else {
                        ha->response_out++;
                        ha->response_ptr++;
                }

                /* process entry */
                switch (sts_entry->hdr.entryType) {
                case ET_STATUS:
                        /*
                         * Common status - Single completion posted in single
                         * IOSB.
                         */
                        qla4xxx_status_entry(ha, sts_entry);
                        break;

                case ET_PASSTHRU_STATUS:
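                        /* No driver-side processing is done for pass-through
                         * status entries. */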
                        break;

                case ET_STATUS_CONTINUATION:
                        /* Just throw away the status continuation entries */
                        DEBUG2(printk("scsi%ld: %s: Status Continuation entry "
                                      "- ignoring\n", ha->host_no, __func__));
                        break;

                case ET_COMMAND:
                        /* ISP device queue is full. Command not
                         * accepted by ISP. Queue command for
                         * later */
                        srb = qla4xxx_del_from_active_array(ha,
                                                            le32_to_cpu(sts_entry->
                                                                        handle));
                        if (srb == NULL)
                                goto exit_prq_invalid_handle;

                        DEBUG2(printk("scsi%ld: %s: FW device queue full, "
                                      "srb %p\n", ha->host_no, __func__, srb));

                        /* Retry normally by sending it back with
                         * DID_BUS_BUSY */
                        srb->cmd->result = DID_BUS_BUSY << 16;
                        qla4xxx_srb_compl(ha, srb);
                        break;
                case ET_CONTINUE:
                        /* Just throw away the continuation entries */
                        DEBUG2(printk("scsi%ld: %s: Continuation entry - "
                                      "ignoring\n", ha->host_no, __func__));
                        break;

                default:
                        /*
                         * Invalid entry in response queue, reset RISC
                         * firmware.
                         */
                        DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
                                      "response queue \n", ha->host_no,
                                      __func__,
                                      sts_entry->hdr.entryType));
                        goto exit_prq_error;
                }
        }
        /*
         * Done with responses, update the ISP.  For QLA4010, this also clears
         * the interrupt.
         */
        writel(ha->response_out, &ha->reg->rsp_q_out);
        readl(&ha->reg->rsp_q_out);

        return;

exit_prq_invalid_handle:
        DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
                      ha->host_no, __func__, srb, sts_entry->hdr.entryType,
                      sts_entry->completionStatus));

exit_prq_error:
        writel(ha->response_out, &ha->reg->rsp_q_out);
        readl(&ha->reg->rsp_q_out);

        set_bit(DPC_RESET_HA, &ha->dpc_flags);
}
/**
 * qla4xxx_isr_decode_mailbox - decodes mailbox status
 * @ha: Pointer to host adapter structure.
 * @mbox_status: Mailbox status.
 *
 * This routine decodes the mailbox status during the ISR.
 * Hardware_lock locked upon entry. runs in interrupt context.
 **/
static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
                                       uint32_t mbox_status)
{
        int i;
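        /*
         * Mailbox values in the completion-status range (or the busy /
         * intermediate-completion codes) belong to the mailbox command
         * currently outstanding; values in the async-event range are
         * handled as AENs below.
         */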
        if ((mbox_status == MBOX_STS_BUSY) ||
            (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
            (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
                ha->mbox_status[0] = mbox_status;

                if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
                        /*
                         * Copy all mailbox registers to a temporary
                         * location and set mailbox command done flag
                         */
                        for (i = 1; i < ha->mbox_status_count; i++)
                                ha->mbox_status[i] =
                                        readl(&ha->reg->mailbox[i]);

                        set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
                }
        } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
                /* Immediately process the AENs that don't require much work.
                 * Only queue the database_changed AENs */
                switch (mbox_status) {
                case MBOX_ASTS_SYSTEM_ERROR:
                        /* Log Mailbox registers */
                        if (ql4xdontresethba) {
                                DEBUG2(printk("%s:Dont Reset HBA\n",
                                              __func__));
                        } else {
                                set_bit(AF_GET_CRASH_RECORD, &ha->flags);
                                set_bit(DPC_RESET_HA, &ha->dpc_flags);
                        }
                        break;

                case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
                case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
                case MBOX_ASTS_NVRAM_INVALID:
                case MBOX_ASTS_IP_ADDRESS_CHANGED:
                case MBOX_ASTS_DHCP_LEASE_EXPIRED:
                        DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
                                      "Reset HA\n", ha->host_no, mbox_status));

                        set_bit(DPC_RESET_HA, &ha->dpc_flags);
                        break;

                case MBOX_ASTS_LINK_UP:
                        DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK UP\n",
                                      ha->host_no, mbox_status));
                        set_bit(AF_LINK_UP, &ha->flags);
                        break;

                case MBOX_ASTS_LINK_DOWN:
                        DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK DOWN\n",
                                      ha->host_no, mbox_status));
                        clear_bit(AF_LINK_UP, &ha->flags);
                        break;

                case MBOX_ASTS_HEARTBEAT:
                        ha->seconds_since_last_heartbeat = 0;
                        break;

                case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
                        DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
                                      "ACQUIRED\n", ha->host_no, mbox_status));
                        set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
                        break;

                case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
                case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target mode only */
                case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED:  /* Connection mode */
                case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
                case MBOX_ASTS_SUBNET_STATE_CHANGE:
                        /* No action */
                        DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
                                      mbox_status));
                        break;

                case MBOX_ASTS_MAC_ADDRESS_CHANGED:
                case MBOX_ASTS_DNS:
                        /* No action */
                        DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
                                      "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
                                      ha->host_no, mbox_status,
                                      readl(&ha->reg->mailbox[1]),
                                      readl(&ha->reg->mailbox[2])));
                        break;

                case MBOX_ASTS_SELF_TEST_FAILED:
                case MBOX_ASTS_LOGIN_FAILED:
                        /* No action */
                        DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
                                      "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
                                      ha->host_no, mbox_status,
                                      readl(&ha->reg->mailbox[1]),
                                      readl(&ha->reg->mailbox[2]),
                                      readl(&ha->reg->mailbox[3])));
                        break;
                case MBOX_ASTS_DATABASE_CHANGED:
                        /* Queue AEN information and process it in the DPC
                         * routine */
                        if (ha->aen_q_count > 0) {
                                /* advance pointer */
                                if (ha->aen_in == (MAX_AEN_ENTRIES - 1))
                                        ha->aen_in = 0;
                                else
                                        ha->aen_in++;

                                /* decrement available counter */
                                ha->aen_q_count--;

                                for (i = 1; i < MBOX_AEN_REG_COUNT; i++)
                                        ha->aen_q[ha->aen_in].mbox_sts[i] =
                                                readl(&ha->reg->mailbox[i]);

                                ha->aen_q[ha->aen_in].mbox_sts[0] = mbox_status;

                                /* print debug message */
                                DEBUG2(printk("scsi%ld: AEN[%d] %04x queued"
                                              " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n",
                                              ha->host_no, ha->aen_in,
                                              mbox_status,
                                              ha->aen_q[ha->aen_in].mbox_sts[1],
                                              ha->aen_q[ha->aen_in].mbox_sts[2],
                                              ha->aen_q[ha->aen_in].mbox_sts[3],
                                              ha->aen_q[ha->aen_in].mbox_sts[4]));

                                /* The DPC routine will process the aen */
                                set_bit(DPC_AEN, &ha->dpc_flags);
                        } else {
                                DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
                                              "overflowed! AEN LOST!!\n",
                                              ha->host_no, __func__,
                                              mbox_status));

                                DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
                                              ha->host_no));

                                for (i = 0; i < MAX_AEN_ENTRIES; i++) {
                                        DEBUG2(printk("AEN[%d] %04x %04x %04x "
                                                      "%04x\n", i,
                                                      ha->aen_q[i].mbox_sts[0],
                                                      ha->aen_q[i].mbox_sts[1],
                                                      ha->aen_q[i].mbox_sts[2],
                                                      ha->aen_q[i].mbox_sts[3]));
                                }
                        }
                        break;

                default:
                        DEBUG2(printk(KERN_WARNING
                                      "scsi%ld: AEN %04x UNKNOWN\n",
                                      ha->host_no, mbox_status));
                        break;
                }
        } else {
                DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n",
                              ha->host_no, mbox_status));

                ha->mbox_status[0] = mbox_status;
        }
}
/**
 * qla4xxx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: adapter interrupt status to service.
 *
 * This is the main interrupt service routine.
 * hardware_lock locked upon entry. runs in interrupt context.
 **/
void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
                                       uint32_t intr_status)
{
        /* Process response queue interrupt. */
        if (intr_status & CSR_SCSI_COMPLETION_INTR)
                qla4xxx_process_response_queue(ha);

        /* Process mailbox/async event interrupt. */
        if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
                qla4xxx_isr_decode_mailbox(ha,
                                           readl(&ha->reg->mailbox[0]));

                /* Clear Mailbox Interrupt */
                writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
                       &ha->reg->ctrl_status);
                readl(&ha->reg->ctrl_status);
        }
}
/**
 * qla4xxx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 **/
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
{
        struct scsi_qla_host *ha;
        uint32_t intr_status;
        unsigned long flags = 0;
        uint8_t reqs_count = 0;

        ha = (struct scsi_qla_host *) dev_id;
        if (!ha) {
                DEBUG2(printk(KERN_INFO
                              "qla4xxx: Interrupt with NULL host ptr\n"));
                return IRQ_NONE;
        }

        spin_lock_irqsave(&ha->hardware_lock, flags);

        ha->isr_count++;
        /*
         * Repeatedly service interrupts up to a maximum of
         * MAX_REQS_SERVICED_PER_INTR
         */
        while (1) {
                /*
                 * Read interrupt status
                 */
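                /*
                 * The firmware keeps a shadow copy of the response queue
                 * in-pointer in host memory; if it differs from our
                 * response_out, a completion is already known to be pending
                 * and the ctrl_status register read can be skipped.
                 */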
                if (le32_to_cpu(ha->shadow_regs->rsp_q_in) !=
                    ha->response_out)
                        intr_status = CSR_SCSI_COMPLETION_INTR;
                else
                        intr_status = readl(&ha->reg->ctrl_status);

                if ((intr_status &
                     (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) ==
                    0) {
                        if (reqs_count == 0)
                                ha->spurious_int_count++;
                        break;
                }

                if (intr_status & CSR_FATAL_ERROR) {
                        DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
                                      "Status 0x%04x\n", ha->host_no,
                                      readl(isp_port_error_status (ha))));

                        /* Issue Soft Reset to clear this error condition.
                         * This will prevent the RISC from repeatedly
                         * interrupting the driver; thus, allowing the DPC to
                         * get scheduled to continue error recovery.
                         * NOTE: Disabling RISC interrupts does not work in
                         * this case, as CSR_FATAL_ERROR overrides
                         * CSR_SCSI_INTR_ENABLE */
                        if ((readl(&ha->reg->ctrl_status) &
                             CSR_SCSI_RESET_INTR) == 0) {
                                writel(set_rmask(CSR_SOFT_RESET),
                                       &ha->reg->ctrl_status);
                                readl(&ha->reg->ctrl_status);
                        }

                        writel(set_rmask(CSR_FATAL_ERROR),
                               &ha->reg->ctrl_status);
                        readl(&ha->reg->ctrl_status);

                        __qla4xxx_disable_intrs(ha);

                        set_bit(DPC_RESET_HA, &ha->dpc_flags);

                        break;
                } else if (intr_status & CSR_SCSI_RESET_INTR) {
                        clear_bit(AF_ONLINE, &ha->flags);
                        __qla4xxx_disable_intrs(ha);

                        writel(set_rmask(CSR_SCSI_RESET_INTR),
                               &ha->reg->ctrl_status);
                        readl(&ha->reg->ctrl_status);

                        if (!ql4_mod_unload)
                                set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);

                        break;
                } else if (intr_status & INTR_PENDING) {
                        qla4xxx_interrupt_service_routine(ha, intr_status);
                        ha->total_io_count++;
                        if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
                                break;

                        intr_status = 0;
                }
        }

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return IRQ_HANDLED;
}
/**
 * qla4xxx_process_aen - processes AENs generated by firmware
 * @ha: pointer to host adapter structure.
 * @process_aen: type of AENs to process
 *
 * Processes specific types of Asynchronous Events generated by firmware.
 * The type of AENs to process is specified by process_aen and can be
 *      PROCESS_ALL_AENS         0
 *      FLUSH_DDB_CHANGED_AENS   1
 *      RELOGIN_DDB_CHANGED_AENS 2
 **/
void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
{
        uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
        struct aen *aen;
        int i;
        unsigned long flags;

        spin_lock_irqsave(&ha->hardware_lock, flags);

        while (ha->aen_out != ha->aen_in) {
                /* Advance pointers for next entry */
                if (ha->aen_out == (MAX_AEN_ENTRIES - 1))
                        ha->aen_out = 0;
                else
                        ha->aen_out++;

                ha->aen_q_count++;
                aen = &ha->aen_q[ha->aen_out];

                /* copy aen information to local structure */
                for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
                        mbox_sts[i] = aen->mbox_sts[i];
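                /*
                 * The entry has been copied to the local mbox_sts[], so the
                 * hardware lock can be dropped while the (potentially
                 * lengthy) DDB handling below runs; it is re-acquired before
                 * the next queue entry is examined.
                 */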
                spin_unlock_irqrestore(&ha->hardware_lock, flags);

                DEBUG(printk("scsi%ld: AEN[%d] %04x, index [%d] state=%04x "
                             "mod=%x conerr=%08x \n", ha->host_no, ha->aen_out,
                             mbox_sts[0], mbox_sts[2], mbox_sts[3],
                             mbox_sts[1], mbox_sts[4]));

                switch (mbox_sts[0]) {
                case MBOX_ASTS_DATABASE_CHANGED:
                        if (process_aen == FLUSH_DDB_CHANGED_AENS) {
                                DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
                                              "[%d] state=%04x FLUSHED!\n",
                                              ha->host_no, ha->aen_out,
                                              mbox_sts[0], mbox_sts[2],
                                              mbox_sts[3]));
                                break;
                        } else if (process_aen == RELOGIN_DDB_CHANGED_AENS) {
                                /* for use during init time, we only want to
                                 * relogin non-active ddbs */
                                struct ddb_entry *ddb_entry;

                                /* FIXME: name length? */
                                ddb_entry =
                                        qla4xxx_lookup_ddb_by_fw_index(ha,
                                                                       mbox_sts[2]);
                                if (!ddb_entry)
                                        break;

                                ddb_entry->dev_scan_wait_to_complete_relogin =
                                        0;
                                ddb_entry->dev_scan_wait_to_start_relogin =
                                        jiffies +
                                        ((ddb_entry->default_time2wait +
                                          4) * HZ);

                                DEBUG2(printk("scsi%ld: ddb index [%d] initate"
                                              " RELOGIN after %d seconds\n",
                                              ha->host_no,
                                              ddb_entry->fw_ddb_index,
                                              ddb_entry->default_time2wait +
                                              4));
                                break;
                        }

                        if (mbox_sts[1] == 0) { /* Global DB change. */
                                qla4xxx_reinitialize_ddb_list(ha);
                        } else if (mbox_sts[1] == 1) {  /* Specific device. */
                                qla4xxx_process_ddb_changed(ha, mbox_sts[2],
                                                            mbox_sts[3]);
                        }
                        break;
                }

                spin_lock_irqsave(&ha->hardware_lock, flags);
        }

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
}