@@ -226,15 +226,23 @@ int fnic_fw_reset_handler(struct fnic *fnic)

if (!vnic_wq_copy_desc_avail(wq))
ret = -EAGAIN;
- else
+ else {
fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
+ atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+ if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+ atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+ atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+ atomic64_read(
+ &fnic->fnic_stats.fw_stats.active_fw_reqs));
+ }

spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);

- if (!ret)
+ if (!ret) {
+ atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Issued fw reset\n");
- else {
+ } else {
fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Failed to issue fw reset\n");
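
The hunk above introduces the accounting idiom this patch repeats at every point that posts a copy-WQ descriptor to the firmware (FLOGI registration, I/O submission, abort and device-reset requests below): bump fw_stats.active_fw_reqs and, if the new value exceeds fw_stats.max_fw_reqs, record it as the new high-water mark. A minimal sketch of that update as a shared helper; the function name is invented for illustration only - the patch itself deliberately open-codes the sequence at each call site:

static inline void fnic_fw_req_inc(struct fnic *fnic)
{
        /* one more request is now outstanding in the firmware */
        atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);

        /*
         * Track the high-water mark.  A plain read-then-set is enough for
         * a diagnostic counter (see the note after the queuecommand hunks).
         */
        if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
            atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
                atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
                             atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
}

The matching decrement happens once, centrally, in fnic_fcpio_cmpl_handler() further down, which is why none of the individual completion handlers touch active_fw_reqs themselves.
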
@@ -291,6 +299,12 @@ int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
fc_id, fnic->ctlr.map_dest, gw_mac);
}

+ atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+ if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+ atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+ atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+ atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
flogi_reg_ioreq_end:
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
return ret;
@@ -310,6 +324,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
struct fc_rport_libfc_priv *rp = rport->dd_data;
struct host_sg_desc *desc;
+ struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
u8 pri_tag = 0;
unsigned int i;
unsigned long intr_flags;
@@ -358,6 +373,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"fnic_queue_wq_copy_desc failure - no descriptors\n");
+ atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -386,6 +402,12 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
rport->maxframe_size, rp->r_a_tov,
rp->e_d_tov);

+ atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+ if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+ atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+ atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+ atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
return 0;
}
@@ -401,6 +423,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
struct fc_rport *rport;
struct fnic_io_req *io_req = NULL;
struct fnic *fnic = lport_priv(lp);
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
struct vnic_wq_copy *wq;
int ret;
u64 cmd_trace;
@@ -414,6 +437,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
rport = starget_to_rport(scsi_target(sc->device));
ret = fc_remote_port_chkready(rport);
if (ret) {
+ atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
sc->result = ret;
done(sc);
return 0;
@@ -436,6 +460,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
/* Get a new io_req for this SCSI IO */
io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
if (!io_req) {
+ atomic64_inc(&fnic_stats->io_stats.alloc_failures);
ret = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
@@ -462,6 +487,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
GFP_ATOMIC);
if (!io_req->sgl_list) {
+ atomic64_inc(&fnic_stats->io_stats.alloc_failures);
ret = SCSI_MLQUEUE_HOST_BUSY;
scsi_dma_unmap(sc);
mempool_free(io_req, fnic->io_req_pool);
@@ -509,6 +535,13 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
mempool_free(io_req, fnic->io_req_pool);
}
} else {
+ atomic64_inc(&fnic_stats->io_stats.active_ios);
+ atomic64_inc(&fnic_stats->io_stats.num_ios);
+ if (atomic64_read(&fnic_stats->io_stats.active_ios) >
+ atomic64_read(&fnic_stats->io_stats.max_active_ios))
+ atomic64_set(&fnic_stats->io_stats.max_active_ios,
+ atomic64_read(&fnic_stats->io_stats.active_ios));
+
/* REVISIT: Use per IO lock in the final code */
CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
}
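
The same read-then-set idiom is used just above for io_stats.active_ios/max_active_ios. It is intentionally approximate: between the atomic64_read() comparison and the atomic64_set(), a completion on another CPU can lower active_ios or another submitter can publish its own maximum, so the recorded peak can occasionally be slightly off - a reasonable trade-off for counters that are only read for diagnostics. If an exact high-water mark were ever required, a compare-and-exchange loop would close that window; a hedged sketch, not something this patch does:

/* Strictly monotonic high-water-mark update; illustrative only. */
static inline void fnic_track_max(atomic64_t *max, s64 new_val)
{
        s64 old = atomic64_read(max);

        /* retry until new_val is published or a larger value is seen */
        while (new_val > old) {
                s64 prev = atomic64_cmpxchg(max, old, new_val);

                if (prev == old)
                        break;          /* update won the race */
                old = prev;             /* lost the race; re-evaluate */
        }
}

A caller would pass something like &fnic_stats->io_stats.max_active_ios together with the freshly incremented active_ios value.
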
@@ -542,12 +575,18 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
struct fcpio_tag tag;
int ret = 0;
unsigned long flags;
+ struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;

fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

+ atomic64_inc(&reset_stats->fw_reset_completions);
+
/* Clean up all outstanding io requests */
fnic_cleanup_io(fnic, SCSI_NO_TAG);

+ atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
+ atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
+
spin_lock_irqsave(&fnic->fnic_lock, flags);

/* fnic should be in FC_TRANS_ETH_MODE */
@@ -571,6 +610,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
* reset the firmware. Free the cached flogi
*/
fnic->state = FNIC_IN_FC_MODE;
+ atomic64_inc(&reset_stats->fw_reset_failures);
ret = -1;
}
} else {
@@ -578,6 +618,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
fnic->lport->host,
"Unexpected state %s while processing"
" reset cmpl\n", fnic_state_to_str(fnic->state));
+ atomic64_inc(&reset_stats->fw_reset_failures);
ret = -1;
}
@@ -701,10 +742,14 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

+ fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
if (is_ack_index_in_range(wq, request_out)) {
fnic->fw_ack_index[0] = request_out;
fnic->fw_ack_recd[0] = 1;
- }
+ } else
+ atomic64_inc(
+ &fnic->fnic_stats.misc_stats.ack_index_out_of_range);
+
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
FNIC_TRACE(fnic_fcpio_ack_handler,
fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
@@ -726,6 +771,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
struct fcpio_icmnd_cmpl *icmnd_cmpl;
struct fnic_io_req *io_req;
struct scsi_cmnd *sc;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
unsigned long flags;
spinlock_t *io_lock;
u64 cmd_trace;
@@ -746,6 +792,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
sc = scsi_host_find_tag(fnic->lport->host, id);
WARN_ON_ONCE(!sc);
if (!sc) {
+ atomic64_inc(&fnic_stats->io_stats.sc_null);
shost_printk(KERN_ERR, fnic->lport->host,
"icmnd_cmpl sc is null - "
"hdr status = %s tag = 0x%x desc = 0x%p\n",
@@ -766,6 +813,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
io_req = (struct fnic_io_req *)CMD_SP(sc);
WARN_ON_ONCE(!io_req);
if (!io_req) {
+ atomic64_inc(&fnic_stats->io_stats.ioreq_null);
CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
spin_unlock_irqrestore(io_lock, flags);
shost_printk(KERN_ERR, fnic->lport->host,
@@ -824,31 +872,54 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
xfer_len -= icmnd_cmpl->residual;

+ if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
+ atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
break;

case FCPIO_TIMEOUT: /* request was timed out */
+ atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
break;

case FCPIO_ABORTED: /* request was aborted */
+ atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
break;

case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
+ atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
scsi_set_resid(sc, icmnd_cmpl->residual);
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
break;

case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */
+ atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
break;
- case FCPIO_INVALID_HEADER: /* header contains invalid data */
- case FCPIO_INVALID_PARAM: /* some parameter in request invalid */
- case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
+
case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */
+ atomic64_inc(&fnic_stats->io_stats.io_not_found);
+ sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+ break;
+
case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */
- case FCPIO_MSS_INVALID: /* request was aborted due to mss error */
+ atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
+ sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+ break;
+
case FCPIO_FW_ERR: /* request was terminated due fw error */
+ atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
+ sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+ break;
+
+ case FCPIO_MSS_INVALID: /* request was aborted due to mss error */
+ atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
+ sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+ break;
+
+ case FCPIO_INVALID_HEADER: /* header contains invalid data */
+ case FCPIO_INVALID_PARAM: /* some parameter in request invalid */
+ case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
default:
shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
fnic_fcpio_status_to_str(hdr_status));
@@ -856,6 +927,11 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
break;
}

+ if (hdr_status != FCPIO_SUCCESS) {
+ atomic64_inc(&fnic_stats->io_stats.io_failures);
+ shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
+ fnic_fcpio_status_to_str(hdr_status));
+ }
/* Break link with the SCSI command */
CMD_SP(sc) = NULL;
CMD_FLAGS(sc) |= FNIC_IO_DONE;
@@ -889,6 +965,12 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
} else
fnic->lport->host_stats.fcp_control_requests++;

+ atomic64_dec(&fnic_stats->io_stats.active_ios);
+ if (atomic64_read(&fnic->io_cmpl_skip))
+ atomic64_dec(&fnic->io_cmpl_skip);
+ else
+ atomic64_inc(&fnic_stats->io_stats.io_completions);
+
/* Call SCSI completion function to complete the IO */
if (sc->scsi_done)
sc->scsi_done(sc);
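
The completion-side bookkeeping added above - decrement io_stats.active_ios, then either consume an io_cmpl_skip credit or count a genuine completion in io_stats.io_completions - reappears almost verbatim in the abort/terminate completion path and (without the active_ios decrement) in fnic_cleanup_io() below. fnic->io_cmpl_skip is raised by code outside these hunks; while it is non-zero, completions consume the credit instead of bumping io_completions. A hypothetical consolidation of the repeated block, with an invented name; the patch keeps the open-coded form at each site:

/* Illustrative helper for the repeated completion accounting. */
static inline void fnic_account_io_cmpl(struct fnic *fnic,
                                        atomic64_t *active,
                                        atomic64_t *completions)
{
        atomic64_dec(active);

        /* consume a pending skip credit instead of counting a completion */
        if (atomic64_read(&fnic->io_cmpl_skip))
                atomic64_dec(&fnic->io_cmpl_skip);
        else
                atomic64_inc(completions);
}
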
@@ -906,6 +988,10 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
u32 id;
struct scsi_cmnd *sc;
struct fnic_io_req *io_req;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
+ struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
+ struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
unsigned long flags;
spinlock_t *io_lock;
unsigned long start_time;
@@ -923,6 +1009,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
WARN_ON_ONCE(!sc);
if (!sc) {
+ atomic64_inc(&fnic_stats->io_stats.sc_null);
shost_printk(KERN_ERR, fnic->lport->host,
"itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
fnic_fcpio_status_to_str(hdr_status), id);
@@ -933,6 +1020,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
io_req = (struct fnic_io_req *)CMD_SP(sc);
WARN_ON_ONCE(!io_req);
if (!io_req) {
+ atomic64_inc(&fnic_stats->io_stats.ioreq_null);
spin_unlock_irqrestore(io_lock, flags);
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
shost_printk(KERN_ERR, fnic->lport->host,
@@ -957,6 +1045,31 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
spin_unlock_irqrestore(io_lock, flags);
} else if (id & FNIC_TAG_ABORT) {
/* Completion of abort cmd */
+ switch (hdr_status) {
+ case FCPIO_SUCCESS:
+ break;
+ case FCPIO_TIMEOUT:
+ if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+ atomic64_inc(&abts_stats->abort_fw_timeouts);
+ else
+ atomic64_inc(
+ &term_stats->terminate_fw_timeouts);
+ break;
+ case FCPIO_IO_NOT_FOUND:
+ if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+ atomic64_inc(&abts_stats->abort_io_not_found);
+ else
+ atomic64_inc(
+ &term_stats->terminate_io_not_found);
+ break;
+ default:
+ if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+ atomic64_inc(&abts_stats->abort_failures);
+ else
+ atomic64_inc(
+ &term_stats->terminate_failures);
+ break;
+ }
if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
/* This is a late completion. Ignore it */
spin_unlock_irqrestore(io_lock, flags);
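
Each abort/terminate completion status above is routed to either the abort_stats or the terminate_stats bucket, depending on whether the original request was issued as a host-initiated abort (FNIC_IO_ABTS_ISSUED set) or as an internally generated terminate. The same routing could be written once; a hedged alternative sketch with an invented name (the patch keeps the explicit switch, which is arguably easier to extend case by case):

/* Illustrative only: route one abort/terminate completion to a counter. */
static void fnic_count_abts_cmpl(struct scsi_cmnd *sc, u8 hdr_status,
                                 struct abort_stats *ab,
                                 struct terminate_stats *term)
{
        bool is_abort = CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED;

        switch (hdr_status) {
        case FCPIO_SUCCESS:
                break;
        case FCPIO_TIMEOUT:
                atomic64_inc(is_abort ? &ab->abort_fw_timeouts :
                                        &term->terminate_fw_timeouts);
                break;
        case FCPIO_IO_NOT_FOUND:
                atomic64_inc(is_abort ? &ab->abort_io_not_found :
                                        &term->terminate_io_not_found);
                break;
        default:
                atomic64_inc(is_abort ? &ab->abort_failures :
                                        &term->terminate_failures);
                break;
        }
}
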
@@ -964,6 +1077,16 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
}
CMD_ABTS_STATUS(sc) = hdr_status;
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
+
+ atomic64_dec(&fnic_stats->io_stats.active_ios);
+ if (atomic64_read(&fnic->io_cmpl_skip))
+ atomic64_dec(&fnic->io_cmpl_skip);
+ else
+ atomic64_inc(&fnic_stats->io_stats.io_completions);
+
+ if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
+ atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
+
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"abts cmpl recd. id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
@@ -1066,6 +1189,18 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
{
struct fnic *fnic = vnic_dev_priv(vdev);

+ switch (desc->hdr.type) {
+ case FCPIO_ICMND_CMPL: /* fw completed a command */
+ case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
+ case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
+ case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
+ case FCPIO_RESET_CMPL: /* fw completed reset */
+ atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+ break;
+ default:
+ break;
+ }
+
switch (desc->hdr.type) {
case FCPIO_ACK: /* fw copied copy wq desc to its queue */
fnic_fcpio_ack_handler(fnic, cq_index, desc);
@@ -1126,6 +1261,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
struct scsi_cmnd *sc;
spinlock_t *io_lock;
unsigned long start_time = 0;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;

for (i = 0; i < fnic->fnic_max_tag_id; i++) {
if (i == exclude_id)
@@ -1179,6 +1315,11 @@ cleanup_scsi_cmd:
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:"
" DID_TRANSPORT_DISRUPTED\n");

+ if (atomic64_read(&fnic->io_cmpl_skip))
+ atomic64_dec(&fnic->io_cmpl_skip);
+ else
+ atomic64_inc(&fnic_stats->io_stats.io_completions);
+
/* Complete the command to SCSI */
if (sc->scsi_done) {
FNIC_TRACE(fnic_cleanup_io,
@@ -1262,6 +1403,7 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
{
struct vnic_wq_copy *wq = &fnic->wq_copy[0];
struct Scsi_Host *host = fnic->lport->host;
+ struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
unsigned long flags;

spin_lock_irqsave(host->host_lock, flags);
@@ -1283,12 +1425,19 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
atomic_dec(&fnic->in_flight);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_queue_abort_io_req: failure: no descriptors\n");
+ atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
return 1;
}
fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
0, task_req, tag, fc_lun, io_req->port_id,
fnic->config.ra_tov, fnic->config.ed_tov);

+ atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+ if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+ atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+ atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+ atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
atomic_dec(&fnic->in_flight);
@@ -1299,10 +1448,13 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
int tag;
int abt_tag;
+ int term_cnt = 0;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
struct scsi_cmnd *sc;
+ struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
+ struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
struct scsi_lun fc_lun;
enum fnic_ioreq_state old_ioreq_state;
@@ -1366,6 +1518,7 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ atomic64_inc(&reset_stats->device_reset_terminates);
abt_tag = (tag | FNIC_TAG_DEV_RST);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_rport_exch_reset dev rst sc 0x%p\n",
@@ -1402,8 +1555,12 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
else
CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
spin_unlock_irqrestore(io_lock, flags);
+ atomic64_inc(&term_stats->terminates);
+ term_cnt++;
}
}
+ if (term_cnt > atomic64_read(&term_stats->max_terminates))
+ atomic64_set(&term_stats->max_terminates, term_cnt);

}
@@ -1411,6 +1568,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
{
int tag;
int abt_tag;
+ int term_cnt = 0;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
@@ -1420,6 +1578,8 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
struct fc_lport *lport;
struct fnic *fnic;
struct fc_rport *cmd_rport;
+ struct reset_stats *reset_stats;
+ struct terminate_stats *term_stats;
enum fnic_ioreq_state old_ioreq_state;

if (!rport) {
@@ -1448,6 +1608,9 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
if (fnic->in_remove)
return;

+ reset_stats = &fnic->fnic_stats.reset_stats;
+ term_stats = &fnic->fnic_stats.term_stats;
+
for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
abt_tag = tag;
io_lock = fnic_io_lock_tag(fnic, tag);
@@ -1504,6 +1667,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ atomic64_inc(&reset_stats->device_reset_terminates);
abt_tag = (tag | FNIC_TAG_DEV_RST);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
@@ -1540,8 +1704,12 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
else
CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
spin_unlock_irqrestore(io_lock, flags);
+ atomic64_inc(&term_stats->terminates);
+ term_cnt++;
}
}
+ if (term_cnt > atomic64_read(&term_stats->max_terminates))
+ atomic64_set(&term_stats->max_terminates, term_cnt);

}
@@ -1562,6 +1730,9 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
int ret = SUCCESS;
u32 task_req = 0;
struct scsi_lun fc_lun;
+ struct fnic_stats *fnic_stats;
+ struct abort_stats *abts_stats;
+ struct terminate_stats *term_stats;
int tag;
DECLARE_COMPLETION_ONSTACK(tm_done);
@@ -1572,6 +1743,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
lp = shost_priv(sc->device->host);

fnic = lport_priv(lp);
+ fnic_stats = &fnic->fnic_stats;
+ abts_stats = &fnic->fnic_stats.abts_stats;
+ term_stats = &fnic->fnic_stats.term_stats;
+
rport = starget_to_rport(scsi_target(sc->device));
tag = sc->request->tag;
FNIC_SCSI_DBG(KERN_DEBUG,
@@ -1630,8 +1805,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
*/
if (fc_remote_port_chkready(rport) == 0)
task_req = FCPIO_ITMF_ABT_TASK;
- else
+ else {
+ atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
task_req = FCPIO_ITMF_ABT_TASK_TERM;
+ }

/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
@@ -1646,10 +1823,13 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
ret = FAILED;
goto fnic_abort_cmd_end;
}
- if (task_req == FCPIO_ITMF_ABT_TASK)
+ if (task_req == FCPIO_ITMF_ABT_TASK) {
CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
- else
+ atomic64_inc(&fnic_stats->abts_stats.aborts);
+ } else {
CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
+ atomic64_inc(&fnic_stats->term_stats.terminates);
+ }

/*
* We queued an abort IO, wait for its completion.
@@ -1667,6 +1847,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)

io_req = (struct fnic_io_req *)CMD_SP(sc);
if (!io_req) {
+ atomic64_inc(&fnic_stats->io_stats.ioreq_null);
spin_unlock_irqrestore(io_lock, flags);
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
ret = FAILED;
@@ -1677,6 +1858,15 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
/* fw did not complete abort, timed out */
if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
spin_unlock_irqrestore(io_lock, flags);
+ if (task_req == FCPIO_ITMF_ABT_TASK) {
+ FNIC_SCSI_DBG(KERN_INFO,
+ fnic->lport->host, "Abort Driver Timeout\n");
+ atomic64_inc(&abts_stats->abort_drv_timeouts);
+ } else {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "Terminate Driver Timeout\n");
+ atomic64_inc(&term_stats->terminate_drv_timeouts);
+ }
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
ret = FAILED;
goto fnic_abort_cmd_end;
@@ -1721,6 +1911,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
{
struct vnic_wq_copy *wq = &fnic->wq_copy[0];
struct Scsi_Host *host = fnic->lport->host;
+ struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
struct scsi_lun fc_lun;
int ret = 0;
unsigned long intr_flags;
@@ -1742,6 +1933,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
if (!vnic_wq_copy_desc_avail(wq)) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"queue_dr_io_req failure - no descriptors\n");
+ atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
ret = -EAGAIN;
goto lr_io_req_end;
}
@@ -1754,6 +1946,12 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
fc_lun.scsi_lun, io_req->port_id,
fnic->config.ra_tov, fnic->config.ed_tov);

+ atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+ if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+ atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+ atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+ atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
lr_io_req_end:
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
atomic_dec(&fnic->in_flight);
@@ -1988,6 +2186,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
unsigned long flags;
unsigned long start_time = 0;
struct scsi_lun fc_lun;
+ struct fnic_stats *fnic_stats;
+ struct reset_stats *reset_stats;
int tag = 0;
DECLARE_COMPLETION_ONSTACK(tm_done);
int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/
@@ -1999,6 +2199,10 @@ int fnic_device_reset(struct scsi_cmnd *sc)
lp = shost_priv(sc->device->host);

fnic = lport_priv(lp);
+ fnic_stats = &fnic->fnic_stats;
+ reset_stats = &fnic->fnic_stats.reset_stats;
+
+ atomic64_inc(&reset_stats->device_resets);

rport = starget_to_rport(scsi_target(sc->device));
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
@@ -2009,8 +2213,10 @@ int fnic_device_reset(struct scsi_cmnd *sc)
goto fnic_device_reset_end;

/* Check if remote port up */
- if (fc_remote_port_chkready(rport))
+ if (fc_remote_port_chkready(rport)) {
+ atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
goto fnic_device_reset_end;
+ }

CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
/* Allocate tag if not present */
@@ -2086,6 +2292,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
* gets cleaned up during higher levels of EH
*/
if (status == FCPIO_INVALID_CODE) {
+ atomic64_inc(&reset_stats->device_reset_timeouts);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Device reset timed out\n");
CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
@@ -2199,6 +2406,10 @@ fnic_device_reset_end:
"Returning from device reset %s\n",
(ret == SUCCESS) ?
"SUCCESS" : "FAILED");
+
+ if (ret == FAILED)
+ atomic64_inc(&reset_stats->device_reset_failures);
+
return ret;
}
@@ -2208,13 +2419,17 @@ int fnic_reset(struct Scsi_Host *shost)
struct fc_lport *lp;
struct fnic *fnic;
int ret = 0;
+ struct reset_stats *reset_stats;

lp = shost_priv(shost);
fnic = lport_priv(lp);
+ reset_stats = &fnic->fnic_stats.reset_stats;

FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_reset called\n");

+ atomic64_inc(&reset_stats->fnic_resets);
+
/*
* Reset local port, this will clean up libFC exchanges,
* reset remote port sessions, and if link is up, begin flogi
@@ -2226,6 +2441,11 @@ int fnic_reset(struct Scsi_Host *shost)
(ret == 0) ?
"SUCCESS" : "FAILED");

+ if (ret == 0)
+ atomic64_inc(&reset_stats->fnic_reset_completions);
+ else
+ atomic64_inc(&reset_stats->fnic_reset_failures);
+
return ret;
}
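
All of the counters this file now maintains are independent atomic64_t values, so whatever eventually reports them (the reporting side is not part of this diff) can take lock-free snapshots with atomic64_read() and never perturbs the I/O paths above. A hedged illustration of such a read-only consumer; the function name and message format are invented for the example:

/* Example consumer: snapshot a few counters for a diagnostic printout. */
static void fnic_dump_io_counters(struct fnic *fnic)
{
        struct fnic_stats *stats = &fnic->fnic_stats;
        u64 active = atomic64_read(&stats->io_stats.active_ios);
        u64 peak = atomic64_read(&stats->io_stats.max_active_ios);
        u64 done = atomic64_read(&stats->io_stats.io_completions);
        u64 fw_live = atomic64_read(&stats->fw_stats.active_fw_reqs);

        shost_printk(KERN_INFO, fnic->lport->host,
                     "ios active %llu (peak %llu), completed %llu, fw reqs outstanding %llu\n",
                     active, peak, done, fw_live);
}
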