@@ -145,7 +145,49 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
 	return (cont_pkt);
 }
 
-/**
+static inline int
+qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
+{
+	uint8_t guard = scsi_host_get_guard(sp->cmd->device->host);
+
+	/* We only support T10 DIF right now */
+	if (guard != SHOST_DIX_GUARD_CRC) {
+		DEBUG2(printk(KERN_ERR "Unsupported guard: %d\n", guard));
+		return 0;
+	}
+
+	/* We always use DIF bundling for best performance */
+	*fw_prot_opts = 0;
+
+	/* Translate SCSI opcode to a protection opcode */
+	switch (scsi_get_prot_op(sp->cmd)) {
+	case SCSI_PROT_READ_STRIP:
+		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
+		break;
+	case SCSI_PROT_WRITE_INSERT:
+		*fw_prot_opts |= PO_MODE_DIF_INSERT;
+		break;
+	case SCSI_PROT_READ_INSERT:
+		*fw_prot_opts |= PO_MODE_DIF_INSERT;
+		break;
+	case SCSI_PROT_WRITE_STRIP:
+		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
+		break;
+	case SCSI_PROT_READ_PASS:
+		*fw_prot_opts |= PO_MODE_DIF_PASS;
+		break;
+	case SCSI_PROT_WRITE_PASS:
+		*fw_prot_opts |= PO_MODE_DIF_PASS;
+		break;
+	default:	/* Normal Request */
+		*fw_prot_opts |= PO_MODE_DIF_PASS;
+		break;
+	}
+
+	return scsi_prot_sg_count(sp->cmd);
+}
+
+/*
  * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
  * capable IOCB types.
  *
@@ -636,6 +678,8 @@ qla24xx_calc_iocbs(uint16_t dsds)
 		if ((dsds - 1) % 5)
 			iocbs++;
 	}
+	DEBUG3(printk(KERN_DEBUG "%s(): Required PKT(s) = %d\n",
+	    __func__, iocbs));
 	return iocbs;
 }
 
@@ -716,6 +760,453 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 	}
 }
 
+struct fw_dif_context {
+	uint32_t ref_tag;
+	uint16_t app_tag;
+	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
+	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
+};
+
+/*
+ * qla24xx_set_t10dif_tags - Extract Ref and App tags from SCSI command
+ *
+ */
+static inline void
+qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
+    unsigned int protcnt)
+{
+	struct sd_dif_tuple *spt;
+	unsigned char op = scsi_get_prot_op(cmd);
+
+	switch (scsi_get_prot_type(cmd)) {
+	/* For TYPE 0 protection: no checking */
+	case SCSI_PROT_DIF_TYPE0:
+		pkt->ref_tag_mask[0] = 0x00;
+		pkt->ref_tag_mask[1] = 0x00;
+		pkt->ref_tag_mask[2] = 0x00;
+		pkt->ref_tag_mask[3] = 0x00;
+		break;
+
+	/*
+	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
+	 * match LBA in CDB + N
+	 */
+	case SCSI_PROT_DIF_TYPE2:
+		break;
+
+	/* For TYPE 3 protection: 16 bit GUARD only */
+	case SCSI_PROT_DIF_TYPE3:
+		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
+		    pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
+			0x00;
+		break;
+
+	/*
+	 * For TYPE 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
+	 * 16 bit app tag.
+	 */
+	case SCSI_PROT_DIF_TYPE1:
+		if (!ql2xenablehba_err_chk)
+			break;
+
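+		/*
+		 * On writes with host-supplied PI (STRIP/PASS), seed the
+		 * ref tag from the first protection tuple; otherwise use
+		 * the LBA from the command.
+		 */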
+		if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
+		    op == SCSI_PROT_WRITE_PASS)) {
+			spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
+			    scsi_prot_sglist(cmd)[0].offset;
+			DEBUG18(printk(KERN_DEBUG
+			    "%s(): LBA from user %p, lba = 0x%x\n",
+			    __func__, spt, (int)spt->ref_tag));
+			pkt->ref_tag = swab32(spt->ref_tag);
+			pkt->app_tag_mask[0] = 0x0;
+			pkt->app_tag_mask[1] = 0x0;
+		} else {
+			pkt->ref_tag = cpu_to_le32((uint32_t)
+			    (0xffffffff & scsi_get_lba(cmd)));
+			pkt->app_tag = __constant_cpu_to_le16(0);
+			pkt->app_tag_mask[0] = 0x0;
+			pkt->app_tag_mask[1] = 0x0;
+		}
+		/* enable ALL bytes of the ref tag */
+		pkt->ref_tag_mask[0] = 0xff;
+		pkt->ref_tag_mask[1] = 0xff;
+		pkt->ref_tag_mask[2] = 0xff;
+		pkt->ref_tag_mask[3] = 0xff;
+		break;
+	}
+
+	DEBUG18(printk(KERN_DEBUG
+	    "%s(): Setting protection Tags: (BIG) ref tag = 0x%x,"
+	    " app tag = 0x%x, prot SG count %d, cmd lba 0x%x,"
+	    " prot_type=%u\n", __func__, pkt->ref_tag, pkt->app_tag, protcnt,
+	    (int)scsi_get_lba(cmd), scsi_get_prot_type(cmd)));
+}
+
+
+static int
+qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
+	uint16_t tot_dsds)
+{
+	void *next_dsd;
+	uint8_t avail_dsds = 0;
+	uint32_t dsd_list_len;
+	struct dsd_dma *dsd_ptr;
+	struct scatterlist *sg;
+	uint32_t *cur_dsd = dsd;
+	int i;
+	uint16_t used_dsds = tot_dsds;
+
+	uint8_t *cp;
+
+	scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
+		dma_addr_t sle_dma;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
+			    QLA_DSDS_PER_IOCB : used_dsds;
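+			/*
+			 * Each DSD is 12 bytes: a 64-bit address as two
+			 * 32-bit words plus a 32-bit length.  The +1 leaves
+			 * room for the entry that chains to the next list
+			 * (or the final null terminator).
+			 */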
+			dsd_list_len = (avail_dsds + 1) * 12;
+			used_dsds -= avail_dsds;
+
+			/* allocate tracking DS */
+			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+			if (!dsd_ptr)
+				return 1;
+
+			/* allocate new list */
+			dsd_ptr->dsd_addr = next_dsd =
+			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
+				&dsd_ptr->dsd_list_dma);
+
+			if (!next_dsd) {
+				/*
+				 * Need to clean up only this dsd_ptr; the
+				 * rest will be done by sp_free_dma()
+				 */
+				kfree(dsd_ptr);
+				return 1;
+			}
+
+			list_add_tail(&dsd_ptr->list,
+			    &((struct crc_context *)sp->ctx)->dsd_list);
+
+			sp->flags |= SRB_CRC_CTX_DSD_VALID;
+
+			/* add new list to cmd iocb or last list */
+			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+			*cur_dsd++ = dsd_list_len;
+			cur_dsd = (uint32_t *)next_dsd;
+		}
+		sle_dma = sg_dma_address(sg);
+		DEBUG18(printk("%s(): %p, sg entry %d - addr =0x%x 0x%x,"
+		    " len =%d\n", __func__, cur_dsd, i, LSD(sle_dma),
+		    MSD(sle_dma), sg_dma_len(sg)));
+		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
+
+		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
+			cp = page_address(sg_page(sg)) + sg->offset;
+			DEBUG18(printk("%s(): User Data buffer = %p:\n",
+			    __func__, cp));
+		}
+	}
+	/* Null termination */
+	*cur_dsd++ = 0;
+	*cur_dsd++ = 0;
+	*cur_dsd++ = 0;
+	return 0;
+}
+
+static int
+qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
+	uint32_t *dsd,
+	uint16_t tot_dsds)
+{
+	void *next_dsd;
+	uint8_t avail_dsds = 0;
+	uint32_t dsd_list_len;
+	struct dsd_dma *dsd_ptr;
+	struct scatterlist *sg;
+	int i;
+	struct scsi_cmnd *cmd;
+	uint32_t *cur_dsd = dsd;
+	uint16_t used_dsds = tot_dsds;
+
+	uint8_t *cp;
+
+
+	cmd = sp->cmd;
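+	/* Walk the protection scatterlist (one 8-byte DIF tuple per block) */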
+	scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
+		dma_addr_t sle_dma;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
+			    QLA_DSDS_PER_IOCB : used_dsds;
+			dsd_list_len = (avail_dsds + 1) * 12;
+			used_dsds -= avail_dsds;
+
+			/* allocate tracking DS */
+			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+			if (!dsd_ptr)
+				return 1;
+
+			/* allocate new list */
+			dsd_ptr->dsd_addr = next_dsd =
+			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
+				&dsd_ptr->dsd_list_dma);
+
+			if (!next_dsd) {
+				/*
+				 * Need to clean up only this dsd_ptr; the
+				 * rest will be done by sp_free_dma()
+				 */
+				kfree(dsd_ptr);
+				return 1;
+			}
+
+			list_add_tail(&dsd_ptr->list,
+			    &((struct crc_context *)sp->ctx)->dsd_list);
+
+			sp->flags |= SRB_CRC_CTX_DSD_VALID;
+
+			/* add new list to cmd iocb or last list */
+			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+			*cur_dsd++ = dsd_list_len;
+			cur_dsd = (uint32_t *)next_dsd;
+		}
+		sle_dma = sg_dma_address(sg);
+		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
+			DEBUG18(printk(KERN_DEBUG
+			    "%s(): %p, sg entry %d - addr =0x%x"
+			    " 0x%x, len =%d\n", __func__, cur_dsd, i,
+			    LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg)));
+		}
+		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+
+		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
+			cp = page_address(sg_page(sg)) + sg->offset;
+			DEBUG18(printk("%s(): Protection Data buffer = %p:\n",
+			    __func__, cp));
+		}
+		avail_dsds--;
+	}
+	/* Null termination */
+	*cur_dsd++ = 0;
+	*cur_dsd++ = 0;
+	*cur_dsd++ = 0;
+	return 0;
+}
+
+/**
+ * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
+ * Type CRC_2 IOCB types.
+ *
+ * @sp: SRB command to process
+ * @cmd_pkt: Command type CRC_2 IOCB
+ * @tot_dsds: Total number of segments to transfer
+ * @tot_prot_dsds: Total number of segments with protection information
+ * @fw_prot_opts: Protection options to be passed to the firmware
+ */
+static inline int
+qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
+    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
+{
+	uint32_t *cur_dsd, *fcp_dl;
+	scsi_qla_host_t *vha;
+	struct scsi_cmnd *cmd;
+	struct scatterlist *cur_seg;
+	int sgc;
+	uint32_t total_bytes;
+	uint32_t data_bytes;
+	uint32_t dif_bytes;
+	uint8_t bundling = 1;
+	uint16_t blk_size;
+	uint8_t *clr_ptr;
+	struct crc_context *crc_ctx_pkt = NULL;
+	struct qla_hw_data *ha;
+	uint8_t additional_fcpcdb_len;
+	uint16_t fcp_cmnd_len;
+	struct fcp_cmnd *fcp_cmnd;
+	dma_addr_t crc_ctx_dma;
+
+	cmd = sp->cmd;
+
+	sgc = 0;
+	/* Update entry type to indicate Command Type CRC_2 IOCB */
+	*((uint32_t *)(&cmd_pkt->entry_type)) =
+	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
+
+	/* No data transfer */
+	data_bytes = scsi_bufflen(cmd);
+	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
+		DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
+		    __func__, data_bytes));
+		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+		return QLA_SUCCESS;
+	}
+
+	vha = sp->fcport->vha;
+	ha = vha->hw;
+
+	DEBUG18(printk(KERN_DEBUG
+	    "%s(%ld): Executing cmd sp %p, pid=%ld, prot_op=%u.\n", __func__,
+	    vha->host_no, sp, cmd->serial_number, scsi_get_prot_op(sp->cmd)));
+
+	cmd_pkt->vp_index = sp->fcport->vp_idx;
+
+	/* Set transfer direction */
+	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+		cmd_pkt->control_flags =
+		    __constant_cpu_to_le16(CF_WRITE_DATA);
+	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+		cmd_pkt->control_flags =
+		    __constant_cpu_to_le16(CF_READ_DATA);
+	}
+
+	tot_prot_dsds = scsi_prot_sg_count(cmd);
+	if (!tot_prot_dsds)
+		bundling = 0;
+
+	/* Allocate CRC context from global pool */
+	crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
+	    GFP_ATOMIC, &crc_ctx_dma);
+
+	if (!crc_ctx_pkt)
+		goto crc_queuing_error;
+
+	/* Zero out CTX area. */
+	clr_ptr = (uint8_t *)crc_ctx_pkt;
+	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
+
+	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
+
+	sp->flags |= SRB_CRC_CTX_DMA_VALID;
+
+	/* Set handle */
+	crc_ctx_pkt->handle = cmd_pkt->handle;
+
+	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
+
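+	/*
+	 * The DIF fields at the head of the CRC context are laid out as
+	 * struct fw_dif_context, so fill them through that view.
+	 */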
+	qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *)
+	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);
+
+	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
+	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
+	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
+
+	/* Determine SCSI command length -- align to 4 byte boundary */
+	if (cmd->cmd_len > 16) {
+		DEBUG18(printk(KERN_INFO "%s(): **** SCSI CMD > 16\n",
+		    __func__));
+		additional_fcpcdb_len = cmd->cmd_len - 16;
+		if ((cmd->cmd_len % 4) != 0) {
+			/* SCSI cmd > 16 bytes must be multiple of 4 */
+			goto crc_queuing_error;
+		}
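+		/* 12-byte FCP_CMND header + CDB + 4-byte FCP_DL */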
+		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+	} else {
+		additional_fcpcdb_len = 0;
+		fcp_cmnd_len = 12 + 16 + 4;
+	}
+
+	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
+
+	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
+	if (cmd->sc_data_direction == DMA_TO_DEVICE)
+		fcp_cmnd->additional_cdb_len |= 1;
+	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+		fcp_cmnd->additional_cdb_len |= 2;
+
+	int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
+	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
+	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
+	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
+	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
+	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
+	fcp_cmnd->task_attribute = 0;
+	fcp_cmnd->task_managment = 0;
+
+	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
+
+	DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data"
+	    " entries %d, data bytes %d, Protection entries %d\n",
+	    __func__, vha->host_no, tot_dsds, (tot_dsds - tot_prot_dsds),
+	    data_bytes, tot_prot_dsds));
+
+	/* Compute dif len and adjust data len to include protection */
+	total_bytes = data_bytes;
+	dif_bytes = 0;
+	blk_size = cmd->device->sector_size;
+	if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE1) {
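+		/* 8 bytes of T10 PI (guard/app/ref tags) per logical block */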
+		dif_bytes = (data_bytes / blk_size) * 8;
+		total_bytes += dif_bytes;
+	}
+
+	if (!ql2xenablehba_err_chk)
+		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
+
+	if (!bundling) {
+		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
+	} else {
+		/*
+		 * Configure bundling if we need to fetch interleaving
+		 * protection PCI accesses
+		 */
+		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
+		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
+		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
+		    tot_prot_dsds);
+		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
+	}
+
+	/* Finish the common fields of CRC pkt */
+	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
+	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
+	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
+	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
+	/* Fibre channel byte count */
+	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
+	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
+	    additional_fcpcdb_len);
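+	/* FCP_DL sits right after the (possibly extended) CDB; big-endian */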
+	*fcp_dl = htonl(total_bytes);
+
+	DEBUG18(printk(KERN_INFO "%s(%ld): dif bytes = 0x%x (%d), total bytes"
+	    " = 0x%x (%d), data block size =0x%x (%d)\n", __func__,
+	    vha->host_no, dif_bytes, dif_bytes, total_bytes, total_bytes,
+	    crc_ctx_pkt->blk_size, crc_ctx_pkt->blk_size));
+
+	/* Walks data segments */
+
+	cmd_pkt->control_flags |=
+	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
+	if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
+	    (tot_dsds - tot_prot_dsds)))
+		goto crc_queuing_error;
+
+	if (bundling && tot_prot_dsds) {
+		/* Walks dif segments */
+		cur_seg = scsi_prot_sglist(cmd);
+		cmd_pkt->control_flags |=
+		    __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
+		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
+		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
+		    tot_prot_dsds))
+			goto crc_queuing_error;
+	}
+	return QLA_SUCCESS;
+
+crc_queuing_error:
+	DEBUG18(qla_printk(KERN_INFO, ha,
+	    "CMD sent FAILED crc_q error:sp = %p\n", sp));
+	/* Cleanup will be performed by the caller */
+
+	return QLA_FUNCTION_FAILED;
+}
 
 /**
  * qla24xx_start_scsi() - Send a SCSI command to the ISP
@@ -869,6 +1360,191 @@ queuing_error:
 	return QLA_FUNCTION_FAILED;
 }
 
+
+/**
+ * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla24xx_dif_start_scsi(srb_t *sp)
+{
+	int nseg;
+	unsigned long flags;
+	uint32_t *clr_ptr;
+	uint32_t index;
+	uint32_t handle;
+	uint16_t cnt;
+	uint16_t req_cnt = 0;
+	uint16_t tot_dsds;
+	uint16_t tot_prot_dsds;
+	uint16_t fw_prot_opts = 0;
+	struct req_que *req = NULL;
+	struct rsp_que *rsp = NULL;
+	struct scsi_cmnd *cmd = sp->cmd;
+	struct scsi_qla_host *vha = sp->fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct cmd_type_crc_2 *cmd_pkt;
+	uint32_t status = 0;
+
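+/* Track resources acquired so far so queuing_error can unwind them */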
+#define QDSS_GOT_Q_SPACE	BIT_0
+
+	/* Only process protection in this routine */
+	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL)
+		return qla24xx_start_scsi(sp);
+
+	/* Setup device pointers. */
+
+	qla25xx_set_que(sp, &rsp);
+	req = vha->req;
+
+	/* So we know we haven't pci_map'ed anything yet */
+	tot_dsds = 0;
+
+	/* Send marker if required */
+	if (vha->marker_needed != 0) {
+		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+		    QLA_SUCCESS)
+			return QLA_FUNCTION_FAILED;
+		vha->marker_needed = 0;
+	}
+
+	/* Acquire ring specific lock */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Check for room in outstanding command list. */
+	handle = req->current_outstanding_cmd;
+	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+		handle++;
+		if (handle == MAX_OUTSTANDING_COMMANDS)
+			handle = 1;
+		if (!req->outstanding_cmds[handle])
+			break;
+	}
+
+	if (index == MAX_OUTSTANDING_COMMANDS)
+		goto queuing_error;
+
+	/* Compute number of required data segments */
+	/* Map the sg table so we have an accurate count of sg entries needed */
+	if (scsi_sg_count(cmd)) {
+		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+		    scsi_sg_count(cmd), cmd->sc_data_direction);
+		if (unlikely(!nseg))
+			goto queuing_error;
+		else
+			sp->flags |= SRB_DMA_VALID;
+	} else
+		nseg = 0;
+
+	/* number of required data segments */
+	tot_dsds = nseg;
+
+	/* Compute number of required protection segments */
+	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
+		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+		if (unlikely(!nseg))
+			goto queuing_error;
+		else
+			sp->flags |= SRB_CRC_PROT_DMA_VALID;
+	} else {
+		nseg = 0;
+	}
+
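+	/*
+	 * A CRC_2 command occupies a single ring entry; its DSD lists are
+	 * chained through external DMA pool memory, not continuation IOCBs.
+	 */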
+	req_cnt = 1;
+	/* Total Data and protection sg segment(s) */
+	tot_prot_dsds = nseg;
+	tot_dsds += nseg;
+	if (req->cnt < (req_cnt + 2)) {
+		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
+
+		if (req->ring_index < cnt)
+			req->cnt = cnt - req->ring_index;
+		else
+			req->cnt = req->length -
+			    (req->ring_index - cnt);
+	}
+
+	if (req->cnt < (req_cnt + 2))
+		goto queuing_error;
+
+	status |= QDSS_GOT_Q_SPACE;
+
+	/* Build header part of command packet (excluding the OPCODE). */
+	req->current_outstanding_cmd = handle;
+	req->outstanding_cmds[handle] = sp;
+	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+	req->cnt -= req_cnt;
+
+	/* Fill-in common area */
+	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
+	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
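+	/* Zero out remaining portion of packet. */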
+	clr_ptr = (uint32_t *)cmd_pkt + 2;
+	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+
+	/* Set NPORT-ID and LUN number */
+	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+
+	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+	/* Total Data and protection segment(s) */
+	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+	/* Build IOCB segments and adjust for data protection segments */
+	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
+	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
+	    QLA_SUCCESS)
+		goto queuing_error;
+
+	cmd_pkt->entry_count = (uint8_t)req_cnt;
+	/* Specify response queue number where completion should happen */
+	cmd_pkt->entry_status = (uint8_t) rsp->id;
+	cmd_pkt->timeout = __constant_cpu_to_le16(0);
+	wmb();
+
+	/* Adjust ring index. */
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
+	} else
+		req->ring_ptr++;
+
+	/* Set chip new ring index. */
+	WRT_REG_DWORD(req->req_q_in, req->ring_index);
+	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
+
+	/* Manage unprocessed RIO/ZIO commands in response queue. */
+	if (vha->flags.process_response_queue &&
+	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+		qla24xx_process_response_queue(vha, rsp);
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return QLA_SUCCESS;
+
+queuing_error:
+	if (status & QDSS_GOT_Q_SPACE) {
+		req->outstanding_cmds[handle] = NULL;
+		req->cnt += req_cnt;
+	}
+	/* Cleanup will be performed by the caller (queuecommand) */
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	DEBUG18(qla_printk(KERN_INFO, ha,
+	    "CMD sent FAILED SCSI prot_op:%02x\n", scsi_get_prot_op(cmd)));
+	return QLA_FUNCTION_FAILED;
+}
+
+
 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
 {
 	struct scsi_cmnd *cmd = sp->cmd;