|
@@ -709,20 +709,28 @@ struct fw_dif_context {
|
|
|
*
|
|
|
*/
|
|
|
static inline void
|
|
|
-qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
|
|
|
+qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
|
|
|
unsigned int protcnt)
|
|
|
{
|
|
|
- struct sd_dif_tuple *spt;
|
|
|
+ struct scsi_cmnd *cmd = sp->cmd;
|
|
|
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
|
|
|
- unsigned char op = scsi_get_prot_op(cmd);
|
|
|
|
|
|
switch (scsi_get_prot_type(cmd)) {
|
|
|
- /* For TYPE 0 protection: no checking */
|
|
|
case SCSI_PROT_DIF_TYPE0:
|
|
|
- pkt->ref_tag_mask[0] = 0x00;
|
|
|
- pkt->ref_tag_mask[1] = 0x00;
|
|
|
- pkt->ref_tag_mask[2] = 0x00;
|
|
|
- pkt->ref_tag_mask[3] = 0x00;
|
|
|
+ /*
|
|
|
+ * No check for ql2xenablehba_err_chk, as it would be an
|
|
|
+ * I/O error if hba tag generation is not done.
|
|
|
+ */
|
|
|
+ pkt->ref_tag = cpu_to_le32((uint32_t)
|
|
|
+ (0xffffffff & scsi_get_lba(cmd)));
|
|
|
+
|
|
|
+ if (!qla2x00_hba_err_chk_enabled(sp))
|
|
|
+ break;
|
|
|
+
|
|
|
+ pkt->ref_tag_mask[0] = 0xff;
|
|
|
+ pkt->ref_tag_mask[1] = 0xff;
|
|
|
+ pkt->ref_tag_mask[2] = 0xff;
|
|
|
+ pkt->ref_tag_mask[3] = 0xff;
|
|
|
break;
|
|
|
|
|
|
/*
|
|
@@ -730,20 +738,16 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
|
|
|
* match LBA in CDB + N
|
|
|
*/
|
|
|
case SCSI_PROT_DIF_TYPE2:
|
|
|
- if (!ql2xenablehba_err_chk)
|
|
|
- break;
|
|
|
-
|
|
|
- if (scsi_prot_sg_count(cmd)) {
|
|
|
- spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
|
|
|
- scsi_prot_sglist(cmd)[0].offset;
|
|
|
- pkt->app_tag = swab32(spt->app_tag);
|
|
|
- pkt->app_tag_mask[0] = 0xff;
|
|
|
- pkt->app_tag_mask[1] = 0xff;
|
|
|
- }
|
|
|
+ pkt->app_tag = __constant_cpu_to_le16(0);
|
|
|
+ pkt->app_tag_mask[0] = 0x0;
|
|
|
+ pkt->app_tag_mask[1] = 0x0;
|
|
|
|
|
|
pkt->ref_tag = cpu_to_le32((uint32_t)
|
|
|
(0xffffffff & scsi_get_lba(cmd)));
|
|
|
|
|
|
+ if (!qla2x00_hba_err_chk_enabled(sp))
|
|
|
+ break;
|
|
|
+
|
|
|
/* enable ALL bytes of the ref tag */
|
|
|
pkt->ref_tag_mask[0] = 0xff;
|
|
|
pkt->ref_tag_mask[1] = 0xff;
|
|
@@ -763,26 +767,15 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
|
|
|
* 16 bit app tag.
|
|
|
*/
|
|
|
case SCSI_PROT_DIF_TYPE1:
|
|
|
- if (!ql2xenablehba_err_chk)
|
|
|
+ pkt->ref_tag = cpu_to_le32((uint32_t)
|
|
|
+ (0xffffffff & scsi_get_lba(cmd)));
|
|
|
+ pkt->app_tag = __constant_cpu_to_le16(0);
|
|
|
+ pkt->app_tag_mask[0] = 0x0;
|
|
|
+ pkt->app_tag_mask[1] = 0x0;
|
|
|
+
|
|
|
+ if (!qla2x00_hba_err_chk_enabled(sp))
|
|
|
break;
|
|
|
|
|
|
- if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
|
|
|
- op == SCSI_PROT_WRITE_PASS)) {
|
|
|
- spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
|
|
|
- scsi_prot_sglist(cmd)[0].offset;
|
|
|
- ql_dbg(ql_dbg_io, vha, 0x3008,
|
|
|
- "LBA from user %p, lba = 0x%x for cmd=%p.\n",
|
|
|
- spt, (int)spt->ref_tag, cmd);
|
|
|
- pkt->ref_tag = swab32(spt->ref_tag);
|
|
|
- pkt->app_tag_mask[0] = 0x0;
|
|
|
- pkt->app_tag_mask[1] = 0x0;
|
|
|
- } else {
|
|
|
- pkt->ref_tag = cpu_to_le32((uint32_t)
|
|
|
- (0xffffffff & scsi_get_lba(cmd)));
|
|
|
- pkt->app_tag = __constant_cpu_to_le16(0);
|
|
|
- pkt->app_tag_mask[0] = 0x0;
|
|
|
- pkt->app_tag_mask[1] = 0x0;
|
|
|
- }
|
|
|
/* enable ALL bytes of the ref tag */
|
|
|
pkt->ref_tag_mask[0] = 0xff;
|
|
|
pkt->ref_tag_mask[1] = 0xff;
|
|
@@ -798,7 +791,161 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
|
|
|
scsi_get_prot_type(cmd), cmd);
|
|
|
}
|
|
|
|
|
|
/*
 * Walk state for carving a data scatter/gather list into
 * protection-interval (block) sized chunks, one chunk per call to
 * qla24xx_get_one_block_sg().
 */
struct qla2_sgx {
	dma_addr_t	dma_addr;	/* OUT: DMA address of the current chunk */
	uint32_t	dma_len;	/* OUT: length of the current chunk */

	uint32_t	tot_bytes;	/* IN: total data bytes to walk */
	struct scatterlist *cur_sg;	/* IN: current s/g element */

	/* for bookkeeping, bzero on initial invocation */
	uint32_t	bytes_consumed;	/* bytes already taken from cur_sg */
	uint32_t	num_bytes;	/* bytes emitted as whole blocks so far */
	uint32_t	tot_partial;	/* bytes carried toward an unfinished block */

	/* for debugging */
	uint32_t	num_sg;		/* s/g elements fully consumed */
	srb_t		*sp;
};
|
|
|
|
|
|
+static int
|
|
|
+qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
|
|
|
+ uint32_t *partial)
|
|
|
+{
|
|
|
+ struct scatterlist *sg;
|
|
|
+ uint32_t cumulative_partial, sg_len;
|
|
|
+ dma_addr_t sg_dma_addr;
|
|
|
+
|
|
|
+ if (sgx->num_bytes == sgx->tot_bytes)
|
|
|
+ return 0;
|
|
|
+
|
|
|
+ sg = sgx->cur_sg;
|
|
|
+ cumulative_partial = sgx->tot_partial;
|
|
|
+
|
|
|
+ sg_dma_addr = sg_dma_address(sg);
|
|
|
+ sg_len = sg_dma_len(sg);
|
|
|
+
|
|
|
+ sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
|
|
|
+
|
|
|
+ if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
|
|
|
+ sgx->dma_len = (blk_sz - cumulative_partial);
|
|
|
+ sgx->tot_partial = 0;
|
|
|
+ sgx->num_bytes += blk_sz;
|
|
|
+ *partial = 0;
|
|
|
+ } else {
|
|
|
+ sgx->dma_len = sg_len - sgx->bytes_consumed;
|
|
|
+ sgx->tot_partial += sgx->dma_len;
|
|
|
+ *partial = 1;
|
|
|
+ }
|
|
|
+
|
|
|
+ sgx->bytes_consumed += sgx->dma_len;
|
|
|
+
|
|
|
+ if (sg_len == sgx->bytes_consumed) {
|
|
|
+ sg = sg_next(sg);
|
|
|
+ sgx->num_sg++;
|
|
|
+ sgx->cur_sg = sg;
|
|
|
+ sgx->bytes_consumed = 0;
|
|
|
+ }
|
|
|
+
|
|
|
+ return 1;
|
|
|
+}
|
|
|
+
|
|
|
/*
 * Build the data DSD list for a command where the HBA inserts/strips
 * the protection data (no DIF bundling): the data s/g list is re-carved
 * into sector-size chunks and, after each completed protection
 * interval, an 8-byte entry from the protection s/g list is
 * interleaved.
 *
 * Returns 0 on success, 1 on allocation failure.  DSD lists already
 * linked onto the crc context are cleaned up later by sp_free_dma()
 * (see SRB_CRC_CTX_DSD_VALID).
 */
static int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;

	uint32_t prot_int;
	uint32_t partial;
	struct qla2_sgx sgx;
	dma_addr_t sle_dma;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd = sp->cmd;

	/* Protection interval == logical block size of the device. */
	prot_int = cmd->device->sector_size;

	/* Walk state over the *data* s/g list, carved at prot_int. */
	memset(&sgx, 0, sizeof(struct qla2_sgx));
	sgx.tot_bytes = scsi_bufflen(sp->cmd);
	sgx.cur_sg = scsi_sglist(sp->cmd);
	sgx.sp = sp;

	sg_prot = scsi_prot_sglist(sp->cmd);

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			/* +1 for the chaining entry; 12 bytes per DSD. */
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			/* Track the list so sp_free_dma() can release it. */
			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			/*
			 * NOTE(review): dsd_list_len is stored without
			 * cpu_to_le32, unlike the neighboring fields --
			 * confirm this is intended on big-endian hosts.
			 */
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		/* Emit the current chunk as one DSD entry. */
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			/*
			 * Follow the data DSD with the matching 8-byte
			 * entry from the protection s/g list.
			 */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
|
|
|
static int
|
|
|
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
|
|
|
uint16_t tot_dsds)
|
|
@@ -981,7 +1128,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
|
|
|
struct scsi_cmnd *cmd;
|
|
|
struct scatterlist *cur_seg;
|
|
|
int sgc;
|
|
|
- uint32_t total_bytes;
|
|
|
+ uint32_t total_bytes = 0;
|
|
|
uint32_t data_bytes;
|
|
|
uint32_t dif_bytes;
|
|
|
uint8_t bundling = 1;
|
|
@@ -1023,8 +1170,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
|
|
|
__constant_cpu_to_le16(CF_READ_DATA);
|
|
|
}
|
|
|
|
|
|
- tot_prot_dsds = scsi_prot_sg_count(cmd);
|
|
|
- if (!tot_prot_dsds)
|
|
|
+ if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) ||
|
|
|
+ (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) ||
|
|
|
+ (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) ||
|
|
|
+ (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT))
|
|
|
bundling = 0;
|
|
|
|
|
|
/* Allocate CRC context from global pool */
|
|
@@ -1047,7 +1196,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
|
|
|
|
|
|
INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
|
|
|
|
|
|
- qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *)
|
|
|
+ qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
|
|
|
&crc_ctx_pkt->ref_tag, tot_prot_dsds);
|
|
|
|
|
|
cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
|
|
@@ -1076,7 +1225,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
|
|
|
fcp_cmnd->additional_cdb_len |= 2;
|
|
|
|
|
|
int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
|
|
|
- host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun));
|
|
|
memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
|
|
|
cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
|
|
|
cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
|
|
@@ -1107,15 +1255,28 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
|
|
|
cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
|
|
|
|
|
|
/* Compute dif len and adjust data len to incude protection */
|
|
|
- total_bytes = data_bytes;
|
|
|
dif_bytes = 0;
|
|
|
blk_size = cmd->device->sector_size;
|
|
|
- if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
|
|
|
- dif_bytes = (data_bytes / blk_size) * 8;
|
|
|
- total_bytes += dif_bytes;
|
|
|
+ dif_bytes = (data_bytes / blk_size) * 8;
|
|
|
+
|
|
|
+ switch (scsi_get_prot_op(sp->cmd)) {
|
|
|
+ case SCSI_PROT_READ_INSERT:
|
|
|
+ case SCSI_PROT_WRITE_STRIP:
|
|
|
+ total_bytes = data_bytes;
|
|
|
+ data_bytes += dif_bytes;
|
|
|
+ break;
|
|
|
+
|
|
|
+ case SCSI_PROT_READ_STRIP:
|
|
|
+ case SCSI_PROT_WRITE_INSERT:
|
|
|
+ case SCSI_PROT_READ_PASS:
|
|
|
+ case SCSI_PROT_WRITE_PASS:
|
|
|
+ total_bytes = data_bytes + dif_bytes;
|
|
|
+ break;
|
|
|
+ default:
|
|
|
+ BUG();
|
|
|
}
|
|
|
|
|
|
- if (!ql2xenablehba_err_chk)
|
|
|
+ if (!qla2x00_hba_err_chk_enabled(sp))
|
|
|
fw_prot_opts |= 0x10; /* Disable Guard tag checking */
|
|
|
|
|
|
if (!bundling) {
|
|
@@ -1151,7 +1312,12 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
|
|
|
|
|
|
cmd_pkt->control_flags |=
|
|
|
__constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
|
|
|
- if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
|
|
|
+
|
|
|
+ if (!bundling && tot_prot_dsds) {
|
|
|
+ if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
|
|
|
+ cur_dsd, tot_dsds))
|
|
|
+ goto crc_queuing_error;
|
|
|
+ } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
|
|
|
(tot_dsds - tot_prot_dsds)))
|
|
|
goto crc_queuing_error;
|
|
|
|
|
@@ -1414,6 +1580,22 @@ qla24xx_dif_start_scsi(srb_t *sp)
|
|
|
goto queuing_error;
|
|
|
else
|
|
|
sp->flags |= SRB_DMA_VALID;
|
|
|
+
|
|
|
+ if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
|
|
|
+ (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
|
|
|
+ struct qla2_sgx sgx;
|
|
|
+ uint32_t partial;
|
|
|
+
|
|
|
+ memset(&sgx, 0, sizeof(struct qla2_sgx));
|
|
|
+ sgx.tot_bytes = scsi_bufflen(cmd);
|
|
|
+ sgx.cur_sg = scsi_sglist(cmd);
|
|
|
+ sgx.sp = sp;
|
|
|
+
|
|
|
+ nseg = 0;
|
|
|
+ while (qla24xx_get_one_block_sg(
|
|
|
+ cmd->device->sector_size, &sgx, &partial))
|
|
|
+ nseg++;
|
|
|
+ }
|
|
|
} else
|
|
|
nseg = 0;
|
|
|
|
|
@@ -1428,6 +1610,11 @@ qla24xx_dif_start_scsi(srb_t *sp)
|
|
|
goto queuing_error;
|
|
|
else
|
|
|
sp->flags |= SRB_CRC_PROT_DMA_VALID;
|
|
|
+
|
|
|
+ if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
|
|
|
+ (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
|
|
|
+ nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
|
|
|
+ }
|
|
|
} else {
|
|
|
nseg = 0;
|
|
|
}
|
|
@@ -1454,6 +1641,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
|
|
|
/* Build header part of command packet (excluding the OPCODE). */
|
|
|
req->current_outstanding_cmd = handle;
|
|
|
req->outstanding_cmds[handle] = sp;
|
|
|
+ sp->handle = handle;
|
|
|
sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
|
|
|
req->cnt -= req_cnt;
|
|
|
|