
target: Updates from AGrover and HCH (round 3)

This patch contains a squashed version of the third round of series cleanups,
improvements, and simplifications from Andy and Christoph ahead of the
heavy lifting between round 3 -> 4 for the target core SGL conversion.

This includes cleanups to the main target I/O path and other miscellaneous
updates.

target: Replace custom sg<->buf functions with lib funcs
target: Simplify sector limiting code
target: get_cdb should never return NULL
target: Simplify transport_memcpy_se_mem_read_contig
target: Use assignment rather than increment for t_task_cdbs
target: Don't pass dma_size to generic_get_mem
target: Pass sg with type scatterlist in transport_map_sg_to_mem
target: Move task_sg_num next to task_sg in struct se_task
target: inline struct se_transport_task into struct se_cmd
target: Change name & semantics of transport_get_sectors()
target: Remove unused members of se_cmd
target: Rename se_cmd.t_task_cdbs to t_task_list_num
target: Fix some spelling
target: Remove unused var from transport_generic_do_tmr
target: map_sg_to_mem: return sg_count in return value
target/pscsi: Use min_t for sector limits
target/pscsi: Unused param for pscsi_get_bio()
target: Rename get_cdb_count to allocate_tasks
target: Make transport_generic_new_cmd() available for iscsi-target
target: Remove fabric callback to allocate iovecs
target: Fix transport_generic_new_cmd WRITE comment
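
The "Replace custom sg<->buf functions with lib funcs" patch above swaps the
driver's hand-rolled scatterlist copy loops for the generic helpers in
lib/scatterlist.c. A minimal sketch of the resulting pattern (the wrapper
names and parameters here are illustrative, not taken from the patch):

    #include <linux/scatterlist.h>

    /*
     * Illustrative only: copy a linear kernel buffer into a scatterlist
     * and back using the generic library helpers instead of open-coded
     * kmap()/memcpy() loops.
     */
    static size_t example_buf_to_sg(struct scatterlist *sgl, unsigned int nents,
                                    void *buf, size_t buflen)
    {
            /* returns the number of bytes copied into the scatterlist */
            return sg_copy_from_buffer(sgl, nents, buf, buflen);
    }

    static size_t example_sg_to_buf(struct scatterlist *sgl, unsigned int nents,
                                    void *buf, size_t buflen)
    {
            /* returns the number of bytes copied out of the scatterlist */
            return sg_copy_to_buffer(sgl, nents, buf, buflen);
    }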

(hch: Use __GFP_ZERO usage for alloc_pages() usage)
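
The hch note refers to letting the page allocator zero pages at allocation
time rather than clearing them afterwards. A sketch of that idiom (a
standalone fragment, not the actual hunk from target_core_transport.c, whose
diff is not shown below):

    #include <linux/gfp.h>

    static struct page *example_alloc_zeroed_page(void)
    {
            /*
             * __GFP_ZERO asks alloc_pages() to hand back already-zeroed
             * memory, replacing a separate memset(page_address(page), 0,
             * PAGE_SIZE) after the allocation.
             */
            return alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
    }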

Signed-off-by: Andy Grover <agrover@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Andy Grover 14 years ago
parent
commit
a1d8b49abd
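
Most of the hunks below follow mechanically from the "inline struct
se_transport_task into struct se_cmd" patch: every access through the embedded
cmd->t_task now goes directly through the command itself. Schematically
(members taken from the hunks below):

    /* before: per-command task state lived in an embedded struct */
    unsigned char *cdb = cmd->t_task.t_task_cdb;
    spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);

    /* after: the members are folded straight into struct se_cmd */
    unsigned char *cdb = cmd->t_task_cdb;
    spin_lock_irqsave(&cmd->t_state_lock, flags);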

+ 2 - 9
drivers/target/loopback/tcm_loop.c

@@ -118,7 +118,7 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
 	 * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
 	 */
 	if (scsi_bidi_cmnd(sc))
-		se_cmd->t_task.t_tasks_bidi = 1;
+		se_cmd->t_tasks_bidi = 1;
 	/*
 	 * Locate the struct se_lun pointer and attach it to struct se_cmd
 	 */
@@ -169,7 +169,7 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
 	 * For BIDI commands, pass in the extra READ buffer
 	 * to transport_generic_map_mem_to_cmd() below..
 	 */
-	if (se_cmd->t_task.t_tasks_bidi) {
+	if (se_cmd->t_tasks_bidi) {
 		struct scsi_data_buffer *sdb = scsi_in(sc);
 
 		sgl_bidi = sdb->table.sgl;
@@ -1423,13 +1423,6 @@ static int tcm_loop_register_configfs(void)
 	fabric->tf_ops.tpg_release_fabric_acl =
 					&tcm_loop_tpg_release_fabric_acl;
 	fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index;
-	/*
-	 * Since tcm_loop is mapping physical memory from Linux/SCSI
-	 * struct scatterlist arrays for each struct scsi_cmnd I/O,
-	 * we do not need TCM to allocate a iovec array for
-	 * virtual memory address mappings
-	 */
-	fabric->tf_ops.alloc_cmd_iovecs = NULL;
 	/*
 	 * Used for setting up remaining TCM resources in process context
 	 */

+ 2 - 2
drivers/target/target_core_alua.c

@@ -65,7 +65,7 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
 	struct se_port *port;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
 				    Target port group descriptor */
 
@@ -157,7 +157,7 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
 	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
 	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */
 	u32 len = 4; /* Skip over RESERVED area in header */
 	int alua_access_state, primary = 0, rc;

+ 20 - 19
drivers/target/target_core_cdb.c

@@ -66,7 +66,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
 {
 	struct se_lun *lun = cmd->se_lun;
 	struct se_device *dev = cmd->se_dev;
-	unsigned char *buf = cmd->t_task.t_task_buf;
+	unsigned char *buf = cmd->t_task_buf;
 
 	/*
 	 * Make sure we at least have 6 bytes of INQUIRY response
@@ -621,8 +621,8 @@ static int
 target_emulate_inquiry(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	unsigned char *buf = cmd->t_task.t_task_buf;
-	unsigned char *cdb = cmd->t_task.t_task_cdb;
+	unsigned char *buf = cmd->t_task_buf;
+	unsigned char *cdb = cmd->t_task_cdb;
 
 	if (!(cdb[1] & 0x1))
 		return target_emulate_inquiry_std(cmd);
@@ -666,7 +666,7 @@ static int
 target_emulate_readcapacity(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	unsigned char *buf = cmd->t_task.t_task_buf;
+	unsigned char *buf = cmd->t_task_buf;
 	unsigned long long blocks_long = dev->transport->get_blocks(dev);
 	u32 blocks;
 
@@ -696,7 +696,7 @@ static int
 target_emulate_readcapacity_16(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	unsigned char *buf = cmd->t_task.t_task_buf;
+	unsigned char *buf = cmd->t_task_buf;
 	unsigned long long blocks = dev->transport->get_blocks(dev);
 
 	buf[0] = (blocks >> 56) & 0xff;
@@ -831,8 +831,8 @@ static int
 target_emulate_modesense(struct se_cmd *cmd, int ten)
 {
 	struct se_device *dev = cmd->se_dev;
-	char *cdb = cmd->t_task.t_task_cdb;
-	unsigned char *rbuf = cmd->t_task.t_task_buf;
+	char *cdb = cmd->t_task_cdb;
+	unsigned char *rbuf = cmd->t_task_buf;
 	int type = dev->transport->get_device_type(dev);
 	int offset = (ten) ? 8 : 4;
 	int length = 0;
@@ -903,8 +903,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
 static int
 target_emulate_request_sense(struct se_cmd *cmd)
 {
-	unsigned char *cdb = cmd->t_task.t_task_cdb;
-	unsigned char *buf = cmd->t_task.t_task_buf;
+	unsigned char *cdb = cmd->t_task_cdb;
+	unsigned char *buf = cmd->t_task_buf;
 	u8 ua_asc = 0, ua_ascq = 0;
 
 	if (cdb[1] & 0x01) {
@@ -965,8 +965,8 @@ target_emulate_unmap(struct se_task *task)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
-	unsigned char *buf = cmd->t_task.t_task_buf, *ptr = NULL;
-	unsigned char *cdb = &cmd->t_task.t_task_cdb[0];
+	unsigned char *buf = cmd->t_task_buf, *ptr = NULL;
+	unsigned char *cdb = &cmd->t_task_cdb[0];
 	sector_t lba;
 	unsigned int size = cmd->data_length, range;
 	int ret, offset;
@@ -1012,7 +1012,8 @@ target_emulate_write_same(struct se_task *task, int write_same32)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
-	sector_t range, lba = cmd->t_task.t_task_lba;
+	sector_t range;
+	sector_t lba = cmd->t_task_lba;
 	unsigned int num_blocks;
 	int ret;
 	/*
@@ -1021,9 +1022,9 @@ target_emulate_write_same(struct se_task *task, int write_same32)
 	 * range based on ->get_blocks() - starting LBA.
 	 */
 	if (write_same32)
-		num_blocks = get_unaligned_be32(&cmd->t_task.t_task_cdb[28]);
+		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
 	else
-		num_blocks = get_unaligned_be32(&cmd->t_task.t_task_cdb[10]);
+		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
 
 	if (num_blocks != 0)
 		range = num_blocks;
@@ -1052,7 +1053,7 @@ transport_emulate_control_cdb(struct se_task *task)
 	unsigned short service_action;
 	int ret = 0;
 
-	switch (cmd->t_task.t_task_cdb[0]) {
+	switch (cmd->t_task_cdb[0]) {
 	case INQUIRY:
 		ret = target_emulate_inquiry(cmd);
 		break;
@@ -1066,13 +1067,13 @@ transport_emulate_control_cdb(struct se_task *task)
 		ret = target_emulate_modesense(cmd, 1);
 		break;
 	case SERVICE_ACTION_IN:
-		switch (cmd->t_task.t_task_cdb[1] & 0x1f) {
+		switch (cmd->t_task_cdb[1] & 0x1f) {
 		case SAI_READ_CAPACITY_16:
 			ret = target_emulate_readcapacity_16(cmd);
 			break;
 		default:
 			printk(KERN_ERR "Unsupported SA: 0x%02x\n",
-				cmd->t_task.t_task_cdb[1] & 0x1f);
+				cmd->t_task_cdb[1] & 0x1f);
 			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 		}
 		break;
@@ -1097,7 +1098,7 @@ transport_emulate_control_cdb(struct se_task *task)
 		break;
 	case VARIABLE_LENGTH_CMD:
 		service_action =
-			get_unaligned_be16(&cmd->t_task.t_task_cdb[8]);
+			get_unaligned_be16(&cmd->t_task_cdb[8]);
 		switch (service_action) {
 		case WRITE_SAME_32:
 			if (!dev->transport->do_discard) {
@@ -1136,7 +1137,7 @@ transport_emulate_control_cdb(struct se_task *task)
 		break;
 	default:
 		printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n",
-			cmd->t_task.t_task_cdb[0], dev->transport->name);
+			cmd->t_task_cdb[0], dev->transport->name);
 		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 	}
 

+ 3 - 3
drivers/target/target_core_device.c

@@ -168,7 +168,7 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 	 */
 	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
 	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
-	atomic_set(&se_cmd->t_task.transport_lun_active, 1);
+	atomic_set(&se_cmd->transport_lun_active, 1);
 	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
 
 	return 0;
@@ -656,10 +656,10 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
 	struct se_lun *se_lun;
 	struct se_session *se_sess = se_cmd->se_sess;
 	struct se_task *se_task;
-	unsigned char *buf = se_cmd->t_task.t_task_buf;
+	unsigned char *buf = se_cmd->t_task_buf;
 	u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
 
-	list_for_each_entry(se_task, &se_cmd->t_task.t_task_list, t_list)
+	list_for_each_entry(se_task, &se_cmd->t_task_list, t_list)
 		break;
 
 	if (!(se_task)) {

+ 4 - 4
drivers/target/target_core_file.c

@@ -377,7 +377,7 @@ static void fd_emulate_sync_cache(struct se_task *task)
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
 	struct fd_dev *fd_dev = dev->dev_ptr;
-	int immed = (cmd->t_task.t_task_cdb[1] & 0x2);
+	int immed = (cmd->t_task_cdb[1] & 0x2);
 	loff_t start, end;
 	int ret;
 
@@ -391,11 +391,11 @@ static void fd_emulate_sync_cache(struct se_task *task)
 	/*
 	 * Determine if we will be flushing the entire device.
 	 */
-	if (cmd->t_task.t_task_lba == 0 && cmd->data_length == 0) {
+	if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
 		start = 0;
 		end = LLONG_MAX;
 	} else {
-		start = cmd->t_task.t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+		start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
 		if (cmd->data_length)
 			end = start + cmd->data_length;
 		else
@@ -475,7 +475,7 @@ static int fd_do_task(struct se_task *task)
 		if (ret > 0 &&
 		    dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
 		    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-		    cmd->t_task.t_tasks_fua) {
+		    cmd->t_tasks_fua) {
 			/*
 			 * We might need to be a bit smarter here
 			 * and return some sense data to let the initiator

+ 2 - 2
drivers/target/target_core_iblock.c

@@ -331,7 +331,7 @@ static void iblock_emulate_sync_cache(struct se_task *task)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
-	int immed = (cmd->t_task.t_task_cdb[1] & 0x2);
+	int immed = (cmd->t_task_cdb[1] & 0x2);
 	sector_t error_sector;
 	int ret;
 
@@ -400,7 +400,7 @@ static int iblock_do_task(struct se_task *task)
 		 */
 		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
 		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-		     task->task_se_cmd->t_task.t_tasks_fua))
+		     task->task_se_cmd->t_tasks_fua))
 			rw = WRITE_FUA;
 		else
 			rw = WRITE;

+ 11 - 11
drivers/target/target_core_pr.c

@@ -157,8 +157,8 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
 	struct se_session *sess = cmd->se_sess;
 	struct se_portal_group *tpg = sess->se_tpg;
 
-	if ((cmd->t_task.t_task_cdb[1] & 0x01) &&
-	    (cmd->t_task.t_task_cdb[1] & 0x02)) {
+	if ((cmd->t_task_cdb[1] & 0x01) &&
+	    (cmd->t_task_cdb[1] & 0x02)) {
 		printk(KERN_ERR "LongIO and Obselete Bits set, returning"
 				" ILLEGAL_REQUEST\n");
 		return PYX_TRANSPORT_ILLEGAL_REQUEST;
@@ -216,7 +216,7 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd)
 	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
 	struct t10_pr_registration *pr_reg;
 	struct t10_reservation *pr_tmpl = &su_dev->t10_pr;
-	unsigned char *cdb = &cmd->t_task.t_task_cdb[0];
+	unsigned char *cdb = &cmd->t_task_cdb[0];
 	int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS);
 	int conflict = 0;
 
@@ -1482,7 +1482,7 @@ static int core_scsi3_decode_spec_i_port(
 	struct list_head tid_dest_list;
 	struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
 	struct target_core_fabric_ops *tmp_tf_ops;
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
 	char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
 	u32 tpdl, tid_len = 0;
@@ -3307,7 +3307,7 @@ static int core_scsi3_emulate_pro_register_and_move(
 	struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
 	struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
 	struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	unsigned char *initiator_str;
 	char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
 	u32 tid_len, tmp_tid_len;
@@ -3723,7 +3723,7 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
  */
 static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
 {
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	u64 res_key, sa_res_key;
 	int sa, scope, type, aptpl;
 	int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
@@ -3830,7 +3830,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
 	struct se_device *se_dev = cmd->se_dev;
 	struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
 	struct t10_pr_registration *pr_reg;
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	u32 add_len = 0, off = 8;
 
 	if (cmd->data_length < 8) {
@@ -3885,7 +3885,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
 	struct se_device *se_dev = cmd->se_dev;
 	struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
 	struct t10_pr_registration *pr_reg;
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	u64 pr_res_key;
 	u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
 
@@ -3965,7 +3965,7 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	u16 add_len = 8; /* Hardcoded to 8. */
 
 	if (cmd->data_length < 6) {
@@ -4020,7 +4020,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
 	struct se_portal_group *se_tpg;
 	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
 	struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr;
-	unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task_buf;
 	u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
 	u32 off = 8; /* off into first Full Status descriptor */
 	int format_code = 0;
@@ -4174,7 +4174,7 @@ static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb)
 
 int core_scsi3_emulate_pr(struct se_cmd *cmd)
 {
-	unsigned char *cdb = &cmd->t_task.t_task_cdb[0];
+	unsigned char *cdb = &cmd->t_task_cdb[0];
 	struct se_device *dev = cmd->se_dev;
 	/*
 	 * Following spc2r20 5.5.1 Reservations overview:

+ 9 - 11
drivers/target/target_core_pscsi.c

@@ -328,10 +328,8 @@ static struct se_device *pscsi_add_device_to_list(
 	q = sd->request_queue;
 	limits = &dev_limits.limits;
 	limits->logical_block_size = sd->sector_size;
-	limits->max_hw_sectors = (sd->host->max_sectors > queue_max_hw_sectors(q)) ?
-				  queue_max_hw_sectors(q) : sd->host->max_sectors;
-	limits->max_sectors = (sd->host->max_sectors > queue_max_sectors(q)) ?
-				  queue_max_sectors(q) : sd->host->max_sectors;
+	limits->max_hw_sectors = min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+	limits->max_sectors = min_t(int, sd->host->max_sectors, queue_max_sectors(q));
 	dev_limits.hw_queue_depth = sd->queue_depth;
 	dev_limits.queue_depth = sd->queue_depth;
 	/*
@@ -697,7 +695,7 @@ static int pscsi_transport_complete(struct se_task *task)
 
 		if (task->task_se_cmd->se_deve->lun_flags &
 				TRANSPORT_LUNFLAGS_READ_ONLY) {
-			unsigned char *buf = task->task_se_cmd->t_task.t_task_buf;
+			unsigned char *buf = task->task_se_cmd->t_task_buf;
 
 			if (cdb[0] == MODE_SENSE_10) {
 				if (!(buf[3] & 0x80))
@@ -763,7 +761,7 @@ static struct se_task *
 pscsi_alloc_task(struct se_cmd *cmd)
 {
 	struct pscsi_plugin_task *pt;
-	unsigned char *cdb = cmd->t_task.t_task_cdb;
+	unsigned char *cdb = cmd->t_task_cdb;
 
 	pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL);
 	if (!pt) {
@@ -776,7 +774,7 @@ pscsi_alloc_task(struct se_cmd *cmd)
 	 * allocate the extended CDB buffer for per struct se_task context
 	 * pt->pscsi_cdb now.
 	 */
-	if (cmd->t_task.t_task_cdb != cmd->t_task.__t_task_cdb) {
+	if (cmd->t_task_cdb != cmd->__t_task_cdb) {
 
 		pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
 		if (!(pt->pscsi_cdb)) {
@@ -889,7 +887,7 @@ static void pscsi_free_task(struct se_task *task)
 	 * Release the extended CDB allocation from pscsi_alloc_task()
 	 * if one exists.
 	 */
-	if (cmd->t_task.t_task_cdb != cmd->t_task.__t_task_cdb)
+	if (cmd->t_task_cdb != cmd->__t_task_cdb)
 		kfree(pt->pscsi_cdb);
 	/*
 	 * We do not release the bio(s) here associated with this task, as
@@ -1053,7 +1051,7 @@ static void pscsi_bi_endio(struct bio *bio, int error)
 	bio_put(bio);
 }
 
-static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num)
+static inline struct bio *pscsi_get_bio(int sg_num)
 {
 	struct bio *bio;
 	/*
@@ -1126,7 +1124,7 @@ static int __pscsi_map_task_SG(
 				/*
 				 * Calls bio_kmalloc() and sets bio->bi_end_io()
 				 */
-				bio = pscsi_get_bio(pdv, nr_vecs);
+				bio = pscsi_get_bio(nr_vecs);
 				if (!(bio))
 					goto fail;
 
@@ -1266,7 +1264,7 @@ static int pscsi_map_task_non_SG(struct se_task *task)
 		return 0;
 
 	ret = blk_rq_map_kern(pdv->pdv_sd->request_queue,
-			pt->pscsi_req, cmd->t_task.t_task_buf,
+			pt->pscsi_req, cmd->t_task_buf,
 			task->task_size, GFP_KERNEL);
 	if (ret < 0) {
 		printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret);

+ 4 - 4
drivers/target/target_core_rd.c

@@ -737,7 +737,7 @@ check_eot:
 	}
 
 out:
-	task->task_se_cmd->t_task.t_tasks_se_num += *se_mem_cnt;
+	task->task_se_cmd->t_tasks_se_num += *se_mem_cnt;
 #ifdef DEBUG_RAMDISK_DR
 	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
 			*se_mem_cnt);
@@ -819,7 +819,7 @@ static int rd_DIRECT_without_offset(
 	}
 
 out:
-	task->task_se_cmd->t_task.t_tasks_se_num += *se_mem_cnt;
+	task->task_se_cmd->t_tasks_se_num += *se_mem_cnt;
 #ifdef DEBUG_RAMDISK_DR
 	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
 			*se_mem_cnt);
@@ -880,14 +880,14 @@ static int rd_DIRECT_do_se_mem_map(
 	 * across multiple struct se_task->task_sg[].
 	 */
 	ret = transport_init_task_sg(task,
-			list_first_entry(&cmd->t_task.t_mem_list,
+			list_first_entry(&cmd->t_mem_list,
 				   struct se_mem, se_list),
 			task_offset);
 	if (ret <= 0)
 		return ret;
 
 	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
-			list_first_entry(&cmd->t_task.t_mem_list,
+			list_first_entry(&cmd->t_mem_list,
 				   struct se_mem, se_list),
 			out_se_mem, se_mem_cnt, task_offset_in);
 }

+ 28 - 28
drivers/target/target_core_tmr.c

@@ -178,14 +178,14 @@ int core_tmr_lun_reset(
 			continue;
 		spin_unlock(&dev->se_tmr_lock);
 
-		spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
-		if (!(atomic_read(&cmd->t_task.t_transport_active))) {
-			spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_lock_irqsave(&cmd->t_state_lock, flags);
+		if (!(atomic_read(&cmd->t_transport_active))) {
+			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 			spin_lock(&dev->se_tmr_lock);
 			continue;
 		}
 		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
-			spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 			spin_lock(&dev->se_tmr_lock);
 			continue;
 		}
@@ -193,7 +193,7 @@ int core_tmr_lun_reset(
 			" Response: 0x%02x, t_state: %d\n",
 			(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
 			tmr_p->function, tmr_p->response, cmd->t_state);
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 		transport_cmd_finish_abort_tmr(cmd);
 		spin_lock(&dev->se_tmr_lock);
@@ -247,38 +247,38 @@ int core_tmr_lun_reset(
 		atomic_set(&task->task_state_active, 0);
 		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 
-		spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+		spin_lock_irqsave(&cmd->t_state_lock, flags);
 		DEBUG_LR("LUN_RESET: %s cmd: %p task: %p"
 			" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
 			"def_t_state: %d/%d cdb: 0x%02x\n",
 			(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
 			cmd->se_tfo->get_task_tag(cmd), 0,
 			cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
-			cmd->deferred_t_state, cmd->t_task.t_task_cdb[0]);
+			cmd->deferred_t_state, cmd->t_task_cdb[0]);
 		DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
 			" t_task_cdbs: %d t_task_cdbs_left: %d"
 			" t_task_cdbs_sent: %d -- t_transport_active: %d"
 			" t_transport_stop: %d t_transport_sent: %d\n",
 			cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
-			cmd->t_task.t_task_cdbs,
-			atomic_read(&cmd->t_task.t_task_cdbs_left),
-			atomic_read(&cmd->t_task.t_task_cdbs_sent),
-			atomic_read(&cmd->t_task.t_transport_active),
-			atomic_read(&cmd->t_task.t_transport_stop),
-			atomic_read(&cmd->t_task.t_transport_sent));
+			cmd->t_task_list_num,
+			atomic_read(&cmd->t_task_cdbs_left),
+			atomic_read(&cmd->t_task_cdbs_sent),
+			atomic_read(&cmd->t_transport_active),
+			atomic_read(&cmd->t_transport_stop),
+			atomic_read(&cmd->t_transport_sent));
 
 		if (atomic_read(&task->task_active)) {
 			atomic_set(&task->task_stop, 1);
 			spin_unlock_irqrestore(
-				&cmd->t_task.t_state_lock, flags);
+				&cmd->t_state_lock, flags);
 
 			DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown"
 				" for dev: %p\n", task, dev);
 			wait_for_completion(&task->task_stop_comp);
 			DEBUG_LR("LUN_RESET Completed task: %p shutdown for"
 				" dev: %p\n", task, dev);
-			spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
-			atomic_dec(&cmd->t_task.t_task_cdbs_left);
+			spin_lock_irqsave(&cmd->t_state_lock, flags);
+			atomic_dec(&cmd->t_task_cdbs_left);
 
 			atomic_set(&task->task_active, 0);
 			atomic_set(&task->task_stop, 0);
@@ -288,24 +288,24 @@ int core_tmr_lun_reset(
 		}
 		__transport_stop_task_timer(task, &flags);
 
-		if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_ex_left))) {
+		if (!(atomic_dec_and_test(&cmd->t_task_cdbs_ex_left))) {
 			spin_unlock_irqrestore(
-					&cmd->t_task.t_state_lock, flags);
+					&cmd->t_state_lock, flags);
 			DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for"
 				" t_task_cdbs_ex_left: %d\n", task, dev,
-				atomic_read(&cmd->t_task.t_task_cdbs_ex_left));
+				atomic_read(&cmd->t_task_cdbs_ex_left));
 
 			spin_lock_irqsave(&dev->execute_task_lock, flags);
 			continue;
 		}
-		fe_count = atomic_read(&cmd->t_task.t_fe_count);
+		fe_count = atomic_read(&cmd->t_fe_count);
 
-		if (atomic_read(&cmd->t_task.t_transport_active)) {
+		if (atomic_read(&cmd->t_transport_active)) {
 			DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
 				" task: %p, t_fe_count: %d dev: %p\n", task,
 				fe_count, dev);
-			atomic_set(&cmd->t_task.t_transport_aborted, 1);
-			spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
+			atomic_set(&cmd->t_transport_aborted, 1);
+			spin_unlock_irqrestore(&cmd->t_state_lock,
 						flags);
 			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
 
@@ -314,8 +314,8 @@ int core_tmr_lun_reset(
 		}
 		DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
 			" t_fe_count: %d dev: %p\n", task, fe_count, dev);
-		atomic_set(&cmd->t_task.t_transport_aborted, 1);
-		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		atomic_set(&cmd->t_transport_aborted, 1);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
 
 		spin_lock_irqsave(&dev->execute_task_lock, flags);
@@ -345,7 +345,7 @@ int core_tmr_lun_reset(
 		if (prout_cmd == cmd)
 			continue;
 
-		atomic_dec(&cmd->t_task.t_transport_queue_active);
+		atomic_dec(&cmd->t_transport_queue_active);
 		atomic_dec(&qobj->queue_cnt);
 		list_del(&cmd->se_queue_node);
 		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
@@ -353,7 +353,7 @@ int core_tmr_lun_reset(
 		DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
 			" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
 			"Preempt" : "", cmd, cmd->t_state,
-			atomic_read(&cmd->t_task.t_fe_count));
+			atomic_read(&cmd->t_fe_count));
 		/*
 		 * Signal that the command has failed via cmd->se_cmd_flags,
 		 * and call TFO->new_cmd_failure() to wakeup any fabric
@@ -365,7 +365,7 @@ int core_tmr_lun_reset(
 		transport_new_cmd_failure(cmd);
 
 		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
-				atomic_read(&cmd->t_task.t_fe_count));
+				atomic_read(&cmd->t_fe_count));
 		spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
 	}
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

The diff for this file was not shown because the file is too large
+ 176 - 179
drivers/target/target_core_transport.c


+ 1 - 1
drivers/target/target_core_ua.c

@@ -270,7 +270,7 @@ void core_scsi3_ua_for_check_condition(
 		nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
 		(dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
 		"Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl,
-		cmd->orig_fe_lun, cmd->t_task.t_task_cdb[0], *asc, *ascq);
+		cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
 }
 
 int core_scsi3_ua_clear_for_request_sense(

+ 6 - 8
drivers/target/tcm_fc/tfc_cmd.c

@@ -60,7 +60,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
 	struct fc_seq *sp;
 	struct se_cmd *se_cmd;
 	struct se_mem *mem;
-	struct se_transport_task *task;
 
 	if (!(ft_debug_logging & FT_DEBUG_IO))
 		return;
@@ -72,12 +71,11 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
 		caller, cmd, cmd->cdb);
 	printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
 
-	task = &se_cmd->t_task;
-	printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n",
-	       caller, cmd, task, task->t_tasks_se_num,
-	       task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags);
+	printk(KERN_INFO "%s: cmd %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n",
+	       caller, cmd, se_cmd->t_tasks_se_num,
+	       se_cmd->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags);
 
-	list_for_each_entry(mem, &task->t_mem_list, se_list)
+	list_for_each_entry(mem, &se_cmd->t_mem_list, se_list)
 		printk(KERN_INFO "%s: cmd %p mem %p page %p "
 		       "len 0x%x off 0x%x\n",
 		       caller, cmd, mem,
@@ -262,9 +260,9 @@ int ft_write_pending(struct se_cmd *se_cmd)
 				 * TCM/LIO target
 				 */
 				transport_do_task_sg_chain(se_cmd);
-				cmd->sg = se_cmd->t_task.t_tasks_sg_chained;
+				cmd->sg = se_cmd->t_tasks_sg_chained;
 				cmd->sg_cnt =
-					se_cmd->t_task.t_tasks_sg_chained_no;
+					se_cmd->t_tasks_sg_chained_no;
 			}
 			if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid,
 						    cmd->sg, cmd->sg_cnt))

+ 8 - 13
drivers/target/tcm_fc/tfc_io.c

@@ -65,7 +65,6 @@
 int ft_queue_data_in(struct se_cmd *se_cmd)
 {
 	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
-	struct se_transport_task *task;
 	struct fc_frame *fp = NULL;
 	struct fc_exch *ep;
 	struct fc_lport *lport;
@@ -90,14 +89,13 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 	lport = ep->lp;
 	cmd->seq = lport->tt.seq_start_next(cmd->seq);
 
-	task = &se_cmd->t_task;
 	remaining = se_cmd->data_length;
 
 	/*
 	 * Setup to use first mem list entry if any.
 	 */
-	if (task->t_tasks_se_num) {
-		mem = list_first_entry(&task->t_mem_list,
+	if (se_cmd->t_tasks_se_num) {
+		mem = list_first_entry(&se_cmd->t_mem_list,
 			 struct se_mem, se_list);
 		mem_len = mem->se_len;
 		mem_off = mem->se_off;
@@ -148,8 +146,8 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 
 		if (use_sg) {
 			if (!mem) {
-				BUG_ON(!task->t_task_buf);
-				page_addr = task->t_task_buf + mem_off;
+				BUG_ON(!se_cmd->t_task_buf);
+				page_addr = se_cmd->t_task_buf + mem_off;
 				/*
 				 * In this case, offset is 'offset_in_page' of
 				 * (t_task_buf + mem_off) instead of 'mem_off'.
@@ -180,7 +178,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 			kunmap_atomic(page_addr, KM_SOFTIRQ0);
 			to += tlen;
 		} else {
-			from = task->t_task_buf + mem_off;
+			from = se_cmd->t_task_buf + mem_off;
 			memcpy(to, from, tlen);
 			to += tlen;
 		}
@@ -220,7 +218,6 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
 	struct fc_seq *seq = cmd->seq;
 	struct fc_exch *ep;
 	struct fc_lport *lport;
-	struct se_transport_task *task;
 	struct fc_frame_header *fh;
 	struct se_mem *mem;
 	u32 mem_off;
@@ -235,8 +232,6 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
 	u32 f_ctl;
 	void *buf;
 
-	task = &se_cmd->t_task;
-
 	fh = fc_frame_header_get(fp);
 	if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
 		goto drop;
@@ -312,8 +307,8 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
 	/*
 	 * Setup to use first mem list entry if any.
 	 */
-	if (task->t_tasks_se_num) {
-		mem = list_first_entry(&task->t_mem_list,
+	if (se_cmd->t_tasks_se_num) {
+		mem = list_first_entry(&se_cmd->t_mem_list,
 				       struct se_mem, se_list);
 		mem_len = mem->se_len;
 		mem_off = mem->se_off;
@@ -355,7 +350,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
 			memcpy(to, from, tlen);
 			kunmap_atomic(page_addr, KM_SOFTIRQ0);
 		} else {
-			to = task->t_task_buf + mem_off;
+			to = se_cmd->t_task_buf + mem_off;
 			memcpy(to, from, tlen);
 		}
 		from += tlen;

+ 47 - 58
include/target/target_core_base.h

@@ -403,64 +403,10 @@ struct se_queue_obj {
 	wait_queue_head_t	thread_wq;
 } ____cacheline_aligned;
 
-/*
- * Used one per struct se_cmd to hold all extra struct se_task
- * metadata.  This structure is setup and allocated in
- * drivers/target/target_core_transport.c:__transport_alloc_se_cmd()
- */
-struct se_transport_task {
-	unsigned char		*t_task_cdb;
-	unsigned char		__t_task_cdb[TCM_MAX_COMMAND_SIZE];
-	unsigned long long	t_task_lba;
-	int			t_tasks_failed;
-	int			t_tasks_fua;
-	bool			t_tasks_bidi;
-	u32			t_task_cdbs;
-	u32			t_tasks_check;
-	u32			t_tasks_no;
-	u32			t_tasks_sectors;
-	u32			t_tasks_se_num;
-	u32			t_tasks_se_bidi_num;
-	u32			t_tasks_sg_chained_no;
-	atomic_t		t_fe_count;
-	atomic_t		t_se_count;
-	atomic_t		t_task_cdbs_left;
-	atomic_t		t_task_cdbs_ex_left;
-	atomic_t		t_task_cdbs_timeout_left;
-	atomic_t		t_task_cdbs_sent;
-	atomic_t		t_transport_aborted;
-	atomic_t		t_transport_active;
-	atomic_t		t_transport_complete;
-	atomic_t		t_transport_queue_active;
-	atomic_t		t_transport_sent;
-	atomic_t		t_transport_stop;
-	atomic_t		t_transport_timeout;
-	atomic_t		transport_dev_active;
-	atomic_t		transport_lun_active;
-	atomic_t		transport_lun_fe_stop;
-	atomic_t		transport_lun_stop;
-	spinlock_t		t_state_lock;
-	struct completion	t_transport_stop_comp;
-	struct completion	transport_lun_fe_stop_comp;
-	struct completion	transport_lun_stop_comp;
-	struct scatterlist	*t_tasks_sg_chained;
-	struct scatterlist	t_tasks_sg_bounce;
-	void			*t_task_buf;
-	/*
-	 * Used for pre-registered fabric SGL passthrough WRITE and READ
-	 * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
-	 * and other HW target mode fabric modules.
-	 */
-	struct scatterlist	*t_task_pt_sgl;
-	struct list_head	t_mem_list;
-	/* Used for BIDI READ */
-	struct list_head	t_mem_bidi_list;
-	struct list_head	t_task_list;
-} ____cacheline_aligned;
-
 struct se_task {
 	unsigned char	task_sense;
 	struct scatterlist *task_sg;
+	u32		task_sg_num;
 	struct scatterlist *task_sg_bidi;
 	u8		task_scsi_status;
 	u8		task_flags;
@@ -471,8 +417,6 @@ struct se_task {
 	u32		task_no;
 	u32		task_sectors;
 	u32		task_size;
-	u32		task_sg_num;
-	u32		task_sg_offset;
 	enum dma_data_direction	task_data_direction;
 	struct se_cmd *task_se_cmd;
 	struct se_device	*se_dev;
@@ -534,13 +478,58 @@ struct se_cmd {
 	/* Only used for internal passthrough and legacy TCM fabric modules */
 	struct se_session	*se_sess;
 	struct se_tmr_req	*se_tmr_req;
-	struct se_transport_task t_task;
 	struct list_head	se_queue_node;
 	struct target_core_fabric_ops *se_tfo;
 	int (*transport_emulate_cdb)(struct se_cmd *);
 	void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *);
 	void (*transport_wait_for_tasks)(struct se_cmd *, int, int);
 	void (*transport_complete_callback)(struct se_cmd *);
+	unsigned char		*t_task_cdb;
+	unsigned char		__t_task_cdb[TCM_MAX_COMMAND_SIZE];
+	unsigned long long	t_task_lba;
+	int			t_tasks_failed;
+	int			t_tasks_fua;
+	bool			t_tasks_bidi;
+	u32			t_tasks_se_num;
+	u32			t_tasks_se_bidi_num;
+	u32			t_tasks_sg_chained_no;
+	atomic_t		t_fe_count;
+	atomic_t		t_se_count;
+	atomic_t		t_task_cdbs_left;
+	atomic_t		t_task_cdbs_ex_left;
+	atomic_t		t_task_cdbs_timeout_left;
+	atomic_t		t_task_cdbs_sent;
+	atomic_t		t_transport_aborted;
+	atomic_t		t_transport_active;
+	atomic_t		t_transport_complete;
+	atomic_t		t_transport_queue_active;
+	atomic_t		t_transport_sent;
+	atomic_t		t_transport_stop;
+	atomic_t		t_transport_timeout;
+	atomic_t		transport_dev_active;
+	atomic_t		transport_lun_active;
+	atomic_t		transport_lun_fe_stop;
+	atomic_t		transport_lun_stop;
+	spinlock_t		t_state_lock;
+	struct completion	t_transport_stop_comp;
+	struct completion	transport_lun_fe_stop_comp;
+	struct completion	transport_lun_stop_comp;
+	struct scatterlist	*t_tasks_sg_chained;
+	struct scatterlist	t_tasks_sg_bounce;
+	void			*t_task_buf;
+	/*
+	 * Used for pre-registered fabric SGL passthrough WRITE and READ
+	 * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
+	 * and other HW target mode fabric modules.
+	 */
+	struct scatterlist	*t_task_pt_sgl;
+	u32			t_task_pt_sgl_num;
+	struct list_head	t_mem_list;
+	/* Used for BIDI READ */
+	struct list_head	t_mem_bidi_list;
+	struct list_head	t_task_list;
+	u32			t_task_list_num;
+
 } ____cacheline_aligned;
 
 struct se_tmr_req {

+ 0 - 5
include/target/target_core_fabric_ops.h

@@ -38,11 +38,6 @@ struct target_core_fabric_ops {
 	 * initially allocated in interrupt context.
 	 */
 	int (*new_cmd_map)(struct se_cmd *);
-	/*
-	 * Optional function pointer for TCM fabric modules that use
-	 * Linux/NET sockets to allocate struct iovec array to struct se_cmd
-	 */
-	int (*alloc_cmd_iovecs)(struct se_cmd *);
 	/*
 	 * Optional to release struct se_cmd and fabric dependent allocated
 	 * I/O descriptor in transport_cmd_check_stop()

+ 2 - 1
include/target/target_core_transport.h

@@ -184,10 +184,11 @@ extern void transport_generic_free_cmd(struct se_cmd *, int, int, int);
 extern void transport_generic_wait_for_cmds(struct se_cmd *, int);
 extern int transport_init_task_sg(struct se_task *, struct se_mem *, u32);
 extern int transport_map_mem_to_sg(struct se_task *, struct list_head *,
-					void *, struct se_mem *,
+					struct scatterlist *, struct se_mem *,
 					struct se_mem **, u32 *, u32 *);
 extern void transport_do_task_sg_chain(struct se_cmd *);
 extern void transport_generic_process_write(struct se_cmd *);
+extern int transport_generic_new_cmd(struct se_cmd *);
 extern int transport_generic_do_tmr(struct se_cmd *);
 /* From target_core_alua.c */
 extern int core_alua_check_nonop_delay(struct se_cmd *);

Some files were not shown because too many files changed in this diff