Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (21 commits)
  [SCSI] Revert "[SCSI] aacraid: fib context lock for management ioctls"
  [SCSI] bsg: copy the cmd_type field to the subordinate request for bidi
  [SCSI] handle scsi_init_queue failure properly
  [SCSI] destroy scsi_bidi_sdb_cache in scsi_exit_queue
  [SCSI] scsi_debug: add XDWRITEREAD_10 support
  [SCSI] scsi_debug: add bidi data transfer support
  [SCSI] scsi_debug: add get_data_transfer_info helper function
  [SCSI] remove use_sg_chaining
  [SCSI] bidirectional: fix up for the new blk_end_request code
  [SCSI] bidirectional command support
  [SCSI] implement scsi_data_buffer
  [SCSI] tgt: use scsi_init_io instead of scsi_alloc_sgtable
  [SCSI] aic7xxx: fix warnings with CONFIG_PM disabled
  [SCSI] aic79xx: fix warnings with CONFIG_PM disabled
  [SCSI] aic7xxx: fix ahc_done check SCB_ACTIVE for tagged transactions
  [SCSI] sgiwd93: use cached memory access to make driver work on IP28
  [SCSI] zfcp: fix sense_buffer access bug
  [SCSI] ncr53c8xx: fix sense_buffer access bug
  [SCSI] aic79xx: fix sense_buffer access bug
  [SCSI] hptiop: fix sense_buffer access bug
  ...
Linus Torvalds, 17 years ago
parent commit: 2419505acc
67 changed files, 493 additions and 407 deletions
  1. arch/ia64/hp/sim/simscsi.c (+0 -1)
  2. block/bsg.c (+1 -0)
  3. drivers/infiniband/ulp/srp/ib_srp.c (+0 -1)
  4. drivers/s390/scsi/zfcp_fsf.c (+2 -2)
  5. drivers/scsi/3w-9xxx.c (+0 -1)
  6. drivers/scsi/3w-xxxx.c (+0 -1)
  7. drivers/scsi/BusLogic.c (+0 -1)
  8. drivers/scsi/Kconfig (+1 -1)
  9. drivers/scsi/NCR53c406a.c (+0 -1)
  10. drivers/scsi/a100u2w.c (+0 -1)
  11. drivers/scsi/aacraid/commctrl.c (+12 -17)
  12. drivers/scsi/aacraid/linit.c (+0 -1)
  13. drivers/scsi/aha1740.c (+0 -1)
  14. drivers/scsi/aic7xxx/aic79xx.h (+4 -1)
  15. drivers/scsi/aic7xxx/aic79xx_core.c (+2 -0)
  16. drivers/scsi/aic7xxx/aic79xx_osm.c (+1 -2)
  17. drivers/scsi/aic7xxx/aic79xx_osm_pci.c (+12 -21)
  18. drivers/scsi/aic7xxx/aic79xx_pci.c (+2 -0)
  19. drivers/scsi/aic7xxx/aic7xxx.h (+4 -0)
  20. drivers/scsi/aic7xxx/aic7xxx_core.c (+2 -1)
  21. drivers/scsi/aic7xxx/aic7xxx_osm.c (+6 -4)
  22. drivers/scsi/aic7xxx/aic7xxx_osm_pci.c (+12 -21)
  23. drivers/scsi/aic7xxx/aic7xxx_pci.c (+2 -0)
  24. drivers/scsi/aic7xxx_old.c (+0 -1)
  25. drivers/scsi/arcmsr/arcmsr_hba.c (+0 -1)
  26. drivers/scsi/dc395x.c (+0 -1)
  27. drivers/scsi/dpt_i2o.c (+0 -1)
  28. drivers/scsi/eata.c (+0 -1)
  29. drivers/scsi/hosts.c (+0 -1)
  30. drivers/scsi/hptiop.c (+1 -2)
  31. drivers/scsi/ibmmca.c (+0 -1)
  32. drivers/scsi/ibmvscsi/ibmvscsi.c (+0 -1)
  33. drivers/scsi/initio.c (+0 -1)
  34. drivers/scsi/iscsi_tcp.c (+0 -1)
  35. drivers/scsi/libsrp.c (+2 -2)
  36. drivers/scsi/lpfc/lpfc_scsi.c (+0 -2)
  37. drivers/scsi/mac53c94.c (+0 -1)
  38. drivers/scsi/megaraid.c (+0 -1)
  39. drivers/scsi/megaraid/megaraid_mbox.c (+0 -1)
  40. drivers/scsi/megaraid/megaraid_sas.c (+0 -1)
  41. drivers/scsi/mesh.c (+0 -1)
  42. drivers/scsi/ncr53c8xx.c (+1 -1)
  43. drivers/scsi/nsp32.c (+0 -1)
  44. drivers/scsi/pcmcia/sym53c500_cs.c (+0 -1)
  45. drivers/scsi/qla1280.c (+0 -1)
  46. drivers/scsi/qla2xxx/qla_os.c (+0 -2)
  47. drivers/scsi/qla4xxx/ql4_os.c (+0 -1)
  48. drivers/scsi/qlogicfas.c (+0 -1)
  49. drivers/scsi/scsi.c (+1 -1)
  50. drivers/scsi/scsi_debug.c (+118 -56)
  51. drivers/scsi/scsi_error.c (+14 -19)
  52. drivers/scsi/scsi_lib.c (+161 -113)
  53. drivers/scsi/scsi_tgt_lib.c (+5 -23)
  54. drivers/scsi/sd.c (+2 -2)
  55. drivers/scsi/sgiwd93.c (+39 -25)
  56. drivers/scsi/sr.c (+13 -12)
  57. drivers/scsi/stex.c (+0 -1)
  58. drivers/scsi/sym53c416.c (+0 -1)
  59. drivers/scsi/sym53c8xx_2/sym_glue.c (+1 -2)
  60. drivers/scsi/u14-34f.c (+0 -1)
  61. drivers/scsi/ultrastor.c (+0 -1)
  62. drivers/scsi/wd7000.c (+0 -1)
  63. drivers/usb/storage/isd200.c (+4 -4)
  64. include/scsi/scsi.h (+20 -0)
  65. include/scsi/scsi_cmnd.h (+42 -17)
  66. include/scsi/scsi_eh.h (+4 -5)
  67. include/scsi/scsi_host.h (+2 -15)

+ 0 - 1
arch/ia64/hp/sim/simscsi.c

@@ -360,7 +360,6 @@ static struct scsi_host_template driver_template = {
 	.max_sectors		= 1024,
 	.cmd_per_lun		= SIMSCSI_REQ_QUEUE_LEN,
 	.use_clustering		= DISABLE_CLUSTERING,
-	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 
 static int __init

+ 1 - 0
block/bsg.c

@@ -279,6 +279,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
 			goto out;
 		}
 		rq->next_rq = next_rq;
+		next_rq->cmd_type = rq->cmd_type;
 
 		dxferp = (void*)(unsigned long)hdr->din_xferp;
 		ret =  blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
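
The one-line bsg change above is needed for bidirectional pass-through: the extra data-in request (next_rq) must carry the same cmd_type as the primary request so the block and SCSI layers treat it as a SCSI pass-through request rather than a filesystem request. A minimal user-space sketch of driving this path through the SG_IO v4 interface follows; the device node, CDB contents and buffer sizes are illustrative assumptions, not values taken from this merge.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/bsg.h>
    #include <scsi/sg.h>                 /* SG_IO */

    int main(void)
    {
        /* XDWRITEREAD(10) opcode; a real CDB also needs LBA and length fields. */
        unsigned char cdb[10] = { 0x53 };
        unsigned char dout[512], din[512], sense[32];
        struct sg_io_v4 hdr;
        int fd = open("/dev/bsg/0:0:0:0", O_RDWR);   /* hypothetical node */

        if (fd < 0)
            return 1;

        memset(&hdr, 0, sizeof(hdr));
        hdr.guard = 'Q';
        hdr.protocol = BSG_PROTOCOL_SCSI;
        hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
        hdr.request_len = sizeof(cdb);
        hdr.request = (unsigned long)cdb;
        hdr.dout_xfer_len = sizeof(dout);            /* data-out: primary request */
        hdr.dout_xferp = (unsigned long)dout;
        hdr.din_xfer_len = sizeof(din);              /* data-in: maps to rq->next_rq */
        hdr.din_xferp = (unsigned long)din;
        hdr.max_response_len = sizeof(sense);
        hdr.response = (unsigned long)sense;
        hdr.timeout = 30000;                         /* milliseconds */

        if (ioctl(fd, SG_IO, &hdr) < 0)
            perror("SG_IO");

        close(fd);
        return 0;
    }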

+ 0 - 1
drivers/infiniband/ulp/srp/ib_srp.c

@@ -1571,7 +1571,6 @@ static struct scsi_host_template srp_template = {
 	.this_id			= -1,
 	.cmd_per_lun			= SRP_SQ_SIZE,
 	.use_clustering			= ENABLE_CLUSTERING,
-	.use_sg_chaining		= ENABLE_SG_CHAINING,
 	.shost_attrs			= srp_host_attrs
 };
 

+ 2 - 2
drivers/s390/scsi/zfcp_fsf.c

@@ -4224,10 +4224,10 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
 
 		ZFCP_LOG_TRACE("%i bytes sense data provided by FCP\n",
 			       fcp_rsp_iu->fcp_sns_len);
-		memcpy(&scpnt->sense_buffer,
+		memcpy(scpnt->sense_buffer,
 		       zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu), sns_len);
 		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
-			      (void *) &scpnt->sense_buffer, sns_len);
+			      (void *)scpnt->sense_buffer, sns_len);
 	}
 
 	/* check for overrun */
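
The sense_buffer fixes in this merge (zfcp above, and ncr53c8xx, aic79xx and hptiop below) all drop a stray '&'. They accompany the concurrent change that turned scsi_cmnd's sense_buffer from an embedded array into a dynamically allocated pointer: once it is a pointer, &scpnt->sense_buffer is the address of the pointer itself, so the old memcpy idiom overwrites the pointer instead of filling the sense data. A stand-alone sketch, with a made-up struct standing in for scsi_cmnd, shows the difference:

    #include <stdio.h>
    #include <string.h>

    #define SENSE_LEN 96

    /* Illustrative stand-in for the relevant part of struct scsi_cmnd after
     * the sense buffer became a pointer; not the kernel definition. */
    struct fake_cmnd {
        unsigned char *sense_buffer;
    };

    int main(void)
    {
        unsigned char storage[SENSE_LEN];
        struct fake_cmnd cmd = { .sense_buffer = storage };
        unsigned char fcp_sense[3] = { 0x70, 0x00, 0x05 };

        /* Correct, as in the hunks of this merge: copy into the buffer the
         * pointer refers to. */
        memcpy(cmd.sense_buffer, fcp_sense, sizeof(fcp_sense));

        /* The old idiom, memcpy(&cmd.sense_buffer, ...), would now clobber
         * the pointer value itself (and, for larger copies, whatever
         * follows it in the structure). */

        printf("sense key: 0x%x\n", cmd.sense_buffer[2] & 0x0f);
        return 0;
    }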

+ 0 - 1
drivers/scsi/3w-9xxx.c

@@ -1990,7 +1990,6 @@ static struct scsi_host_template driver_template = {
 	.max_sectors		= TW_MAX_SECTORS,
 	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
 	.use_clustering		= ENABLE_CLUSTERING,
-	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.shost_attrs		= twa_host_attrs,
 	.emulated		= 1
 };

+ 0 - 1
drivers/scsi/3w-xxxx.c

@@ -2261,7 +2261,6 @@ static struct scsi_host_template driver_template = {
 	.max_sectors		= TW_MAX_SECTORS,
 	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,	
 	.use_clustering		= ENABLE_CLUSTERING,
-	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.shost_attrs		= tw_host_attrs,
 	.emulated		= 1
 };

+ 0 - 1
drivers/scsi/BusLogic.c

@@ -3575,7 +3575,6 @@ static struct scsi_host_template Bus_Logic_template = {
 	.unchecked_isa_dma = 1,
 	.max_sectors = 128,
 	.use_clustering = ENABLE_CLUSTERING,
-	.use_sg_chaining = ENABLE_SG_CHAINING,
 };
 
 /*

+ 1 - 1
drivers/scsi/Kconfig

@@ -345,7 +345,7 @@ config ISCSI_TCP
 
 config SGIWD93_SCSI
 	tristate "SGI WD93C93 SCSI Driver"
-	depends on SGI_IP22 && SCSI
+	depends on SGI_HAS_WD93 && SCSI
   	help
 	  If you have a Western Digital WD93 SCSI controller on
 	  an SGI MIPS system, say Y.  Otherwise, say N.

+ 0 - 1
drivers/scsi/NCR53c406a.c

@@ -1065,7 +1065,6 @@ static struct scsi_host_template driver_template =
      .cmd_per_lun       	= 1			/* commands per lun */, 
      .unchecked_isa_dma 	= 1			/* unchecked_isa_dma */,
      .use_clustering    	= ENABLE_CLUSTERING,
-     .use_sg_chaining           = ENABLE_SG_CHAINING,
 };
 
 #include "scsi_module.c"

+ 0 - 1
drivers/scsi/a100u2w.c

@@ -1071,7 +1071,6 @@ static struct scsi_host_template inia100_template = {
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun 		= 1,
 	.use_clustering		= ENABLE_CLUSTERING,
-	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 
 static int __devinit inia100_probe_one(struct pci_dev *pdev,

+ 12 - 17
drivers/scsi/aacraid/commctrl.c

@@ -243,7 +243,6 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
 	 *	Search the list of AdapterFibContext addresses on the adapter
 	 *	to be sure this is a valid address
 	 */
-	spin_lock_irqsave(&dev->fib_lock, flags);
 	entry = dev->fib_list.next;
 	fibctx = NULL;
 
@@ -252,25 +251,24 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
 		/*
 		 *	Extract the AdapterFibContext from the Input parameters.
 		 */
-		if (fibctx->unique == f.fibctx) { /* We found a winner */
+		if (fibctx->unique == f.fibctx) {   /* We found a winner */
 			break;
 		}
 		entry = entry->next;
 		fibctx = NULL;
 	}
 	if (!fibctx) {
-		spin_unlock_irqrestore(&dev->fib_lock, flags);
 		dprintk ((KERN_INFO "Fib Context not found\n"));
 		return -EINVAL;
 	}
 
 	if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
 		 (fibctx->size != sizeof(struct aac_fib_context))) {
-		spin_unlock_irqrestore(&dev->fib_lock, flags);
 		dprintk ((KERN_INFO "Fib Context corrupt?\n"));
 		return -EINVAL;
 	}
 	status = 0;
+	spin_lock_irqsave(&dev->fib_lock, flags);
 	/*
 	 *	If there are no fibs to send back, then either wait or return
 	 *	-EAGAIN
@@ -328,9 +326,7 @@ return_fib:
 int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
 {
 	struct fib *fib;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dev->fib_lock, flags);
 	/*
 	 *	First free any FIBs that have not been consumed.
 	 */
@@ -353,7 +349,6 @@ int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
 	 *	Remove the Context from the AdapterFibContext List
 	 */
 	list_del(&fibctx->next);
-	spin_unlock_irqrestore(&dev->fib_lock, flags);
 	/*
 	 *	Invalidate context
 	 */
@@ -419,8 +414,8 @@ static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
  *	@arg: ioctl arguments
  *
  *	This routine returns the driver version.
- *	Under Linux, there have been no version incompatibilities, so this is
- *	simple!
+ *      Under Linux, there have been no version incompatibilities, so this is
+ *      simple!
  */
 
 static int check_revision(struct aac_dev *dev, void __user *arg)
@@ -468,7 +463,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 	u32 data_dir;
 	void __user *sg_user[32];
 	void *sg_list[32];
-	u32 sg_indx = 0;
+	u32   sg_indx = 0;
 	u32 byte_count = 0;
 	u32 actual_fibsize64, actual_fibsize = 0;
 	int i;
@@ -522,11 +517,11 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 	// Fix up srb for endian and force some values
 
 	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);	// Force this
-	srbcmd->channel	 = cpu_to_le32(user_srbcmd->channel);
+	srbcmd->channel  = cpu_to_le32(user_srbcmd->channel);
 	srbcmd->id	 = cpu_to_le32(user_srbcmd->id);
-	srbcmd->lun	 = cpu_to_le32(user_srbcmd->lun);
-	srbcmd->timeout	 = cpu_to_le32(user_srbcmd->timeout);
-	srbcmd->flags	 = cpu_to_le32(flags);
+	srbcmd->lun      = cpu_to_le32(user_srbcmd->lun);
+	srbcmd->timeout  = cpu_to_le32(user_srbcmd->timeout);
+	srbcmd->flags    = cpu_to_le32(flags);
 	srbcmd->retry_limit = 0; // Obsolete parameter
 	srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
 	memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
@@ -791,9 +786,9 @@ static int aac_get_pci_info(struct aac_dev* dev, void __user *arg)
 	pci_info.bus = dev->pdev->bus->number;
 	pci_info.slot = PCI_SLOT(dev->pdev->devfn);
 
-	if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
-		dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
-		return -EFAULT;
+       if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
+	       dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
+	       return -EFAULT;
 	}
 	return 0;
 }

+ 0 - 1
drivers/scsi/aacraid/linit.c

@@ -1032,7 +1032,6 @@ static struct scsi_host_template aac_driver_template = {
 	.cmd_per_lun    		= AAC_NUM_IO_FIB,
 #endif
 	.use_clustering			= ENABLE_CLUSTERING,
-	.use_sg_chaining		= ENABLE_SG_CHAINING,
 	.emulated                       = 1,
 };
 

+ 0 - 1
drivers/scsi/aha1740.c

@@ -563,7 +563,6 @@ static struct scsi_host_template aha1740_template = {
 	.sg_tablesize     = AHA1740_SCATTER,
 	.cmd_per_lun      = AHA1740_CMDLUN,
 	.use_clustering   = ENABLE_CLUSTERING,
-	.use_sg_chaining  = ENABLE_SG_CHAINING,
 	.eh_abort_handler = aha1740_eh_abort_handler,
 };
 

+ 4 - 1
drivers/scsi/aic7xxx/aic79xx.h

@@ -1340,8 +1340,10 @@ struct	ahd_pci_identity *ahd_find_pci_device(ahd_dev_softc_t);
 int			  ahd_pci_config(struct ahd_softc *,
 					 struct ahd_pci_identity *);
 int	ahd_pci_test_register_access(struct ahd_softc *);
+#ifdef CONFIG_PM
 void	ahd_pci_suspend(struct ahd_softc *);
 void	ahd_pci_resume(struct ahd_softc *);
+#endif
 
 /************************** SCB and SCB queue management **********************/
 void		ahd_qinfifo_requeue_tail(struct ahd_softc *ahd,
@@ -1352,8 +1354,10 @@ struct ahd_softc	*ahd_alloc(void *platform_arg, char *name);
 int			 ahd_softc_init(struct ahd_softc *);
 void			 ahd_controller_info(struct ahd_softc *ahd, char *buf);
 int			 ahd_init(struct ahd_softc *ahd);
+#ifdef CONFIG_PM
 int			 ahd_suspend(struct ahd_softc *ahd);
 void			 ahd_resume(struct ahd_softc *ahd);
+#endif
 int			 ahd_default_config(struct ahd_softc *ahd);
 int			 ahd_parse_vpddata(struct ahd_softc *ahd,
 					   struct vpd_config *vpd);
@@ -1361,7 +1365,6 @@ int			 ahd_parse_cfgdata(struct ahd_softc *ahd,
 					   struct seeprom_config *sc);
 void			 ahd_intr_enable(struct ahd_softc *ahd, int enable);
 void			 ahd_pause_and_flushwork(struct ahd_softc *ahd);
-int			 ahd_suspend(struct ahd_softc *ahd); 
 void			 ahd_set_unit(struct ahd_softc *, int);
 void			 ahd_set_name(struct ahd_softc *, char *);
 struct scb		*ahd_get_scb(struct ahd_softc *ahd, u_int col_idx);

+ 2 - 0
drivers/scsi/aic7xxx/aic79xx_core.c

@@ -7175,6 +7175,7 @@ ahd_pause_and_flushwork(struct ahd_softc *ahd)
 	ahd->flags &= ~AHD_ALL_INTERRUPTS;
 }
 
+#ifdef CONFIG_PM
 int
 ahd_suspend(struct ahd_softc *ahd)
 {
@@ -7197,6 +7198,7 @@ ahd_resume(struct ahd_softc *ahd)
 	ahd_intr_enable(ahd, TRUE); 
 	ahd_restart(ahd);
 }
+#endif
 
 /************************** Busy Target Table *********************************/
 /*

+ 1 - 2
drivers/scsi/aic7xxx/aic79xx_osm.c

@@ -766,7 +766,6 @@ struct scsi_host_template aic79xx_driver_template = {
 	.max_sectors		= 8192,
 	.cmd_per_lun		= 2,
 	.use_clustering		= ENABLE_CLUSTERING,
-	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.slave_alloc		= ahd_linux_slave_alloc,
 	.slave_configure	= ahd_linux_slave_configure,
 	.target_alloc		= ahd_linux_target_alloc,
@@ -1922,7 +1921,7 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
 				struct scsi_sense_data *sense;
 				
 				sense = (struct scsi_sense_data *)
-					&cmd->sense_buffer;
+					cmd->sense_buffer;
 				if (sense->extra_len >= 5 &&
 				    (sense->add_sense_code == 0x47
 				     || sense->add_sense_code == 0x48))

+ 12 - 21
drivers/scsi/aic7xxx/aic79xx_osm_pci.c

@@ -43,17 +43,6 @@
 #include "aic79xx_inline.h"
 #include "aic79xx_pci.h"
 
-static int	ahd_linux_pci_dev_probe(struct pci_dev *pdev,
-					const struct pci_device_id *ent);
-static int	ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd,
-						 u_long *base, u_long *base2);
-static int	ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd,
-						 u_long *bus_addr,
-						 uint8_t __iomem **maddr);
-static int	ahd_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg);
-static int	ahd_linux_pci_dev_resume(struct pci_dev *pdev);
-static void	ahd_linux_pci_dev_remove(struct pci_dev *pdev);
-
 /* Define the macro locally since it's different for different class of chips.
  */
 #define ID(x)            \
@@ -85,17 +74,7 @@ static struct pci_device_id ahd_linux_pci_id_table[] = {
 
 MODULE_DEVICE_TABLE(pci, ahd_linux_pci_id_table);
 
-static struct pci_driver aic79xx_pci_driver = {
-	.name		= "aic79xx",
-	.probe		= ahd_linux_pci_dev_probe,
 #ifdef CONFIG_PM
-	.suspend	= ahd_linux_pci_dev_suspend,
-	.resume		= ahd_linux_pci_dev_resume,
-#endif
-	.remove		= ahd_linux_pci_dev_remove,
-	.id_table	= ahd_linux_pci_id_table
-};
-
 static int
 ahd_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg)
 {
@@ -139,6 +118,7 @@ ahd_linux_pci_dev_resume(struct pci_dev *pdev)
 
 	return rc;
 }
+#endif
 
 static void
 ahd_linux_pci_dev_remove(struct pci_dev *pdev)
@@ -245,6 +225,17 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	return (0);
 }
 
+static struct pci_driver aic79xx_pci_driver = {
+	.name		= "aic79xx",
+	.probe		= ahd_linux_pci_dev_probe,
+#ifdef CONFIG_PM
+	.suspend	= ahd_linux_pci_dev_suspend,
+	.resume		= ahd_linux_pci_dev_resume,
+#endif
+	.remove		= ahd_linux_pci_dev_remove,
+	.id_table	= ahd_linux_pci_id_table
+};
+
 int
 ahd_linux_pci_init(void)
 {

+ 2 - 0
drivers/scsi/aic7xxx/aic79xx_pci.c

@@ -389,6 +389,7 @@ ahd_pci_config(struct ahd_softc *ahd, struct ahd_pci_identity *entry)
 	return error;
 }
 
+#ifdef CONFIG_PM
 void
 ahd_pci_suspend(struct ahd_softc *ahd)
 {
@@ -415,6 +416,7 @@ ahd_pci_resume(struct ahd_softc *ahd)
 	ahd_pci_write_config(ahd->dev_softc, CSIZE_LATTIME,
 			     ahd->suspend_state.pci_state.csize_lattime, /*bytes*/1);
 }
+#endif
 
 /*
  * Perform some simple tests that should catch situations where

+ 4 - 0
drivers/scsi/aic7xxx/aic7xxx.h

@@ -1143,7 +1143,9 @@ struct ahc_pci_identity	*ahc_find_pci_device(ahc_dev_softc_t);
 int			 ahc_pci_config(struct ahc_softc *,
 					struct ahc_pci_identity *);
 int			 ahc_pci_test_register_access(struct ahc_softc *);
+#ifdef CONFIG_PM
 void			 ahc_pci_resume(struct ahc_softc *ahc);
+#endif
 
 /*************************** EISA/VL Front End ********************************/
 struct aic7770_identity *aic7770_find_device(uint32_t);
@@ -1170,8 +1172,10 @@ int			 ahc_chip_init(struct ahc_softc *ahc);
 int			 ahc_init(struct ahc_softc *ahc);
 void			 ahc_intr_enable(struct ahc_softc *ahc, int enable);
 void			 ahc_pause_and_flushwork(struct ahc_softc *ahc);
+#ifdef CONFIG_PM
 int			 ahc_suspend(struct ahc_softc *ahc); 
 int			 ahc_resume(struct ahc_softc *ahc);
+#endif
 void			 ahc_set_unit(struct ahc_softc *, int);
 void			 ahc_set_name(struct ahc_softc *, char *);
 void			 ahc_alloc_scbs(struct ahc_softc *ahc);

+ 2 - 1
drivers/scsi/aic7xxx/aic7xxx_core.c

@@ -5078,6 +5078,7 @@ ahc_pause_and_flushwork(struct ahc_softc *ahc)
 	ahc->flags &= ~AHC_ALL_INTERRUPTS;
 }
 
+#ifdef CONFIG_PM
 int
 ahc_suspend(struct ahc_softc *ahc)
 {
@@ -5113,7 +5114,7 @@ ahc_resume(struct ahc_softc *ahc)
 	ahc_restart(ahc);
 	return (0);
 }
-
+#endif
 /************************** Busy Target Table *********************************/
 /*
  * Return the untagged transaction id for a given target/channel lun.

+ 6 - 4
drivers/scsi/aic7xxx/aic7xxx_osm.c

@@ -747,7 +747,6 @@ struct scsi_host_template aic7xxx_driver_template = {
 	.max_sectors		= 8192,
 	.cmd_per_lun		= 2,
 	.use_clustering		= ENABLE_CLUSTERING,
-	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.slave_alloc		= ahc_linux_slave_alloc,
 	.slave_configure	= ahc_linux_slave_configure,
 	.target_alloc		= ahc_linux_target_alloc,
@@ -1658,9 +1657,12 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb)
 		untagged_q = &(ahc->untagged_queues[target_offset]);
 		TAILQ_REMOVE(untagged_q, scb, links.tqe);
 		BUG_ON(!TAILQ_EMPTY(untagged_q));
-	}
-
-	if ((scb->flags & SCB_ACTIVE) == 0) {
+	} else if ((scb->flags & SCB_ACTIVE) == 0) {
+		/*
+		 * Transactions aborted from the untagged queue may
+		 * not have been dispatched to the controller, so
+		 * only check the SCB_ACTIVE flag for tagged transactions.
+		 */
 		printf("SCB %d done'd twice\n", scb->hscb->tag);
 		ahc_dump_card_state(ahc);
 		panic("Stopping for safety");

+ 12 - 21
drivers/scsi/aic7xxx/aic7xxx_osm_pci.c

@@ -42,17 +42,6 @@
 #include "aic7xxx_osm.h"
 #include "aic7xxx_pci.h"
 
-static int	ahc_linux_pci_dev_probe(struct pci_dev *pdev,
-					const struct pci_device_id *ent);
-static int	ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc,
-						u_long *base);
-static int	ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc,
-						 u_long *bus_addr,
-						 uint8_t __iomem **maddr);
-static int	ahc_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg);
-static int	ahc_linux_pci_dev_resume(struct pci_dev *pdev);
-static void	ahc_linux_pci_dev_remove(struct pci_dev *pdev);
-
 /* Define the macro locally since it's different for different class of chips.
 */
 #define ID(x)	ID_C(x, PCI_CLASS_STORAGE_SCSI)
@@ -132,17 +121,7 @@ static struct pci_device_id ahc_linux_pci_id_table[] = {
 
 MODULE_DEVICE_TABLE(pci, ahc_linux_pci_id_table);
 
-static struct pci_driver aic7xxx_pci_driver = {
-	.name		= "aic7xxx",
-	.probe		= ahc_linux_pci_dev_probe,
 #ifdef CONFIG_PM
-	.suspend	= ahc_linux_pci_dev_suspend,
-	.resume		= ahc_linux_pci_dev_resume,
-#endif
-	.remove		= ahc_linux_pci_dev_remove,
-	.id_table	= ahc_linux_pci_id_table
-};
-
 static int
 ahc_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg)
 {
@@ -182,6 +161,7 @@ ahc_linux_pci_dev_resume(struct pci_dev *pdev)
 
 	return (ahc_resume(ahc));
 }
+#endif
 
 static void
 ahc_linux_pci_dev_remove(struct pci_dev *pdev)
@@ -289,6 +269,17 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	return (0);
 }
 
+static struct pci_driver aic7xxx_pci_driver = {
+	.name		= "aic7xxx",
+	.probe		= ahc_linux_pci_dev_probe,
+#ifdef CONFIG_PM
+	.suspend	= ahc_linux_pci_dev_suspend,
+	.resume		= ahc_linux_pci_dev_resume,
+#endif
+	.remove		= ahc_linux_pci_dev_remove,
+	.id_table	= ahc_linux_pci_id_table
+};
+
 int
 ahc_linux_pci_init(void)
 {

+ 2 - 0
drivers/scsi/aic7xxx/aic7xxx_pci.c

@@ -2020,6 +2020,7 @@ ahc_pci_chip_init(struct ahc_softc *ahc)
 	return (ahc_chip_init(ahc));
 }
 
+#ifdef CONFIG_PM
 void
 ahc_pci_resume(struct ahc_softc *ahc)
 {
@@ -2051,6 +2052,7 @@ ahc_pci_resume(struct ahc_softc *ahc)
 		ahc_release_seeprom(&sd);
 	}
 }
+#endif
 
 static int
 ahc_aic785X_setup(struct ahc_softc *ahc)

+ 0 - 1
drivers/scsi/aic7xxx_old.c

@@ -11141,7 +11141,6 @@ static struct scsi_host_template driver_template = {
 	.max_sectors		= 2048,
 	.cmd_per_lun		= 3,
 	.use_clustering		= ENABLE_CLUSTERING,
-	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 
 #include "scsi_module.c"

+ 0 - 1
drivers/scsi/arcmsr/arcmsr_hba.c

@@ -122,7 +122,6 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
 	.max_sectors    	= ARCMSR_MAX_XFER_SECTORS,
 	.cmd_per_lun		= ARCMSR_MAX_CMD_PERLUN,
 	.use_clustering		= ENABLE_CLUSTERING,
-	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.shost_attrs		= arcmsr_host_attrs,
 };
 #ifdef CONFIG_SCSI_ARCMSR_AER

+ 0 - 1
drivers/scsi/dc395x.c

@@ -4763,7 +4763,6 @@ static struct scsi_host_template dc395x_driver_template = {
 	.eh_bus_reset_handler   = dc395x_eh_bus_reset,
 	.unchecked_isa_dma      = 0,
 	.use_clustering         = DISABLE_CLUSTERING,
-	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 
 

+ 0 - 1
drivers/scsi/dpt_i2o.c

@@ -3340,7 +3340,6 @@ static struct scsi_host_template driver_template = {
 	.this_id		= 7,
 	.cmd_per_lun		= 1,
 	.use_clustering		= ENABLE_CLUSTERING,
-	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 #include "scsi_module.c"
 MODULE_LICENSE("GPL");

+ 0 - 1
drivers/scsi/eata.c

@@ -524,7 +524,6 @@ static struct scsi_host_template driver_template = {
 	.this_id = 7,
 	.unchecked_isa_dma = 1,
 	.use_clustering = ENABLE_CLUSTERING,
-	.use_sg_chaining = ENABLE_SG_CHAINING,
 };
 
 #if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD)

+ 0 - 1
drivers/scsi/hosts.c

@@ -342,7 +342,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 	shost->use_clustering = sht->use_clustering;
 	shost->ordered_tag = sht->ordered_tag;
 	shost->active_mode = sht->supported_mode;
-	shost->use_sg_chaining = sht->use_sg_chaining;
 
 	if (sht->supported_mode == MODE_UNKNOWN)
 		/* means we didn't set it ... default to INITIATOR */

+ 1 - 2
drivers/scsi/hptiop.c

@@ -573,7 +573,7 @@ static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
 		scsi_set_resid(scp,
 			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
 		scp->result = SAM_STAT_CHECK_CONDITION;
-		memcpy(&scp->sense_buffer, &req->sg_list,
+		memcpy(scp->sense_buffer, &req->sg_list,
 				min_t(size_t, SCSI_SENSE_BUFFERSIZE,
 					le32_to_cpu(req->dataxfer_length)));
 		break;
@@ -906,7 +906,6 @@ static struct scsi_host_template driver_template = {
 	.unchecked_isa_dma          = 0,
 	.emulated                   = 0,
 	.use_clustering             = ENABLE_CLUSTERING,
-	.use_sg_chaining            = ENABLE_SG_CHAINING,
 	.proc_name                  = driver_name,
 	.shost_attrs                = hptiop_attrs,
 	.this_id                    = -1,

+ 0 - 1
drivers/scsi/ibmmca.c

@@ -1501,7 +1501,6 @@ static struct scsi_host_template ibmmca_driver_template = {
           .sg_tablesize   = 16,
           .cmd_per_lun    = 1,
           .use_clustering = ENABLE_CLUSTERING,
-          .use_sg_chaining = ENABLE_SG_CHAINING,
 };
 
 static int ibmmca_probe(struct device *dev)

+ 0 - 1
drivers/scsi/ibmvscsi/ibmvscsi.c

@@ -1600,7 +1600,6 @@ static struct scsi_host_template driver_template = {
 	.this_id = -1,
 	.sg_tablesize = SG_ALL,
 	.use_clustering = ENABLE_CLUSTERING,
-	.use_sg_chaining = ENABLE_SG_CHAINING,
 	.shost_attrs = ibmvscsi_attrs,
 };
 

+ 0 - 1
drivers/scsi/initio.c

@@ -2833,7 +2833,6 @@ static struct scsi_host_template initio_template = {
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= 1,
 	.use_clustering		= ENABLE_CLUSTERING,
-	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 
 static int initio_probe_one(struct pci_dev *pdev,

+ 0 - 1
drivers/scsi/iscsi_tcp.c

@@ -1933,7 +1933,6 @@ static struct scsi_host_template iscsi_sht = {
 	.eh_device_reset_handler= iscsi_eh_device_reset,
 	.eh_host_reset_handler	= iscsi_eh_host_reset,
 	.use_clustering         = DISABLE_CLUSTERING,
-	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.slave_configure        = iscsi_tcp_slave_configure,
 	.proc_name		= "iscsi_tcp",
 	.this_id		= -1,

+ 2 - 2
drivers/scsi/libsrp.c

@@ -426,8 +426,8 @@ int srp_cmd_queue(struct Scsi_Host *shost, struct srp_cmd *cmd, void *info,
 
 	sc->SCp.ptr = info;
 	memcpy(sc->cmnd, cmd->cdb, MAX_COMMAND_SIZE);
-	sc->request_bufflen = len;
-	sc->request_buffer = (void *) (unsigned long) addr;
+	sc->sdb.length = len;
+	sc->sdb.table.sgl = (void *) (unsigned long) addr;
 	sc->tag = tag;
 	err = scsi_tgt_queue_command(sc, itn_id, (struct scsi_lun *)&cmd->lun,
 				     cmd->tag);

+ 0 - 2
drivers/scsi/lpfc/lpfc_scsi.c

@@ -1459,7 +1459,6 @@ struct scsi_host_template lpfc_template = {
 	.scan_finished		= lpfc_scan_finished,
 	.this_id		= -1,
 	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
-	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.cmd_per_lun		= LPFC_CMD_PER_LUN,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= lpfc_hba_attrs,
@@ -1482,7 +1481,6 @@ struct scsi_host_template lpfc_vport_template = {
 	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
 	.cmd_per_lun		= LPFC_CMD_PER_LUN,
 	.use_clustering		= ENABLE_CLUSTERING,
-	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.shost_attrs		= lpfc_vport_attrs,
 	.max_sectors		= 0xFFFF,
 };

+ 0 - 1
drivers/scsi/mac53c94.c

@@ -402,7 +402,6 @@ static struct scsi_host_template mac53c94_template = {
 	.sg_tablesize	= SG_ALL,
 	.cmd_per_lun	= 1,
 	.use_clustering	= DISABLE_CLUSTERING,
-	.use_sg_chaining = ENABLE_SG_CHAINING,
 };
 
 static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *match)

+ 0 - 1
drivers/scsi/megaraid.c

@@ -4490,7 +4490,6 @@ static struct scsi_host_template megaraid_template = {
 	.sg_tablesize			= MAX_SGLIST,
 	.cmd_per_lun			= DEF_CMD_PER_LUN,
 	.use_clustering			= ENABLE_CLUSTERING,
-	.use_sg_chaining		= ENABLE_SG_CHAINING,
 	.eh_abort_handler		= megaraid_abort,
 	.eh_device_reset_handler	= megaraid_reset,
 	.eh_bus_reset_handler		= megaraid_reset,

+ 0 - 1
drivers/scsi/megaraid/megaraid_mbox.c

@@ -361,7 +361,6 @@ static struct scsi_host_template megaraid_template_g = {
 	.eh_host_reset_handler		= megaraid_reset_handler,
 	.change_queue_depth		= megaraid_change_queue_depth,
 	.use_clustering			= ENABLE_CLUSTERING,
-	.use_sg_chaining		= ENABLE_SG_CHAINING,
 	.sdev_attrs			= megaraid_sdev_attrs,
 	.shost_attrs			= megaraid_shost_attrs,
 };

+ 0 - 1
drivers/scsi/megaraid/megaraid_sas.c

@@ -1192,7 +1192,6 @@ static struct scsi_host_template megasas_template = {
 	.eh_timed_out = megasas_reset_timer,
 	.bios_param = megasas_bios_param,
 	.use_clustering = ENABLE_CLUSTERING,
-	.use_sg_chaining = ENABLE_SG_CHAINING,
 };
 
 /**

+ 0 - 1
drivers/scsi/mesh.c

@@ -1843,7 +1843,6 @@ static struct scsi_host_template mesh_template = {
 	.sg_tablesize			= SG_ALL,
 	.cmd_per_lun			= 2,
 	.use_clustering			= DISABLE_CLUSTERING,
-	.use_sg_chaining		= ENABLE_SG_CHAINING,
 };
 
 static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)

+ 1 - 1
drivers/scsi/ncr53c8xx.c

@@ -4967,7 +4967,7 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
 			     sizeof(cp->sense_buf)));
 
 		if (DEBUG_FLAGS & (DEBUG_RESULT|DEBUG_TINY)) {
-			u_char * p = (u_char*) & cmd->sense_buffer;
+			u_char *p = cmd->sense_buffer;
 			int i;
 			PRINT_ADDR(cmd, "sense data:");
 			for (i=0; i<14; i++) printk (" %x", *p++);

+ 0 - 1
drivers/scsi/nsp32.c

@@ -281,7 +281,6 @@ static struct scsi_host_template nsp32_template = {
 	.cmd_per_lun			= 1,
 	.this_id			= NSP32_HOST_SCSIID,
 	.use_clustering			= DISABLE_CLUSTERING,
-	.use_sg_chaining		= ENABLE_SG_CHAINING,
 	.eh_abort_handler       	= nsp32_eh_abort,
 	.eh_bus_reset_handler		= nsp32_eh_bus_reset,
 	.eh_host_reset_handler		= nsp32_eh_host_reset,

+ 0 - 1
drivers/scsi/pcmcia/sym53c500_cs.c

@@ -692,7 +692,6 @@ static struct scsi_host_template sym53c500_driver_template = {
      .sg_tablesize		= 32,
      .cmd_per_lun		= 1,
      .use_clustering		= ENABLE_CLUSTERING,
-     .use_sg_chaining		= ENABLE_SG_CHAINING,
      .shost_attrs		= SYM53C500_shost_attrs
 };
 

+ 0 - 1
drivers/scsi/qla1280.c

@@ -4204,7 +4204,6 @@ static struct scsi_host_template qla1280_driver_template = {
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= 1,
 	.use_clustering		= ENABLE_CLUSTERING,
-	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 
 

+ 0 - 2
drivers/scsi/qla2xxx/qla_os.c

@@ -131,7 +131,6 @@ static struct scsi_host_template qla2x00_driver_template = {
 	.this_id		= -1,
 	.cmd_per_lun		= 3,
 	.use_clustering		= ENABLE_CLUSTERING,
-	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.sg_tablesize		= SG_ALL,
 
 	/*
@@ -163,7 +162,6 @@ struct scsi_host_template qla24xx_driver_template = {
 	.this_id		= -1,
 	.cmd_per_lun		= 3,
 	.use_clustering		= ENABLE_CLUSTERING,
-	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.sg_tablesize		= SG_ALL,
 
 	.max_sectors		= 0xFFFF,

+ 0 - 1
drivers/scsi/qla4xxx/ql4_os.c

@@ -94,7 +94,6 @@ static struct scsi_host_template qla4xxx_driver_template = {
 	.this_id		= -1,
 	.cmd_per_lun		= 3,
 	.use_clustering		= ENABLE_CLUSTERING,
-	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.sg_tablesize		= SG_ALL,
 
 	.max_sectors		= 0xFFFF,

+ 0 - 1
drivers/scsi/qlogicfas.c

@@ -197,7 +197,6 @@ static struct scsi_host_template qlogicfas_driver_template = {
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= 1,
 	.use_clustering		= DISABLE_CLUSTERING,
-	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 
 static __init int qlogicfas_init(void)

+ 1 - 1
drivers/scsi/scsi.c

@@ -757,7 +757,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
 				"Notifying upper driver of completion "
 				"(result %x)\n", cmd->result));
 
-	good_bytes = cmd->request_bufflen;
+	good_bytes = scsi_bufflen(cmd);
         if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
 		drv = scsi_cmd_to_driver(cmd);
 		if (drv->done)

+ 118 - 56
drivers/scsi/scsi_debug.c

@@ -280,6 +280,8 @@ static int resp_write(struct scsi_cmnd * SCpnt, unsigned long long lba,
 		      unsigned int num, struct sdebug_dev_info * devip);
 static int resp_report_luns(struct scsi_cmnd * SCpnt,
 			    struct sdebug_dev_info * devip);
+static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
+			    unsigned int num, struct sdebug_dev_info *devip);
 static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
                                 int arr_len);
 static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
@@ -311,12 +313,48 @@ static void sdebug_max_tgts_luns(void);
 static struct device pseudo_primary;
 static struct bus_type pseudo_lld_bus;
 
+static void get_data_transfer_info(unsigned char *cmd,
+				   unsigned long long *lba, unsigned int *num)
+{
+	int i;
+
+	switch (*cmd) {
+	case WRITE_16:
+	case READ_16:
+		for (*lba = 0, i = 0; i < 8; ++i) {
+			if (i > 0)
+				*lba <<= 8;
+			*lba += cmd[2 + i];
+		}
+		*num = cmd[13] + (cmd[12] << 8) +
+			(cmd[11] << 16) + (cmd[10] << 24);
+		break;
+	case WRITE_12:
+	case READ_12:
+		*lba = cmd[5] + (cmd[4] << 8) +	(cmd[3] << 16) + (cmd[2] << 24);
+		*num = cmd[9] + (cmd[8] << 8) +	(cmd[7] << 16) + (cmd[6] << 24);
+		break;
+	case WRITE_10:
+	case READ_10:
+	case XDWRITEREAD_10:
+		*lba = cmd[5] + (cmd[4] << 8) +	(cmd[3] << 16) + (cmd[2] << 24);
+		*num = cmd[8] + (cmd[7] << 8);
+		break;
+	case WRITE_6:
+	case READ_6:
+		*lba = cmd[3] + (cmd[2] << 8) + ((cmd[1] & 0x1f) << 16);
+		*num = (0 == cmd[4]) ? 256 : cmd[4];
+		break;
+	default:
+		break;
+	}
+}
 
 static
 int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
 {
 	unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
-	int len, k, j;
+	int len, k;
 	unsigned int num;
 	unsigned long long lba;
 	int errsts = 0;
@@ -452,28 +490,7 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
 			break;
 		if (scsi_debug_fake_rw)
 			break;
-		if ((*cmd) == READ_16) {
-			for (lba = 0, j = 0; j < 8; ++j) {
-				if (j > 0)
-					lba <<= 8;
-				lba += cmd[2 + j];
-			}
-			num = cmd[13] + (cmd[12] << 8) +
-				(cmd[11] << 16) + (cmd[10] << 24);
-		} else if ((*cmd) == READ_12) {
-			lba = cmd[5] + (cmd[4] << 8) +
-				(cmd[3] << 16) + (cmd[2] << 24);
-			num = cmd[9] + (cmd[8] << 8) +
-				(cmd[7] << 16) + (cmd[6] << 24);
-		} else if ((*cmd) == READ_10) {
-			lba = cmd[5] + (cmd[4] << 8) +
-				(cmd[3] << 16) + (cmd[2] << 24);
-			num = cmd[8] + (cmd[7] << 8);
-		} else {	/* READ (6) */
-			lba = cmd[3] + (cmd[2] << 8) +
-				((cmd[1] & 0x1f) << 16);
-			num = (0 == cmd[4]) ? 256 : cmd[4];
-		}
+		get_data_transfer_info(cmd, &lba, &num);
 		errsts = resp_read(SCpnt, lba, num, devip);
 		if (inj_recovered && (0 == errsts)) {
 			mk_sense_buffer(devip, RECOVERED_ERROR,
@@ -500,28 +517,7 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
 			break;
 		if (scsi_debug_fake_rw)
 			break;
-		if ((*cmd) == WRITE_16) {
-			for (lba = 0, j = 0; j < 8; ++j) {
-				if (j > 0)
-					lba <<= 8;
-				lba += cmd[2 + j];
-			}
-			num = cmd[13] + (cmd[12] << 8) +
-				(cmd[11] << 16) + (cmd[10] << 24);
-		} else if ((*cmd) == WRITE_12) {
-			lba = cmd[5] + (cmd[4] << 8) +
-				(cmd[3] << 16) + (cmd[2] << 24);
-			num = cmd[9] + (cmd[8] << 8) +
-				(cmd[7] << 16) + (cmd[6] << 24);
-		} else if ((*cmd) == WRITE_10) {
-			lba = cmd[5] + (cmd[4] << 8) +
-				(cmd[3] << 16) + (cmd[2] << 24);
-			num = cmd[8] + (cmd[7] << 8);
-		} else {	/* WRITE (6) */
-			lba = cmd[3] + (cmd[2] << 8) +
-				((cmd[1] & 0x1f) << 16);
-			num = (0 == cmd[4]) ? 256 : cmd[4];
-		}
+		get_data_transfer_info(cmd, &lba, &num);
 		errsts = resp_write(SCpnt, lba, num, devip);
 		if (inj_recovered && (0 == errsts)) {
 			mk_sense_buffer(devip, RECOVERED_ERROR,
@@ -549,6 +545,28 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
 	case WRITE_BUFFER:
 		errsts = check_readiness(SCpnt, 1, devip);
 		break;
+	case XDWRITEREAD_10:
+		if (!scsi_bidi_cmnd(SCpnt)) {
+			mk_sense_buffer(devip, ILLEGAL_REQUEST,
+					INVALID_FIELD_IN_CDB, 0);
+			errsts = check_condition_result;
+			break;
+		}
+
+		errsts = check_readiness(SCpnt, 0, devip);
+		if (errsts)
+			break;
+		if (scsi_debug_fake_rw)
+			break;
+		get_data_transfer_info(cmd, &lba, &num);
+		errsts = resp_read(SCpnt, lba, num, devip);
+		if (errsts)
+			break;
+		errsts = resp_write(SCpnt, lba, num, devip);
+		if (errsts)
+			break;
+		errsts = resp_xdwriteread(SCpnt, lba, num, devip);
+		break;
 	default:
 		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
 			printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
@@ -601,18 +619,18 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
 	int k, req_len, act_len, len, active;
 	void * kaddr;
 	void * kaddr_off;
-	struct scatterlist * sg;
+	struct scatterlist *sg;
+	struct scsi_data_buffer *sdb = scsi_in(scp);
 
-	if (0 == scsi_bufflen(scp))
+	if (!sdb->length)
 		return 0;
-	if (NULL == scsi_sglist(scp))
+	if (!sdb->table.sgl)
 		return (DID_ERROR << 16);
-	if (! ((scp->sc_data_direction == DMA_BIDIRECTIONAL) ||
-	      (scp->sc_data_direction == DMA_FROM_DEVICE)))
+	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
 		return (DID_ERROR << 16);
 	active = 1;
 	req_len = act_len = 0;
-	scsi_for_each_sg(scp, sg, scsi_sg_count(scp), k) {
+	for_each_sg(sdb->table.sgl, sg, sdb->table.nents, k) {
 		if (active) {
 			kaddr = (unsigned char *)
 				kmap_atomic(sg_page(sg), KM_USER0);
@@ -630,10 +648,10 @@ static int fill_from_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
 		}
 		req_len += sg->length;
 	}
-	if (scsi_get_resid(scp))
-		scsi_set_resid(scp, scsi_get_resid(scp) - act_len);
+	if (sdb->resid)
+		sdb->resid -= act_len;
 	else
-		scsi_set_resid(scp, req_len - act_len);
+		sdb->resid = req_len - act_len;
 	return 0;
 }
 
@@ -650,8 +668,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd * scp, unsigned char * arr,
 		return 0;
 	if (NULL == scsi_sglist(scp))
 		return -1;
-	if (! ((scp->sc_data_direction == DMA_BIDIRECTIONAL) ||
-	      (scp->sc_data_direction == DMA_TO_DEVICE)))
+	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
 		return -1;
 	req_len = fin = 0;
 	scsi_for_each_sg(scp, sg, scsi_sg_count(scp), k) {
@@ -1956,6 +1973,50 @@ static int resp_report_luns(struct scsi_cmnd * scp,
 				    min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
 }
 
+static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
+			    unsigned int num, struct sdebug_dev_info *devip)
+{
+	int i, j, ret = -1;
+	unsigned char *kaddr, *buf;
+	unsigned int offset;
+	struct scatterlist *sg;
+	struct scsi_data_buffer *sdb = scsi_in(scp);
+
+	/* better not to use temporary buffer. */
+	buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
+	if (!buf)
+		return ret;
+
+	offset = 0;
+	scsi_for_each_sg(scp, sg, scsi_sg_count(scp), i) {
+		kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
+		if (!kaddr)
+			goto out;
+
+		memcpy(buf + offset, kaddr + sg->offset, sg->length);
+		offset += sg->length;
+		kunmap_atomic(kaddr, KM_USER0);
+	}
+
+	offset = 0;
+	for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
+		kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
+		if (!kaddr)
+			goto out;
+
+		for (j = 0; j < sg->length; j++)
+			*(kaddr + sg->offset + j) ^= *(buf + offset + j);
+
+		offset += sg->length;
+		kunmap_atomic(kaddr, KM_USER0);
+	}
+	ret = 0;
+out:
+	kfree(buf);
+
+	return ret;
+}
+
 /* When timer goes off this function is called. */
 static void timer_intr_handler(unsigned long indx)
 {
@@ -1989,6 +2050,7 @@ static int scsi_debug_slave_alloc(struct scsi_device * sdp)
 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
 		printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
+	set_bit(QUEUE_FLAG_BIDI, &sdp->request_queue->queue_flags);
 	return 0;
 }
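
The scsi_debug changes above exercise the bidirectional plumbing end to end: for a bidi command the data-out buffer stays in cmd->sdb while the data-in buffer hangs off request->next_rq->special, and drivers are expected to reach both through the scsi_out()/scsi_in() accessors rather than the raw fields. A hedged sketch of a hypothetical helper (kernel context assumed, not part of this merge) walking both sides:

    #include <linux/scatterlist.h>
    #include <scsi/scsi_cmnd.h>

    /* Illustrative only: sum the bytes mapped on each side of a command
     * using the scsi_data_buffer accessors introduced by this series. */
    static unsigned int example_count_mapped_bytes(struct scsi_cmnd *cmd)
    {
        struct scatterlist *sg;
        unsigned int bytes = 0;
        int i;

        if (!scsi_bidi_cmnd(cmd))
            return scsi_bufflen(cmd);    /* uni-directional: one buffer */

        /* data-out side (host to device), i.e. cmd->sdb ... */
        for_each_sg(scsi_out(cmd)->table.sgl, sg, scsi_out(cmd)->table.nents, i)
            bytes += sg->length;

        /* ... and data-in side (device to host), from next_rq->special */
        for_each_sg(scsi_in(cmd)->table.sgl, sg, scsi_in(cmd)->table.nents, i)
            bytes += sg->length;

        return bytes;
    }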
 

+ 14 - 19
drivers/scsi/scsi_error.c

@@ -617,29 +617,27 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
 	ses->cmd_len = scmd->cmd_len;
 	memcpy(ses->cmnd, scmd->cmnd, sizeof(scmd->cmnd));
 	ses->data_direction = scmd->sc_data_direction;
-	ses->bufflen = scmd->request_bufflen;
-	ses->buffer = scmd->request_buffer;
-	ses->use_sg = scmd->use_sg;
-	ses->resid = scmd->resid;
+	ses->sdb = scmd->sdb;
+	ses->next_rq = scmd->request->next_rq;
 	ses->result = scmd->result;
 
+	memset(&scmd->sdb, 0, sizeof(scmd->sdb));
+	scmd->request->next_rq = NULL;
+
 	if (sense_bytes) {
-		scmd->request_bufflen = min_t(unsigned,
-		                       SCSI_SENSE_BUFFERSIZE, sense_bytes);
+		scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
+					 sense_bytes);
 		sg_init_one(&ses->sense_sgl, scmd->sense_buffer,
-		                                       scmd->request_bufflen);
-		scmd->request_buffer = &ses->sense_sgl;
+			    scmd->sdb.length);
+		scmd->sdb.table.sgl = &ses->sense_sgl;
 		scmd->sc_data_direction = DMA_FROM_DEVICE;
-		scmd->use_sg = 1;
+		scmd->sdb.table.nents = 1;
 		memset(scmd->cmnd, 0, sizeof(scmd->cmnd));
 		scmd->cmnd[0] = REQUEST_SENSE;
-		scmd->cmnd[4] = scmd->request_bufflen;
+		scmd->cmnd[4] = scmd->sdb.length;
 		scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
 	} else {
-		scmd->request_buffer = NULL;
-		scmd->request_bufflen = 0;
 		scmd->sc_data_direction = DMA_NONE;
-		scmd->use_sg = 0;
 		if (cmnd) {
 			memset(scmd->cmnd, 0, sizeof(scmd->cmnd));
 			memcpy(scmd->cmnd, cmnd, cmnd_size);
@@ -676,10 +674,8 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
 	scmd->cmd_len = ses->cmd_len;
 	memcpy(scmd->cmnd, ses->cmnd, sizeof(scmd->cmnd));
 	scmd->sc_data_direction = ses->data_direction;
-	scmd->request_bufflen = ses->bufflen;
-	scmd->request_buffer = ses->buffer;
-	scmd->use_sg = ses->use_sg;
-	scmd->resid = ses->resid;
+	scmd->sdb = ses->sdb;
+	scmd->request->next_rq = ses->next_rq;
 	scmd->result = ses->result;
 }
 EXPORT_SYMBOL(scsi_eh_restore_cmnd);
@@ -1700,8 +1696,7 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
 	memset(&scmd->cmnd, '\0', sizeof(scmd->cmnd));
     
 	scmd->scsi_done		= scsi_reset_provider_done_command;
-	scmd->request_buffer		= NULL;
-	scmd->request_bufflen		= 0;
+	memset(&scmd->sdb, 0, sizeof(scmd->sdb));
 
 	scmd->cmd_len			= 0;
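
The error-handling rework above folds the old request_buffer/request_bufflen/use_sg/resid quartet into a single saved scsi_data_buffer plus the bidi next_rq pointer; the calling pattern for drivers is unchanged. A hedged sketch of the usual auto-REQUEST-SENSE sequence built on these helpers (how the command is actually reissued and completed is driver specific and omitted):

    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_eh.h>

    /* Sketch only: repoint the command at its own sense buffer as a
     * REQUEST SENSE, then restore the original data buffer afterwards. */
    static void example_auto_request_sense(struct scsi_cmnd *scmd)
    {
        struct scsi_eh_save ses;

        scsi_eh_prep_cmnd(scmd, &ses, NULL, 0, SCSI_SENSE_BUFFERSIZE);

        /* ... issue scmd to the device and wait for completion ... */

        scsi_eh_restore_cmnd(scmd, &ses);
    }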
 

+ 161 - 113
drivers/scsi/scsi_lib.c

@@ -8,6 +8,7 @@
  */
 
 #include <linux/bio.h>
+#include <linux/bitops.h>
 #include <linux/blkdev.h>
 #include <linux/completion.h>
 #include <linux/kernel.h>
@@ -34,13 +35,6 @@
 #define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
 #define SG_MEMPOOL_SIZE		2
 
-/*
- * The maximum number of SG segments that we will put inside a scatterlist
- * (unless chaining is used). Should ideally fit inside a single page, to
- * avoid a higher order allocation.
- */
-#define SCSI_MAX_SG_SEGMENTS	128
-
 struct scsi_host_sg_pool {
 	size_t		size;
 	char		*name;
@@ -48,22 +42,31 @@ struct scsi_host_sg_pool {
 	mempool_t	*pool;
 };
 
-#define SP(x) { x, "sgpool-" #x }
+#define SP(x) { x, "sgpool-" __stringify(x) }
+#if (SCSI_MAX_SG_SEGMENTS < 32)
+#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
+#endif
 static struct scsi_host_sg_pool scsi_sg_pools[] = {
 	SP(8),
 	SP(16),
-#if (SCSI_MAX_SG_SEGMENTS > 16)
-	SP(32),
 #if (SCSI_MAX_SG_SEGMENTS > 32)
-	SP(64),
+	SP(32),
 #if (SCSI_MAX_SG_SEGMENTS > 64)
+	SP(64),
+#if (SCSI_MAX_SG_SEGMENTS > 128)
 	SP(128),
+#if (SCSI_MAX_SG_SEGMENTS > 256)
+#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
+#endif
 #endif
 #endif
 #endif
+	SP(SCSI_MAX_SG_SEGMENTS)
 };
 #undef SP
 
+static struct kmem_cache *scsi_bidi_sdb_cache;
+
 static void scsi_run_queue(struct request_queue *q);
 
 /*
@@ -440,7 +443,7 @@ EXPORT_SYMBOL_GPL(scsi_execute_async);
 static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
 {
 	cmd->serial_number = 0;
-	cmd->resid = 0;
+	scsi_set_resid(cmd, 0);
 	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
 	if (cmd->cmd_len == 0)
 		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
@@ -690,42 +693,16 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
 	return NULL;
 }
 
-/*
- * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit
- * is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
- */
-#define SCSI_MAX_SG_CHAIN_SEGMENTS	2048
-
 static inline unsigned int scsi_sgtable_index(unsigned short nents)
 {
 	unsigned int index;
 
-	switch (nents) {
-	case 1 ... 8:
+	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);
+
+	if (nents <= 8)
 		index = 0;
-		break;
-	case 9 ... 16:
-		index = 1;
-		break;
-#if (SCSI_MAX_SG_SEGMENTS > 16)
-	case 17 ... 32:
-		index = 2;
-		break;
-#if (SCSI_MAX_SG_SEGMENTS > 32)
-	case 33 ... 64:
-		index = 3;
-		break;
-#if (SCSI_MAX_SG_SEGMENTS > 64)
-	case 65 ... 128:
-		index = 4;
-		break;
-#endif
-#endif
-#endif
-	default:
-		printk(KERN_ERR "scsi: bad segment count=%d\n", nents);
-		BUG();
-	}
+	else
+		index = get_count_order(nents) - 3;
 
 	return index;
 }
@@ -746,31 +723,27 @@ static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
 	return mempool_alloc(sgp->pool, gfp_mask);
 }
 
-int scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
+			      gfp_t gfp_mask)
 {
 	int ret;
 
-	BUG_ON(!cmd->use_sg);
+	BUG_ON(!nents);
 
-	ret = __sg_alloc_table(&cmd->sg_table, cmd->use_sg,
-			       SCSI_MAX_SG_SEGMENTS, gfp_mask, scsi_sg_alloc);
+	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
+			       gfp_mask, scsi_sg_alloc);
 	if (unlikely(ret))
-		__sg_free_table(&cmd->sg_table, SCSI_MAX_SG_SEGMENTS,
+		__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
 				scsi_sg_free);
 
-	cmd->request_buffer = cmd->sg_table.sgl;
 	return ret;
 }
 
-EXPORT_SYMBOL(scsi_alloc_sgtable);
-
-void scsi_free_sgtable(struct scsi_cmnd *cmd)
+static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
 {
-	__sg_free_table(&cmd->sg_table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
+	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
 }
 
-EXPORT_SYMBOL(scsi_free_sgtable);
-
 /*
  * Function:    scsi_release_buffers()
  *
@@ -788,17 +761,49 @@ EXPORT_SYMBOL(scsi_free_sgtable);
  *		the scatter-gather table, and potentially any bounce
  *		buffers.
  */
-static void scsi_release_buffers(struct scsi_cmnd *cmd)
+void scsi_release_buffers(struct scsi_cmnd *cmd)
+{
+	if (cmd->sdb.table.nents)
+		scsi_free_sgtable(&cmd->sdb);
+
+	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
+
+	if (scsi_bidi_cmnd(cmd)) {
+		struct scsi_data_buffer *bidi_sdb =
+			cmd->request->next_rq->special;
+		scsi_free_sgtable(bidi_sdb);
+		kmem_cache_free(scsi_bidi_sdb_cache, bidi_sdb);
+		cmd->request->next_rq->special = NULL;
+	}
+}
+EXPORT_SYMBOL(scsi_release_buffers);
+
+/*
+ * Bidi commands Must be complete as a whole, both sides at once.
+ * If part of the bytes were written and lld returned
+ * scsi_in()->resid and/or scsi_out()->resid this information will be left
+ * in req->data_len and req->next_rq->data_len. The upper-layer driver can
+ * decide what to do with this information.
+ */
+void scsi_end_bidi_request(struct scsi_cmnd *cmd)
 {
-	if (cmd->use_sg)
-		scsi_free_sgtable(cmd);
+	struct request *req = cmd->request;
+	unsigned int dlen = req->data_len;
+	unsigned int next_dlen = req->next_rq->data_len;
+
+	req->data_len = scsi_out(cmd)->resid;
+	req->next_rq->data_len = scsi_in(cmd)->resid;
+
+	/* The req and req->next_rq have not been completed */
+	BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen));
+
+	scsi_release_buffers(cmd);
 
 	/*
-	 * Zero these out.  They now point to freed memory, and it is
-	 * dangerous to hang onto the pointers.
+	 * This will goose the queue request function at the end, so we don't
+	 * need to worry about launching another command.
 	 */
-	cmd->request_buffer = NULL;
-	cmd->request_bufflen = 0;
+	scsi_next_command(cmd);
 }
 
 /*
@@ -832,7 +837,7 @@ static void scsi_release_buffers(struct scsi_cmnd *cmd)
 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 {
 	int result = cmd->result;
-	int this_count = cmd->request_bufflen;
+	int this_count = scsi_bufflen(cmd);
 	struct request_queue *q = cmd->device->request_queue;
 	struct request *req = cmd->request;
 	int clear_errors = 1;
@@ -840,8 +845,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	int sense_valid = 0;
 	int sense_deferred = 0;
 
-	scsi_release_buffers(cmd);
-
 	if (result) {
 		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
 		if (sense_valid)
@@ -864,9 +867,17 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 				req->sense_len = len;
 			}
 		}
-		req->data_len = cmd->resid;
+		if (scsi_bidi_cmnd(cmd)) {
+			/* will also release_buffers */
+			scsi_end_bidi_request(cmd);
+			return;
+		}
+		req->data_len = scsi_get_resid(cmd);
 	}
 
+	BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */
+	scsi_release_buffers(cmd);
+
 	/*
 	 * Next deal with any sectors which we were able to correctly
 	 * handle.
@@ -874,7 +885,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
 				      "%d bytes done.\n",
 				      req->nr_sectors, good_bytes));
-	SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));
 
 	if (clear_errors)
 		req->errors = 0;
@@ -991,52 +1001,80 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	scsi_end_request(cmd, -EIO, this_count, !result);
 }
 
-/*
- * Function:    scsi_init_io()
- *
- * Purpose:     SCSI I/O initialize function.
- *
- * Arguments:   cmd   - Command descriptor we wish to initialize
- *
- * Returns:     0 on success
- *		BLKPREP_DEFER if the failure is retryable
- */
-static int scsi_init_io(struct scsi_cmnd *cmd)
+static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
+			     gfp_t gfp_mask)
 {
-	struct request     *req = cmd->request;
-	int		   count;
-
-	/*
-	 * We used to not use scatter-gather for single segment request,
-	 * but now we do (it makes highmem I/O easier to support without
-	 * kmapping pages)
-	 */
-	cmd->use_sg = req->nr_phys_segments;
+	int count;
 
 	/*
 	 * If sg table allocation fails, requeue request later.
 	 */
-	if (unlikely(scsi_alloc_sgtable(cmd, GFP_ATOMIC))) {
-		scsi_unprep_request(req);
+	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
+					gfp_mask))) {
 		return BLKPREP_DEFER;
 	}
 
 	req->buffer = NULL;
 	if (blk_pc_request(req))
-		cmd->request_bufflen = req->data_len;
+		sdb->length = req->data_len;
 	else
-		cmd->request_bufflen = req->nr_sectors << 9;
+		sdb->length = req->nr_sectors << 9;
 
 	/* 
 	 * Next, walk the list, and fill in the addresses and sizes of
 	 * each segment.
 	 */
-	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
-	BUG_ON(count > cmd->use_sg);
-	cmd->use_sg = count;
+	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
+	BUG_ON(count > sdb->table.nents);
+	sdb->table.nents = count;
 	return BLKPREP_OK;
 }
 
+/*
+ * Function:    scsi_init_io()
+ *
+ * Purpose:     SCSI I/O initialize function.
+ *
+ * Arguments:   cmd   - Command descriptor we wish to initialize
+ *
+ * Returns:     0 on success
+ *		BLKPREP_DEFER if the failure is retryable
+ *		BLKPREP_KILL if the failure is fatal
+ */
+int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+{
+	int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask);
+	if (error)
+		goto err_exit;
+
+	if (blk_bidi_rq(cmd->request)) {
+		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
+			scsi_bidi_sdb_cache, GFP_ATOMIC);
+		if (!bidi_sdb) {
+			error = BLKPREP_DEFER;
+			goto err_exit;
+		}
+
+		cmd->request->next_rq->special = bidi_sdb;
+		error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb,
+								    GFP_ATOMIC);
+		if (error)
+			goto err_exit;
+	}
+
+	return BLKPREP_OK ;
+
+err_exit:
+	scsi_release_buffers(cmd);
+	if (error == BLKPREP_KILL)
+		scsi_put_command(cmd);
+	else /* BLKPREP_DEFER */
+		scsi_unprep_request(cmd->request);
+
+	return error;
+}
+EXPORT_SYMBOL(scsi_init_io);
+
 static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
 		struct request *req)
 {
@@ -1081,16 +1119,14 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
 
 		BUG_ON(!req->nr_phys_segments);
 
-		ret = scsi_init_io(cmd);
+		ret = scsi_init_io(cmd, GFP_ATOMIC);
 		if (unlikely(ret))
 			return ret;
 	} else {
 		BUG_ON(req->data_len);
 		BUG_ON(req->data);
 
-		cmd->request_bufflen = 0;
-		cmd->request_buffer = NULL;
-		cmd->use_sg = 0;
+		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
 		req->buffer = NULL;
 	}
 
@@ -1132,7 +1168,7 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
 	if (unlikely(!cmd))
 		return BLKPREP_DEFER;
 
-	return scsi_init_io(cmd);
+	return scsi_init_io(cmd, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(scsi_setup_fs_cmnd);
 
@@ -1542,20 +1578,7 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
 	 * this limit is imposed by hardware restrictions
 	 */
 	blk_queue_max_hw_segments(q, shost->sg_tablesize);
-
-	/*
-	 * In the future, sg chaining support will be mandatory and this
-	 * ifdef can then go away. Right now we don't have all archs
-	 * converted, so better keep it safe.
-	 */
-#ifdef ARCH_HAS_SG_CHAIN
-	if (shost->use_sg_chaining)
-		blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
-	else
-		blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS);
-#else
-	blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS);
-#endif
+	blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
 
 	blk_queue_max_sectors(q, shost->max_sectors);
 	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
@@ -1654,6 +1677,14 @@ int __init scsi_init_queue(void)
 		return -ENOMEM;
 	}
 
+	scsi_bidi_sdb_cache = kmem_cache_create("scsi_bidi_sdb",
+					sizeof(struct scsi_data_buffer),
+					0, 0, NULL);
+	if (!scsi_bidi_sdb_cache) {
+		printk(KERN_ERR "SCSI: can't init scsi bidi sdb cache\n");
+		goto cleanup_io_context;
+	}
+
 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
 		int size = sgp->size * sizeof(struct scatterlist);
@@ -1663,6 +1694,7 @@ int __init scsi_init_queue(void)
 		if (!sgp->slab) {
 			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
 					sgp->name);
+			goto cleanup_bidi_sdb;
 		}
 
 		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
@@ -1670,10 +1702,25 @@ int __init scsi_init_queue(void)
 		if (!sgp->pool) {
 			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
 					sgp->name);
+			goto cleanup_bidi_sdb;
 		}
 	}
 
 	return 0;
+
+cleanup_bidi_sdb:
+	for (i = 0; i < SG_MEMPOOL_NR; i++) {
+		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
+		if (sgp->pool)
+			mempool_destroy(sgp->pool);
+		if (sgp->slab)
+			kmem_cache_destroy(sgp->slab);
+	}
+	kmem_cache_destroy(scsi_bidi_sdb_cache);
+cleanup_io_context:
+	kmem_cache_destroy(scsi_io_context_cache);
+
+	return -ENOMEM;
 }
 
 void scsi_exit_queue(void)
@@ -1681,6 +1728,7 @@ void scsi_exit_queue(void)
 	int i;
 
 	kmem_cache_destroy(scsi_io_context_cache);
+	kmem_cache_destroy(scsi_bidi_sdb_cache);
 
 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;

+ 5 - 23
drivers/scsi/scsi_tgt_lib.c

@@ -331,8 +331,7 @@ static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
 
 	scsi_tgt_uspace_send_status(cmd, tcmd->itn_id, tcmd->tag);
 
-	if (scsi_sglist(cmd))
-		scsi_free_sgtable(cmd);
+	scsi_release_buffers(cmd);
 
 	queue_work(scsi_tgtd, &tcmd->work);
 }
@@ -353,25 +352,6 @@ static int scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
 	return 0;
 }
 
-static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
-{
-	struct request *rq = cmd->request;
-	int count;
-
-	cmd->use_sg = rq->nr_phys_segments;
-	if (scsi_alloc_sgtable(cmd, gfp_mask))
-		return -ENOMEM;
-
-	cmd->request_bufflen = rq->data_len;
-
-	dprintk("cmd %p cnt %d %lu\n", cmd, scsi_sg_count(cmd),
-		rq_data_dir(rq));
-	count = blk_rq_map_sg(rq->q, rq, scsi_sglist(cmd));
-	BUG_ON(count > cmd->use_sg);
-	cmd->use_sg = count;
-	return 0;
-}
-
 /* TODO: test this crap and replace bio_map_user with new interface maybe */
 static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
 			       unsigned long uaddr, unsigned int len, int rw)
@@ -397,9 +377,11 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
 	}
 
 	tcmd->bio = rq->bio;
-	err = scsi_tgt_init_cmd(cmd, GFP_KERNEL);
-	if (err)
+	err = scsi_init_io(cmd, GFP_KERNEL);
+	if (err) {
+		scsi_release_buffers(cmd);
 		goto unmap_rq;
+	}
 
 	return 0;
 

+ 2 - 2
drivers/scsi/sd.c

@@ -519,7 +519,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
 		SCpnt->cmnd[4] = (unsigned char) this_count;
 		SCpnt->cmnd[5] = 0;
 	}
-	SCpnt->request_bufflen = this_count * sdp->sector_size;
+	SCpnt->sdb.length = this_count * sdp->sector_size;
 
 	/*
 	 * We shouldn't disconnect in the middle of a sector, so with a dumb
@@ -926,7 +926,7 @@ static struct block_device_operations sd_fops = {
 static int sd_done(struct scsi_cmnd *SCpnt)
 {
 	int result = SCpnt->result;
- 	unsigned int xfer_size = SCpnt->request_bufflen;
+	unsigned int xfer_size = scsi_bufflen(SCpnt);
  	unsigned int good_bytes = result ? 0 : xfer_size;
  	u64 start_lba = SCpnt->request->sector;
  	u64 bad_lba;

+ 39 - 25
drivers/scsi/sgiwd93.c

@@ -33,10 +33,9 @@
 
 struct ip22_hostdata {
 	struct WD33C93_hostdata wh;
-	struct hpc_data {
-		dma_addr_t      dma;
-		void		*cpu;
-	} hd;
+	dma_addr_t dma;
+	void *cpu;
+	struct device *dev;
 };
 
 #define host_to_hostdata(host) ((struct ip22_hostdata *)((host)->hostdata))
@@ -46,6 +45,11 @@ struct hpc_chunk {
 	u32 _padding;	/* align to quadword boundary */
 };
 
+/* space for hpc dma descriptors */
+#define HPC_DMA_SIZE   PAGE_SIZE
+
+#define DMA_DIR(d)   ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
 static irqreturn_t sgiwd93_intr(int irq, void *dev_id)
 {
 	struct Scsi_Host * host = dev_id;
@@ -59,15 +63,17 @@ static irqreturn_t sgiwd93_intr(int irq, void *dev_id)
 }
 
 static inline
-void fill_hpc_entries(struct hpc_chunk *hcp, struct scsi_cmnd *cmd, int datainp)
+void fill_hpc_entries(struct ip22_hostdata *hd, struct scsi_cmnd *cmd, int din)
 {
 	unsigned long len = cmd->SCp.this_residual;
 	void *addr = cmd->SCp.ptr;
 	dma_addr_t physaddr;
 	unsigned long count;
+	struct hpc_chunk *hcp;
 
-	physaddr = dma_map_single(NULL, addr, len, cmd->sc_data_direction);
+	physaddr = dma_map_single(hd->dev, addr, len, DMA_DIR(din));
 	cmd->SCp.dma_handle = physaddr;
+	hcp = hd->cpu;
 
 	while (len) {
 		/*
@@ -89,6 +95,9 @@ void fill_hpc_entries(struct hpc_chunk *hcp, struct scsi_cmnd *cmd, int datainp)
 	 */
 	hcp->desc.pbuf = 0;
 	hcp->desc.cntinfo = HPCDMA_EOX;
+	dma_cache_sync(hd->dev, hd->cpu,
+		       (unsigned long)(hcp + 1) - (unsigned long)hd->cpu,
+		       DMA_TO_DEVICE);
 }
 
 static int dma_setup(struct scsi_cmnd *cmd, int datainp)
@@ -96,9 +105,8 @@ static int dma_setup(struct scsi_cmnd *cmd, int datainp)
 	struct ip22_hostdata *hdata = host_to_hostdata(cmd->device->host);
 	struct hpc3_scsiregs *hregs =
 		(struct hpc3_scsiregs *) cmd->device->host->base;
-	struct hpc_chunk *hcp = (struct hpc_chunk *) hdata->hd.cpu;
 
-	pr_debug("dma_setup: datainp<%d> hcp<%p> ", datainp, hcp);
+	pr_debug("dma_setup: datainp<%d> hcp<%p> ", datainp, hdata->cpu);
 
 	hdata->wh.dma_dir = datainp;
 
@@ -111,12 +119,12 @@ static int dma_setup(struct scsi_cmnd *cmd, int datainp)
 	if (cmd->SCp.ptr == NULL || cmd->SCp.this_residual == 0)
 		return 1;
 
-	fill_hpc_entries(hcp, cmd, datainp);
+	fill_hpc_entries(hdata, cmd, datainp);
 
 	pr_debug(" HPCGO\n");
 
 	/* Start up the HPC. */
-	hregs->ndptr = hdata->hd.dma;
+	hregs->ndptr = hdata->dma;
 	if (datainp)
 		hregs->ctrl = HPC3_SCTRL_ACTIVE;
 	else
@@ -134,6 +142,9 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
 	if (!SCpnt)
 		return;
 
+	if (SCpnt->SCp.ptr == NULL || SCpnt->SCp.this_residual == 0)
+		return;
+
 	hregs = (struct hpc3_scsiregs *) SCpnt->device->host->base;
 
 	pr_debug("dma_stop: status<%d> ", status);
@@ -145,8 +156,9 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
 			barrier();
 	}
 	hregs->ctrl = 0;
-	dma_unmap_single(NULL, SCpnt->SCp.dma_handle, SCpnt->SCp.this_residual,
-	                 SCpnt->sc_data_direction);
+	dma_unmap_single(hdata->dev, SCpnt->SCp.dma_handle,
+			 SCpnt->SCp.this_residual,
+			 DMA_DIR(hdata->wh.dma_dir));
 
 	pr_debug("\n");
 }
@@ -161,22 +173,23 @@ void sgiwd93_reset(unsigned long base)
 }
 EXPORT_SYMBOL_GPL(sgiwd93_reset);
 
-static inline void init_hpc_chain(struct hpc_data *hd)
+static inline void init_hpc_chain(struct ip22_hostdata *hdata)
 {
-	struct hpc_chunk *hcp = (struct hpc_chunk *) hd->cpu;
-	struct hpc_chunk *dma = (struct hpc_chunk *) hd->dma;
+	struct hpc_chunk *hcp = (struct hpc_chunk *)hdata->cpu;
+	dma_addr_t dma = hdata->dma;
 	unsigned long start, end;
 
 	start = (unsigned long) hcp;
-	end = start + PAGE_SIZE;
+	end = start + HPC_DMA_SIZE;
 	while (start < end) {
-		hcp->desc.pnext = (u32) (dma + 1);
+		hcp->desc.pnext = (u32) (dma + sizeof(struct hpc_chunk));
 		hcp->desc.cntinfo = HPCDMA_EOX;
-		hcp++; dma++;
+		hcp++;
+		dma += sizeof(struct hpc_chunk);
 		start += sizeof(struct hpc_chunk);
 	};
 	hcp--;
-	hcp->desc.pnext = hd->dma;
+	hcp->desc.pnext = hdata->dma;
 }
 
 static int sgiwd93_bus_reset(struct scsi_cmnd *cmd)
@@ -235,16 +248,17 @@ static int __init sgiwd93_probe(struct platform_device *pdev)
 	host->irq = irq;
 
 	hdata = host_to_hostdata(host);
-	hdata->hd.cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
-	                                   &hdata->hd.dma, GFP_KERNEL);
-	if (!hdata->hd.cpu) {
+	hdata->dev = &pdev->dev;
+	hdata->cpu = dma_alloc_noncoherent(&pdev->dev, HPC_DMA_SIZE,
+					   &hdata->dma, GFP_KERNEL);
+	if (!hdata->cpu) {
 		printk(KERN_WARNING "sgiwd93: Could not allocate memory for "
 		       "host %d buffer.\n", unit);
 		err = -ENOMEM;
 		goto out_put;
 	}
 
-	init_hpc_chain(&hdata->hd);
+	init_hpc_chain(hdata);
 
 	regs.SASR = wdregs + 3;
 	regs.SCMD = wdregs + 7;
@@ -274,7 +288,7 @@ static int __init sgiwd93_probe(struct platform_device *pdev)
 out_irq:
 	free_irq(irq, host);
 out_free:
-	dma_free_coherent(NULL, PAGE_SIZE, hdata->hd.cpu, hdata->hd.dma);
+	dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma);
 out_put:
 	scsi_host_put(host);
 out:
@@ -290,7 +304,7 @@ static void __exit sgiwd93_remove(struct platform_device *pdev)
 
 	scsi_remove_host(host);
 	free_irq(pd->irq, host);
-	dma_free_coherent(&pdev->dev, PAGE_SIZE, hdata->hd.cpu, hdata->hd.dma);
+	dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma);
 	scsi_host_put(host);
 }
 

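An aside on the sgiwd93.c hunks above (illustration only, not part of this commit): the driver now hands a real struct device to the streaming DMA API and flushes its descriptor list with dma_cache_sync(), since memory from dma_alloc_noncoherent() may be CPU-cached and, in that case, only becomes device-visible after an explicit sync. A minimal sketch of that descriptor-plus-streaming pattern, with hypothetical names and assuming <linux/dma-mapping.h> and <linux/types.h>:

	struct demo_desc {		/* hypothetical descriptor layout */
		u32 pbuf;
		u32 cntinfo;
	};

	static void demo_start_dma(struct device *dev, struct demo_desc *desc,
				   void *buf, size_t len, int to_device)
	{
		enum dma_data_direction dir =
			to_device ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
		dma_addr_t buf_dma;

		/* streaming mapping of the data buffer against the real device */
		buf_dma = dma_map_single(dev, buf, len, dir);

		/* fill the CPU-cached descriptor, then push it to the device;
		 * desc is assumed to come from dma_alloc_noncoherent()
		 */
		desc->pbuf = (u32) buf_dma;
		desc->cntinfo = (u32) len;
		dma_cache_sync(dev, desc, sizeof(*desc), DMA_TO_DEVICE);

		/* ... kick the DMA engine; once the transfer completes,
		 * dma_unmap_single(dev, buf_dma, len, dir) ...
		 */
	}
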
+ 13 - 12
drivers/scsi/sr.c

@@ -231,7 +231,7 @@ out:
 static int sr_done(struct scsi_cmnd *SCpnt)
 {
 	int result = SCpnt->result;
-	int this_count = SCpnt->request_bufflen;
+	int this_count = scsi_bufflen(SCpnt);
 	int good_bytes = (result == 0 ? this_count : 0);
 	int block_sectors = 0;
 	long error_sector;
@@ -379,17 +379,18 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
 	}
 
 	{
-		struct scatterlist *sg = SCpnt->request_buffer;
-		int i, size = 0;
-		for (i = 0; i < SCpnt->use_sg; i++)
-			size += sg[i].length;
+		struct scatterlist *sg;
+		int i, size = 0, sg_count = scsi_sg_count(SCpnt);
 
-		if (size != SCpnt->request_bufflen && SCpnt->use_sg) {
+		scsi_for_each_sg(SCpnt, sg, sg_count, i)
+			size += sg->length;
+
+		if (size != scsi_bufflen(SCpnt)) {
 			scmd_printk(KERN_ERR, SCpnt,
 				"mismatch count %d, bytes %d\n",
-				size, SCpnt->request_bufflen);
-			if (SCpnt->request_bufflen > size)
-				SCpnt->request_bufflen = size;
+				size, scsi_bufflen(SCpnt));
+			if (scsi_bufflen(SCpnt) > size)
+				SCpnt->sdb.length = size;
 		}
 	}
 
@@ -397,12 +398,12 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
 	 * request doesn't start on hw block boundary, add scatter pads
 	 */
 	if (((unsigned int)rq->sector % (s_size >> 9)) ||
-	    (SCpnt->request_bufflen % s_size)) {
+	    (scsi_bufflen(SCpnt) % s_size)) {
 		scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n");
 		goto out;
 	}
 
-	this_count = (SCpnt->request_bufflen >> 9) / (s_size >> 9);
+	this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9);
 
 
 	SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n",
@@ -416,7 +417,7 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
 
 	if (this_count > 0xffff) {
 		this_count = 0xffff;
-		SCpnt->request_bufflen = this_count * s_size;
+		SCpnt->sdb.length = this_count * s_size;
 	}
 
 	SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;

+ 0 - 1
drivers/scsi/stex.c

@@ -1123,7 +1123,6 @@ static struct scsi_host_template driver_template = {
 	.this_id			= -1,
 	.sg_tablesize			= ST_MAX_SG,
 	.cmd_per_lun			= ST_CMD_PER_LUN,
-	.use_sg_chaining		= ENABLE_SG_CHAINING,
 };
 
 static int stex_set_dma_mask(struct pci_dev * pdev)

+ 0 - 1
drivers/scsi/sym53c416.c

@@ -840,6 +840,5 @@ static struct scsi_host_template driver_template = {
 	.cmd_per_lun =		1,
 	.unchecked_isa_dma =	1,
 	.use_clustering =	ENABLE_CLUSTERING,
-	.use_sg_chaining =	ENABLE_SG_CHAINING,
 };
 #include "scsi_module.c"

+ 1 - 2
drivers/scsi/sym53c8xx_2/sym_glue.c

@@ -207,7 +207,7 @@ void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
 			/*
 			 *  Bounce back the sense data to user.
 			 */
-			memset(&cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+			memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
 			memcpy(cmd->sense_buffer, cp->sns_bbuf,
 			       min(SCSI_SENSE_BUFFERSIZE, SYM_SNS_BBUF_LEN));
 #if 0
@@ -1681,7 +1681,6 @@ static struct scsi_host_template sym2_template = {
 	.eh_host_reset_handler	= sym53c8xx_eh_host_reset_handler,
 	.this_id		= 7,
 	.use_clustering		= ENABLE_CLUSTERING,
-	.use_sg_chaining	= ENABLE_SG_CHAINING,
 	.max_sectors		= 0xFFFF,
 #ifdef SYM_LINUX_PROC_INFO_SUPPORT
 	.proc_info		= sym53c8xx_proc_info,

+ 0 - 1
drivers/scsi/u14-34f.c

@@ -451,7 +451,6 @@ static struct scsi_host_template driver_template = {
                 .this_id                 = 7,
                 .unchecked_isa_dma       = 1,
                 .use_clustering          = ENABLE_CLUSTERING,
-                .use_sg_chaining         = ENABLE_SG_CHAINING,
                 };
 
 #if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD)

+ 0 - 1
drivers/scsi/ultrastor.c

@@ -1204,6 +1204,5 @@ static struct scsi_host_template driver_template = {
 	.cmd_per_lun       = ULTRASTOR_MAX_CMDS_PER_LUN,
 	.unchecked_isa_dma = 1,
 	.use_clustering    = ENABLE_CLUSTERING,
-	.use_sg_chaining   = ENABLE_SG_CHAINING,
 };
 #include "scsi_module.c"

+ 0 - 1
drivers/scsi/wd7000.c

@@ -1671,7 +1671,6 @@ static struct scsi_host_template driver_template = {
 	.cmd_per_lun		= 1,
 	.unchecked_isa_dma	= 1,
 	.use_clustering		= ENABLE_CLUSTERING,
-	.use_sg_chaining	= ENABLE_SG_CHAINING,
 };
 
 #include "scsi_module.c"

+ 4 - 4
drivers/usb/storage/isd200.c

@@ -415,14 +415,14 @@ static void isd200_set_srb(struct isd200_info *info,
 		sg_init_one(&info->sg, buff, bufflen);
 
 	srb->sc_data_direction = dir;
-	srb->request_buffer = buff ? &info->sg : NULL;
-	srb->request_bufflen = bufflen;
-	srb->use_sg = buff ? 1 : 0;
+	srb->sdb.table.sgl = buff ? &info->sg : NULL;
+	srb->sdb.length = bufflen;
+	srb->sdb.table.nents = buff ? 1 : 0;
 }
 
 static void isd200_srb_set_bufflen(struct scsi_cmnd *srb, unsigned bufflen)
 {
-	srb->request_bufflen = bufflen;
+	srb->sdb.length = bufflen;
 }
 
 

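An aside on the isd200.c hunk above (illustration only, not part of this commit): usb-storage builds its own one-entry scatterlist and now plugs it directly into the command's scsi_data_buffer instead of the removed request_buffer/use_sg fields. A minimal sketch of that single-buffer setup, with hypothetical names and assuming <linux/scatterlist.h> and <scsi/scsi_cmnd.h>:

	static struct scatterlist demo_sg;	/* hypothetical, one entry */

	static void demo_set_buffer(struct scsi_cmnd *srb, void *buff,
				    unsigned bufflen)
	{
		sg_init_one(&demo_sg, buff, bufflen);
		srb->sdb.table.sgl = &demo_sg;
		srb->sdb.table.nents = 1;
		srb->sdb.length = bufflen;
	}
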
+ 20 - 0
include/scsi/scsi.h

@@ -10,6 +10,25 @@
 
 #include <linux/types.h>
 
+/*
+ * The maximum number of SG segments that we will put inside a
+ * scatterlist (unless chaining is used). Should ideally fit inside a
+ * single page, to avoid a higher order allocation.  We could define this
+ * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order.  The
+ * minimum value is 32
+ */
+#define SCSI_MAX_SG_SEGMENTS	128
+
+/*
+ * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit
+ * is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
+ */
+#ifdef ARCH_HAS_SG_CHAIN
+#define SCSI_MAX_SG_CHAIN_SEGMENTS	2048
+#else
+#define SCSI_MAX_SG_CHAIN_SEGMENTS	SCSI_MAX_SG_SEGMENTS
+#endif
+
 /*
  *	SCSI command lengths
  */
@@ -83,6 +102,7 @@ extern const unsigned char scsi_command_size[8];
 #define READ_TOC              0x43
 #define LOG_SELECT            0x4c
 #define LOG_SENSE             0x4d
+#define XDWRITEREAD_10        0x53
 #define MODE_SELECT_10        0x55
 #define RESERVE_10            0x56
 #define RELEASE_10            0x57

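An aside on the scsi.h hunk above (illustration only, not part of this commit): with use_sg_chaining removed, SCSI_MAX_SG_SEGMENTS is the per-allocation scatterlist limit and SCSI_MAX_SG_CHAIN_SEGMENTS is the larger cap available when the architecture can chain scatterlists. A minimal sketch of a driver-side clamp against that cap, with a hypothetical hardware limit and assuming <linux/kernel.h>, <scsi/scsi.h> and <scsi/scsi_host.h>:

	#define DEMO_HW_MAX_SEGS	512	/* hypothetical HBA limit */

	static void demo_set_sg_limit(struct Scsi_Host *shost)	/* hypothetical */
	{
		/* advertise the hardware limit, capped at what can be chained */
		shost->sg_tablesize = min_t(unsigned short, DEMO_HW_MAX_SEGS,
					    SCSI_MAX_SG_CHAIN_SEGMENTS);
	}
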
+ 42 - 17
include/scsi/scsi_cmnd.h

@@ -2,15 +2,20 @@
 #define _SCSI_SCSI_CMND_H
 
 #include <linux/dma-mapping.h>
+#include <linux/blkdev.h>
 #include <linux/list.h>
 #include <linux/types.h>
 #include <linux/timer.h>
 #include <linux/scatterlist.h>
 
-struct request;
 struct Scsi_Host;
 struct scsi_device;
 
+struct scsi_data_buffer {
+	struct sg_table table;
+	unsigned length;
+	int resid;
+};
 
 /* embedded in scsi_cmnd */
 struct scsi_pointer {
@@ -61,15 +66,11 @@ struct scsi_cmnd {
 	/* These elements define the operation we are about to perform */
 #define MAX_COMMAND_SIZE	16
 	unsigned char cmnd[MAX_COMMAND_SIZE];
-	unsigned request_bufflen;	/* Actual request size */
 
 	struct timer_list eh_timeout;	/* Used to time out the command. */
-	void *request_buffer;		/* Actual requested buffer */
 
 	/* These elements define the operation we ultimately want to perform */
-	struct sg_table sg_table;
-	unsigned short use_sg;	/* Number of pieces of scatter-gather */
-
+	struct scsi_data_buffer sdb;
 	unsigned underflow;	/* Return error if less than
 				   this amount is transferred */
 
@@ -79,10 +80,6 @@ struct scsi_cmnd {
 				   reconnects.   Probably == sector
 				   size */
 
-	int resid;		/* Number of bytes requested to be
-				   transferred less actual number
-				   transferred (0 if not supported) */
-
 	struct request *request;	/* The command we are
 				   	   working on */
 
@@ -127,27 +124,55 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
 				 size_t *offset, size_t *len);
 extern void scsi_kunmap_atomic_sg(void *virt);
 
-extern int scsi_alloc_sgtable(struct scsi_cmnd *, gfp_t);
-extern void scsi_free_sgtable(struct scsi_cmnd *);
+extern int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask);
+extern void scsi_release_buffers(struct scsi_cmnd *cmd);
 
 extern int scsi_dma_map(struct scsi_cmnd *cmd);
 extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
 
-#define scsi_sg_count(cmd) ((cmd)->use_sg)
-#define scsi_sglist(cmd) ((cmd)->sg_table.sgl)
-#define scsi_bufflen(cmd) ((cmd)->request_bufflen)
+static inline unsigned scsi_sg_count(struct scsi_cmnd *cmd)
+{
+	return cmd->sdb.table.nents;
+}
+
+static inline struct scatterlist *scsi_sglist(struct scsi_cmnd *cmd)
+{
+	return cmd->sdb.table.sgl;
+}
+
+static inline unsigned scsi_bufflen(struct scsi_cmnd *cmd)
+{
+	return cmd->sdb.length;
+}
 
 static inline void scsi_set_resid(struct scsi_cmnd *cmd, int resid)
 {
-	cmd->resid = resid;
+	cmd->sdb.resid = resid;
 }
 
 static inline int scsi_get_resid(struct scsi_cmnd *cmd)
 {
-	return cmd->resid;
+	return cmd->sdb.resid;
 }
 
 #define scsi_for_each_sg(cmd, sg, nseg, __i)			\
 	for_each_sg(scsi_sglist(cmd), sg, nseg, __i)
 
+static inline int scsi_bidi_cmnd(struct scsi_cmnd *cmd)
+{
+	return blk_bidi_rq(cmd->request) &&
+		(cmd->request->next_rq->special != NULL);
+}
+
+static inline struct scsi_data_buffer *scsi_in(struct scsi_cmnd *cmd)
+{
+	return scsi_bidi_cmnd(cmd) ?
+		cmd->request->next_rq->special : &cmd->sdb;
+}
+
+static inline struct scsi_data_buffer *scsi_out(struct scsi_cmnd *cmd)
+{
+	return &cmd->sdb;
+}
+
 #endif /* _SCSI_SCSI_CMND_H */

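An aside on the scsi_cmnd.h hunks above (illustration only, not part of this commit): with the data-transfer state folded into struct scsi_data_buffer, drivers are expected to use the accessors rather than the removed request_buffer/request_bufflen/use_sg/resid fields, and bidirectional commands expose their read side through scsi_in() while scsi_out() still points at cmd->sdb. A minimal sketch of a driver-side walk over a command's buffers, with a hypothetical function name and assuming <scsi/scsi_cmnd.h> and <linux/kernel.h>:

	static void demo_map_command(struct scsi_cmnd *cmd)	/* hypothetical */
	{
		struct scatterlist *sg;
		unsigned total = 0;
		int i;

		/* walk the (possibly chained) scatterlist through the accessors */
		scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i)
			total += sg->length;

		WARN_ON(total != scsi_bufflen(cmd));

		if (scsi_bidi_cmnd(cmd)) {
			/* the read ("in") side lives on request->next_rq */
			struct scsi_data_buffer *in = scsi_in(cmd);
			struct scsi_data_buffer *out = scsi_out(cmd);

			pr_debug("bidi: out %u bytes, in %u bytes\n",
				 out->length, in->length);
		}

		/* report a full transfer back to the midlayer */
		scsi_set_resid(cmd, 0);
	}
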
+ 4 - 5
include/scsi/scsi_eh.h

@@ -68,16 +68,15 @@ extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
 extern int scsi_reset_provider(struct scsi_device *, int);
 
 struct scsi_eh_save {
+	/* saved state */
 	int result;
 	enum dma_data_direction data_direction;
 	unsigned char cmd_len;
 	unsigned char cmnd[MAX_COMMAND_SIZE];
+	struct scsi_data_buffer sdb;
+	struct request *next_rq;
 
-	void *buffer;
-	unsigned bufflen;
-	unsigned short use_sg;
-	int resid;
-
+	/* new command support */
 	struct scatterlist sense_sgl;
 };
 
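An aside on the scsi_eh.h hunk above (illustration only, not part of this commit): struct scsi_eh_save now stashes the whole scsi_data_buffer plus the bidi next_rq pointer instead of the old buffer/bufflen/use_sg/resid fields, so the usual prep/restore pair in error handling keeps working without knowing about the new layout. A minimal sketch of that pair around a TEST UNIT READY probe, with a hypothetical function name and assuming <scsi/scsi.h>, <scsi/scsi_cmnd.h> and <scsi/scsi_eh.h>:

	static void demo_eh_probe(struct scsi_cmnd *scmd)	/* hypothetical */
	{
		struct scsi_eh_save ses;
		unsigned char tur[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };

		/* save result, direction, cmnd and the sdb; install the TUR */
		scsi_eh_prep_cmnd(scmd, &ses, tur, sizeof(tur), 0);

		/* ... issue scmd synchronously and inspect scmd->result ... */

		/* put the original command, including its sdb, back in place */
		scsi_eh_restore_cmnd(scmd, &ses);
	}
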

+ 2 - 15
include/scsi/scsi_host.h

@@ -39,9 +39,6 @@ struct blk_queue_tags;
 #define DISABLE_CLUSTERING 0
 #define ENABLE_CLUSTERING 1
 
-#define DISABLE_SG_CHAINING 0
-#define ENABLE_SG_CHAINING 1
-
 enum scsi_eh_timer_return {
 	EH_NOT_HANDLED,
 	EH_HANDLED,
@@ -136,9 +133,9 @@ struct scsi_host_template {
 	 * the done callback is invoked.
 	 *
 	 * This is called to inform the LLD to transfer
-	 * cmd->request_bufflen bytes. The cmd->use_sg speciefies the
+	 * scsi_bufflen(cmd) bytes. scsi_sg_count(cmd) specifies the
 	 * number of scatterlist entries in the command and
-	 * cmd->request_buffer contains the scatterlist.
+	 * scsi_sglist(cmd) returns the scatterlist.
 	 *
 	 * return values: see queuecommand
 	 *
@@ -445,15 +442,6 @@ struct scsi_host_template {
 	 */
 	unsigned ordered_tag:1;
 
-	/*
-	 * true if the low-level driver can support sg chaining. this
-	 * will be removed eventually when all the drivers are
-	 * converted to support sg chaining.
-	 *
-	 * Status: OBSOLETE
-	 */
-	unsigned use_sg_chaining:1;
-
 	/*
 	 * Countdown for host blocking with no commands outstanding
 	 */
@@ -598,7 +586,6 @@ struct Scsi_Host {
 	unsigned unchecked_isa_dma:1;
 	unsigned use_clustering:1;
 	unsigned use_blk_tcq:1;
-	unsigned use_sg_chaining:1;
 
 	/*
 	 * Host has requested that no further requests come through for the