|
@@ -114,6 +114,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
|
|
|
{
|
|
|
struct Scsi_Host *host = cmd->device->host;
|
|
|
struct scsi_device *device = cmd->device;
|
|
|
+ struct scsi_target *starget = scsi_target(device);
|
|
|
struct request_queue *q = device->request_queue;
|
|
|
unsigned long flags;
|
|
|
|
|
@@ -133,10 +134,17 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
|
|
|
* if a command is requeued with no other commands outstanding
|
|
|
* either for the device or for the host.
|
|
|
*/
|
|
|
- if (reason == SCSI_MLQUEUE_HOST_BUSY)
|
|
|
+ switch (reason) {
|
|
|
+ case SCSI_MLQUEUE_HOST_BUSY:
|
|
|
host->host_blocked = host->max_host_blocked;
|
|
|
- else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
|
|
|
+ break;
|
|
|
+ case SCSI_MLQUEUE_DEVICE_BUSY:
|
|
|
device->device_blocked = device->max_device_blocked;
|
|
|
+ break;
|
|
|
+ case SCSI_MLQUEUE_TARGET_BUSY:
|
|
|
+ starget->target_blocked = starget->max_target_blocked;
|
|
|
+ break;
|
|
|
+ }
|
|
|
|
|
|
/*
|
|
|
* Decrement the counters, since these commands are no longer
|
|
@@ -460,10 +468,12 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
|
|
|
void scsi_device_unbusy(struct scsi_device *sdev)
|
|
|
{
|
|
|
struct Scsi_Host *shost = sdev->host;
|
|
|
+ struct scsi_target *starget = scsi_target(sdev);
|
|
|
unsigned long flags;
|
|
|
|
|
|
spin_lock_irqsave(shost->host_lock, flags);
|
|
|
shost->host_busy--;
|
|
|
+ starget->target_busy--;
|
|
|
if (unlikely(scsi_host_in_recovery(shost) &&
|
|
|
(shost->host_failed || shost->host_eh_scheduled)))
|
|
|
scsi_eh_wakeup(shost);
|
|
@@ -519,6 +529,13 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
|
|
|
spin_unlock_irqrestore(shost->host_lock, flags);
|
|
|
}
|
|
|
|
|
|
+static inline int scsi_target_is_busy(struct scsi_target *starget)
|
|
|
+{
|
|
|
+ return ((starget->can_queue > 0 &&
|
|
|
+ starget->target_busy >= starget->can_queue) ||
|
|
|
+ starget->target_blocked);
|
|
|
+}
|
|
|
+
|
|
|
/*
|
|
|
* Function: scsi_run_queue()
|
|
|
*
|
|
@@ -533,7 +550,7 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
|
|
|
*/
|
|
|
static void scsi_run_queue(struct request_queue *q)
|
|
|
{
|
|
|
- struct scsi_device *sdev = q->queuedata;
|
|
|
+ struct scsi_device *starved_head = NULL, *sdev = q->queuedata;
|
|
|
struct Scsi_Host *shost = sdev->host;
|
|
|
unsigned long flags;
|
|
|
|
|
@@ -560,6 +577,21 @@ static void scsi_run_queue(struct request_queue *q)
|
|
|
*/
|
|
|
sdev = list_entry(shost->starved_list.next,
|
|
|
struct scsi_device, starved_entry);
|
|
|
+ /*
|
|
|
+ * The *queue_ready functions can add a device back onto the
|
|
|
+ * starved list's tail, so we must check for an infinite loop.
|
|
|
+ */
|
|
|
+ if (sdev == starved_head)
|
|
|
+ break;
|
|
|
+ if (!starved_head)
|
|
|
+ starved_head = sdev;
|
|
|
+
|
|
|
+ if (scsi_target_is_busy(scsi_target(sdev))) {
|
|
|
+ list_move_tail(&sdev->starved_entry,
|
|
|
+ &shost->starved_list);
|
|
|
+ continue;
|
|
|
+ }
|
|
|
+
|
|
|
list_del_init(&sdev->starved_entry);
|
|
|
spin_unlock(shost->host_lock);
|
|
|
|
|
@@ -575,13 +607,6 @@ static void scsi_run_queue(struct request_queue *q)
|
|
|
spin_unlock(sdev->request_queue->queue_lock);
|
|
|
|
|
|
spin_lock(shost->host_lock);
|
|
|
- if (unlikely(!list_empty(&sdev->starved_entry)))
|
|
|
- /*
|
|
|
- * sdev lost a race, and was put back on the
|
|
|
- * starved list. This is unlikely but without this
|
|
|
- * in theory we could loop forever.
|
|
|
- */
|
|
|
- break;
|
|
|
}
|
|
|
spin_unlock_irqrestore(shost->host_lock, flags);
|
|
|
|
|
@@ -1344,6 +1369,52 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
|
|
|
return 1;
|
|
|
}
|
|
|
|
|
|
+
|
|
|
+/*
|
|
|
+ * scsi_target_queue_ready: checks if we can send commands to the target
|
|
|
+ * @sdev: scsi device on starget to check.
|
|
|
+ *
|
|
|
+ * Called with the host lock held.
|
|
|
+ */
|
|
|
+static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
|
|
|
+ struct scsi_device *sdev)
|
|
|
+{
|
|
|
+ struct scsi_target *starget = scsi_target(sdev);
|
|
|
+
|
|
|
+ if (starget->single_lun) {
|
|
|
+ if (starget->starget_sdev_user &&
|
|
|
+ starget->starget_sdev_user != sdev)
|
|
|
+ return 0;
|
|
|
+ starget->starget_sdev_user = sdev;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (starget->target_busy == 0 && starget->target_blocked) {
|
|
|
+ /*
|
|
|
+ * unblock after target_blocked iterates to zero
|
|
|
+ */
|
|
|
+ if (--starget->target_blocked == 0) {
|
|
|
+ SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
|
|
|
+ "unblocking target at zero depth\n"));
|
|
|
+ } else {
|
|
|
+ blk_plug_device(sdev->request_queue);
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ if (scsi_target_is_busy(starget)) {
|
|
|
+ if (list_empty(&sdev->starved_entry)) {
|
|
|
+ list_add_tail(&sdev->starved_entry,
|
|
|
+ &shost->starved_list);
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ /* We're OK to process the command, so we can't be starved */
|
|
|
+ if (!list_empty(&sdev->starved_entry))
|
|
|
+ list_del_init(&sdev->starved_entry);
|
|
|
+ return 1;
|
|
|
+}
|
|
|
+
|
|
|
/*
|
|
|
* scsi_host_queue_ready: if we can send requests to shost, return 1 else
|
|
|
* return 0. We must end up running the queue again whenever 0 is
|
|
@@ -1390,6 +1461,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
|
|
|
{
|
|
|
struct scsi_cmnd *cmd = req->special;
|
|
|
struct scsi_device *sdev = cmd->device;
|
|
|
+ struct scsi_target *starget = scsi_target(sdev);
|
|
|
struct Scsi_Host *shost = sdev->host;
|
|
|
|
|
|
blkdev_dequeue_request(req);
|
|
@@ -1413,6 +1485,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
|
|
|
spin_unlock(sdev->request_queue->queue_lock);
|
|
|
spin_lock(shost->host_lock);
|
|
|
shost->host_busy++;
|
|
|
+ starget->target_busy++;
|
|
|
spin_unlock(shost->host_lock);
|
|
|
spin_lock(sdev->request_queue->queue_lock);
|
|
|
|
|
@@ -1550,14 +1623,13 @@ static void scsi_request_fn(struct request_queue *q)
|
|
|
goto not_ready;
|
|
|
}
|
|
|
|
|
|
+ if (!scsi_target_queue_ready(shost, sdev))
|
|
|
+ goto not_ready;
|
|
|
+
|
|
|
if (!scsi_host_queue_ready(q, shost, sdev))
|
|
|
goto not_ready;
|
|
|
- if (scsi_target(sdev)->single_lun) {
|
|
|
- if (scsi_target(sdev)->starget_sdev_user &&
|
|
|
- scsi_target(sdev)->starget_sdev_user != sdev)
|
|
|
- goto not_ready;
|
|
|
- scsi_target(sdev)->starget_sdev_user = sdev;
|
|
|
- }
|
|
|
+
|
|
|
+ scsi_target(sdev)->target_busy++;
|
|
|
shost->host_busy++;
|
|
|
|
|
|
/*
|