@@ -2536,6 +2536,80 @@ int ata_eh_reset(struct ata_link *link, int classify,
 	goto retry;
 }
 
+static inline void ata_eh_pull_park_action(struct ata_port *ap)
+{
+	struct ata_link *link;
+	struct ata_device *dev;
+	unsigned long flags;
+
+	/*
+	 * This function can be thought of as an extended version of
+	 * ata_eh_about_to_do() specially crafted to accommodate the
+	 * requirements of ATA_EH_PARK handling. Since the EH thread
+	 * does not leave the do {} while () loop in ata_eh_recover as
+	 * long as the timeout for a park request to *one* device on
+	 * the port has not expired, and since we still want to pick
+	 * up park requests to other devices on the same port or
+	 * timeout updates for the same device, we have to pull
+	 * ATA_EH_PARK actions from eh_info into eh_context.i
+	 * ourselves at the beginning of each pass over the loop.
+	 *
+	 * Additionally, all write accesses to &ap->park_req_pending
+	 * through INIT_COMPLETION() (see below) or complete_all()
+	 * (see ata_scsi_park_store()) are protected by the host lock.
+	 * As a result we have that park_req_pending.done is zero on
+	 * exit from this function, i.e. when ATA_EH_PARK actions for
+	 * *all* devices on port ap have been pulled into the
+	 * respective eh_context structs. If, and only if,
+	 * park_req_pending.done is non-zero by the time we reach
+	 * wait_for_completion_timeout(), another ATA_EH_PARK action
+	 * has been scheduled for at least one of the devices on port
+	 * ap and we have to cycle over the do {} while () loop in
+	 * ata_eh_recover() again.
+	 */
+
+	spin_lock_irqsave(ap->lock, flags);
+	INIT_COMPLETION(ap->park_req_pending);
+	ata_port_for_each_link(link, ap) {
+		ata_link_for_each_dev(dev, link) {
+			struct ata_eh_info *ehi = &link->eh_info;
+
+			link->eh_context.i.dev_action[dev->devno] |=
+				ehi->dev_action[dev->devno] & ATA_EH_PARK;
+			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
+		}
+	}
+	spin_unlock_irqrestore(ap->lock, flags);
+}
+
+static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
+{
+	struct ata_eh_context *ehc = &dev->link->eh_context;
+	struct ata_taskfile tf;
+	unsigned int err_mask;
+
+	ata_tf_init(dev, &tf);
+	if (park) {
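+		/*
+		 * IDLE IMMEDIATE with UNLOAD FEATURE (ATA-7 and
+		 * later): FEATURES 0x44 plus the LBA signature
+		 * 0x55/0x4e/0x4c ("UNL" read high to low) ask the
+		 * drive to park its heads; on success the device
+		 * returns 0xc4 in LBA low, which is verified after
+		 * ata_exec_internal() below.
+		 */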
+		ehc->unloaded_mask |= 1 << dev->devno;
+		tf.command = ATA_CMD_IDLEIMMEDIATE;
+		tf.feature = 0x44;
+		tf.lbal = 0x4c;
+		tf.lbam = 0x4e;
+		tf.lbah = 0x55;
+	} else {
+		ehc->unloaded_mask &= ~(1 << dev->devno);
+		tf.command = ATA_CMD_CHK_POWER;
+	}
+
+	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+	tf.protocol |= ATA_PROT_NODATA;
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+	if (park && (err_mask || tf.lbal != 0xc4)) {
+		ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
+		ehc->unloaded_mask &= ~(1 << dev->devno);
+	}
+}
+
 static int ata_eh_revalidate_and_attach(struct ata_link *link,
 					struct ata_device **r_failed_dev)
 {
@@ -2845,7 +2919,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
 	struct ata_device *dev;
 	int nr_failed_devs;
 	int rc;
-	unsigned long flags;
+	unsigned long flags, deadline;
 
 	DPRINTK("ENTER\n");
 
@@ -2919,6 +2993,56 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
 		}
 	}
 
+	do {
+		unsigned long now;
+
+		/*
+		 * clears ATA_EH_PARK in eh_info and resets
+		 * ap->park_req_pending
+		 */
+		ata_eh_pull_park_action(ap);
+
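+		/*
+		 * Compute the latest unpark deadline over all devices
+		 * that still have ATA_EH_PARK pending; devices whose
+		 * deadline has already expired are skipped, as are
+		 * devices whose heads are parked already.
+		 */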
+		deadline = jiffies;
+		ata_port_for_each_link(link, ap) {
+			ata_link_for_each_dev(dev, link) {
+				struct ata_eh_context *ehc = &link->eh_context;
+				unsigned long tmp;
+
+				if (dev->class != ATA_DEV_ATA)
+					continue;
+				if (!(ehc->i.dev_action[dev->devno] &
+				      ATA_EH_PARK))
+					continue;
+				tmp = dev->unpark_deadline;
+				if (time_before(deadline, tmp))
+					deadline = tmp;
+				else if (time_before_eq(tmp, jiffies))
+					continue;
+				if (ehc->unloaded_mask & (1 << dev->devno))
+					continue;
+
+				ata_eh_park_issue_cmd(dev, 1);
+			}
+		}
+
+		now = jiffies;
+		if (time_before_eq(deadline, now))
+			break;
+
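+		/*
+		 * wait_for_completion_timeout() returns the number of
+		 * jiffies left if ap->park_req_pending was completed,
+		 * i.e. if a new park request or a timeout update came
+		 * in, and 0 once the deadline has expired.  The loop
+		 * condition below therefore cycles only in the former
+		 * case.
+		 */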
+		deadline = wait_for_completion_timeout(&ap->park_req_pending,
+						       deadline - now);
+	} while (deadline);
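+	/*
+	 * Reload the heads of every device we parked: receiving any
+	 * new command ends the unload state, and the CHECK POWER MODE
+	 * issued by ata_eh_park_issue_cmd(dev, 0) is a harmless way
+	 * to do that before normal recovery proceeds.
+	 */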
+	ata_port_for_each_link(link, ap) {
+		ata_link_for_each_dev(dev, link) {
+			if (!(link->eh_context.unloaded_mask &
+			      (1 << dev->devno)))
+				continue;
+
+			ata_eh_park_issue_cmd(dev, 0);
+			ata_eh_done(link, dev, ATA_EH_PARK);
+		}
+	}
+
 	/* the rest */
 	ata_port_for_each_link(link, ap) {
 		struct ata_eh_context *ehc = &link->eh_context;