@@ -1264,6 +1264,16 @@ void part_round_stats(int cpu, struct hd_struct *part)
 }
 EXPORT_SYMBOL_GPL(part_round_stats);
 
+#ifdef CONFIG_PM_RUNTIME
+static void blk_pm_put_request(struct request *rq)
+{
+	if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending)
+		pm_runtime_mark_last_busy(rq->q->dev);
+}
+#else
+static inline void blk_pm_put_request(struct request *rq) {}
+#endif
+
 /*
  * queue lock must be held
  */
@@ -1274,6 +1284,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 	if (unlikely(--req->ref_count))
 		return;
 
+	blk_pm_put_request(req);
+
 	elv_completed_request(q, req);
 
 	/* this is a bio leak */
@@ -2053,6 +2065,28 @@ static void blk_account_io_done(struct request *req)
 	}
 }
 
+#ifdef CONFIG_PM_RUNTIME
+/*
+ * Don't process normal requests when queue is suspended
+ * or in the process of suspending/resuming
+ */
+static struct request *blk_pm_peek_request(struct request_queue *q,
+					   struct request *rq)
+{
+	if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
+	    (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM))))
+		return NULL;
+	else
+		return rq;
+}
+#else
+static inline struct request *blk_pm_peek_request(struct request_queue *q,
+						  struct request *rq)
+{
+	return rq;
+}
+#endif
+
 /**
  * blk_peek_request - peek at the top of a request queue
  * @q: request queue to peek at
@@ -2075,6 +2109,11 @@ struct request *blk_peek_request(struct request_queue *q)
 	int ret;
 
 	while ((rq = __elv_next_request(q)) != NULL) {
+
+		rq = blk_pm_peek_request(q, rq);
+		if (!rq)
+			break;
+
 		if (!(rq->cmd_flags & REQ_STARTED)) {
 			/*
 			 * This is the first time the device driver