@@ -21,6 +21,8 @@ struct clock_data {
 	u32 epoch_cyc_copy;
 	u32 mult;
 	u32 shift;
+	bool suspended;
+	bool needs_suspend;
 };
 
 static void sched_clock_poll(unsigned long wrap_ticks);
@@ -49,6 +51,9 @@ static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
 	u64 epoch_ns;
 	u32 epoch_cyc;
 
+	if (cd.suspended)
+		return cd.epoch_ns;
+
 	/*
 	 * Load the epoch_cyc and epoch_ns atomically. We do this by
 	 * ensuring that we always write epoch_cyc, epoch_ns and
@@ -98,6 +103,13 @@ static void sched_clock_poll(unsigned long wrap_ticks)
 	update_sched_clock();
 }
 
+void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
+		unsigned long rate)
+{
+	setup_sched_clock(read, bits, rate);
+	cd.needs_suspend = true;
+}
+
 void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 {
 	unsigned long r, w;
@@ -169,11 +181,23 @@ void __init sched_clock_postinit(void)
 static int sched_clock_suspend(void)
 {
 	sched_clock_poll(sched_clock_timer.data);
+	if (cd.needs_suspend)
+		cd.suspended = true;
 	return 0;
 }
 
+static void sched_clock_resume(void)
+{
+	if (cd.needs_suspend) {
+		cd.epoch_cyc = read_sched_clock();
+		cd.epoch_cyc_copy = cd.epoch_cyc;
+		cd.suspended = false;
+	}
+}
+
 static struct syscore_ops sched_clock_ops = {
 	.suspend = sched_clock_suspend,
+	.resume = sched_clock_resume,
 };
 
 static int __init sched_clock_syscore_init(void)
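
For context, here is a rough sketch of how a platform timer driver might use the new helper added by this patch. The driver name, base address, register offset and 24 MHz rate below are hypothetical and not part of the patch; the point is only that a counter which halts across suspend registers through setup_sched_clock_needs_suspend() instead of setup_sched_clock(), so sched_clock() returns the frozen epoch_ns until resume re-reads the counter.

/*
 * Hypothetical usage sketch, assuming a 32-bit free-running counter
 * clocked at 24 MHz that stops while the SoC is suspended.
 */
static void __iomem *my_timer_base;		/* hypothetical mapping */

static u32 notrace my_read_sched_clock(void)
{
	return readl_relaxed(my_timer_base + 0x10);	/* hypothetical COUNT register */
}

static void __init my_timer_init(void)
{
	my_timer_base = ioremap(0x10000000, SZ_4K);	/* hypothetical physical address */
	setup_sched_clock_needs_suspend(my_read_sched_clock, 32, 24000000);
}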