@@ -19,6 +19,7 @@
 #include <linux/err.h>
 #include <linux/platform_device.h>
 
+#include <asm/sched_clock.h>
 #include <asm/mach/irq.h>
 #include <asm/mach/time.h>
 
@@ -274,19 +275,9 @@ static cycle_t read_cycles(struct clocksource *cs)
 	return (cycles_t)timer32_read(t);
 }
 
-/*
- * Kernel assumes that sched_clock can be called early but may not have
- * things ready yet.
- */
-static cycle_t read_dummy(struct clocksource *cs)
-{
-	return 0;
-}
-
-
 static struct clocksource clocksource_davinci = {
 	.rating		= 300,
-	.read		= read_dummy,
+	.read		= read_cycles,
 	.mask		= CLOCKSOURCE_MASK(32),
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
@@ -294,12 +285,9 @@ static struct clocksource clocksource_davinci = {
 /*
  * Overwrite weak default sched_clock with something more precise
  */
-unsigned long long notrace sched_clock(void)
+static u32 notrace davinci_read_sched_clock(void)
 {
-	const cycle_t cyc = clocksource_davinci.read(&clocksource_davinci);
-
-	return clocksource_cyc2ns(cyc, clocksource_davinci.mult,
-				  clocksource_davinci.shift);
+	return timer32_read(&timers[TID_CLOCKSOURCE]);
 }
 
 /*
@@ -399,12 +387,14 @@ static void __init davinci_timer_init(void)
 	davinci_clock_tick_rate = clk_get_rate(timer_clk);
 
 	/* setup clocksource */
-	clocksource_davinci.read = read_cycles;
 	clocksource_davinci.name = id_to_name[clocksource_id];
 	if (clocksource_register_hz(&clocksource_davinci,
 				    davinci_clock_tick_rate))
 		printk(err, clocksource_davinci.name);
 
+	setup_sched_clock(davinci_read_sched_clock, 32,
+			  davinci_clock_tick_rate);
+
 	/* setup clockevent */
 	clockevent_davinci.name = id_to_name[timers[TID_CLOCKEVENT].id];
 	clockevent_davinci.mult = div_sc(davinci_clock_tick_rate, NSEC_PER_SEC,
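
For reference, below is a minimal sketch of the pre-3.13 ARM setup_sched_clock() pattern this patch converts to: the driver registers a fast, notrace callback returning the raw 32-bit counter value, and the sched_clock core handles the cycles-to-nanoseconds conversion, wraparound extension, and early callers (which is why the read_dummy() placeholder above can be dropped). All names, the register access, and the 24 MHz rate here are illustrative assumptions, not taken from this patch.

/* Hypothetical sketch of the setup_sched_clock() usage pattern. */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/sched_clock.h>

static void __iomem *example_timer_base;	/* assumed: mapped free-running counter */

static u32 notrace example_read_sched_clock(void)
{
	/* Must be cheap and notrace: called from scheduler hot paths. */
	return readl_relaxed(example_timer_base);
}

static void __init example_timer_init(void)
{
	/* 32 = counter width in bits; clock rate assumed to be 24 MHz. */
	setup_sched_clock(example_read_sched_clock, 32, 24000000);
}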