time.c

/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * Based on MIPS implementation arch/mips/kernel/time.c
 *   Copyright 2001 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/time.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/sysdev.h>

#include <asm/div64.h>
#include <asm/sysreg.h>
#include <asm/io.h>
#include <asm/sections.h>

static cycle_t read_cycle_count(void)
{
        return (cycle_t)sysreg_read(COUNT);
}

static struct clocksource clocksource_avr32 = {
        .name = "avr32",
        .rating = 350,
        .read = read_cycle_count,
        .mask = CLOCKSOURCE_MASK(32),
        .shift = 16,
        .is_continuous = 1,
};
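
/*
 * The generic timekeeping code converts cycles to nanoseconds as
 * (cycles * mult) >> shift; .mult is filled in from the CPU clock
 * rate in time_init() below.
 */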

/*
 * By default we provide the null RTC ops
 */
static unsigned long null_rtc_get_time(void)
{
        return mktime(2004, 1, 1, 0, 0, 0);
}

static int null_rtc_set_time(unsigned long sec)
{
        return 0;
}

static unsigned long (*rtc_get_time)(void) = null_rtc_get_time;
static int (*rtc_set_time)(unsigned long) = null_rtc_set_time;

/* how many counter cycles in a jiffy? */
static unsigned long cycles_per_jiffy;

/* cycle counter value at the previous timer interrupt */
static unsigned int timerhi, timerlo;

/* the count value for the next timer interrupt */
static unsigned int expirelo;

static void avr32_timer_ack(void)
{
        unsigned int count;

        /* Ack this timer interrupt and set the next one */
        expirelo += cycles_per_jiffy;
        if (expirelo == 0) {
                printk(KERN_DEBUG "expirelo == 0\n");
                sysreg_write(COMPARE, expirelo + 1);
        } else {
                sysreg_write(COMPARE, expirelo);
        }

        /* Check to see if we have missed any timer interrupts */
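        /*
         * COUNT and expirelo are both 32-bit, so the unsigned difference
         * below is small (< 0x7fffffff) exactly when COUNT has already
         * passed the COMPARE value we just programmed, i.e. when the
         * next tick is overdue; in that case, re-arm COMPARE a full
         * jiffy ahead of the current COUNT.
         */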
        count = sysreg_read(COUNT);
        if ((count - expirelo) < 0x7fffffff) {
                expirelo = count + cycles_per_jiffy;
                sysreg_write(COMPARE, expirelo);
        }
}

static unsigned int avr32_hpt_read(void)
{
        return sysreg_read(COUNT);
}

/*
 * Taken from MIPS c0_hpt_timer_init().
 *
 * Why is it so complicated, and what is "count"? My assumption is
 * that `count' specifies the "reference cycle", i.e. the cycle since
 * reset that should mean "zero". The reason COUNT is written twice is
 * probably to make sure we don't get any timer interrupts while we
 * are messing with the counter.
 */
static void avr32_hpt_init(unsigned int count)
{
        count = sysreg_read(COUNT) - count;
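        /*
         * Round the first compare match up to the next jiffy boundary.
         * For example (numbers purely illustrative), with cycles_per_jiffy
         * == 100000 and count == 250000 cycles elapsed since the
         * reference, expirelo becomes 300000.
         */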
        expirelo = (count / cycles_per_jiffy + 1) * cycles_per_jiffy;
        sysreg_write(COUNT, expirelo - cycles_per_jiffy);
        sysreg_write(COMPARE, expirelo);
        sysreg_write(COUNT, count);
}

/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long sched_clock(void)
{
        /* There must be better ways...? */
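        /* jiffies-based, so the resolution is one tick (1000000000 / HZ ns) */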
        return (unsigned long long)jiffies * (1000000000 / HZ);
}

/*
 * local_timer_interrupt() does profiling and process accounting on a
 * per-CPU basis.
 *
 * In UP mode, it is invoked from the (global) timer_interrupt.
 */
static void local_timer_interrupt(int irq, void *dev_id)
{
        if (current->pid)
                profile_tick(CPU_PROFILING);
        update_process_times(user_mode(get_irq_regs()));
}

static irqreturn_t
timer_interrupt(int irq, void *dev_id)
{
        unsigned int count;

        /* ack timer interrupt and try to set next interrupt */
        count = avr32_hpt_read();
        avr32_timer_ack();

        /* Update timerhi/timerlo for intra-jiffy calibration */
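        /*
         * (count < timerlo) evaluates to 1 exactly when the 32-bit COUNT
         * has wrapped since the previous tick, so timerhi:timerlo
         * together track a 64-bit cycle count.
         */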
        timerhi += count < timerlo;     /* Wrap around */
        timerlo = count;

        /*
         * Call the generic timer interrupt handler
         */
        write_seqlock(&xtime_lock);
        do_timer(1);
        write_sequnlock(&xtime_lock);

        /*
         * In UP mode, we call local_timer_interrupt() to do profiling
         * and process accounting.
         *
         * SMP is not supported yet.
         */
        local_timer_interrupt(irq, dev_id);

        return IRQ_HANDLED;
}

static struct irqaction timer_irqaction = {
        .handler = timer_interrupt,
        .flags = IRQF_DISABLED,
        .name = "timer",
};

void __init time_init(void)
{
        unsigned long mult, shift, count_hz;
        int ret;

        xtime.tv_sec = rtc_get_time();
        xtime.tv_nsec = 0;

        set_normalized_timespec(&wall_to_monotonic,
                        -xtime.tv_sec, -xtime.tv_nsec);

        printk("Before time_init: count=%08lx, compare=%08lx\n",
               (unsigned long)sysreg_read(COUNT),
               (unsigned long)sysreg_read(COMPARE));

        count_hz = clk_get_rate(boot_cpu_data.clk);
        shift = clocksource_avr32.shift;
        mult = clocksource_hz2mult(count_hz, shift);
        clocksource_avr32.mult = mult;

        printk("Cycle counter: mult=%lu, shift=%lu\n", mult, shift);
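
        /*
         * cycles_per_jiffy = TICK_NSEC * 2^shift / mult, rounded to the
         * nearest integer. Since mult is roughly (NSEC_PER_SEC << shift)
         * / count_hz, this works out to the number of counter cycles per
         * timer tick.
         */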
        {
                u64 tmp;

                tmp = TICK_NSEC;
                tmp <<= shift;

                tmp += mult / 2;
                do_div(tmp, mult);

                cycles_per_jiffy = tmp;
        }

        /* This sets up the high precision timer for the first interrupt. */
        avr32_hpt_init(avr32_hpt_read());

        printk("After time_init: count=%08lx, compare=%08lx\n",
               (unsigned long)sysreg_read(COUNT),
               (unsigned long)sysreg_read(COMPARE));

        ret = clocksource_register(&clocksource_avr32);
        if (ret)
                printk(KERN_ERR
                       "timer: could not register clocksource: %d\n", ret);

        ret = setup_irq(0, &timer_irqaction);
        if (ret)
                printk("timer: could not request IRQ 0: %d\n", ret);
}

static struct sysdev_class timer_class = {
        set_kset_name("timer"),
};

static struct sys_device timer_device = {
        .id = 0,
        .cls = &timer_class,
};

static int __init init_timer_sysfs(void)
{
        int err = sysdev_class_register(&timer_class);
        if (!err)
                err = sysdev_register(&timer_device);
        return err;
}

device_initcall(init_timer_sysfs);