time.c

/*
 * Copyright 2001 MontaVista Software Inc.
 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
 * Copyright (c) 2003, 2004 Maciej W. Rozycki
 *
 * Common time service routines for MIPS machines. See
 * Documentation/mips/time.README.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/clockchips.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/param.h>
#include <linux/profile.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/compiler.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/div64.h>
#include <asm/sections.h>
#include <asm/smtc_ipi.h>
#include <asm/time.h>

#include <irq.h>

/*
 * The integer part of the number of usecs per jiffy is taken from tick,
 * but the fractional part is not recorded, so we calculate it using the
 * initial value of HZ. This aids systems where tick isn't really an
 * integer (e.g. for HZ = 128).
 */
#define USECS_PER_JIFFY		TICK_SIZE
#define USECS_PER_JIFFY_FRAC	((unsigned long)(u32)((1000000ULL << 32) / HZ))

#define TICK_SIZE	(tick_nsec / 1000)
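
/*
 * Worked example: with HZ = 128 a jiffy is 1000000 / 128 = 7812.5 us.
 * TICK_SIZE supplies the integer part (7812) and USECS_PER_JIFFY_FRAC
 * keeps the remaining 0.5 us as a 32-bit binary fraction
 * (0.5 * 2^32 == 0x80000000).
 */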

/*
 * forward reference
 */
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);

int __weak rtc_mips_set_time(unsigned long sec)
{
        return 0;
}
EXPORT_SYMBOL(rtc_mips_set_time);

int __weak rtc_mips_set_mmss(unsigned long nowtime)
{
        return rtc_mips_set_time(nowtime);
}
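
/*
 * Hook called by the generic timekeeping/NTP code to write the system
 * time back to the persistent (RTC) clock; on MIPS this is routed
 * through the platform's rtc_mips_set_mmss() routine above.
 */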
int update_persistent_clock(struct timespec now)
{
        return rtc_mips_set_mmss(now.tv_sec);
}

/* how many counter cycles in a jiffy */
static unsigned long cycles_per_jiffy __read_mostly;

/*
 * Null timer ack for systems not needing one (e.g. i8254).
 */
static void null_timer_ack(void) { /* nothing */ }

/*
 * Null high precision timer functions for systems lacking one.
 */
static cycle_t null_hpt_read(void)
{
        return 0;
}

/*
 * Timer ack for an R4k-compatible timer of a known frequency.
 */
static void c0_timer_ack(void)
{
        write_c0_compare(read_c0_compare());
}

/*
 * High precision timer functions for a R4k-compatible timer.
 */
static cycle_t c0_hpt_read(void)
{
        return read_c0_count();
}

int (*mips_timer_state)(void);
void (*mips_timer_ack)(void);

/*
 * local_timer_interrupt() does profiling and process accounting
 * on a per-CPU basis.
 *
 * In UP mode, it is invoked from the (global) timer_interrupt.
 *
 * In SMP mode, it might be invoked by a per-CPU timer interrupt, or
 * by a broadcast inter-processor interrupt which is itself triggered
 * by the global timer interrupt.
 */
void local_timer_interrupt(int irq, void *dev_id)
{
        profile_tick(CPU_PROFILING);
        update_process_times(user_mode(get_irq_regs()));
}

int null_perf_irq(void)
{
        return 0;
}
EXPORT_SYMBOL(null_perf_irq);

int (*perf_irq)(void) = null_perf_irq;
EXPORT_SYMBOL(perf_irq);

/*
 * Timer interrupt
 */
int cp0_compare_irq;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

/*
 * Possibly handle a performance counter interrupt.
 * Return true if the timer interrupt should not be checked.
 */
static inline int handle_perf_irq(int r2)
{
        /*
         * The performance counter overflow interrupt may be shared with the
         * timer interrupt (cp0_perfcount_irq < 0). If it is and a
         * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
         * and we can't reliably determine if a counter interrupt has also
         * happened (!r2) then don't check for a timer interrupt.
         */
        return (cp0_perfcount_irq < 0) &&
                perf_irq() == IRQ_HANDLED &&
                !r2;
}

/*
 * time_init() does the following things:
 *
 * 1) plat_time_init() -
 *	a) (optional) set up RTC routines,
 *	b) (optional) calibrate and set mips_hpt_frequency
 *	   (only needed if you intend to use the CPU counter as the timer
 *	   interrupt source)
 * 2) calculate a couple of cached variables for later use
 * 3) plat_timer_setup() -
 *	a) (optional) overwrite any choices made above by time_init(),
 *	b) machine-specific code should set up the timer irqaction,
 *	c) enable the timer interrupt.
 */
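
/*
 * As a minimal sketch of step 1 above: a hypothetical board whose CPU
 * drives the R4k Count register at an assumed 100 MHz would only need
 * its board file to provide
 *
 *	void __init plat_time_init(void)
 *	{
 *		mips_hpt_frequency = 100000000;	// assumed counter rate
 *	}
 *
 * and time_init() below would then select the R4k Count/Compare
 * routines and register the clocksource and clockevent automatically.
 */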

unsigned int mips_hpt_frequency;

static unsigned int __init calibrate_hpt(void)
{
        cycle_t frequency, hpt_start, hpt_end, hpt_count, hz;

        const int loops = HZ / 10;
        int log_2_loops = 0;
        int i;

        /*
         * We want to calibrate for 0.1s, but to avoid a 64-bit
         * division we round the number of loops up to the nearest
         * power of 2.
         */
        while (loops > 1 << log_2_loops)
                log_2_loops++;
        i = 1 << log_2_loops;
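
        /*
         * For example, with HZ == 100 the target of HZ / 10 == 10
         * timer periods is rounded up to 16 (log_2_loops == 4), so
         * the division at the end of this function reduces to a
         * right shift by 4.
         */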

        /*
         * Wait for a rising edge of the timer interrupt.
         */
        while (mips_timer_state());
        while (!mips_timer_state());

        /*
         * Now see how many high precision timer ticks happen
         * during the calculated number of periods between timer
         * interrupts.
         */
        hpt_start = clocksource_mips.read();
        do {
                while (mips_timer_state());
                while (!mips_timer_state());
        } while (--i);
        hpt_end = clocksource_mips.read();

        hpt_count = (hpt_end - hpt_start) & clocksource_mips.mask;
        hz = HZ;
        frequency = hpt_count * hz;

        return frequency >> log_2_loops;
}

struct clocksource clocksource_mips = {
        .name           = "MIPS",
        .mask           = CLOCKSOURCE_MASK(32),
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
};
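
/*
 * Program the next clockevent expiry. The R4k timer raises its
 * interrupt when Count reaches Compare, so Compare is set to the
 * current Count plus the requested delta. If Count has already passed
 * the new Compare value when it is re-read, the deadline was missed
 * and -ETIME is returned so the clockevents core can reprogram the
 * event.
 */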
static int mips_next_event(unsigned long delta,
                           struct clock_event_device *evt)
{
        unsigned int cnt;
        int res;

#ifdef CONFIG_MIPS_MT_SMTC
        {
        unsigned long flags, vpflags;
        local_irq_save(flags);
        vpflags = dvpe();
#endif
        cnt = read_c0_count();
        cnt += delta;
        write_c0_compare(cnt);
        res = ((long)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
#ifdef CONFIG_MIPS_MT_SMTC
        evpe(vpflags);
        local_irq_restore(flags);
        }
#endif
        return res;
}

static void mips_set_mode(enum clock_event_mode mode,
                          struct clock_event_device *evt)
{
        /* Nothing to do ... */
}

static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
static int cp0_timer_irq_installed;

static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
        const int r2 = cpu_has_mips_r2;
        struct clock_event_device *cd;
        int cpu = smp_processor_id();

        /*
         * Suckage alert:
         * Before R2 of the architecture there was no way to see if a
         * performance counter interrupt was pending, so we have to run
         * the performance counter interrupt handler anyway.
         */
        if (handle_perf_irq(r2))
                goto out;

        /*
         * The same applies to performance counter interrupts. But with the
         * above we now know that the reason we got here must be a timer
         * interrupt. Being the paranoiacs we are we check anyway.
         */
        if (!r2 || (read_c0_cause() & (1 << 30))) {
                c0_timer_ack();
#ifdef CONFIG_MIPS_MT_SMTC
                if (cpu_data[cpu].vpe_id)
                        goto out;
                cpu = 0;
#endif
                cd = &per_cpu(mips_clockevent_device, cpu);
                cd->event_handler(cd);
        }

out:
        return IRQ_HANDLED;
}

static struct irqaction timer_irqaction = {
        .handler = timer_interrupt,
#ifdef CONFIG_MIPS_MT_SMTC
        .flags = IRQF_DISABLED,
#else
        .flags = IRQF_DISABLED | IRQF_PERCPU,
#endif
        .name = "timer",
};
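
/*
 * Register the MIPS clocksource. The timekeeping core converts counter
 * cycles to nanoseconds as (cycles * mult) >> shift, so mult is chosen
 * as (NSEC_PER_SEC << shift) / mips_hpt_frequency with the largest
 * shift (up to 32) for which mult still fits in 32 bits.
 */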
static void __init init_mips_clocksource(void)
{
        u64 temp;
        u32 shift;

        if (!mips_hpt_frequency || clocksource_mips.read == null_hpt_read)
                return;

        /* Calculate a somewhat reasonable rating value */
        clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
        /* Find a shift value */
        for (shift = 32; shift > 0; shift--) {
                temp = (u64) NSEC_PER_SEC << shift;
                do_div(temp, mips_hpt_frequency);
                if ((temp >> 32) == 0)
                        break;
        }
        clocksource_mips.shift = shift;
        clocksource_mips.mult = (u32)temp;

        clocksource_register(&clocksource_mips);
}

void __init __weak plat_time_init(void)
{
}

void __init __weak plat_timer_setup(struct irqaction *irq)
{
}

#ifdef CONFIG_MIPS_MT_SMTC
DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);

static void smtc_set_mode(enum clock_event_mode mode,
                          struct clock_event_device *evt)
{
}

int dummycnt[NR_CPUS];

static void mips_broadcast(cpumask_t mask)
{
        unsigned int cpu;

        for_each_cpu_mask(cpu, mask)
                smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
}

static void setup_smtc_dummy_clockevent_device(void)
{
        //uint64_t mips_freq = mips_hpt_frequency;
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *cd;

        cd = &per_cpu(smtc_dummy_clockevent_device, cpu);

        cd->name                = "SMTC";
        cd->features            = CLOCK_EVT_FEAT_DUMMY;

        /* Calculate the min / max delta */
        cd->mult                = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
        cd->shift               = 0; //32;
        cd->max_delta_ns        = 0; //clockevent_delta2ns(0x7fffffff, cd);
        cd->min_delta_ns        = 0; //clockevent_delta2ns(0x30, cd);

        cd->rating              = 200;
        cd->irq                 = 17; //-1;
//      if (cpu)
//              cd->cpumask     = CPU_MASK_ALL; // cpumask_of_cpu(cpu);
//      else
                cd->cpumask     = cpumask_of_cpu(cpu);

        cd->set_mode            = smtc_set_mode;

        cd->broadcast           = mips_broadcast;

        clockevents_register_device(cd);
}
#endif

static void mips_event_handler(struct clock_event_device *dev)
{
}
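
/*
 * Set up the per-CPU R4k Count/Compare clockevent device and, the
 * first time through, install the CP0 timer irqaction. The min / max
 * delta values bound the deltas that mips_next_event() may be asked
 * to program, converted to nanoseconds via clockevent_delta2ns().
 */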
void __cpuinit mips_clockevent_init(void)
{
        uint64_t mips_freq = mips_hpt_frequency;
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *cd;
        unsigned int irq = MIPS_CPU_IRQ_BASE + 7;

        if (!cpu_has_counter)
                return;

#ifdef CONFIG_MIPS_MT_SMTC
        setup_smtc_dummy_clockevent_device();

        /*
         * On SMTC we only register VPE0's compare interrupt as clockevent
         * device.
         */
        if (cpu)
                return;
#endif

        cd = &per_cpu(mips_clockevent_device, cpu);

        cd->name                = "MIPS";
        cd->features            = CLOCK_EVT_FEAT_ONESHOT;

        /* Calculate the min / max delta */
        cd->mult                = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
        cd->shift               = 32;
        cd->max_delta_ns        = clockevent_delta2ns(0x7fffffff, cd);
        cd->min_delta_ns        = clockevent_delta2ns(0x300, cd);

        cd->rating              = 300;
        cd->irq                 = irq;
#ifdef CONFIG_MIPS_MT_SMTC
        cd->cpumask             = CPU_MASK_ALL;
#else
        cd->cpumask             = cpumask_of_cpu(cpu);
#endif
        cd->set_next_event      = mips_next_event;
        cd->set_mode            = mips_set_mode;
        cd->event_handler       = mips_event_handler;

        clockevents_register_device(cd);

        if (!cp0_timer_irq_installed) {
#ifdef CONFIG_MIPS_MT_SMTC
#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
                setup_irq_smtc(irq, &timer_irqaction, CPUCTR_IMASKBIT);
#else
                setup_irq(irq, &timer_irqaction);
#endif /* CONFIG_MIPS_MT_SMTC */
                cp0_timer_irq_installed = 1;
        }
}

void __init time_init(void)
{
        plat_time_init();

        /* Choose appropriate high precision timer routines. */
        if (!cpu_has_counter && !clocksource_mips.read)
                /* No high precision timer -- sorry. */
                clocksource_mips.read = null_hpt_read;
        else if (!mips_hpt_frequency && !mips_timer_state) {
                /* A high precision timer of unknown frequency. */
                if (!clocksource_mips.read)
                        /* No external high precision timer -- use R4k. */
                        clocksource_mips.read = c0_hpt_read;
        } else {
                /* We know counter frequency. Or we can get it. */
                if (!clocksource_mips.read) {
                        /* No external high precision timer -- use R4k. */
                        clocksource_mips.read = c0_hpt_read;

                        if (!mips_timer_state) {
                                /* No external timer interrupt -- use R4k. */
                                mips_timer_ack = c0_timer_ack;
                                /* Calculate the cached cycles_per_jiffy value. */
                                cycles_per_jiffy =
                                        (mips_hpt_frequency + HZ / 2) / HZ;
                        }
                }
                if (!mips_hpt_frequency)
                        mips_hpt_frequency = calibrate_hpt();

                /* Report the high precision timer rate for a reference. */
                printk("Using %u.%03u MHz high precision timer.\n",
                       ((mips_hpt_frequency + 500) / 1000) / 1000,
                       ((mips_hpt_frequency + 500) / 1000) % 1000);

#ifdef CONFIG_IRQ_CPU
                setup_irq(MIPS_CPU_IRQ_BASE + 7, &timer_irqaction);
#endif
        }

        if (!mips_timer_ack)
                /* No timer interrupt ack (e.g. i8254). */
                mips_timer_ack = null_timer_ack;

        /*
         * Call board-specific timer interrupt setup.
         *
         * This pointer must be set up in the machine setup routine.
         *
         * Even if a machine chooses to use a low-level timer interrupt,
         * it still needs to set up the timer_irqaction. In that case,
         * it might be better to set timer_irqaction.handler to a null
         * function so that we are sure the high-level code is not
         * invoked accidentally.
         */
        plat_timer_setup(&timer_irqaction);

        init_mips_clocksource();
        mips_clockevent_init();
}