/*
 *  linux/arch/arm/kernel/arch_timer.c
 *
 *  Copyright (C) 2011 ARM Ltd.
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/io.h>

#include <asm/delay.h>
#include <asm/localtimer.h>
#include <asm/arch_timer.h>
#include <asm/sched_clock.h>

static u32 arch_timer_rate;

enum ppi_nr {
	PHYS_SECURE_PPI,
	PHYS_NONSECURE_PPI,
	VIRT_PPI,
	HYP_PPI,
	MAX_TIMER_PPI
};

static int arch_timer_ppi[MAX_TIMER_PPI];

static struct clock_event_device __percpu **arch_timer_evt;
static struct delay_timer arch_delay_timer;

static bool arch_timer_use_virtual = true;

/*
 * Architected system timer support.
 */

#define ARCH_TIMER_CTRL_ENABLE		(1 << 0)
#define ARCH_TIMER_CTRL_IT_MASK		(1 << 1)
#define ARCH_TIMER_CTRL_IT_STAT		(1 << 2)

#define ARCH_TIMER_REG_CTRL		0
#define ARCH_TIMER_REG_TVAL		1

#define ARCH_TIMER_PHYS_ACCESS		0
#define ARCH_TIMER_VIRT_ACCESS		1

/*
 * These register accessors are marked inline so the compiler can
 * nicely work out which register we want, and chuck away the rest of
 * the code. At least it does so with a recent GCC (4.6.3).
 */
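/*
 * CP15 c14 encodings used below (ARMv7 Generic Timer): CNTP_CTL is
 * c14, c2, 1 and CNTP_TVAL is c14, c2, 0 for the physical timer;
 * CNTV_CTL is c14, c3, 1 and CNTV_TVAL is c14, c3, 0 for the
 * virtual timer.
 */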
static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
{
	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
			break;
		}
	}

	if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
			break;
		}
	}

	isb();
}

static inline u32 arch_timer_reg_read(const int access, const int reg)
{
	u32 val = 0;

	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
			break;
		}
	}

	if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
			break;
		}
	}

	return val;
}

static inline u32 arch_timer_get_cntfrq(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
	return val;
}
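
/*
 * The 64-bit counters are read with a single MRRC: opc1 0 reads CNTPCT
 * (the physical count) and opc1 1 reads CNTVCT (the virtual count).
 */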
static inline u64 arch_counter_get_cntpct(void)
{
	u64 cval;
	asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
	return cval;
}

static inline u64 arch_counter_get_cntvct(void)
{
	u64 cval;
	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
	return cval;
}
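
/*
 * The interrupt is taken with ISTATUS set; mask the timer output via
 * IMASK until set_next_event() reprograms the comparator, then run the
 * clockevent handler. A spurious interrupt (ISTATUS clear) is reported
 * as IRQ_NONE.
 */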
static inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static inline void timer_set_mode(const int access, int mode)
{
	unsigned long ctrl;

	switch (mode) {
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
		ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
		break;
	default:
		break;
	}
}

static void arch_timer_set_mode_virt(enum clock_event_mode mode,
				     struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
}

static void arch_timer_set_mode_phys(enum clock_event_mode mode,
				     struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
}
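
/*
 * TVAL is a signed 32-bit down-counter; the timer condition is met
 * (and the interrupt asserted, if unmasked) once it counts down
 * through zero, so writing TVAL schedules an event "evt" ticks from
 * now. Enabling the timer and clearing IMASK in the same sequence
 * un-masks the interrupt masked by timer_handler().
 */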
static inline void set_next_event(const int access, unsigned long evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
}

static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *unused)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *unused)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
	return 0;
}
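
/*
 * Per-CPU clockevent setup. The min/max deltas passed to
 * clockevents_config_and_register() reflect the signed 32-bit TVAL:
 * 0x7fffffff is the largest programmable interval.
 */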
static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
	clk->name = "arch_sys_timer";
	clk->rating = 450;
	if (arch_timer_use_virtual) {
		clk->irq = arch_timer_ppi[VIRT_PPI];
		clk->set_mode = arch_timer_set_mode_virt;
		clk->set_next_event = arch_timer_set_next_event_virt;
	} else {
		clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
		clk->set_mode = arch_timer_set_mode_phys;
		clk->set_next_event = arch_timer_set_next_event_phys;
	}

	clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);

	clockevents_config_and_register(clk, arch_timer_rate,
					0xf, 0x7fffffff);

	*__this_cpu_ptr(arch_timer_evt) = clk;

	if (arch_timer_use_virtual)
		enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
	else {
		enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
	}

	return 0;
}

static int arch_timer_available(void)
{
	u32 freq;

	if (arch_timer_rate == 0) {
		freq = arch_timer_get_cntfrq();

		/* Check the timer frequency. */
		if (freq == 0) {
			pr_warn("Architected timer frequency not available\n");
			return -EINVAL;
		}

		arch_timer_rate = freq;
	}

	pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
		     (unsigned long)arch_timer_rate / 1000000,
		     (unsigned long)(arch_timer_rate / 10000) % 100,
		     arch_timer_use_virtual ? "virt" : "phys");
	return 0;
}

/*
 * Some external users of arch_timer_read_counter (e.g. sched_clock) may try to
 * call it before it has been initialised. Rather than incur a performance
 * penalty checking for initialisation, provide a default implementation that
 * won't lead to time appearing to jump backwards.
 */
static u64 arch_timer_read_zero(void)
{
	return 0;
}

u64 (*arch_timer_read_counter)(void) = arch_timer_read_zero;
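
/*
 * setup_sched_clock() takes a 32-bit read function, so the 64-bit
 * counter is truncated here; the sched_clock core extends it again by
 * tracking wrap-arounds.
 */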
static u32 arch_timer_read_counter32(void)
{
	return arch_timer_read_counter();
}

static cycle_t arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

static unsigned long arch_timer_read_current_timer(void)
{
	return arch_timer_read_counter();
}

static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}
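
/*
 * Only the low 56 bits of the counter are used: 2^56 cycles at a
 * typical 50 MHz CNTFRQ is roughly 45 years, so wrap-around is not a
 * practical concern.
 */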
static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct cyclecounter cyclecounter = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

static struct timecounter timecounter;

struct timecounter *arch_timer_get_timecounter(void)
{
	return &timecounter;
}

static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
		 clk->irq, smp_processor_id());

	if (arch_timer_use_virtual)
		disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
	else {
		disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
	}

	clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
}
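
/*
 * Hooks for the ARM local timer infrastructure: setup/stop are invoked
 * on each CPU as cores are brought online and taken offline.
 */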
static struct local_timer_ops arch_timer_ops __cpuinitdata = {
	.setup	= arch_timer_setup,
	.stop	= arch_timer_stop,
};

static struct clock_event_device arch_timer_global_evt;

static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	err = arch_timer_available();
	if (err)
		goto out;

	arch_timer_evt = alloc_percpu(struct clock_event_device *);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&timecounter, &cyclecounter,
			 arch_counter_get_cntpct());

	if (arch_timer_use_virtual) {
		ppi = arch_timer_ppi[VIRT_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
	} else {
		ppi = arch_timer_ppi[PHYS_SECURE_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = local_timer_register(&arch_timer_ops);
	if (err) {
		/*
		 * We couldn't register as a local timer (could be
		 * because we're on a UP platform, or because some
		 * other local timer is already present...). Try as a
		 * global timer instead.
		 */
		arch_timer_global_evt.cpumask = cpumask_of(0);
		err = arch_timer_setup(&arch_timer_global_evt);
	}

	if (err)
		goto out_free_irq;

	/* Use the architected timer for the delay loop. */
	arch_delay_timer.read_current_timer = &arch_timer_read_current_timer;
	arch_delay_timer.freq = arch_timer_rate;
	register_current_timer_delay(&arch_delay_timer);
	return 0;

out_free_irq:
	if (arch_timer_use_virtual)
		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
	else {
		free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
				arch_timer_evt);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
					arch_timer_evt);
	}

out_free:
	free_percpu(arch_timer_evt);

out:
	return err;
}

static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible	= "arm,armv7-timer", },
	{},
};
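
/*
 * A minimal (hypothetical) device-tree node consumed below. The four
 * interrupts must appear in enum ppi_nr order (secure physical,
 * non-secure physical, virtual, hypervisor); the PPI numbers here are
 * illustrative, and "clock-frequency" is only needed when firmware
 * fails to program CNTFRQ:
 *
 *	timer {
 *		compatible = "arm,armv7-timer";
 *		interrupts = <1 13 0xf08>,
 *			     <1 14 0xf08>,
 *			     <1 11 0xf08>,
 *			     <1 10 0xf08>;
 *	};
 */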
int __init arch_timer_of_register(void)
{
	struct device_node *np;
	u32 freq;
	int i;

	np = of_find_matching_node(NULL, arch_timer_of_match);
	if (!np) {
		pr_err("arch_timer: can't find DT node\n");
		return -ENODEV;
	}

	/* Try to determine the frequency from the device tree or CNTFRQ */
	if (!of_property_read_u32(np, "clock-frequency", &freq))
		arch_timer_rate = freq;

	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
	of_node_put(np);

	/*
	 * If no interrupt provided for virtual timer, we'll have to
	 * stick to the physical timer. It'd better be accessible...
	 */
	if (!arch_timer_ppi[VIRT_PPI]) {
		arch_timer_use_virtual = false;

		if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
		    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			pr_warn("arch_timer: No interrupt available, giving up\n");
			return -EINVAL;
		}
	}

	if (arch_timer_use_virtual)
		arch_timer_read_counter = arch_counter_get_cntvct;
	else
		arch_timer_read_counter = arch_counter_get_cntpct;

	return arch_timer_register();
}

int __init arch_timer_sched_clock_init(void)
{
	int err;

	err = arch_timer_available();
	if (err)
		return err;

	setup_sched_clock(arch_timer_read_counter32,
			  32, arch_timer_rate);
	return 0;
}