  1. /*
  2. * linux/drivers/clocksource/arm_arch_timer.c
  3. *
  4. * Copyright (C) 2011 ARM Ltd.
  5. * All Rights Reserved
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. */
  11. #include <linux/init.h>
  12. #include <linux/kernel.h>
  13. #include <linux/device.h>
  14. #include <linux/smp.h>
  15. #include <linux/cpu.h>
  16. #include <linux/clockchips.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/of_irq.h>
  19. #include <linux/io.h>
  20. #include <asm/arch_timer.h>
  21. #include <asm/virt.h>
  22. #include <clocksource/arm_arch_timer.h>
/* Timer tick rate in Hz; 0 until probed from DT or CNTFRQ. */
static u32 arch_timer_rate;

/* PPI slots, in the order they appear in the DT "interrupts" property. */
enum ppi_nr {
	PHYS_SECURE_PPI,
	PHYS_NONSECURE_PPI,
	VIRT_PPI,
	HYP_PPI,
	MAX_TIMER_PPI
};

/* Linux IRQ number for each PPI; 0 when the DT does not provide one. */
static int arch_timer_ppi[MAX_TIMER_PPI];

/* Per-CPU clockevent device backing the architected timer. */
static struct clock_event_device __percpu *arch_timer_evt;

/*
 * Prefer the virtual timer by default; cleared in arch_timer_init()
 * when running in HYP mode or when no virtual-timer PPI is provided.
 */
static bool arch_timer_use_virtual = true;
  34. /*
  35. * Architected system timer support.
  36. */
  37. static __always_inline irqreturn_t timer_handler(const int access,
  38. struct clock_event_device *evt)
  39. {
  40. unsigned long ctrl;
  41. ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
  42. if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
  43. ctrl |= ARCH_TIMER_CTRL_IT_MASK;
  44. arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
  45. evt->event_handler(evt);
  46. return IRQ_HANDLED;
  47. }
  48. return IRQ_NONE;
  49. }
  50. static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
  51. {
  52. struct clock_event_device *evt = dev_id;
  53. return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
  54. }
  55. static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
  56. {
  57. struct clock_event_device *evt = dev_id;
  58. return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
  59. }
  60. static __always_inline void timer_set_mode(const int access, int mode)
  61. {
  62. unsigned long ctrl;
  63. switch (mode) {
  64. case CLOCK_EVT_MODE_UNUSED:
  65. case CLOCK_EVT_MODE_SHUTDOWN:
  66. ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
  67. ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
  68. arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
  69. break;
  70. default:
  71. break;
  72. }
  73. }
/* clockevent set_mode callback for the virtual timer. */
static void arch_timer_set_mode_virt(enum clock_event_mode mode,
				     struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
}
/* clockevent set_mode callback for the physical timer. */
static void arch_timer_set_mode_phys(enum clock_event_mode mode,
				     struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
}
  84. static __always_inline void set_next_event(const int access, unsigned long evt)
  85. {
  86. unsigned long ctrl;
  87. ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
  88. ctrl |= ARCH_TIMER_CTRL_ENABLE;
  89. ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
  90. arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
  91. arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
  92. }
/* clockevent set_next_event callback for the virtual timer. Always succeeds. */
static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *unused)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
	return 0;
}
/* clockevent set_next_event callback for the physical timer. Always succeeds. */
static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *unused)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
	return 0;
}
  105. static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
  106. {
  107. clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
  108. clk->name = "arch_sys_timer";
  109. clk->rating = 450;
  110. if (arch_timer_use_virtual) {
  111. clk->irq = arch_timer_ppi[VIRT_PPI];
  112. clk->set_mode = arch_timer_set_mode_virt;
  113. clk->set_next_event = arch_timer_set_next_event_virt;
  114. } else {
  115. clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
  116. clk->set_mode = arch_timer_set_mode_phys;
  117. clk->set_next_event = arch_timer_set_next_event_phys;
  118. }
  119. clk->cpumask = cpumask_of(smp_processor_id());
  120. clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);
  121. clockevents_config_and_register(clk, arch_timer_rate,
  122. 0xf, 0x7fffffff);
  123. if (arch_timer_use_virtual)
  124. enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
  125. else {
  126. enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
  127. if (arch_timer_ppi[PHYS_NONSECURE_PPI])
  128. enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
  129. }
  130. arch_counter_set_user_access();
  131. return 0;
  132. }
  133. static int arch_timer_available(void)
  134. {
  135. u32 freq;
  136. if (arch_timer_rate == 0) {
  137. freq = arch_timer_get_cntfrq();
  138. /* Check the timer frequency. */
  139. if (freq == 0) {
  140. pr_warn("Architected timer frequency not available\n");
  141. return -EINVAL;
  142. }
  143. arch_timer_rate = freq;
  144. }
  145. pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
  146. (unsigned long)arch_timer_rate / 1000000,
  147. (unsigned long)(arch_timer_rate / 10000) % 100,
  148. arch_timer_use_virtual ? "virt" : "phys");
  149. return 0;
  150. }
/* Public accessor: the timer tick rate in Hz (0 if not yet probed). */
u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}
/* Public accessor: raw 64-bit virtual counter value (CNTVCT). */
u64 arch_timer_read_counter(void)
{
	return arch_counter_get_cntvct();
}
/* clocksource read callback: always reads the virtual counter. */
static cycle_t arch_counter_read(struct clocksource *cs)
{
	return arch_counter_get_cntvct();
}
/* cyclecounter read callback: same virtual-counter source as above. */
static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_counter_get_cntvct();
}
/*
 * Clocksource wrapping the architected counter. The 56-bit mask covers
 * the architecturally guaranteed minimum counter width.
 */
static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

/* Cyclecounter for the timecounter below; mult/shift are copied from
 * clocksource_counter at registration time in arch_timer_register(). */
static struct cyclecounter cyclecounter = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

/* Timecounter exported via arch_timer_get_timecounter(). */
static struct timecounter timecounter;
/* Public accessor: the timecounter built on the architected counter. */
struct timecounter *arch_timer_get_timecounter(void)
{
	return &timecounter;
}
  183. static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
  184. {
  185. pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
  186. clk->irq, smp_processor_id());
  187. if (arch_timer_use_virtual)
  188. disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
  189. else {
  190. disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
  191. if (arch_timer_ppi[PHYS_NONSECURE_PPI])
  192. disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
  193. }
  194. clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
  195. }
  196. static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
  197. unsigned long action, void *hcpu)
  198. {
  199. /*
  200. * Grab cpu pointer in each case to avoid spurious
  201. * preemptible warnings
  202. */
  203. switch (action & ~CPU_TASKS_FROZEN) {
  204. case CPU_STARTING:
  205. arch_timer_setup(this_cpu_ptr(arch_timer_evt));
  206. break;
  207. case CPU_DYING:
  208. arch_timer_stop(this_cpu_ptr(arch_timer_evt));
  209. break;
  210. }
  211. return NOTIFY_OK;
  212. }
/* Registered in arch_timer_register() to track CPU hotplug. */
static struct notifier_block arch_timer_cpu_nb __cpuinitdata = {
	.notifier_call = arch_timer_cpu_notify,
};
/*
 * One-time registration: validate the timer rate, register the counter
 * as a clocksource/timecounter, request the per-CPU IRQ(s) for the
 * chosen flavour, hook CPU hotplug and configure the boot CPU.
 *
 * Error handling unwinds in reverse order via the goto labels below;
 * note the inner PHYS path frees the secure PPI itself when the
 * non-secure request fails, so out_free_irq is only reached with all
 * requested IRQs held. Returns 0 on success or a negative errno.
 */
static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	err = arch_timer_available();
	if (err)
		goto out;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	/* Reuse the clocksource's mult/shift for the cyclecounter. */
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&timecounter, &cyclecounter,
			 arch_counter_get_cntvct());

	if (arch_timer_use_virtual) {
		ppi = arch_timer_ppi[VIRT_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
	} else {
		ppi = arch_timer_ppi[PHYS_SECURE_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = register_cpu_notifier(&arch_timer_cpu_nb);
	if (err)
		goto out_free_irq;

	/* Immediately configure the timer on the boot CPU */
	arch_timer_setup(this_cpu_ptr(arch_timer_evt));

	return 0;

out_free_irq:
	if (arch_timer_use_virtual)
		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
	else {
		free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
				arch_timer_evt);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
					arch_timer_evt);
	}

out_free:
	free_percpu(arch_timer_evt);

out:
	return err;
}
/*
 * Device-tree probe entry (via CLOCKSOURCE_OF_DECLARE): read the
 * optional "clock-frequency" property, map all PPIs, decide between
 * the virtual and physical timer flavours and register everything.
 */
static void __init arch_timer_init(struct device_node *np)
{
	u32 freq;
	int i;

	/* A non-zero rate means a previous node already probed us. */
	if (arch_timer_get_rate()) {
		pr_warn("arch_timer: multiple nodes in dt, skipping\n");
		return;
	}

	/* Try to determine the frequency from the device tree or CNTFRQ */
	if (!of_property_read_u32(np, "clock-frequency", &freq))
		arch_timer_rate = freq;

	/* Map all four PPIs; slots missing from the DT come back as 0. */
	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	of_node_put(np);

	/*
	 * If HYP mode is available, we know that the physical timer
	 * has been configured to be accessible from PL1. Use it, so
	 * that a guest can use the virtual timer instead.
	 *
	 * If no interrupt provided for virtual timer, we'll have to
	 * stick to the physical timer. It'd better be accessible...
	 */
	if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
		arch_timer_use_virtual = false;

		/*
		 * NOTE(review): this gives up when EITHER physical PPI is
		 * missing — a secure-only configuration is rejected too.
		 * That matches this code's vintage, but confirm it is
		 * intentional for the platforms being supported.
		 */
		if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
		    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			pr_warn("arch_timer: No interrupt available, giving up\n");
			return;
		}
	}

	/* Return codes are intentionally ignored: boot continues without us. */
	arch_timer_register();
	arch_timer_arch_init();
}

/* Match both the ARMv7 and ARMv8 generic-timer DT compatibles. */
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_init);