/*
 * arch_timer.h - ARM architected timer (generic timer) register accessors.
 */
#ifndef __ASMARM_ARCH_TIMER_H
#define __ASMARM_ARCH_TIMER_H

#include <asm/barrier.h>
#include <asm/errno.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/types.h>
#include <clocksource/arm_arch_timer.h>

#ifdef CONFIG_ARM_ARCH_TIMER
int arch_timer_of_register(void);
int arch_timer_sched_clock_init(void);
/*
 * These register accessors are marked inline so the compiler can
 * nicely work out which register we want, and chuck away the rest of
 * the code. At least it does so with a recent GCC (4.6.3).
 */
  17. static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
  18. {
  19. if (access == ARCH_TIMER_PHYS_ACCESS) {
  20. switch (reg) {
  21. case ARCH_TIMER_REG_CTRL:
  22. asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
  23. break;
  24. case ARCH_TIMER_REG_TVAL:
  25. asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
  26. break;
  27. }
  28. }
  29. if (access == ARCH_TIMER_VIRT_ACCESS) {
  30. switch (reg) {
  31. case ARCH_TIMER_REG_CTRL:
  32. asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
  33. break;
  34. case ARCH_TIMER_REG_TVAL:
  35. asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
  36. break;
  37. }
  38. }
  39. isb();
  40. }
  41. static inline u32 arch_timer_reg_read(const int access, const int reg)
  42. {
  43. u32 val = 0;
  44. if (access == ARCH_TIMER_PHYS_ACCESS) {
  45. switch (reg) {
  46. case ARCH_TIMER_REG_CTRL:
  47. asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
  48. break;
  49. case ARCH_TIMER_REG_TVAL:
  50. asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
  51. break;
  52. }
  53. }
  54. if (access == ARCH_TIMER_VIRT_ACCESS) {
  55. switch (reg) {
  56. case ARCH_TIMER_REG_CTRL:
  57. asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
  58. break;
  59. case ARCH_TIMER_REG_TVAL:
  60. asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
  61. break;
  62. }
  63. }
  64. return val;
  65. }
  66. static inline u32 arch_timer_get_cntfrq(void)
  67. {
  68. u32 val;
  69. asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
  70. return val;
  71. }
  72. static inline u64 arch_counter_get_cntpct(void)
  73. {
  74. u64 cval;
  75. isb();
  76. asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
  77. return cval;
  78. }
  79. static inline u64 arch_counter_get_cntvct(void)
  80. {
  81. u64 cval;
  82. isb();
  83. asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
  84. return cval;
  85. }
  86. static inline void __cpuinit arch_counter_set_user_access(void)
  87. {
  88. u32 cntkctl;
  89. asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));
  90. /* disable user access to everything */
  91. cntkctl &= ~((3 << 8) | (7 << 0));
  92. asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
  93. }
#else
  95. static inline int arch_timer_of_register(void)
  96. {
  97. return -ENXIO;
  98. }
  99. static inline int arch_timer_sched_clock_init(void)
  100. {
  101. return -ENXIO;
  102. }
#endif
#endif /* __ASMARM_ARCH_TIMER_H */