/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 by Ralf Baechle
 * Copyright (C) 2009, 2012 Cavium, Inc.
 */
#include <linux/clocksource.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/cpu-info.h>
#include <asm/time.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-ipd-defs.h>
#include <asm/octeon/cvmx-mio-defs.h>
/* Scaling constant for OCTEON2: f == 2^64 / sdiv (0 on other CPUs). */
static u64 f;
/* OCTEON2 core-clock PLL multiplier (rst_boot.s.c_mul); 0 otherwise. */
static u64 rdiv;
/* OCTEON2 I/O-clock PLL multiplier (rst_boot.s.pnr_mul); 0 otherwise. */
static u64 sdiv;
/* Core clock ticks per microsecond, used by __udelay(). */
static u64 octeon_udelay_factor;
/* (ticks per microsecond * 2^16) / 1000, used by __ndelay(). */
static u64 octeon_ndelay_factor;
/*
 * Compute the per-microsecond and per-nanosecond delay factors from the
 * core clock rate, seed the loops-per-jiffy estimate, and on OCTEON2
 * cache the clock-multiplier state used to scale IPD_CLK_COUNT.
 */
void __init octeon_setup_delays(void)
{
	octeon_udelay_factor = octeon_get_clock_rate() / 1000000;
	/*
	 * For __ndelay we divide by 2^16, so the factor is multiplied
	 * by the same amount.
	 */
	octeon_ndelay_factor = (octeon_udelay_factor * 0x10000ull) / 1000ull;
	preset_lpj = octeon_get_clock_rate() / HZ;
	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
		union cvmx_mio_rst_boot rst_boot;

		rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
		rdiv = rst_boot.s.c_mul;	/* CPU clock */
		sdiv = rst_boot.s.pnr_mul;	/* I/O clock */
		/*
		 * f == 2^64 / sdiv, computed as (2^63 / sdiv) * 2 so the
		 * dividend fits in a u64.
		 */
		f = (0x8000000000000000ull / sdiv) * 2;
	}
}
/*
 * Set the current core's cvmcount counter to the value of the
 * IPD_CLK_COUNT. We do this on all cores as they are brought
 * on-line. This allows for a read from a local cpu register to
 * access a synchronized counter.
 *
 * On CPU_CAVIUM_OCTEON2 the IPD_CLK_COUNT is scaled by rdiv/sdiv.
 */
void octeon_init_cvmcount(void)
{
	unsigned long flags;
	unsigned loops = 2;

	/* Clobber loops so GCC will not unroll the following while loop. */
	asm("" : "+r" (loops));

	local_irq_save(flags);
	/*
	 * Loop several times so we are executing from the cache,
	 * which should give more deterministic timing.
	 */
	while (loops--) {
		u64 ipd_clk_count = cvmx_read_csr(CVMX_IPD_CLK_COUNT);
		/* rdiv/f are non-zero only on OCTEON2 (octeon_setup_delays). */
		if (rdiv != 0) {
			ipd_clk_count *= rdiv;
			if (f != 0) {
				/*
				 * Divide by sdiv: multiply by
				 * f == 2^64 / sdiv and keep the high 64
				 * bits of the 128-bit product (mfhi).
				 */
				asm("dmultu\t%[cnt],%[f]\n\t"
				    "mfhi\t%[cnt]"
				    : [cnt] "+r" (ipd_clk_count)
				    : [f] "r" (f)
				    : "hi", "lo");
			}
		}
		write_c0_cvmcount(ipd_clk_count);
	}
	local_irq_restore(flags);
}
/* Clocksource read callback: return the 64-bit core cvmcount counter. */
static cycle_t octeon_cvmcount_read(struct clocksource *cs)
{
	return read_c0_cvmcount();
}
/* Clocksource backed by the per-core cvmcount counter (synchronized
 * across cores by octeon_init_cvmcount()). */
static struct clocksource clocksource_mips = {
	.name		= "OCTEON_CVMCOUNT",
	.read		= octeon_cvmcount_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
/*
 * Scheduler clock: nanoseconds derived from the cvmcount counter using
 * the clocksource's mult/shift pair, i.e. (cnt * mult) >> shift.
 */
unsigned long long notrace sched_clock(void)
{
	/* 64-bit arithmetic can overflow, so use 128-bit. */
	u64 t1, t2, t3;
	unsigned long long rv;
	u64 mult = clocksource_mips.mult;
	u64 shift = clocksource_mips.shift;
	u64 cnt = read_c0_cvmcount();

	/*
	 * 128-bit (cnt * mult) >> shift:
	 *   dmultu puts the full product in {hi = t2, lo = t3};
	 *   rv = (t3 >> shift) | (t2 << (64 - shift)).
	 * The (64 - shift) left shift is built from two steps, since
	 * dsllv only uses the low 6 bits of its shift amount:
	 *   t1 = ~shift (low bits == 63 - shift), t2 <<= 1, then
	 *   t2 <<= t1 — a total left shift of 64 - shift.
	 */
	asm (
		"dmultu\t%[cnt],%[mult]\n\t"
		"nor\t%[t1],$0,%[shift]\n\t"
		"mfhi\t%[t2]\n\t"
		"mflo\t%[t3]\n\t"
		"dsll\t%[t2],%[t2],1\n\t"
		"dsrlv\t%[rv],%[t3],%[shift]\n\t"
		"dsllv\t%[t1],%[t2],%[t1]\n\t"
		"or\t%[rv],%[t1],%[rv]\n\t"
		: [rv] "=&r" (rv), [t1] "=&r" (t1), [t2] "=&r" (t2), [t3] "=&r" (t3)
		: [cnt] "r" (cnt), [mult] "r" (mult), [shift] "r" (shift)
		: "hi", "lo");
	return rv;
}
/* Register the cvmcount clocksource at the core clock frequency. */
void __init plat_time_init(void)
{
	clocksource_mips.rating = 300;
	clocksource_register_hz(&clocksource_mips, octeon_get_clock_rate());
}
  112. void __udelay(unsigned long us)
  113. {
  114. u64 cur, end, inc;
  115. cur = read_c0_cvmcount();
  116. inc = us * octeon_udelay_factor;
  117. end = cur + inc;
  118. while (end > cur)
  119. cur = read_c0_cvmcount();
  120. }
  121. EXPORT_SYMBOL(__udelay);
  122. void __ndelay(unsigned long ns)
  123. {
  124. u64 cur, end, inc;
  125. cur = read_c0_cvmcount();
  126. inc = ((ns * octeon_ndelay_factor) >> 16);
  127. end = cur + inc;
  128. while (end > cur)
  129. cur = read_c0_cvmcount();
  130. }
  131. EXPORT_SYMBOL(__ndelay);
  132. void __delay(unsigned long loops)
  133. {
  134. u64 cur, end;
  135. cur = read_c0_cvmcount();
  136. end = cur + loops;
  137. while (end > cur)
  138. cur = read_c0_cvmcount();
  139. }
  140. EXPORT_SYMBOL(__delay);
/**
 * octeon_io_clk_delay - wait for a given number of io clock cycles to pass.
 *
 * We scale the wait by the clock ratio, and then wait for the
 * corresponding number of core clocks.
 *
 * @count: The number of clocks to wait.
 */
void octeon_io_clk_delay(unsigned long count)
{
	u64 cur, end;

	cur = read_c0_cvmcount();
	/* rdiv/f are non-zero only on OCTEON2 (octeon_setup_delays). */
	if (rdiv != 0) {
		/*
		 * Convert I/O clocks to core clocks:
		 * count * rdiv / sdiv, with the division done as a
		 * 128-bit multiply by f == 2^64 / sdiv, keeping the
		 * high 64 bits of the product (mfhi).
		 */
		end = count * rdiv;
		if (f != 0) {
			asm("dmultu\t%[cnt],%[f]\n\t"
			    "mfhi\t%[cnt]"
			    : [cnt] "+r" (end)
			    : [f] "r" (f)
			    : "hi", "lo");
		}
		end = cur + end;
	} else {
		/* Core and I/O clocks run 1:1 on other CPUs. */
		end = cur + count;
	}
	while (end > cur)
		cur = read_c0_cvmcount();
}
EXPORT_SYMBOL(octeon_io_clk_delay);