vmitime.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499
  1. /*
  2. * VMI paravirtual timer support routines.
  3. *
  4. * Copyright (C) 2005, VMware, Inc.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * This program is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  14. * NON INFRINGEMENT. See the GNU General Public License for more
  15. * details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software
  19. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  20. *
  21. * Send feedback to dhecht@vmware.com
  22. *
  23. */
  24. /*
  25. * Portions of this code from arch/i386/kernel/timers/timer_tsc.c.
  26. * Portions of the CONFIG_NO_IDLE_HZ code from arch/s390/kernel/time.c.
  27. * See comments there for proper credits.
  28. */
  29. #include <linux/spinlock.h>
  30. #include <linux/init.h>
  31. #include <linux/errno.h>
  32. #include <linux/jiffies.h>
  33. #include <linux/interrupt.h>
  34. #include <linux/kernel_stat.h>
  35. #include <linux/rcupdate.h>
  36. #include <linux/clocksource.h>
  37. #include <asm/timer.h>
  38. #include <asm/io.h>
  39. #include <asm/apic.h>
  40. #include <asm/div64.h>
  41. #include <asm/timer.h>
  42. #include <asm/desc.h>
  43. #include <asm/vmi.h>
  44. #include <asm/vmi_time.h>
  45. #include <mach_timer.h>
  46. #include <io_ports.h>
  47. #ifdef CONFIG_X86_LOCAL_APIC
  48. #define VMI_ALARM_WIRING VMI_ALARM_WIRED_LVTT
  49. #else
  50. #define VMI_ALARM_WIRING VMI_ALARM_WIRED_IRQ0
  51. #endif
/* Cached VMI timer operations, filled in when the VMI ROM is probed. */
struct vmi_timer_ops vmi_timer_ops;

#ifdef CONFIG_NO_IDLE_HZ
/* /proc/sys/kernel/hz_timer state.  Non-zero keeps the periodic HZ timer
 * running even in the idle loop (vmi_stop_hz_timer() bails out early). */
int sysctl_hz_timer;

/* Per-cpu no-idle-hz statistics (reported via /proc/vmi/info). */
static DEFINE_PER_CPU(unsigned long, vmi_idle_no_hz_irqs);    /* wakeups from nohz idle */
static DEFINE_PER_CPU(unsigned long, vmi_idle_no_hz_jiffies); /* jiffies spent nohz idle */
static DEFINE_PER_CPU(unsigned long, idle_start_jiffies);     /* jiffies when idle began */
#endif /* CONFIG_NO_IDLE_HZ */

/* Number of alarms per second. By default this is CONFIG_VMI_ALARM_HZ. */
static int alarm_hz = CONFIG_VMI_ALARM_HZ;

/* Cache of the value get_cycle_frequency() / HZ. */
static signed long long cycles_per_jiffy;

/* Cache of the value get_cycle_frequency() / alarm_hz. */
static signed long long cycles_per_alarm;

/* The number of cycles accounted for by the 'jiffies'/'xtime' count.
 * Protected by xtime_lock. */
static unsigned long long real_cycles_accounted_system;

/* The number of cycles accounted for by update_process_times(), per cpu. */
static DEFINE_PER_CPU(unsigned long long, process_times_cycles_accounted_cpu);

/* The number of stolen cycles accounted, per cpu. */
static DEFINE_PER_CPU(unsigned long long, stolen_cycles_accounted_cpu);
  75. /* Clock source. */
  76. static cycle_t read_real_cycles(void)
  77. {
  78. return vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
  79. }
  80. static cycle_t read_available_cycles(void)
  81. {
  82. return vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE);
  83. }
#if 0
/* Read the hypervisor's stolen cycle counter.  Currently unused (stolen
 * time is derived as real - available in vmi_account_stolen_cycles());
 * kept compiled-out for reference. */
static cycle_t read_stolen_cycles(void)
{
	return vmi_timer_ops.get_cycle_counter(VMI_CYCLES_STOLEN);
}
#endif /* 0 */
/* Clocksource backed by the hypervisor's real cycle counter. */
static struct clocksource clocksource_vmi = {
	.name			= "vmi-timer",
	.rating			= 450,	/* very high rating: prefer over hardware sources */
	.read			= read_real_cycles,
	.mask			= CLOCKSOURCE_MASK(64),
	.mult			= 0,	/* computed at runtime in vmi_time_init() */
	.shift			= 22,
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS,
};
  99. /* Timer interrupt handler. */
  100. static irqreturn_t vmi_timer_interrupt(int irq, void *dev_id);
  101. static struct irqaction vmi_timer_irq = {
  102. vmi_timer_interrupt,
  103. SA_INTERRUPT,
  104. CPU_MASK_NONE,
  105. "VMI-alarm",
  106. NULL,
  107. NULL
  108. };
  109. /* Alarm rate */
  110. static int __init vmi_timer_alarm_rate_setup(char* str)
  111. {
  112. int alarm_rate;
  113. if (get_option(&str, &alarm_rate) == 1 && alarm_rate > 0) {
  114. alarm_hz = alarm_rate;
  115. printk(KERN_WARNING "VMI timer alarm HZ set to %d\n", alarm_hz);
  116. }
  117. return 1;
  118. }
  119. __setup("vmi_timer_alarm_hz=", vmi_timer_alarm_rate_setup);
  120. /* Initialization */
  121. static void vmi_get_wallclock_ts(struct timespec *ts)
  122. {
  123. unsigned long long wallclock;
  124. wallclock = vmi_timer_ops.get_wallclock(); // nsec units
  125. ts->tv_nsec = do_div(wallclock, 1000000000);
  126. ts->tv_sec = wallclock;
  127. }
  128. static void update_xtime_from_wallclock(void)
  129. {
  130. struct timespec ts;
  131. vmi_get_wallclock_ts(&ts);
  132. do_settimeofday(&ts);
  133. }
  134. unsigned long vmi_get_wallclock(void)
  135. {
  136. struct timespec ts;
  137. vmi_get_wallclock_ts(&ts);
  138. return ts.tv_sec;
  139. }
/* Setting the hypervisor wallclock from the guest is not supported;
 * unconditionally report failure. */
int vmi_set_wallclock(unsigned long now)
{
	return -1;
}
  144. unsigned long long vmi_sched_clock(void)
  145. {
  146. return read_available_cycles();
  147. }
  148. void __init vmi_time_init(void)
  149. {
  150. unsigned long long cycles_per_sec, cycles_per_msec;
  151. unsigned long flags;
  152. local_irq_save(flags);
  153. setup_irq(0, &vmi_timer_irq);
  154. #ifdef CONFIG_X86_LOCAL_APIC
  155. set_intr_gate(LOCAL_TIMER_VECTOR, apic_vmi_timer_interrupt);
  156. #endif
  157. no_sync_cmos_clock = 1;
  158. vmi_get_wallclock_ts(&xtime);
  159. set_normalized_timespec(&wall_to_monotonic,
  160. -xtime.tv_sec, -xtime.tv_nsec);
  161. real_cycles_accounted_system = read_real_cycles();
  162. update_xtime_from_wallclock();
  163. per_cpu(process_times_cycles_accounted_cpu, 0) = read_available_cycles();
  164. cycles_per_sec = vmi_timer_ops.get_cycle_frequency();
  165. cycles_per_jiffy = cycles_per_sec;
  166. (void)do_div(cycles_per_jiffy, HZ);
  167. cycles_per_alarm = cycles_per_sec;
  168. (void)do_div(cycles_per_alarm, alarm_hz);
  169. cycles_per_msec = cycles_per_sec;
  170. (void)do_div(cycles_per_msec, 1000);
  171. cpu_khz = cycles_per_msec;
  172. printk(KERN_WARNING "VMI timer cycles/sec = %llu ; cycles/jiffy = %llu ;"
  173. "cycles/alarm = %llu\n", cycles_per_sec, cycles_per_jiffy,
  174. cycles_per_alarm);
  175. clocksource_vmi.mult = clocksource_khz2mult(cycles_per_msec,
  176. clocksource_vmi.shift);
  177. if (clocksource_register(&clocksource_vmi))
  178. printk(KERN_WARNING "Error registering VMITIME clocksource.");
  179. /* Disable PIT. */
  180. outb_p(0x3a, PIT_MODE); /* binary, mode 5, LSB/MSB, ch 0 */
  181. /* schedule the alarm. do this in phase with process_times_cycles_accounted_cpu
  182. * reduce the latency calling update_process_times. */
  183. vmi_timer_ops.set_alarm(
  184. VMI_ALARM_WIRED_IRQ0 | VMI_ALARM_IS_PERIODIC | VMI_CYCLES_AVAILABLE,
  185. per_cpu(process_times_cycles_accounted_cpu, 0) + cycles_per_alarm,
  186. cycles_per_alarm);
  187. local_irq_restore(flags);
  188. }
  189. #ifdef CONFIG_X86_LOCAL_APIC
/* Switch the boot cpu's alarm from IRQ0 wiring to the local-APIC LVTT,
 * once the APIC has been set up.  Must run with interrupts disabled so
 * no alarm fires between cancel and re-arm. */
void __init vmi_timer_setup_boot_alarm(void)
{
	local_irq_disable();

	/* Route the interrupt to the correct vector. */
	apic_write_around(APIC_LVTT, LOCAL_TIMER_VECTOR);

	/* Cancel the IRQ0 wired alarm, and setup the LVTT alarm. */
	vmi_timer_ops.cancel_alarm(VMI_CYCLES_AVAILABLE);
	vmi_timer_ops.set_alarm(
		VMI_ALARM_WIRED_LVTT | VMI_ALARM_IS_PERIODIC | VMI_CYCLES_AVAILABLE,
		per_cpu(process_times_cycles_accounted_cpu, 0) + cycles_per_alarm,
		cycles_per_alarm);
	local_irq_enable();
}
/* Initialize the time accounting variables for an AP on an SMP system.
 * Also, set the local alarm for the AP.  The AP's process-times baseline
 * is seeded here so the periodic alarm fires in phase with it. */
void __init vmi_timer_setup_secondary_alarm(void)
{
	int cpu = smp_processor_id();

	/* Route the interrupt to the correct vector. */
	apic_write_around(APIC_LVTT, LOCAL_TIMER_VECTOR);

	per_cpu(process_times_cycles_accounted_cpu, cpu) = read_available_cycles();

	vmi_timer_ops.set_alarm(
		VMI_ALARM_WIRED_LVTT | VMI_ALARM_IS_PERIODIC | VMI_CYCLES_AVAILABLE,
		per_cpu(process_times_cycles_accounted_cpu, cpu) + cycles_per_alarm,
		cycles_per_alarm);
}
  216. #endif
  217. /* Update system wide (real) time accounting (e.g. jiffies, xtime). */
/* Update system wide (real) time accounting (e.g. jiffies, xtime).
 *
 * Advances jiffies once for every full cycles_per_jiffy of real time
 * elapsed since real_cycles_accounted_system, then resyncs xtime from
 * the hypervisor wallclock if the hypervisor reports it changed.
 * The whole update runs under the xtime_lock write seqlock. */
static void vmi_account_real_cycles(unsigned long long cur_real_cycles)
{
	long long cycles_not_accounted;

	write_seqlock(&xtime_lock);

	cycles_not_accounted = cur_real_cycles - real_cycles_accounted_system;
	while (cycles_not_accounted >= cycles_per_jiffy) {
		/* system-wide jiffies and wallclock. */
		do_timer(1);

		cycles_not_accounted -= cycles_per_jiffy;
		real_cycles_accounted_system += cycles_per_jiffy;
	}

	if (vmi_timer_ops.wallclock_updated())
		update_xtime_from_wallclock();

	write_sequnlock(&xtime_lock);
}
  233. /* Update per-cpu process times. */
/* Update per-cpu process times.
 *
 * For every full jiffy of available time elapsed since this cpu's
 * process_times_cycles_accounted_cpu baseline, charge one tick to the
 * current task and take one profiling tick, advancing the baseline. */
static void vmi_account_process_times_cycles(struct pt_regs *regs, int cpu,
					     unsigned long long cur_process_times_cycles)
{
	long long cycles_not_accounted;

	cycles_not_accounted = cur_process_times_cycles -
		per_cpu(process_times_cycles_accounted_cpu, cpu);

	while (cycles_not_accounted >= cycles_per_jiffy) {
		/* Account time to the current process. This includes
		 * calling into the scheduler to decrement the timeslice
		 * and possibly reschedule.*/
		update_process_times(user_mode(regs));
		/* XXX handle /proc/profile multiplier.  */
		profile_tick(CPU_PROFILING);

		cycles_not_accounted -= cycles_per_jiffy;
		per_cpu(process_times_cycles_accounted_cpu, cpu) += cycles_per_jiffy;
	}
}
  251. #ifdef CONFIG_NO_IDLE_HZ
  252. /* Update per-cpu idle times. Used when a no-hz halt is ended. */
/* Update per-cpu idle times.  Used when a no-hz halt is ended.
 *
 * Converts the available time elapsed while this cpu was nohz-idle into
 * whole jiffies (advancing the per-cpu process-times baseline) and
 * charges them to the idle task in a single account_steal_time() call,
 * instead of ticking once per jiffy. */
static void vmi_account_no_hz_idle_cycles(int cpu,
					  unsigned long long cur_process_times_cycles)
{
	long long cycles_not_accounted;
	unsigned long no_idle_hz_jiffies = 0;

	cycles_not_accounted = cur_process_times_cycles -
		per_cpu(process_times_cycles_accounted_cpu, cpu);

	while (cycles_not_accounted >= cycles_per_jiffy) {
		no_idle_hz_jiffies++;
		cycles_not_accounted -= cycles_per_jiffy;
		per_cpu(process_times_cycles_accounted_cpu, cpu) += cycles_per_jiffy;
	}
	/* Account time to the idle process. */
	account_steal_time(idle_task(cpu), jiffies_to_cputime(no_idle_hz_jiffies));
}
  268. #endif
  269. /* Update per-cpu stolen time. */
/* Update per-cpu stolen time.
 *
 * Stolen time is derived as (real - available) cycles; whole jiffies of
 * newly stolen time beyond stolen_cycles_accounted_cpu are charged to
 * cpustat->steal.  Bails out if real < available (counters read at
 * slightly different moments could make the difference go negative). */
static void vmi_account_stolen_cycles(int cpu,
				      unsigned long long cur_real_cycles,
				      unsigned long long cur_avail_cycles)
{
	long long stolen_cycles_not_accounted;
	unsigned long stolen_jiffies = 0;

	if (cur_real_cycles < cur_avail_cycles)
		return;

	stolen_cycles_not_accounted = cur_real_cycles - cur_avail_cycles -
		per_cpu(stolen_cycles_accounted_cpu, cpu);

	while (stolen_cycles_not_accounted >= cycles_per_jiffy) {
		stolen_jiffies++;
		stolen_cycles_not_accounted -= cycles_per_jiffy;
		per_cpu(stolen_cycles_accounted_cpu, cpu) += cycles_per_jiffy;
	}
	/* HACK: pass NULL to force time onto cpustat->steal. */
	account_steal_time(NULL, jiffies_to_cputime(stolen_jiffies));
}
  288. /* Body of either IRQ0 interrupt handler (UP no local-APIC) or
  289. * local-APIC LVTT interrupt handler (UP & local-APIC or SMP). */
  290. static void vmi_local_timer_interrupt(int cpu)
  291. {
  292. unsigned long long cur_real_cycles, cur_process_times_cycles;
  293. cur_real_cycles = read_real_cycles();
  294. cur_process_times_cycles = read_available_cycles();
  295. /* Update system wide (real) time state (xtime, jiffies). */
  296. vmi_account_real_cycles(cur_real_cycles);
  297. /* Update per-cpu process times. */
  298. vmi_account_process_times_cycles(get_irq_regs(), cpu, cur_process_times_cycles);
  299. /* Update time stolen from this cpu by the hypervisor. */
  300. vmi_account_stolen_cycles(cpu, cur_real_cycles, cur_process_times_cycles);
  301. }
  302. #ifdef CONFIG_NO_IDLE_HZ
/* Must be called only from idle loop, with interrupts disabled.
 *
 * Attempt to enter no-hz idle: mark this cpu in nohz_cpu_mask, and if
 * no RCU work, softirqs, or imminent timers prevent it, cancel the
 * periodic alarm and set a one-shot real-time alarm for the next timer
 * event.  Returns 1 if the cpu actually went nohz idle, 0 otherwise
 * (in which case the one-shot alarm is effectively for "now"). */
int vmi_stop_hz_timer(void)
{
	/* Note that cpu_set, cpu_clear are (SMP safe) atomic on x86. */

	unsigned long seq, next;
	unsigned long long real_cycles_expiry;
	int cpu = smp_processor_id();
	int idle;

	BUG_ON(!irqs_disabled());
	if (sysctl_hz_timer != 0)
		return 0;

	cpu_set(cpu, nohz_cpu_mask);
	/* Barrier so the mask update is visible before re-checking work;
	 * pairs with whoever inspects nohz_cpu_mask. */
	smp_mb();
	if (rcu_needs_cpu(cpu) || local_softirq_pending() ||
	    (next = next_timer_interrupt(), time_before_eq(next, jiffies))) {
		cpu_clear(cpu, nohz_cpu_mask);
		next = jiffies;
		idle = 0;
	} else
		idle = 1;

	/* Convert jiffies to the real cycle counter.  Retry if xtime
	 * was concurrently updated under xtime_lock. */
	do {
		seq = read_seqbegin(&xtime_lock);
		real_cycles_expiry = real_cycles_accounted_system +
			(long)(next - jiffies) * cycles_per_jiffy;
	} while (read_seqretry(&xtime_lock, seq));

	/* This cpu is going idle. Disable the periodic alarm. */
	if (idle) {
		vmi_timer_ops.cancel_alarm(VMI_CYCLES_AVAILABLE);
		per_cpu(idle_start_jiffies, cpu) = jiffies;
	}

	/* Set the real time alarm to expire at the next event. */
	vmi_timer_ops.set_alarm(
		      VMI_ALARM_WIRING | VMI_ALARM_IS_ONESHOT | VMI_CYCLES_REAL,
		      real_cycles_expiry, 0);

	return idle;
}
/* Leave no-hz idle: record stats, re-arm the periodic available-time
 * alarm in phase with this cpu's accounting baseline, and clear this
 * cpu from nohz_cpu_mask. */
static void vmi_reenable_hz_timer(int cpu)
{
	/* For /proc/vmi/info idle_hz stat. */
	per_cpu(vmi_idle_no_hz_jiffies, cpu) += jiffies - per_cpu(idle_start_jiffies, cpu);
	per_cpu(vmi_idle_no_hz_irqs, cpu)++;

	/* Don't bother explicitly cancelling the one-shot alarm -- at
	 * worse we will receive a spurious timer interrupt. */
	vmi_timer_ops.set_alarm(
		      VMI_ALARM_WIRING | VMI_ALARM_IS_PERIODIC | VMI_CYCLES_AVAILABLE,
		      per_cpu(process_times_cycles_accounted_cpu, cpu) + cycles_per_alarm,
		      cycles_per_alarm);
	/* Indicate this cpu is no longer nohz idle. */
	cpu_clear(cpu, nohz_cpu_mask);
}
/* Called from interrupt handlers when (local) HZ timer is disabled.
 *
 * Catches up all time accounting for the nohz-idle period (system-wide
 * time, idle time, stolen time) and then re-enables the periodic HZ
 * alarm.  Interrupts must be disabled. */
void vmi_account_time_restart_hz_timer(void)
{
	unsigned long long cur_real_cycles, cur_process_times_cycles;
	int cpu = smp_processor_id();

	BUG_ON(!irqs_disabled());

	/* Account the time during which the HZ timer was disabled. */
	cur_real_cycles = read_real_cycles();
	cur_process_times_cycles = read_available_cycles();

	/* Update system wide (real) time state (xtime, jiffies). */
	vmi_account_real_cycles(cur_real_cycles);

	/* Update per-cpu idle times. */
	vmi_account_no_hz_idle_cycles(cpu, cur_process_times_cycles);

	/* Update time stolen from this cpu by the hypervisor. */
	vmi_account_stolen_cycles(cpu, cur_real_cycles, cur_process_times_cycles);

	/* Reenable the hz timer. */
	vmi_reenable_hz_timer(cpu);
}
  372. #endif /* CONFIG_NO_IDLE_HZ */
  373. /* UP (and no local-APIC) VMI-timer alarm interrupt handler.
  374. * Handler for IRQ0. Not used when SMP or X86_LOCAL_APIC after
  375. * APIC setup and setup_boot_vmi_alarm() is called. */
  376. static irqreturn_t vmi_timer_interrupt(int irq, void *dev_id)
  377. {
  378. vmi_local_timer_interrupt(smp_processor_id());
  379. return IRQ_HANDLED;
  380. }
  381. #ifdef CONFIG_X86_LOCAL_APIC
/* SMP VMI-timer alarm interrupt handler. Handler for LVTT vector.
 * Also used in UP when CONFIG_X86_LOCAL_APIC.
 * The wrapper code is from arch/i386/kernel/apic.c#smp_apic_timer_interrupt.
 * Order matters: stash regs, count the irq, ACK early, bracket the real
 * work with irq_enter()/irq_exit(), then restore the saved regs. */
void smp_apic_vmi_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	int cpu = smp_processor_id();

	/*
	 * the NMI deadlock-detector uses this.
	 */
	per_cpu(irq_stat,cpu).apic_timer_irqs++;

	/*
	 * NOTE! We'd better ACK the irq immediately,
	 * because timer handling can be slow.
	 */
	ack_APIC_irq();

	/*
	 * update_process_times() expects us to have done irq_enter().
	 * Besides, if we don't timer interrupts ignore the global
	 * interrupt lock, which is the WrongThing (tm) to do.
	 */
	irq_enter();
	vmi_local_timer_interrupt(cpu);
	irq_exit();
	set_irq_regs(old_regs);
}
  408. #endif /* CONFIG_X86_LOCAL_APIC */