time.c 6.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261
  1. /******************************************************************************
  2. * arch/ia64/xen/time.c
  3. *
  4. * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
  5. * VA Linux Systems Japan K.K.
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software
  19. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  20. *
  21. */
  22. #include <linux/delay.h>
  23. #include <linux/kernel_stat.h>
  24. #include <linux/posix-timers.h>
  25. #include <linux/irq.h>
  26. #include <linux/clocksource.h>
  27. #include <asm/timex.h>
  28. #include <asm/xen/hypervisor.h>
  29. #include <xen/interface/vcpu.h>
  30. #include "../kernel/fsyscall_gtod_data.h"
/* Per-CPU runstate area shared with the hypervisor (registered below). */
DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
/* Stolen time (RUNSTATE_runnable + RUNSTATE_offline, in ns) already accounted. */
DEFINE_PER_CPU(unsigned long, processed_stolen_time);
/* Blocked time (RUNSTATE_blocked, in ns) already accounted. */
DEFINE_PER_CPU(unsigned long, processed_blocked_time);
/* taken from i386/kernel/time-xen.c */
/*
 * Register @cpu's runstate area with the hypervisor and prime the
 * per-CPU processed_{blocked,stolen}_time baselines from the current
 * snapshot, so subsequent steal accounting only sees new time.
 */
static void xen_init_missing_ticks_accounting(int cpu)
{
	struct vcpu_register_runstate_memory_area area;
	struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
	int rc;

	memset(runstate, 0, sizeof(*runstate));

	area.addr.v = runstate;
	rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu,
				&area);
	/* -ENOSYS is tolerated — presumably hypervisors lacking this op; verify. */
	WARN_ON(rc && rc != -ENOSYS);

	per_cpu(processed_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
	per_cpu(processed_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
					    + runstate->time[RUNSTATE_offline];
}
/*
 * Runstate accounting
 */
/* stolen from arch/x86/xen/time.c */
/*
 * Copy the current CPU's runstate info into *res without tearing:
 * retry the copy until state_entry_time reads the same before and
 * after, proving the hypervisor did not update the area mid-copy.
 * Must run with preemption disabled (per-CPU data, see BUG_ON).
 */
static void get_runstate_snapshot(struct vcpu_runstate_info *res)
{
	u64 state_time;
	struct vcpu_runstate_info *state;

	BUG_ON(preemptible());

	state = &__get_cpu_var(runstate);

	/*
	 * The runstate info is always updated by the hypervisor on
	 * the current CPU, so there's no need to use anything
	 * stronger than a compiler barrier when fetching it.
	 */
	do {
		state_time = state->state_entry_time;
		rmb();
		*res = *state;
		rmb();
	} while (state->state_entry_time != state_time);
}
  71. #define NS_PER_TICK (1000000000LL/HZ)
  72. static unsigned long
  73. consider_steal_time(unsigned long new_itm)
  74. {
  75. unsigned long stolen, blocked;
  76. unsigned long delta_itm = 0, stolentick = 0;
  77. int cpu = smp_processor_id();
  78. struct vcpu_runstate_info runstate;
  79. struct task_struct *p = current;
  80. get_runstate_snapshot(&runstate);
  81. /*
  82. * Check for vcpu migration effect
  83. * In this case, itc value is reversed.
  84. * This causes huge stolen value.
  85. * This function just checks and reject this effect.
  86. */
  87. if (!time_after_eq(runstate.time[RUNSTATE_blocked],
  88. per_cpu(processed_blocked_time, cpu)))
  89. blocked = 0;
  90. if (!time_after_eq(runstate.time[RUNSTATE_runnable] +
  91. runstate.time[RUNSTATE_offline],
  92. per_cpu(processed_stolen_time, cpu)))
  93. stolen = 0;
  94. if (!time_after(delta_itm + new_itm, ia64_get_itc()))
  95. stolentick = ia64_get_itc() - new_itm;
  96. do_div(stolentick, NS_PER_TICK);
  97. stolentick++;
  98. do_div(stolen, NS_PER_TICK);
  99. if (stolen > stolentick)
  100. stolen = stolentick;
  101. stolentick -= stolen;
  102. do_div(blocked, NS_PER_TICK);
  103. if (blocked > stolentick)
  104. blocked = stolentick;
  105. if (stolen > 0 || blocked > 0) {
  106. account_steal_ticks(stolen);
  107. account_idle_ticks(blocked);
  108. run_local_timers();
  109. if (rcu_pending(cpu))
  110. rcu_check_callbacks(cpu, user_mode(get_irq_regs()));
  111. scheduler_tick();
  112. run_posix_cpu_timers(p);
  113. delta_itm += local_cpu_data->itm_delta * (stolen + blocked);
  114. if (cpu == time_keeper_id) {
  115. write_seqlock(&xtime_lock);
  116. do_timer(stolen + blocked);
  117. local_cpu_data->itm_next = delta_itm + new_itm;
  118. write_sequnlock(&xtime_lock);
  119. } else {
  120. local_cpu_data->itm_next = delta_itm + new_itm;
  121. }
  122. per_cpu(processed_stolen_time, cpu) += NS_PER_TICK * stolen;
  123. per_cpu(processed_blocked_time, cpu) += NS_PER_TICK * blocked;
  124. }
  125. return delta_itm;
  126. }
  127. static int xen_do_steal_accounting(unsigned long *new_itm)
  128. {
  129. unsigned long delta_itm;
  130. delta_itm = consider_steal_time(*new_itm);
  131. *new_itm += delta_itm;
  132. if (time_after(*new_itm, ia64_get_itc()) && delta_itm)
  133. return 1;
  134. return 0;
  135. }
/*
 * Atomically reset itc_jitter_data.itc_lastcycle to 0 (clocksource
 * resume hook).  The cmpxchg loop retries until the stored value is
 * the one we read, so a concurrent update is never silently clobbered
 * with a stale basis.
 */
static void xen_itc_jitter_data_reset(void)
{
	u64 lcycle, ret;

	do {
		lcycle = itc_jitter_data.itc_lastcycle;
		ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, 0);
	} while (unlikely(ret != lcycle));
}
/* based on xen_sched_clock() in arch/x86/xen/time.c. */
/*
 * This relies on HAVE_UNSTABLE_SCHED_CLOCK.  If that cannot be
 * defined, similar compensation logic would have to be implemented
 * here instead.
 */
/*
 * Xen sched_clock implementation.  Returns the number of unstolen
 * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED
 * states.
 */
static unsigned long long xen_sched_clock(void)
{
	struct vcpu_runstate_info runstate;

	unsigned long long now;
	unsigned long long offset;
	unsigned long long ret;

	/*
	 * Ideally sched_clock should be called on a per-cpu basis
	 * anyway, so preempt should already be disabled, but that's
	 * not current practice at the moment.
	 */
	preempt_disable();

	/*
	 * Both ia64_native_sched_clock() and xen's runstate are
	 * based on AR.ITC, so the difference between them is
	 * meaningful.
	 */
	now = ia64_native_sched_clock();
	get_runstate_snapshot(&runstate);

	WARN_ON(runstate.state != RUNSTATE_running);

	/* Running time since the hypervisor last stamped the snapshot. */
	offset = 0;
	if (now > runstate.state_entry_time)
		offset = now - runstate.state_entry_time;

	/* Total RUNNING + BLOCKED time, including the current stint. */
	ret = runstate.time[RUNSTATE_blocked] +
	      runstate.time[RUNSTATE_running] +
	      offset;

	preempt_enable();

	return ret;
}
/*
 * Paravirt time operations installed when running on Xen.
 * NOTE(review): __initdata implies the table is consumed (copied)
 * during boot before init memory is freed — confirm in the caller.
 */
struct pv_time_ops xen_time_ops __initdata = {
	.init_missing_ticks_accounting	= xen_init_missing_ticks_accounting,
	.do_steal_accounting		= xen_do_steal_accounting,
	.clocksource_resume		= xen_itc_jitter_data_reset,
	.sched_clock			= xen_sched_clock,
};
/* Called after suspend, to resume time. */
static void xen_local_tick_resume(void)
{
	/* Just trigger a tick. */
	ia64_cpu_local_tick();
	/* Avoid a spurious soft-lockup report after the long pause. */
	touch_softlockup_watchdog();
}
/*
 * Resume timekeeping on the local CPU after suspend: restart the
 * local tick, then re-register runstate areas and reset the
 * stolen/blocked baselines for every online CPU.
 */
void
xen_timer_resume(void)
{
	unsigned int cpu;

	xen_local_tick_resume();

	for_each_online_cpu(cpu)
		xen_init_missing_ticks_accounting(cpu);
}
/*
 * SMP callback: resume the tick and re-prime runstate accounting on
 * whichever CPU this runs on.  @unused is the smp_call_function()
 * info argument (always NULL here).
 */
static void ia64_cpu_local_tick_fn(void *unused)
{
	xen_local_tick_resume();
	xen_init_missing_ticks_accounting(smp_processor_id());
}
/*
 * Resume timers on the application processors: smp_call_function()
 * runs the callback on every online CPU except the calling one, and
 * the final argument (1) waits for all of them to finish.
 */
void
xen_timer_resume_on_aps(void)
{
	smp_call_function(&ia64_cpu_local_tick_fn, NULL, 1);
}