/*
 * linux/arch/ia64/kernel/time.c
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 * Copyright (C) 1999-2000 VA Linux Systems
 * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
 */

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/timex.h>
#include <linux/timekeeper_internal.h>
#include <linux/platform_device.h>

#include <asm/machvec.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/paravirt.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/sections.h>

#include "fsyscall_gtod_data.h"

static cycle_t itc_get_cycles(struct clocksource *cs);

struct fsyscall_gtod_data_t fsyscall_gtod_data;

struct itc_jitter_data_t itc_jitter_data;

volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */

#ifdef CONFIG_IA64_DEBUG_IRQ

unsigned long last_cli_ip;
EXPORT_SYMBOL(last_cli_ip);

#endif

#ifdef CONFIG_PARAVIRT
/* We need to define a real function for sched_clock, to override the
   weak default version */
unsigned long long sched_clock(void)
{
        return paravirt_sched_clock();
}
#endif

#ifdef CONFIG_PARAVIRT
static void
paravirt_clocksource_resume(struct clocksource *cs)
{
        if (pv_time_ops.clocksource_resume)
                pv_time_ops.clocksource_resume();
}
#endif

static struct clocksource clocksource_itc = {
        .name           = "itc",
        .rating         = 350,
        .read           = itc_get_cycles,
        .mask           = CLOCKSOURCE_MASK(64),
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
#ifdef CONFIG_PARAVIRT
        .resume         = paravirt_clocksource_resume,
#endif
};
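
/*
 * A rating of 350 falls in the "desired" band (300-399) of the
 * clocksource rating scale documented in include/linux/clocksource.h,
 * so the ITC is normally preferred over lower-rated sources.  Note
 * that ia64_init_itm() below downgrades it to 50 ("unfit for real
 * use") when the ITC is known to drift.
 */
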
static struct clocksource *itc_clocksource;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE

#include <linux/kernel_stat.h>

extern cputime_t cycle_to_cputime(u64 cyc);

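/*
 * Charge the user time accumulated (in ITC cycles) in ti->ac_utime to
 * the task, then reset the accumulator.  A zero accumulator means
 * there is nothing to flush.
 */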
void vtime_account_user(struct task_struct *tsk)
{
        cputime_t delta_utime;
        struct thread_info *ti = task_thread_info(tsk);

        if (ti->ac_utime) {
                delta_utime = cycle_to_cputime(ti->ac_utime);
                account_user_time(tsk, delta_utime, delta_utime);
                ti->ac_utime = 0;
        }
}

/*
 * Called from the context switch with interrupts disabled, to charge all
 * accumulated times to the current process, and to prepare accounting on
 * the next process.
 */
void arch_vtime_task_switch(struct task_struct *prev)
{
        struct thread_info *pi = task_thread_info(prev);
        struct thread_info *ni = task_thread_info(current);

        pi->ac_stamp = ni->ac_stamp;
        ni->ac_stime = ni->ac_utime = 0;
}

/*
 * Account time for a transition between system, hard irq or soft irq state.
 * Note that this function is called with interrupts disabled (see the
 * WARN_ON_ONCE() below).
 */
static cputime_t vtime_delta(struct task_struct *tsk)
{
        struct thread_info *ti = task_thread_info(tsk);
        cputime_t delta_stime;
        __u64 now;

        WARN_ON_ONCE(!irqs_disabled());

        now = ia64_get_itc();

        delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
        ti->ac_stime = 0;
        ti->ac_stamp = now;

        return delta_stime;
}

void vtime_account_system(struct task_struct *tsk)
{
        cputime_t delta = vtime_delta(tsk);

        account_system_time(tsk, 0, delta, delta);
}
EXPORT_SYMBOL_GPL(vtime_account_system);

void vtime_account_idle(struct task_struct *tsk)
{
        account_idle_time(vtime_delta(tsk));
}

#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

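/*
 * Per-CPU timer tick handler.  On each tick it charges process time,
 * lets the time-keeping CPU advance xtime, and then reprograms the
 * interval timer match register (ITM) for the next tick, skipping
 * ticks if we are already too close to the next deadline.
 */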
static irqreturn_t
timer_interrupt (int irq, void *dev_id)
{
        unsigned long new_itm;

        if (cpu_is_offline(smp_processor_id())) {
                return IRQ_HANDLED;
        }

        platform_timer_interrupt(irq, dev_id);

        new_itm = local_cpu_data->itm_next;

        if (!time_after(ia64_get_itc(), new_itm))
                printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
                       ia64_get_itc(), new_itm);

        profile_tick(CPU_PROFILING);

        if (paravirt_do_steal_accounting(&new_itm))
                goto skip_process_time_accounting;

        while (1) {
                update_process_times(user_mode(get_irq_regs()));

                new_itm += local_cpu_data->itm_delta;

                if (smp_processor_id() == time_keeper_id)
                        xtime_update(1);

                local_cpu_data->itm_next = new_itm;

                if (time_after(new_itm, ia64_get_itc()))
                        break;

                /*
                 * Allow IPIs to interrupt the timer loop.
                 */
                local_irq_enable();
                local_irq_disable();
        }

skip_process_time_accounting:

        do {
                /*
                 * If we're too close to the next clock tick for
                 * comfort, we increase the safety margin by
                 * intentionally dropping the next tick(s).  We do NOT
                 * update itm.next because that would force us to call
                 * xtime_update() which in turn would let our clock run
                 * too fast (with the potentially devastating effect
                 * of losing monotonicity of time).
                 */
                while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
                        new_itm += local_cpu_data->itm_delta;
                ia64_set_itm(new_itm);
                /* double check, in case we got hit by a (slow) PMI: */
        } while (time_after_eq(ia64_get_itc(), new_itm));
        return IRQ_HANDLED;
}

/*
 * Encapsulate access to the itm structure for SMP.
 */
void
ia64_cpu_local_tick (void)
{
        int cpu = smp_processor_id();
        unsigned long shift = 0, delta;

        /* arrange for the cycle counter to generate a timer interrupt: */
        ia64_set_itv(IA64_TIMER_VECTOR);

        delta = local_cpu_data->itm_delta;

        /*
         * Stagger the timer tick for each CPU so they don't occur all at (almost) the
         * same time:
         */
        if (cpu) {
                unsigned long hi = 1UL << ia64_fls(cpu);
                shift = (2*(cpu - hi) + 1) * delta/hi/2;
        }

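        /*
         * For example, with cpu == 5: ia64_fls(5) == 2, so hi == 4 and
         * shift == (2*(5-4) + 1) * delta/4/2 == 3*delta/8.  CPUs 4..7
         * thus fire at odd multiples of delta/8, spreading the ticks
         * of each power-of-two group evenly across one tick period.
         */
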
        local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
        ia64_set_itm(local_cpu_data->itm_next);
}

static int nojitter;

static int __init nojitter_setup(char *str)
{
        nojitter = 1;
        printk("Jitter checking for ITC timers disabled\n");
        return 1;
}

__setup("nojitter", nojitter_setup);

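/*
 * Usage note: passing "nojitter" on the kernel command line skips the
 * cmpxchg-based jitter compensation in itc_get_cycles() below; see the
 * comment in ia64_init_itm() for the trade-off involved.
 */
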
void ia64_init_itm(void)
{
        unsigned long platform_base_freq, itc_freq;
        struct pal_freq_ratio itc_ratio, proc_ratio;
        long status, platform_base_drift, itc_drift;

        /*
         * According to SAL v2.6, we need to use a SAL call to determine the platform base
         * frequency and then a PAL call to determine the frequency ratio between the ITC
         * and the base frequency.
         */
        status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
                                    &platform_base_freq, &platform_base_drift);
        if (status != 0) {
                printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status));
        } else {
                status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio);
                if (status != 0)
                        printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status);
        }
        if (status != 0) {
                /* invent "random" values */
                printk(KERN_ERR
                       "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
                platform_base_freq = 100000000;
                platform_base_drift = -1;	/* no drift info */
                itc_ratio.num = 3;
                itc_ratio.den = 1;
        }
        if (platform_base_freq < 40000000) {
                printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
                       platform_base_freq);
                platform_base_freq = 75000000;
                platform_base_drift = -1;
        }
        if (!proc_ratio.den)
                proc_ratio.den = 1;	/* avoid division by zero */
        if (!itc_ratio.den)
                itc_ratio.den = 1;	/* avoid division by zero */

        itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;

        local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;

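        /*
         * For example, with the fallback values above (100 MHz base,
         * 3/1 ITC ratio) itc_freq is 300 MHz; assuming HZ == 250,
         * itm_delta is (300000000 + 125) / 250 == 1200000 ITC cycles
         * per tick (the + HZ/2 term rounds to the nearest cycle).
         */
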
        printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
               "ITC freq=%lu.%03luMHz", smp_processor_id(),
               platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
               itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);

        if (platform_base_drift != -1) {
                itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
                printk("+/-%ldppm\n", itc_drift);
        } else {
                itc_drift = -1;
                printk("\n");
        }

        local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
        local_cpu_data->itc_freq = itc_freq;
        local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
        local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
                                        + itc_freq/2)/itc_freq;

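        /*
         * nsec_per_cyc is a fixed-point value: nanoseconds per ITC
         * cycle scaled by 2^IA64_NSEC_PER_CYC_SHIFT so that later
         * conversions can use integer math.  E.g. at 300 MHz one cycle
         * is 10/3 ns, so nsec_per_cyc is roughly
         * 3.333 * 2^IA64_NSEC_PER_CYC_SHIFT, rounded to the nearest
         * integer by the + itc_freq/2 term.
         */
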
        if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
#ifdef CONFIG_SMP
                /* On IA64 in an SMP configuration ITCs are never accurately synchronized.
                 * Jitter compensation requires a cmpxchg which may limit
                 * the scalability of the syscalls for retrieving time.
                 * The ITC synchronization is usually accurate to within a few
                 * ITC ticks, but this is not guaranteed.  If you need to improve
                 * timer performance in SMP situations, then boot the kernel with the
                 * "nojitter" option.  However, doing so may result in time fluctuating (maybe
                 * even going backward) if the ITC offsets between the individual CPUs
                 * are too large.
                 */
                if (!nojitter)
                        itc_jitter_data.itc_jitter = 1;
#endif
        } else
                /*
                 * The ITC is drifty and we have not synchronized the ITCs in
                 * smpboot.c.  ITC values may fluctuate significantly between
                 * processors.  The clock should not be used for hrtimers.
                 * Mark the ITC as only useful for boot and testing.
                 *
                 * Note that jitter compensation is off!  There is no point in
                 * compensating, since the offsets between the ITCs may be
                 * large and change over time.
                 *
                 * The only way to fix this would be to repeatedly sync the
                 * ITCs.  Until that time we have to avoid the ITC.
                 */
                clocksource_itc.rating = 50;

        paravirt_init_missing_ticks_accounting(smp_processor_id());

        /* avoid soft lockup message when a CPU is unplugged and then plugged back in */
        touch_softlockup_watchdog();

        /* Setup the CPU local timer tick */
        ia64_cpu_local_tick();

        if (!itc_clocksource) {
                clocksource_register_hz(&clocksource_itc,
                                        local_cpu_data->itc_freq);
                itc_clocksource = &clocksource_itc;
        }
}

static cycle_t itc_get_cycles(struct clocksource *cs)
{
        unsigned long lcycle, now, ret;

        if (!itc_jitter_data.itc_jitter)
                return get_cycles();

        lcycle = itc_jitter_data.itc_lastcycle;
        now = get_cycles();
        if (lcycle && time_after(lcycle, now))
                return lcycle;

        /*
         * Keep track of the last timer value returned.  In an SMP
         * environment we may lose the cmpxchg race; in that case
         * cmpxchg returns the (newer) value that the winner stored,
         * so return that value instead of our own.
         */
        ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now);
        if (unlikely(ret != lcycle))
                return ret;

        return now;
}

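/*
 * Illustrative race (the values are hypothetical): CPUs A and B both
 * read itc_lastcycle == 100.  A's ITC reads 105, B's reads 107.  B
 * wins the cmpxchg and stores 107; A's cmpxchg then fails and returns
 * 107, which A passes on to its caller.  Readers therefore never see
 * time move backward even though A's local ITC lagged behind B's.
 */
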
static struct irqaction timer_irqaction = {
        .handler =      timer_interrupt,
        .flags =        IRQF_DISABLED | IRQF_IRQPOLL,
        .name =         "timer"
};

static struct platform_device rtc_efi_dev = {
        .name = "rtc-efi",
        .id = -1,
};

static int __init rtc_init(void)
{
        if (platform_device_register(&rtc_efi_dev) < 0)
                printk(KERN_ERR "unable to register rtc device...\n");

        /* not necessarily an error */
        return 0;
}
module_init(rtc_init);

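/*
 * Registering the "rtc-efi" platform device lets the generic rtc-efi
 * driver (drivers/rtc/rtc-efi.c) bind to it and expose the EFI
 * real-time clock through the RTC class (e.g. as /dev/rtc0); an id of
 * -1 indicates there is only one such device.
 */
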
void read_persistent_clock(struct timespec *ts)
{
        efi_gettimeofday(ts);
}

void __init
time_init (void)
{
        register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
        ia64_init_itm();
}

/*
 * Generic udelay assumes that if preemption is allowed and the thread
 * migrates to another CPU, that the ITC values are synchronized across
 * all CPUs.
 */
static void
ia64_itc_udelay (unsigned long usecs)
{
        unsigned long start = ia64_get_itc();
        unsigned long end = start + usecs*local_cpu_data->cyc_per_usec;

        while (time_before(ia64_get_itc(), end))
                cpu_relax();
}

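/*
 * For example, at a 300 MHz ITC (cyc_per_usec == 300) a udelay(10)
 * spins until roughly 3000 ITC cycles have elapsed.  cpu_relax() is a
 * hint to the CPU that we are in a busy-wait loop.
 */
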
void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay;

void
udelay (unsigned long usecs)
{
        (*ia64_udelay)(usecs);
}
EXPORT_SYMBOL(udelay);

/* IA64 doesn't cache the timezone */
void update_vsyscall_tz(void)
{
}

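/*
 * fsyscall_gtod_data is read lock-free by the light-weight syscall
 * path (see fsyscall_gtod_data.h).  The writer below brackets its
 * updates with write_seqcount_begin/end; a C reader would follow the
 * usual seqcount pattern, sketched here (the real reader lives in
 * hand-written assembly in fsys.S, so this is illustrative only):
 *
 *	unsigned seq;
 *	do {
 *		seq = read_seqcount_begin(&fsyscall_gtod_data.seq);
 *		... copy the fields needed ...
 *	} while (read_seqcount_retry(&fsyscall_gtod_data.seq, seq));
 */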
void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
                         struct clocksource *c, u32 mult)
{
        write_seqcount_begin(&fsyscall_gtod_data.seq);

        /* copy fsyscall clock data */
        fsyscall_gtod_data.clk_mask = c->mask;
        fsyscall_gtod_data.clk_mult = mult;
        fsyscall_gtod_data.clk_shift = c->shift;
        fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio;
        fsyscall_gtod_data.clk_cycle_last = c->cycle_last;

        /* copy kernel time structures */
        fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
        fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
        fsyscall_gtod_data.monotonic_time.tv_sec = wtm->tv_sec
                                                        + wall->tv_sec;
        fsyscall_gtod_data.monotonic_time.tv_nsec = wtm->tv_nsec
                                                        + wall->tv_nsec;

        /* normalize */
        while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
                fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
                fsyscall_gtod_data.monotonic_time.tv_sec++;
        }

        write_seqcount_end(&fsyscall_gtod_data.seq);
}