/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 * measurement at boot time. (for iSeries, we calibrate the timebase
 * against the Titan chip's clock.)
 * - for astronomical applications: add a new function to get
 * non ambiguous timestamps even around leap seconds. This needs
 * a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/cputime.h>
#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h>
#endif

/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/clocksource.h>
static cycle_t rtc_read(void);
static struct clocksource clocksource_rtc = {
        .name         = "rtc",
        .rating       = 400,
        .flags        = CLOCK_SOURCE_IS_CONTINUOUS,
        .mask         = CLOCKSOURCE_MASK(64),
        .shift        = 22,
        .mult         = 0,      /* To be filled in */
        .read         = rtc_read,
};

static cycle_t timebase_read(void);
static struct clocksource clocksource_timebase = {
        .name         = "timebase",
        .rating       = 400,
        .flags        = CLOCK_SOURCE_IS_CONTINUOUS,
        .mask         = CLOCKSOURCE_MASK(64),
        .shift        = 22,
        .mult         = 0,      /* To be filled in */
        .read         = timebase_read,
};

#define DECREMENTER_MAX 0x7fffffff

static int decrementer_set_next_event(unsigned long evt,
                                      struct clock_event_device *dev);
static void decrementer_set_mode(enum clock_event_mode mode,
                                 struct clock_event_device *dev);

static struct clock_event_device decrementer_clockevent = {
        .name           = "decrementer",
        .rating         = 200,
        .shift          = 16,
        .mult           = 0,    /* To be filled in */
        .irq            = 0,
        .set_next_event = decrementer_set_next_event,
        .set_mode       = decrementer_set_mode,
        .features       = CLOCK_EVT_FEAT_ONESHOT,
};

struct decrementer_clock {
        struct clock_event_device event;
        u64 next_tb;
};

static DEFINE_PER_CPU(struct decrementer_clock, decrementers);
#ifdef CONFIG_PPC_ISERIES
static unsigned long __initdata iSeries_recal_titan;
static signed long __initdata iSeries_recal_tb;

/* Forward declaration is only needed for iSeries compiles */
static void __init clocksource_init(void);
#endif
#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)   (((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)   mulhwu((xsec) << 12, max)
#endif
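
/*
 * An xsec is 1/2^20 of a second, so XSEC_PER_SEC = 2^20 = 1048576.
 * Worked example: SCALE_XSEC(XSEC_PER_SEC / 2, 1000000) maps half a
 * second onto a 0..1000000 range, giving 500000; both variants above
 * compute xsec * max / 2^20.
 */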
unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);        /* for cputime_t conversions */
u64 tb_to_xs;
unsigned tb_to_us;

#define TICKLEN_SCALE   NTP_SCALE_SHIFT
static u64 last_tick_len;       /* units are ns / 2^TICKLEN_SCALE */
static u64 ticklen_to_xs;       /* 0.64 fraction */

/* If last_tick_len corresponds to about 1/HZ seconds, then
   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
#define TICKLEN_SHIFT   (63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
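
/*
 * Worked example, assuming NTP_SCALE_SHIFT is 32 (its value at the
 * time of writing): with HZ = 100 we have SHIFT_HZ = 7, so
 * TICKLEN_SHIFT = 63 - 30 - 32 + 7 = 8.  A 10 ms tick is 10^7 ns,
 * a bit over 2^23, so last_tick_len is a bit over 2^55 and
 * last_tick_len << TICKLEN_SHIFT is a bit over 2^63, as the comment
 * above says.
 */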
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static unsigned long boot_tb __read_mostly;

static struct gettimeofday_struct do_gtod;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL(ppc_proc_freq);
unsigned long ppc_tb_freq;

static u64 tb_last_jiffy __cacheline_aligned_in_smp;
static DEFINE_PER_CPU(u64, last_jiffy);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_msec_factor;
EXPORT_SYMBOL(__cputime_msec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);
DEFINE_PER_CPU(unsigned long, cputime_last_delta);
DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);

static void calc_cputime_factors(void)
{
        struct div_result res;

        div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
        __cputime_jiffies_factor = res.result_low;
        div128_by_32(1000, 0, tb_ticks_per_sec, &res);
        __cputime_msec_factor = res.result_low;
        div128_by_32(1, 0, tb_ticks_per_sec, &res);
        __cputime_sec_factor = res.result_low;
        div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
        __cputime_clockt_factor = res.result_low;
}
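
/*
 * Each factor above is a 0.64 fixed-point fraction, so a conversion is
 * a single high multiply; e.g. the cputime_to_jiffies()-style helpers
 * in <asm/cputime.h> do essentially
 *
 *      jiffies = mulhdu(timebase_ticks, __cputime_jiffies_factor);
 *
 * which computes ticks * HZ / tb_ticks_per_sec without a division.
 */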
/*
 * Read the PURR on systems that have it, otherwise the timebase.
 */
static u64 read_purr(void)
{
        if (cpu_has_feature(CPU_FTR_PURR))
                return mfspr(SPRN_PURR);
        return mftb();
}

/*
 * Read the SPURR on systems that have it, otherwise the purr
 */
static u64 read_spurr(u64 purr)
{
        /*
         * cpus without PURR won't have a SPURR
         * We already know the former when we use this, so tell gcc
         */
        if (cpu_has_feature(CPU_FTR_PURR) && cpu_has_feature(CPU_FTR_SPURR))
                return mfspr(SPRN_SPURR);
        return purr;
}

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
void account_system_vtime(struct task_struct *tsk)
{
        u64 now, nowscaled, delta, deltascaled, sys_time;
        unsigned long flags;

        local_irq_save(flags);
        now = read_purr();
        nowscaled = read_spurr(now);
        delta = now - get_paca()->startpurr;
        deltascaled = nowscaled - get_paca()->startspurr;
        get_paca()->startpurr = now;
        get_paca()->startspurr = nowscaled;
        if (!in_interrupt()) {
                /* deltascaled includes both user and system time.
                 * Hence scale it based on the purr ratio to estimate
                 * the system time */
                sys_time = get_paca()->system_time;
                if (get_paca()->user_time)
                        deltascaled = deltascaled * sys_time /
                                (sys_time + get_paca()->user_time);
                delta += sys_time;
                get_paca()->system_time = 0;
        }
        account_system_time(tsk, 0, delta);
        account_system_time_scaled(tsk, deltascaled);
        per_cpu(cputime_last_delta, smp_processor_id()) = delta;
        per_cpu(cputime_scaled_last_delta, smp_processor_id()) = deltascaled;
        local_irq_restore(flags);
}
/*
 * Transfer the user and system times accumulated in the paca
 * by the exception entry and exit code to the generic process
 * user and system time records.
 * Must be called with interrupts disabled.
 */
void account_process_tick(struct task_struct *tsk, int user_tick)
{
        cputime_t utime, utimescaled;

        utime = get_paca()->user_time;
        get_paca()->user_time = 0;
        account_user_time(tsk, utime);

        utimescaled = cputime_to_scaled(utime);
        account_user_time_scaled(tsk, utimescaled);
}

/*
 * Stuff for accounting stolen time.
 */
struct cpu_purr_data {
        int     initialized;    /* thread is running */
        u64     tb;             /* last TB value read */
        u64     purr;           /* last PURR value read */
        u64     spurr;          /* last SPURR value read */
};

/*
 * Each entry in the cpu_purr_data array is manipulated only by its
 * "owner" cpu -- usually in the timer interrupt but also occasionally
 * in process context for cpu online.  As long as cpus do not touch
 * each others' cpu_purr_data, disabling local interrupts is
 * sufficient to serialize accesses.
 */
static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);

static void snapshot_tb_and_purr(void *data)
{
        unsigned long flags;
        struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);

        local_irq_save(flags);
        p->tb = get_tb_or_rtc();
        p->purr = mfspr(SPRN_PURR);
        wmb();
        p->initialized = 1;
        local_irq_restore(flags);
}

/*
 * Called during boot when all cpus have come up.
 */
void snapshot_timebases(void)
{
        if (!cpu_has_feature(CPU_FTR_PURR))
                return;
        on_each_cpu(snapshot_tb_and_purr, NULL, 1);
}

/*
 * Must be called with interrupts disabled.
 */
void calculate_steal_time(void)
{
        u64 tb, purr;
        s64 stolen;
        struct cpu_purr_data *pme;

        pme = &__get_cpu_var(cpu_purr_data);
        if (!pme->initialized)
                return;         /* !CPU_FTR_PURR or early in boot */
        tb = mftb();
        purr = mfspr(SPRN_PURR);
        stolen = (tb - pme->tb) - (purr - pme->purr);
        if (stolen > 0)
                account_steal_time(current, stolen);
        pme->tb = tb;
        pme->purr = purr;
}
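
/*
 * Illustration: the timebase ticks in wall time, while PURR only
 * advances when this partition's thread is actually dispatched.  If
 * the timebase moved by 1000 ticks since the last snapshot but PURR
 * moved by only 900, the remaining 100 ticks were spent running some
 * other partition and are accounted above as stolen time.
 */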
#ifdef CONFIG_PPC_SPLPAR
/*
 * Must be called before the cpu is added to the online map when
 * a cpu is being brought up at runtime.
 */
static void snapshot_purr(void)
{
        struct cpu_purr_data *pme;
        unsigned long flags;

        if (!cpu_has_feature(CPU_FTR_PURR))
                return;
        local_irq_save(flags);
        pme = &__get_cpu_var(cpu_purr_data);
        pme->tb = mftb();
        pme->purr = mfspr(SPRN_PURR);
        pme->initialized = 1;
        local_irq_restore(flags);
}

#endif /* CONFIG_PPC_SPLPAR */

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#define calculate_steal_time() do { } while (0)
#endif

#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
#define snapshot_purr()        do { } while (0)
#endif
/*
 * Called when a cpu comes up after the system has finished booting,
 * i.e. as a result of a hotplug cpu action.
 */
void snapshot_timebase(void)
{
        __get_cpu_var(last_jiffy) = get_tb_or_rtc();
        snapshot_purr();
}

void __delay(unsigned long loops)
{
        unsigned long start;
        int diff;

        if (__USE_RTC()) {
                start = get_rtcl();
                do {
                        /* the RTCL register wraps at 1000000000 */
                        diff = get_rtcl() - start;
                        if (diff < 0)
                                diff += 1000000000;
                } while (diff < loops);
        } else {
                start = get_tbl();
                while (get_tbl() - start < loops)
                        HMT_low();
                HMT_medium();
        }
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
        __delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);
/*
 * There are two copies of tb_to_xs and stamp_xsec so that no
 * lock is needed to access and use these values in
 * do_gettimeofday.  We alternate the copies and as long as a
 * reasonable time elapses between changes, there will never
 * be inconsistent values.  ntpd has a minimum of one minute
 * between updates.
 */
static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
                               u64 new_tb_to_xs)
{
        unsigned temp_idx;
        struct gettimeofday_vars *temp_varp;

        temp_idx = (do_gtod.var_idx == 0);
        temp_varp = &do_gtod.vars[temp_idx];

        temp_varp->tb_to_xs = new_tb_to_xs;
        temp_varp->tb_orig_stamp = new_tb_stamp;
        temp_varp->stamp_xsec = new_stamp_xsec;
        smp_mb();
        do_gtod.varp = temp_varp;
        do_gtod.var_idx = temp_idx;

        /*
         * tb_update_count is used to allow the userspace gettimeofday code
         * to assure itself that it sees a consistent view of the tb_to_xs and
         * stamp_xsec variables.  It reads the tb_update_count, then reads
         * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
         * the two values of tb_update_count match and are even then the
         * tb_to_xs and stamp_xsec values are consistent.  If not, then it
         * loops back and reads them again until this criterion is met.
         * We expect the caller to have done the first increment of
         * vdso_data->tb_update_count already.
         */
        vdso_data->tb_orig_stamp = new_tb_stamp;
        vdso_data->stamp_xsec = new_stamp_xsec;
        vdso_data->tb_to_xs = new_tb_to_xs;
        vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
        vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
        vdso_data->stamp_xtime = xtime;
        smp_wmb();
        ++(vdso_data->tb_update_count);
}
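
/*
 * A minimal sketch of the userspace read side described above (the
 * real code lives in the vdso; variable names here are illustrative):
 *
 *      do {
 *              seq = vdso_data->tb_update_count;
 *              smp_rmb();
 *              orig_stamp = vdso_data->tb_orig_stamp;
 *              stamp_xsec = vdso_data->stamp_xsec;
 *              tb_to_xs   = vdso_data->tb_to_xs;
 *              smp_rmb();
 *      } while (seq != vdso_data->tb_update_count || (seq & 1));
 */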
#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
        unsigned long pc = instruction_pointer(regs);

        if (in_lock_functions(pc))
                return regs->link;

        return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif
#ifdef CONFIG_PPC_ISERIES

/*
 * This function recalibrates the timebase based on the 49-bit time-of-day
 * value in the Titan chip.  The Titan is much more accurate than the value
 * returned by the service processor for the timebase frequency.
 */
static int __init iSeries_tb_recal(void)
{
        struct div_result divres;
        unsigned long titan, tb;

        /* Make sure we only run on iSeries */
        if (!firmware_has_feature(FW_FEATURE_ISERIES))
                return -ENODEV;

        tb = get_tb();
        titan = HvCallXm_loadTod();
        if (iSeries_recal_titan) {
                unsigned long tb_ticks = tb - iSeries_recal_tb;
                unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
                unsigned long new_tb_ticks_per_sec   = (tb_ticks * USEC_PER_SEC)/titan_usec;
                unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;
                long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
                char sign = '+';
                /* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
                new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;

                if (tick_diff < 0) {
                        tick_diff = -tick_diff;
                        sign = '-';
                }
                if (tick_diff) {
                        if (tick_diff < tb_ticks_per_jiffy/25) {
                                printk("Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
                                       new_tb_ticks_per_jiffy, sign, tick_diff);
                                tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
                                tb_ticks_per_sec   = new_tb_ticks_per_sec;
                                calc_cputime_factors();
                                div128_by_32(XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres);
                                do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
                                tb_to_xs = divres.result_low;
                                do_gtod.varp->tb_to_xs = tb_to_xs;
                                vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
                                vdso_data->tb_to_xs = tb_to_xs;
                        } else {
                                printk("Titan recalibrate: FAILED (difference > 4 percent)\n"
                                       "                   new tb_ticks_per_jiffy = %lu\n"
                                       "                   old tb_ticks_per_jiffy = %lu\n",
                                       new_tb_ticks_per_jiffy, tb_ticks_per_jiffy);
                        }
                }
        }
        iSeries_recal_titan = titan;
        iSeries_recal_tb = tb;

        /* Called here as now we know accurate values for the timebase */
        clocksource_init();
        return 0;
}
late_initcall(iSeries_tb_recal);

/* Called from platform early init */
void __init iSeries_time_init_early(void)
{
        iSeries_recal_tb = get_tb();
        iSeries_recal_titan = HvCallXm_loadTod();
}
#endif /* CONFIG_PPC_ISERIES */
/*
 * For iSeries shared processors, we have to let the hypervisor
 * set the hardware decrementer.  We set a virtual decrementer
 * in the lppaca and call the hypervisor if the virtual
 * decrementer is less than the current value in the hardware
 * decrementer. (almost always the new decrementer value will
 * be greater than the current hardware decrementer so the hypervisor
 * call will not be needed)
 */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs * regs)
{
        struct pt_regs *old_regs;
        struct decrementer_clock *decrementer = &__get_cpu_var(decrementers);
        struct clock_event_device *evt = &decrementer->event;
        u64 now;

        /* Ensure a positive value is written to the decrementer, or else
         * some CPUs will continue to take decrementer exceptions */
        set_dec(DECREMENTER_MAX);

#ifdef CONFIG_PPC32
        if (atomic_read(&ppc_n_lost_interrupts) != 0)
                do_IRQ(regs);
#endif

        now = get_tb_or_rtc();
        if (now < decrementer->next_tb) {
                /* not time for this event yet */
                now = decrementer->next_tb - now;
                if (now <= DECREMENTER_MAX)
                        set_dec((int)now);
                return;
        }
        old_regs = set_irq_regs(regs);
        irq_enter();

        calculate_steal_time();

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                get_lppaca()->int_dword.fields.decr_int = 0;
#endif

        if (evt->event_handler)
                evt->event_handler(evt);

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
                process_hvlpevents();
#endif

#ifdef CONFIG_PPC64
        /* collect purr register values often, for accurate calculations */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
                cu->current_tb = mfspr(SPRN_PURR);
        }
#endif

        irq_exit();
        set_irq_regs(old_regs);
}
void wakeup_decrementer(void)
{
        unsigned long ticks;

        /*
         * The timebase gets saved on sleep and restored on wakeup,
         * so all we need to do is to reset the decrementer.
         */
        ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
        if (ticks < tb_ticks_per_jiffy)
                ticks = tb_ticks_per_jiffy - ticks;
        else
                ticks = 1;
        set_dec(ticks);
}

#ifdef CONFIG_SUSPEND
void generic_suspend_disable_irqs(void)
{
        preempt_disable();

        /* Disable the decrementer, so that it doesn't interfere
         * with suspending.
         */

        set_dec(0x7fffffff);
        local_irq_disable();
        set_dec(0x7fffffff);
}

void generic_suspend_enable_irqs(void)
{
        wakeup_decrementer();

        local_irq_enable();
        preempt_enable();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
        if (ppc_md.suspend_disable_irqs)
                ppc_md.suspend_disable_irqs();
        generic_suspend_disable_irqs();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
        generic_suspend_enable_irqs();
        if (ppc_md.suspend_enable_irqs)
                ppc_md.suspend_enable_irqs();
}
#endif
#ifdef CONFIG_SMP
void __init smp_space_timers(unsigned int max_cpus)
{
        int i;
        u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);

        /* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
        previous_tb -= tb_ticks_per_jiffy;

        for_each_possible_cpu(i) {
                if (i == boot_cpuid)
                        continue;
                per_cpu(last_jiffy, i) = previous_tb;
        }
}
#endif

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
        if (__USE_RTC())
                return get_rtc();
        return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}
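
/*
 * Example of the fixed-point scheme above: with a 512 MHz timebase,
 * 10^9 * 2^64 / tb_ticks_per_sec is about 1.95 * 2^64, which
 * time_init() below normalizes to tb_to_ns_scale of roughly
 * 0.98 * 2^64 with tb_to_ns_shift == 1, so
 * mulhdu(ticks, tb_to_ns_scale) << 1 is about ticks * 1.95,
 * i.e. nanoseconds.
 */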
static int __init get_freq(char *name, int cells, unsigned long *val)
{
        struct device_node *cpu;
        const unsigned int *fp;
        int found = 0;

        /* The cpu node should have timebase and clock frequency properties */
        cpu = of_find_node_by_type(NULL, "cpu");

        if (cpu) {
                fp = of_get_property(cpu, name, NULL);
                if (fp) {
                        found = 1;
                        *val = of_read_ulong(fp, cells);
                }

                of_node_put(cpu);
        }

        return found;
}
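
/*
 * Illustration of what get_freq() looks for: a cpu node in the device
 * tree typically carries frequency properties like these (values here
 * are made up):
 *
 *      cpu@0 {
 *              device_type = "cpu";
 *              timebase-frequency = <33333333>;
 *              clock-frequency = <1000000000>;
 *      };
 *
 * The "ibm,extended-*" variants used below are 64-bit, i.e. two cells
 * wide, which is why a cells count is passed in.
 */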
void __init generic_calibrate_decr(void)
{
        ppc_tb_freq = DEFAULT_TB_FREQ;          /* hardcoded default */

        if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
            !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

                printk(KERN_ERR "WARNING: Estimating decrementer frequency "
                                "(not found)\n");
        }

        ppc_proc_freq = DEFAULT_PROC_FREQ;      /* hardcoded default */

        if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
            !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

                printk(KERN_ERR "WARNING: Estimating processor frequency "
                                "(not found)\n");
        }

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
        /* Clear any pending timer interrupts */
        mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

        /* Enable decrementer interrupt */
        mtspr(SPRN_TCR, TCR_DIE);
#endif
}
int update_persistent_clock(struct timespec now)
{
        struct rtc_time tm;

        if (!ppc_md.set_rtc_time)
                return 0;

        to_tm(now.tv_sec + 1 + timezone_offset, &tm);
        tm.tm_year -= 1900;
        tm.tm_mon -= 1;

        return ppc_md.set_rtc_time(&tm);
}

unsigned long read_persistent_clock(void)
{
        struct rtc_time tm;
        static int first = 1;

        /* XXX this is a little fragile but will work okay in the short term */
        if (first) {
                first = 0;
                if (ppc_md.time_init)
                        timezone_offset = ppc_md.time_init();

                /* get_boot_time() isn't guaranteed to be safe to call late */
                if (ppc_md.get_boot_time)
                        return ppc_md.get_boot_time() - timezone_offset;
        }
        if (!ppc_md.get_rtc_time)
                return 0;
        ppc_md.get_rtc_time(&tm);
        return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
                      tm.tm_hour, tm.tm_min, tm.tm_sec);
}
/* clocksource code */
static cycle_t rtc_read(void)
{
        return (cycle_t)get_rtc();
}

static cycle_t timebase_read(void)
{
        return (cycle_t)get_tb();
}

void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
{
        u64 t2x, stamp_xsec;

        if (clock != &clocksource_timebase)
                return;

        /* Make userspace gettimeofday spin until we're done. */
        ++vdso_data->tb_update_count;
        smp_mb();

        /* XXX this assumes clock->shift == 22 */
        /* 4611686018 ~= 2^(20+64-22) / 1e9 */
        t2x = (u64) clock->mult * 4611686018ULL;
        stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
        do_div(stamp_xsec, 1000000000);
        stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
        update_gtod(clock->cycle_last, stamp_xsec, t2x);
}
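
/*
 * Where 4611686018 comes from: clock->mult converts timebase ticks to
 * (ns << 22) here, while tb_to_xs is a 0.64 fixed-point fraction
 * converting ticks to xsec (2^-20 s).  So t2x must be
 * mult * 2^(64-22) * 2^20 / 10^9 = mult * 2^62 / 10^9, and
 * 2^62 / 10^9 = 4611686018.42..., truncated to the constant above.
 */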
void update_vsyscall_tz(void)
{
        /* Make userspace gettimeofday spin until we're done. */
        ++vdso_data->tb_update_count;
        smp_mb();
        vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
        vdso_data->tz_dsttime = sys_tz.tz_dsttime;
        smp_mb();
        ++vdso_data->tb_update_count;
}

static void __init clocksource_init(void)
{
        struct clocksource *clock;

        if (__USE_RTC())
                clock = &clocksource_rtc;
        else
                clock = &clocksource_timebase;

        clock->mult = clocksource_hz2mult(tb_ticks_per_sec, clock->shift);

        if (clocksource_register(clock)) {
                printk(KERN_ERR "clocksource: %s is already registered\n",
                       clock->name);
                return;
        }

        printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
               clock->name, clock->mult, clock->shift);
}
static int decrementer_set_next_event(unsigned long evt,
                                      struct clock_event_device *dev)
{
        __get_cpu_var(decrementers).next_tb = get_tb_or_rtc() + evt;
        set_dec(evt);
        return 0;
}

static void decrementer_set_mode(enum clock_event_mode mode,
                                 struct clock_event_device *dev)
{
        if (mode != CLOCK_EVT_MODE_ONESHOT)
                decrementer_set_next_event(DECREMENTER_MAX, dev);
}

static void register_decrementer_clockevent(int cpu)
{
        struct clock_event_device *dec = &per_cpu(decrementers, cpu).event;

        *dec = decrementer_clockevent;
        dec->cpumask = cpumask_of_cpu(cpu);

        printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n",
               dec->name, dec->mult, dec->shift, cpu);

        clockevents_register_device(dec);
}

static void __init init_decrementer_clockevent(void)
{
        int cpu = smp_processor_id();

        decrementer_clockevent.mult = div_sc(ppc_tb_freq, NSEC_PER_SEC,
                                             decrementer_clockevent.shift);
        decrementer_clockevent.max_delta_ns =
                clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
        decrementer_clockevent.min_delta_ns =
                clockevent_delta2ns(2, &decrementer_clockevent);

        register_decrementer_clockevent(cpu);
}
void secondary_cpu_time_init(void)
{
        /* FIXME: Should make unrelated change to move snapshot_timebase
         * call here! */
        register_decrementer_clockevent(smp_processor_id());
}
/* This function is only called on the boot processor */
void __init time_init(void)
{
        unsigned long flags;
        struct div_result res;
        u64 scale, x;
        unsigned shift;

        if (__USE_RTC()) {
                /* 601 processor: dec counts down by 128 every 128ns */
                ppc_tb_freq = 1000000000;
                tb_last_jiffy = get_rtcl();
        } else {
                /* Normal PowerPC with timebase register */
                ppc_md.calibrate_decr();
                printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
                       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
                printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
                       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
                tb_last_jiffy = get_tb();
        }

        tb_ticks_per_jiffy = ppc_tb_freq / HZ;
        tb_ticks_per_sec = ppc_tb_freq;
        tb_ticks_per_usec = ppc_tb_freq / 1000000;
        tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
        calc_cputime_factors();

        /*
         * Calculate the length of each tick in ns.  It will not be
         * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
         * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
         * rounded up.
         */
        x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
        do_div(x, ppc_tb_freq);
        tick_nsec = x;
        last_tick_len = x << TICKLEN_SCALE;

        /*
         * Compute ticklen_to_xs, which is a factor which gets multiplied
         * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
         * It is computed as:
         *   ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
         * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
         * which turns out to be N = 51 - SHIFT_HZ.
         * This gives the result as a 0.64 fixed-point fraction.
         * That value is reduced by an offset amounting to 1 xsec per
         * 2^31 timebase ticks to avoid problems with time going backwards
         * by 1 xsec when we do timer_recalc_offset due to losing the
         * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
         * since there are 2^20 xsec in a second.
         */
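        /*
         * Worked check of the exponent above, assuming NTP_SCALE_SHIFT
         * is 32 (its value at the time of writing): TICKLEN_SHIFT =
         * 63 - 30 - 32 + SHIFT_HZ = 1 + SHIFT_HZ, so
         * N = 64 + 20 - 32 - (1 + SHIFT_HZ) = 51 - SHIFT_HZ,
         * matching the 2^51 and << SHIFT_HZ in the division below.
         */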
        div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
                     tb_ticks_per_jiffy << SHIFT_HZ, &res);
        div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
        ticklen_to_xs = res.result_low;

        /* Compute tb_to_xs from tick_nsec */
        tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);

        /*
         * Compute scale factor for sched_clock.
         * The calibrate_decr() function has set tb_ticks_per_sec,
         * which is the timebase frequency.
         * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
         * the 128-bit result as a 64.64 fixed-point number.
         * We then shift that number right until it is less than 1.0,
         * giving us the scale factor and shift count to use in
         * sched_clock().
         */
        div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
        scale = res.result_low;
        for (shift = 0; res.result_high != 0; ++shift) {
                scale = (scale >> 1) | (res.result_high << 63);
                res.result_high >>= 1;
        }
        tb_to_ns_scale = scale;
        tb_to_ns_shift = shift;
        /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
        boot_tb = get_tb_or_rtc();

        write_seqlock_irqsave(&xtime_lock, flags);

        /* If platform provided a timezone (pmac), we correct the time */
        if (timezone_offset) {
                sys_tz.tz_minuteswest = -timezone_offset / 60;
                sys_tz.tz_dsttime = 0;
        }

        do_gtod.varp = &do_gtod.vars[0];
        do_gtod.var_idx = 0;
        do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
        __get_cpu_var(last_jiffy) = tb_last_jiffy;
        do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
        do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
        do_gtod.varp->tb_to_xs = tb_to_xs;
        do_gtod.tb_to_us = tb_to_us;

        vdso_data->tb_orig_stamp = tb_last_jiffy;
        vdso_data->tb_update_count = 0;
        vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
        vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
        vdso_data->tb_to_xs = tb_to_xs;

        write_sequnlock_irqrestore(&xtime_lock, flags);

        /* Register the clocksource, if we're not running on iSeries */
        if (!firmware_has_feature(FW_FEATURE_ISERIES))
                clocksource_init();

        init_decrementer_clockevent();
}
#define FEBRUARY        2
#define STARTOFTIME     1970
#define SECDAY          86400L
#define SECYR           (SECDAY * 365)
#define leapyear(year)          ((year) % 4 == 0 && \
                                 ((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a)         (leapyear(a) ? 366 : 365)
#define days_in_month(a)        (month_days[(a) - 1])

static int month_days[12] = {
        31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time * tm)
{
        int leapsToDate;
        int lastYear;
        int day;
        int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

        lastYear = tm->tm_year - 1;

        /*
         * Number of leap corrections to apply up to end of last year
         */
        leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

        /*
         * This year is a leap year if it is divisible by 4 except when it is
         * divisible by 100 unless it is divisible by 400
         *
         * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
         */
        day = tm->tm_mon > 2 && leapyear(tm->tm_year);

        day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
                   tm->tm_mday;

        tm->tm_wday = day % 7;
}

void to_tm(int tim, struct rtc_time * tm)
{
        register int i;
        register long hms, day;

        day = tim / SECDAY;
        hms = tim % SECDAY;

        /* Hours, minutes, seconds are easy */
        tm->tm_hour = hms / 3600;
        tm->tm_min = (hms % 3600) / 60;
        tm->tm_sec = (hms % 3600) % 60;

        /* Number of years in days */
        for (i = STARTOFTIME; day >= days_in_year(i); i++)
                day -= days_in_year(i);
        tm->tm_year = i;

        /* Number of months in days left */
        if (leapyear(tm->tm_year))
                days_in_month(FEBRUARY) = 29;
        for (i = 1; day >= days_in_month(i); i++)
                day -= days_in_month(i);
        days_in_month(FEBRUARY) = 28;
        tm->tm_mon = i;

        /* Days are what is left over (+1) from all that. */
        tm->tm_mday = day + 1;

        /*
         * Determine the day of week
         */
        GregorianDay(tm);
}
/* Auxiliary function to compute scaling factors */

/* Actually the choice of a timebase running at 1/4 of the bus
 * frequency giving resolution of a few tens of nanoseconds is quite nice.
 * It makes this computation very precise (27-28 bits typically) which
 * is optimistic considering the stability of most processor clock
 * oscillators and the precision with which the timebase frequency
 * is measured but does not harm.
 */
unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
{
        unsigned mlt=0, tmp, err;

        /* No concern for performance, it's done once: use a stupid
         * but safe and compact method to find the multiplier.
         */
        for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
                if (mulhwu(inscale, mlt|tmp) < outscale)
                        mlt |= tmp;
        }

        /* We might still be off by 1 for the best approximation.
         * A side effect of this is that if outscale is too large
         * the returned value will be zero.
         * Many corner cases have been checked and seem to work,
         * some might have been forgotten in the test however.
         */
        err = inscale * (mlt+1);
        if (err <= inscale/2)
                mlt++;
        return mlt;
}
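
/*
 * Typical use, as in time_init() above:
 *
 *      tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
 *
 * yields a factor f such that mulhwu(ticks, f), i.e. (ticks * f) >> 32,
 * approximates ticks * 1000000 / ppc_tb_freq, converting timebase
 * ticks to microseconds without a runtime division.
 */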
/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
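/*
 * This is schoolbook long division in base 2^32: the 128-bit dividend
 * is split into four 32-bit digits a:b:c:d, each step divides the
 * running remainder (carried via ra, rb, rc) by the divisor, and the
 * four quotient digits w:x:y:z are reassembled at the end.
 */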
void div128_by_32(u64 dividend_high, u64 dividend_low,
                  unsigned divisor, struct div_result *dr)
{
        unsigned long a, b, c, d;
        unsigned long w, x, y, z;
        u64 ra, rb, rc;

        a = dividend_high >> 32;
        b = dividend_high & 0xffffffff;
        c = dividend_low >> 32;
        d = dividend_low & 0xffffffff;

        w = a / divisor;
        ra = ((u64)(a - (w * divisor)) << 32) + b;

        rb = ((u64) do_div(ra, divisor) << 32) + c;
        x = ra;

        rc = ((u64) do_div(rb, divisor) << 32) + d;
        y = rb;

        do_div(rc, divisor);
        z = rc;

        dr->result_high = ((u64)w << 32) + x;
        dr->result_low  = ((u64)y << 32) + z;
}