
/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time. (for iSeries, we calibrate the timebase
 *   against the Titan chip's clock.)
 * - for astronomical applications: add a new function to get
 *   non ambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#ifdef CONFIG_PPC64
#include <asm/systemcfg.h>
#include <asm/firmware.h>
#endif
#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h>
#endif

/* keep track of when we need to update the rtc */
time_t last_rtc_update;
extern int piranha_simulator;
#ifdef CONFIG_PPC_ISERIES
unsigned long iSeries_recal_titan = 0;
unsigned long iSeries_recal_tb = 0;
static unsigned long first_settimeofday = 1;
#endif

/* The decrementer counts down by 128 every 128ns on a 601. */
#define DECREMENTER_COUNT_601	(1000000000 / HZ)

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif
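
/*
 * Worked example for SCALE_XSEC (illustrative numbers only): 2^19 xsec is
 * half a second, and SCALE_XSEC(2^19, 1000000) = 2^19 * 1000000 / 2^20
 * = 500000 microseconds.  The ppc32 variant computes the same value as
 * ((2^19 << 12) * 1000000) >> 32.
 */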
unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
u64 tb_to_xs;
unsigned tb_to_us;
unsigned long processor_freq;
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

u64 tb_to_ns_scale;
unsigned tb_to_ns_shift;

struct gettimeofday_struct do_gtod;

extern unsigned long wall_jiffies;
extern struct timezone sys_tz;
static long timezone_offset;

void ppc_adjtimex(void);

static unsigned adjusting_time = 0;

unsigned long ppc_proc_freq;
unsigned long ppc_tb_freq;

u64 tb_last_jiffy __cacheline_aligned_in_smp;
unsigned long tb_last_stamp;

/*
 * Note that on ppc32 this only stores the bottom 32 bits of
 * the timebase value, but that's enough to tell when a jiffy
 * has passed.
 */
DEFINE_PER_CPU(unsigned long, last_jiffy);

static __inline__ void timer_check_rtc(void)
{
	/*
	 * update the rtc when needed, this should be performed on the
	 * right fraction of a second. Half or full second ?
	 * Full second works on mk48t59 clocks, others need testing.
	 * Note that this update is basically only used through
	 * the adjtimex system calls. Setting the HW clock in
	 * any other way is a /dev/rtc and userland business.
	 * This is still wrong by -0.5/+1.5 jiffies because of the
	 * timer interrupt resolution and possible delay, but here we
	 * hit a quantization limit which can only be solved by higher
	 * resolution timers and decoupling time management from timer
	 * interrupts. This is also wrong on the clocks
	 * which require being written at the half second boundary.
	 * We should have an rtc call that only sets the minutes and
	 * seconds like on Intel to avoid problems with non UTC clocks.
	 */
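	/*
	 * (659 seconds is just under eleven minutes, so a synced clock
	 * rewrites the RTC roughly every eleven minutes.)
	 */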
	if (ppc_md.set_rtc_time && ntp_synced() &&
	    xtime.tv_sec - last_rtc_update >= 659 &&
	    abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ &&
	    jiffies - wall_jiffies == 1) {
		struct rtc_time tm;
		to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);
		tm.tm_year -= 1900;
		tm.tm_mon -= 1;
		if (ppc_md.set_rtc_time(&tm) == 0)
			last_rtc_update = xtime.tv_sec + 1;
		else
			/* Try again one minute later */
			last_rtc_update += 60;
	}
}

/*
 * This version of gettimeofday has microsecond resolution.
 */
static inline void __do_gettimeofday(struct timeval *tv, u64 tb_val)
{
	unsigned long sec, usec;
	u64 tb_ticks, xsec;
	struct gettimeofday_vars *temp_varp;
	u64 temp_tb_to_xs, temp_stamp_xsec;

	/*
	 * These calculations are faster (gets rid of divides)
	 * if done in units of 1/2^20 rather than microseconds.
	 * The conversion to microseconds at the end is done
	 * without a divide (and in fact, without a multiply)
	 */
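	/*
	 * Concretely: mulhdu() below turns elapsed timebase ticks into
	 * elapsed 1/2^20-second units, xsec / 2^20 gives whole seconds,
	 * and the low 20 bits are the fractional second that SCALE_XSEC()
	 * converts to microseconds.
	 */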
	temp_varp = do_gtod.varp;
	tb_ticks = tb_val - temp_varp->tb_orig_stamp;
	temp_tb_to_xs = temp_varp->tb_to_xs;
	temp_stamp_xsec = temp_varp->stamp_xsec;
	xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs);
	sec = xsec / XSEC_PER_SEC;
	usec = (unsigned long)xsec & (XSEC_PER_SEC - 1);
	usec = SCALE_XSEC(usec, 1000000);

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}

void do_gettimeofday(struct timeval *tv)
{
	if (__USE_RTC()) {
		/* do this the old way */
		unsigned long flags, seq;
		unsigned int sec, nsec, usec, lost;

		do {
			seq = read_seqbegin_irqsave(&xtime_lock, flags);
			sec = xtime.tv_sec;
			nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp);
			lost = jiffies - wall_jiffies;
		} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
		usec = nsec / 1000 + lost * (1000000 / HZ);
		while (usec >= 1000000) {
			usec -= 1000000;
			++sec;
		}
		tv->tv_sec = sec;
		tv->tv_usec = usec;
		return;
	}
	__do_gettimeofday(tv, get_tb());
}

EXPORT_SYMBOL(do_gettimeofday);

/* Synchronize xtime with do_gettimeofday */
static inline void timer_sync_xtime(unsigned long cur_tb)
{
#ifdef CONFIG_PPC64
	/* why do we do this? */
	struct timeval my_tv;

	__do_gettimeofday(&my_tv, cur_tb);

	if (xtime.tv_sec <= my_tv.tv_sec) {
		xtime.tv_sec = my_tv.tv_sec;
		xtime.tv_nsec = my_tv.tv_usec * 1000;
	}
#endif
}

/*
 * There are two copies of tb_to_xs and stamp_xsec so that no
 * lock is needed to access and use these values in
 * do_gettimeofday.  We alternate the copies and as long as a
 * reasonable time elapses between changes, there will never
 * be inconsistent values.  ntpd has a minimum of one minute
 * between updates.
 */
static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
			       u64 new_tb_to_xs)
{
	unsigned temp_idx;
	struct gettimeofday_vars *temp_varp;

	temp_idx = (do_gtod.var_idx == 0);
	temp_varp = &do_gtod.vars[temp_idx];

	temp_varp->tb_to_xs = new_tb_to_xs;
	temp_varp->tb_orig_stamp = new_tb_stamp;
	temp_varp->stamp_xsec = new_stamp_xsec;
	smp_mb();
	do_gtod.varp = temp_varp;
	do_gtod.var_idx = temp_idx;

#ifdef CONFIG_PPC64
	/*
	 * tb_update_count is used to allow the userspace gettimeofday code
	 * to assure itself that it sees a consistent view of the tb_to_xs and
	 * stamp_xsec variables.  It reads the tb_update_count, then reads
	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
	 * the two values of tb_update_count match and are even then the
	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
	 * loops back and reads them again until this criteria is met.
	 */
	++(systemcfg->tb_update_count);
	smp_wmb();
	systemcfg->tb_orig_stamp = new_tb_stamp;
	systemcfg->stamp_xsec = new_stamp_xsec;
	systemcfg->tb_to_xs = new_tb_to_xs;
	smp_wmb();
	++(systemcfg->tb_update_count);
#endif
}
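
/*
 * Sketch of the reader side of the protocol described above (illustrative
 * only; the real consumer is the userland gettimeofday/vdso code):
 *
 *	do {
 *		c1    = systemcfg->tb_update_count;
 *		stamp = systemcfg->tb_orig_stamp;
 *		xsec  = systemcfg->stamp_xsec;
 *		scale = systemcfg->tb_to_xs;
 *		c2    = systemcfg->tb_update_count;
 *	} while (c1 != c2 || (c1 & 1));
 */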

/*
 * When the timebase - tb_orig_stamp gets too big, we do a manipulation
 * between tb_orig_stamp and stamp_xsec. The goal here is to keep the
 * difference tb - tb_orig_stamp small enough to always fit inside a
 * 32 bits number. This is a requirement of our fast 32 bits userland
 * implementation in the vdso. If we "miss" a call to this function
 * (interrupt latency, CPU locked in a spinlock, ...) and we end up
 * with a too big difference, then the vdso will fallback to calling
 * the syscall
 */
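/*
 * Rough scale (assuming, for example, a 200-500 MHz timebase): 2^31 ticks
 * is roughly 4 to 10 seconds, so this rebase normally happens every few
 * seconds from the timer interrupt path.
 */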
static __inline__ void timer_recalc_offset(u64 cur_tb)
{
	unsigned long offset;
	u64 new_stamp_xsec;

	if (__USE_RTC())
		return;
	offset = cur_tb - do_gtod.varp->tb_orig_stamp;
	if ((offset & 0x80000000u) == 0)
		return;
	new_stamp_xsec = do_gtod.varp->stamp_xsec
		+ mulhdu(offset, do_gtod.varp->tb_to_xs);
	update_gtod(cur_tb, new_stamp_xsec, do_gtod.varp->tb_to_xs);
}

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_PPC_ISERIES

/*
 * This function recalibrates the timebase based on the 49-bit time-of-day
 * value in the Titan chip.  The Titan is much more accurate than the value
 * returned by the service processor for the timebase frequency.
 */
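/*
 * (The ">> 12" below implies the Titan TOD advances 4096 counts per
 * microsecond, so the difference between two readings, shifted right by
 * 12, is elapsed microseconds.)
 */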
static void iSeries_tb_recal(void)
{
	struct div_result divres;
	unsigned long titan, tb;

	tb = get_tb();
	titan = HvCallXm_loadTod();
	if ( iSeries_recal_titan ) {
		unsigned long tb_ticks = tb - iSeries_recal_tb;
		unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
		unsigned long new_tb_ticks_per_sec = (tb_ticks * USEC_PER_SEC)/titan_usec;
		unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;
		long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
		char sign = '+';
		/* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
		new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;

		if ( tick_diff < 0 ) {
			tick_diff = -tick_diff;
			sign = '-';
		}
		if ( tick_diff ) {
			if ( tick_diff < tb_ticks_per_jiffy/25 ) {
				printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
					new_tb_ticks_per_jiffy, sign, tick_diff );
				tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
				tb_ticks_per_sec = new_tb_ticks_per_sec;
				div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
				do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
				tb_to_xs = divres.result_low;
				do_gtod.varp->tb_to_xs = tb_to_xs;
				systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
				systemcfg->tb_to_xs = tb_to_xs;
			}
			else {
				printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
					" new tb_ticks_per_jiffy = %lu\n"
					" old tb_ticks_per_jiffy = %lu\n",
					new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
			}
		}
	}
	iSeries_recal_titan = titan;
	iSeries_recal_tb = tb;
}
#endif

/*
 * For iSeries shared processors, we have to let the hypervisor
 * set the hardware decrementer.  We set a virtual decrementer
 * in the lppaca and call the hypervisor if the virtual
 * decrementer is less than the current value in the hardware
 * decrementer. (almost always the new decrementer value will
 * be greater than the current hardware decrementer so the hypervisor
 * call will not be needed)
 */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs * regs)
{
	int next_dec;
	int cpu = smp_processor_id();
	unsigned long ticks;

#ifdef CONFIG_PPC32
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	irq_enter();

	profile_tick(CPU_PROFILING, regs);

#ifdef CONFIG_PPC_ISERIES
	get_paca()->lppaca.int_dword.fields.decr_int = 0;
#endif

	while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
	       >= tb_ticks_per_jiffy) {
		/* Update last_jiffy */
		per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy;
		/* Handle RTCL overflow on 601 */
		if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000)
			per_cpu(last_jiffy, cpu) -= 1000000000;

		/*
		 * We cannot disable the decrementer, so in the period
		 * between this cpu's being marked offline in cpu_online_map
		 * and calling stop-self, it is taking timer interrupts.
		 * Avoid calling into the scheduler rebalancing code if this
		 * is the case.
		 */
		if (!cpu_is_offline(cpu))
			update_process_times(user_mode(regs));

		/*
		 * No need to check whether cpu is offline here; boot_cpuid
		 * should have been fixed up by now.
		 */
		if (cpu != boot_cpuid)
			continue;

		write_seqlock(&xtime_lock);
		tb_last_jiffy += tb_ticks_per_jiffy;
		tb_last_stamp = per_cpu(last_jiffy, cpu);
		timer_recalc_offset(tb_last_jiffy);
		do_timer(regs);
		timer_sync_xtime(tb_last_jiffy);
		timer_check_rtc();
		write_sequnlock(&xtime_lock);
		if (adjusting_time && (time_adjust == 0))
			ppc_adjtimex();
	}

	next_dec = tb_ticks_per_jiffy - ticks;
	set_dec(next_dec);

#ifdef CONFIG_PPC_ISERIES
	if (hvlpevent_is_pending())
		process_hvlpevents(regs);
#endif

#ifdef CONFIG_PPC64
	/* collect purr register values often, for accurate calculations */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		cu->current_tb = mfspr(SPRN_PURR);
	}
#endif

	irq_exit();
}

void wakeup_decrementer(void)
{
	int i;

	set_dec(tb_ticks_per_jiffy);
	/*
	 * We don't expect this to be called on a machine with a 601,
	 * so using get_tbl is fine.
	 */
	tb_last_stamp = tb_last_jiffy = get_tb();
	for_each_cpu(i)
		per_cpu(last_jiffy, i) = tb_last_stamp;
}

#ifdef CONFIG_SMP
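/*
 * Stagger the per-cpu last_jiffy timestamps so the secondaries' decrementer
 * interrupts are spread across the jiffy rather than all firing at the same
 * moment as the boot cpu's.
 */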
void __init smp_space_timers(unsigned int max_cpus)
{
	int i;
	unsigned long offset = tb_ticks_per_jiffy / max_cpus;
	unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid);

	for_each_cpu(i) {
		if (i != boot_cpuid) {
			previous_tb += offset;
			per_cpu(last_jiffy, i) = previous_tb;
		}
	}
}
#endif

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
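/*
 * Worked example, assuming a 250 MHz timebase: time_init() computes
 * 10^9 * 2^64 / tb_ticks_per_sec = 4 * 2^64, which normalizes to
 * tb_to_ns_scale = 2^63 and tb_to_ns_shift = 3, so the expression below
 * evaluates to (tb >> 1) << 3, i.e. 4 ns per timebase tick, as expected.
 */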
unsigned long long sched_clock(void)
{
	if (__USE_RTC())
		return get_rtc();
	return mulhdu(get_tb(), tb_to_ns_scale) << tb_to_ns_shift;
}

int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, new_sec = tv->tv_sec;
	long wtm_nsec, new_nsec = tv->tv_nsec;
	unsigned long flags;
	long int tb_delta;
	u64 new_xsec, tb_delta_xs;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	/*
	 * Updating the RTC is not the job of this code. If the time is
	 * stepped under NTP, the RTC will be updated after STA_UNSYNC
	 * is cleared.  Tools like clock/hwclock either copy the RTC
	 * to the system time, in which case there is no point in writing
	 * to the RTC again, or write to the RTC but then they don't call
	 * settimeofday to perform this operation.
	 */
#ifdef CONFIG_PPC_ISERIES
	if (first_settimeofday) {
		iSeries_tb_recal();
		first_settimeofday = 0;
	}
#endif
	tb_delta = tb_ticks_since(tb_last_stamp);
	tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;
	tb_delta_xs = mulhdu(tb_delta, do_gtod.varp->tb_to_xs);

	wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);

	set_normalized_timespec(&xtime, new_sec, new_nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* In case of a large backwards jump in time with NTP, we want the
	 * clock to be updated as soon as the PLL is again in lock.
	 */
	last_rtc_update = new_sec - 658;

	ntp_clear();

	new_xsec = 0;
	if (new_nsec != 0) {
		new_xsec = (u64)new_nsec * XSEC_PER_SEC;
		do_div(new_xsec, NSEC_PER_SEC);
	}
	new_xsec += (u64)new_sec * XSEC_PER_SEC - tb_delta_xs;
	update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);

#ifdef CONFIG_PPC64
	systemcfg->tz_minuteswest = sys_tz.tz_minuteswest;
	systemcfg->tz_dsttime = sys_tz.tz_dsttime;
#endif

	write_sequnlock_irqrestore(&xtime_lock, flags);
	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);

void __init generic_calibrate_decr(void)
{
	struct device_node *cpu;
	unsigned int *fp;
	int node_found;

	/*
	 * The cpu node should have a timebase-frequency property
	 * to tell us the rate at which the decrementer counts.
	 */
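	/*
	 * For example (values are illustrative, not from any particular
	 * machine), an Open Firmware cpu node might carry
	 *	timebase-frequency = <0x01fca055>	(33333333 Hz)
	 *	clock-frequency    = <0x2faf0800>	(800000000 Hz)
	 * both of which are read below as single 32-bit cells in Hz.
	 */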
	cpu = of_find_node_by_type(NULL, "cpu");

	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */
	node_found = 0;
	if (cpu != 0) {
		fp = (unsigned int *)get_property(cpu, "timebase-frequency",
						  NULL);
		if (fp != 0) {
			node_found = 1;
			ppc_tb_freq = *fp;
		}
	}
	if (!node_found)
		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");

	ppc_proc_freq = DEFAULT_PROC_FREQ;
	node_found = 0;
	if (cpu != 0) {
		fp = (unsigned int *)get_property(cpu, "clock-frequency",
						  NULL);
		if (fp != 0) {
			node_found = 1;
			ppc_proc_freq = *fp;
		}
	}
#ifdef CONFIG_BOOKE
	/* Set the time base to zero */
	mtspr(SPRN_TBWL, 0);
	mtspr(SPRN_TBWU, 0);

	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	/* Enable decrementer interrupt */
	mtspr(SPRN_TCR, TCR_DIE);
#endif
	if (!node_found)
		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");

	of_node_put(cpu);
}

unsigned long get_boot_time(void)
{
	struct rtc_time tm;

	if (ppc_md.get_boot_time)
		return ppc_md.get_boot_time();
	if (!ppc_md.get_rtc_time)
		return 0;
	ppc_md.get_rtc_time(&tm);
	return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
		      tm.tm_hour, tm.tm_min, tm.tm_sec);
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
	unsigned long flags;
	unsigned long tm = 0;
	struct div_result res;
	u64 scale;
	unsigned shift;

	if (ppc_md.time_init != NULL)
		timezone_offset = ppc_md.time_init();

	if (__USE_RTC()) {
		/* 601 processor: dec counts down by 128 every 128ns */
		ppc_tb_freq = 1000000000;
		tb_last_stamp = get_rtcl();
		tb_last_jiffy = tb_last_stamp;
	} else {
		/* Normal PowerPC with timebase register */
		ppc_md.calibrate_decr();
		printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n",
		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
		printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n",
		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
		tb_last_stamp = tb_last_jiffy = get_tb();
	}

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
	div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res);
	tb_to_xs = res.result_low;

#ifdef CONFIG_PPC64
	get_paca()->default_decr = tb_ticks_per_jiffy;
#endif

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;

#ifdef CONFIG_PPC_ISERIES
	if (!piranha_simulator)
#endif
		tm = get_boot_time();

	write_seqlock_irqsave(&xtime_lock, flags);
	xtime.tv_sec = tm;
	xtime.tv_nsec = 0;
	do_gtod.varp = &do_gtod.vars[0];
	do_gtod.var_idx = 0;
	do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
	__get_cpu_var(last_jiffy) = tb_last_stamp;
	do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
	do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
	do_gtod.varp->tb_to_xs = tb_to_xs;
	do_gtod.tb_to_us = tb_to_us;
#ifdef CONFIG_PPC64
	systemcfg->tb_orig_stamp = tb_last_jiffy;
	systemcfg->tb_update_count = 0;
	systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
	systemcfg->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
	systemcfg->tb_to_xs = tb_to_xs;
#endif

	time_freq = 0;

	/* If platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
		xtime.tv_sec -= timezone_offset;
	}

	last_rtc_update = xtime.tv_sec;
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);
	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* Not exact, but the timer interrupt takes care of this */
	set_dec(tb_ticks_per_jiffy);
}

/*
 * After adjtimex is called, adjust the conversion of tb ticks
 * to microseconds to keep do_gettimeofday synchronized
 * with ntpd.
 *
 * Use the time_adjust, time_freq and time_offset computed by adjtimex to
 * adjust the frequency.
 */

/* #define DEBUG_PPC_ADJTIMEX 1 */

void ppc_adjtimex(void)
{
#ifdef CONFIG_PPC64
	unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec,
		new_tb_to_xs, new_xsec, new_stamp_xsec;
	unsigned long tb_ticks_per_sec_delta;
	long delta_freq, ltemp;
	struct div_result divres;
	unsigned long flags;
	long singleshot_ppm = 0;

	/*
	 * Compute parts per million frequency adjustment to
	 * accomplish the time adjustment implied by time_offset to be
	 * applied over the elapsed time indicated by time_constant.
	 * Use SHIFT_USEC to get it into the same units as
	 * time_freq.
	 */
	if ( time_offset < 0 ) {
		ltemp = -time_offset;
		ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
		ltemp >>= SHIFT_KG + time_constant;
		ltemp = -ltemp;
	} else {
		ltemp = time_offset;
		ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
		ltemp >>= SHIFT_KG + time_constant;
	}

	/* If there is a single shot time adjustment in progress */
	if ( time_adjust ) {
#ifdef DEBUG_PPC_ADJTIMEX
		printk("ppc_adjtimex: ");
		if ( adjusting_time == 0 )
			printk("starting ");
		printk("single shot time_adjust = %ld\n", time_adjust);
#endif

		adjusting_time = 1;

		/*
		 * Compute parts per million frequency adjustment
		 * to match time_adjust
		 */
		singleshot_ppm = tickadj * HZ;
		/*
		 * The adjustment should be tickadj*HZ to match the code in
		 * linux/kernel/timer.c, but experiments show that this is too
		 * large.  3/4 of tickadj*HZ seems about right
		 */
		singleshot_ppm -= singleshot_ppm / 4;
		/* Use SHIFT_USEC to get it into the same units as time_freq */
		singleshot_ppm <<= SHIFT_USEC;
		if ( time_adjust < 0 )
			singleshot_ppm = -singleshot_ppm;
	}
	else {
#ifdef DEBUG_PPC_ADJTIMEX
		if ( adjusting_time )
			printk("ppc_adjtimex: ending single shot time_adjust\n");
#endif
		adjusting_time = 0;
	}

	/* Add up all of the frequency adjustments */
	delta_freq = time_freq + ltemp + singleshot_ppm;

	/*
	 * Compute a new value for tb_ticks_per_sec based on
	 * the frequency adjustment
	 */
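	/*
	 * Worked example, assuming SHIFT_USEC == 16 and a 250 MHz timebase:
	 * a +10 ppm adjustment arrives as delta_freq = 10 << 16 = 655360.
	 * Then den = 10^6 * 2^8, (delta_freq >> 8) = 2560, and
	 * tb_ticks_per_sec_delta = 250000000 * 2560 / den = 2500 ticks,
	 * i.e. exactly 10 ppm of the timebase rate, subtracted below so the
	 * software clock runs correspondingly faster.
	 */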
	den = 1000000 * (1 << (SHIFT_USEC - 8));
	if ( delta_freq < 0 ) {
		tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den;
		new_tb_ticks_per_sec = tb_ticks_per_sec + tb_ticks_per_sec_delta;
	}
	else {
		tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( delta_freq >> (SHIFT_USEC - 8))) / den;
		new_tb_ticks_per_sec = tb_ticks_per_sec - tb_ticks_per_sec_delta;
	}

#ifdef DEBUG_PPC_ADJTIMEX
	printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm);
	printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec);
#endif

	/*
	 * Compute a new value of tb_to_xs (used to convert tb to
	 * microseconds) and a new value of stamp_xsec which is the
	 * time (in 1/2^20 second units) corresponding to
	 * tb_orig_stamp.  This new value of stamp_xsec compensates
	 * for the change in frequency (implied by the new tb_to_xs)
	 * which guarantees that the current time remains the same.
	 */
	write_seqlock_irqsave( &xtime_lock, flags );
	tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp;
	div128_by_32(1024*1024, 0, new_tb_ticks_per_sec, &divres);
	new_tb_to_xs = divres.result_low;
	new_xsec = mulhdu(tb_ticks, new_tb_to_xs);
	old_xsec = mulhdu(tb_ticks, do_gtod.varp->tb_to_xs);
	new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec;

	update_gtod(do_gtod.varp->tb_orig_stamp, new_stamp_xsec, new_tb_to_xs);

	write_sequnlock_irqrestore( &xtime_lock, flags );
#endif /* CONFIG_PPC64 */
}

#define FEBRUARY	2
#define	STARTOFTIME	1970
#define SECDAY		86400L
#define SECYR		(SECDAY * 365)
#define	leapyear(year)		((year) % 4 == 0 && \
				 ((year) % 100 != 0 || (year) % 400 == 0))
#define	days_in_year(a)		(leapyear(a) ? 366 : 365)
#define	days_in_month(a)	(month_days[(a) - 1])

static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time * tm)
{
	int leapsToDate;
	int lastYear;
	int day;
	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

	lastYear = tm->tm_year - 1;

	/*
	 * Number of leap corrections to apply up to end of last year
	 */
	leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

	/*
	 * This year is a leap year if it is divisible by 4 except when it is
	 * divisible by 100 unless it is divisible by 400
	 *
	 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
	 */
	day = tm->tm_mon > 2 && leapyear(tm->tm_year);

	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
		   tm->tm_mday;

	tm->tm_wday = day % 7;
}

void to_tm(int tim, struct rtc_time * tm)
{
	register int i;
	register long hms, day;

	day = tim / SECDAY;
	hms = tim % SECDAY;

	/* Hours, minutes, seconds are easy */
	tm->tm_hour = hms / 3600;
	tm->tm_min = (hms % 3600) / 60;
	tm->tm_sec = (hms % 3600) % 60;

	/* Number of years in days */
	for (i = STARTOFTIME; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	tm->tm_year = i;

	/* Number of months in days left */
	if (leapyear(tm->tm_year))
		days_in_month(FEBRUARY) = 29;
	for (i = 1; day >= days_in_month(i); i++)
		day -= days_in_month(i);
	days_in_month(FEBRUARY) = 28;
	tm->tm_mon = i;

	/* Days are what is left over (+1) from all that. */
	tm->tm_mday = day + 1;

	/*
	 * Determine the day of week
	 */
	GregorianDay(tm);
}

/* Auxiliary function to compute scaling factors */
/* Actually the choice of a timebase running at 1/4 of the bus frequency,
 * giving a resolution of a few tens of nanoseconds, is quite nice.
 * It makes this computation very precise (27-28 bits typically) which
 * is optimistic considering the stability of most processor clock
 * oscillators and the precision with which the timebase frequency
 * is measured but does not harm.
 */
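/*
 * Worked example (illustrative numbers only):
 * mulhwu_scale_factor(250000000, 1000000) returns roughly
 * 2^32 / 250 = 17179869, and then mulhwu(ticks, 17179869)
 * = ticks * 17179869 >> 32, which is about ticks / 250, converting
 * timebase ticks to microseconds without a divide.
 */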
unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
{
	unsigned mlt=0, tmp, err;

	/* No concern for performance, it's done once: use a stupid
	 * but safe and compact method to find the multiplier.
	 */
	for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
		if (mulhwu(inscale, mlt|tmp) < outscale)
			mlt |= tmp;
	}

	/* We might still be off by 1 for the best approximation.
	 * A side effect of this is that if outscale is too large
	 * the returned value will be zero.
	 * Many corner cases have been checked and seem to work,
	 * some might have been forgotten in the test however.
	 */
	err = inscale * (mlt+1);
	if (err <= inscale/2)
		mlt++;
	return mlt;
}

/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
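/*
 * Usage note: time_init() calls
 * div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res), i.e. it computes
 * (2^20 * 2^64) / tb_ticks_per_sec; the low 64 bits become tb_to_xs, so
 * mulhdu(ticks, tb_to_xs) converts timebase ticks to 1/2^20-second units
 * (one second's worth of ticks maps back to roughly 2^20), which is
 * exactly what do_gettimeofday expects.
 */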
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low = ((u64)y << 32) + z;
}