/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make the clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (so never enter sleep mode
 * on SMP; nap and doze are OK).
 *
 * Sped up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time. (for iSeries, we calibrate the timebase
 *   against the Titan chip's clock.)
 * - for astronomical applications: add a new function to get
 *   unambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h>
#endif
/* keep track of when we need to update the rtc */
time_t last_rtc_update;

#ifdef CONFIG_PPC_ISERIES
unsigned long iSeries_recal_titan = 0;
unsigned long iSeries_recal_tb = 0;
static unsigned long first_settimeofday = 1;
#endif

/* The decrementer counts down by 128 every 128ns on a 601. */
#define DECREMENTER_COUNT_601	(1000000000 / HZ)

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif
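
/*
 * Illustrative example (not used by the code): both variants compute
 * (xsec * max) / 2^20.  Converting the sub-second part of an xsec
 * value (units of 2^-20 s) to microseconds with max = 1000000:
 * half a second is 1 << 19 xsec, and SCALE_XSEC(1 << 19, 1000000)
 * yields 500000.  On 32-bit, mulhwu(x, y) returns (x * y) >> 32,
 * so ((xsec << 12) * max) >> 32 == (xsec * max) >> 20.
 */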
unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
u64 tb_to_xs;
unsigned tb_to_us;

#define TICKLEN_SCALE	TICK_LENGTH_SHIFT
u64 last_tick_len;	/* units are ns / 2^TICKLEN_SCALE */
u64 ticklen_to_xs;	/* 0.64 fraction */

/* If last_tick_len corresponds to about 1/HZ seconds, then
   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
#define TICKLEN_SHIFT	(63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
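
/*
 * Sanity check on that shift (illustrative): last_tick_len is about
 * (1e9 / HZ) << TICKLEN_SCALE.  Since 1e9 is roughly 2^30 and HZ is
 * roughly 2^SHIFT_HZ, last_tick_len is roughly
 * 2^(30 - SHIFT_HZ + TICKLEN_SCALE), and shifting it left by
 * (63 - 30 - TICKLEN_SCALE + SHIFT_HZ) lands on 2^63 as the
 * comment above claims.
 */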
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

u64 tb_to_ns_scale;
unsigned tb_to_ns_shift;

struct gettimeofday_struct do_gtod;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
unsigned long ppc_tb_freq;

static u64 tb_last_jiffy __cacheline_aligned_in_smp;
static DEFINE_PER_CPU(u64, last_jiffy);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_msec_factor;
EXPORT_SYMBOL(__cputime_msec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);

static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
	__cputime_jiffies_factor = res.result_low;
	div128_by_32(1000, 0, tb_ticks_per_sec, &res);
	__cputime_msec_factor = res.result_low;
	div128_by_32(1, 0, tb_ticks_per_sec, &res);
	__cputime_sec_factor = res.result_low;
	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
	__cputime_clockt_factor = res.result_low;
}
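
/*
 * Illustrative sketch (not part of the build): each factor is the low
 * 64 bits of (units_per_sec * 2^64) / tb_ticks_per_sec, i.e. a 0.64
 * fixed-point fraction of "output units per timebase tick".  A
 * conversion then needs only a multiply-high, e.g.:
 *
 *	jiffies = mulhdu(ticks, __cputime_jiffies_factor);
 *
 * which evaluates to (ticks * HZ) / tb_ticks_per_sec without a divide.
 */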
/*
 * Read the PURR on systems that have it, otherwise the timebase.
 */
static u64 read_purr(void)
{
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return mftb();
}

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
void account_system_vtime(struct task_struct *tsk)
{
	u64 now, delta;
	unsigned long flags;

	local_irq_save(flags);
	now = read_purr();
	delta = now - get_paca()->startpurr;
	get_paca()->startpurr = now;
	if (!in_interrupt()) {
		delta += get_paca()->system_time;
		get_paca()->system_time = 0;
	}
	account_system_time(tsk, 0, delta);
	local_irq_restore(flags);
}

/*
 * Transfer the user and system times accumulated in the paca
 * by the exception entry and exit code to the generic process
 * user and system time records.
 * Must be called with interrupts disabled.
 */
void account_process_vtime(struct task_struct *tsk)
{
	cputime_t utime;

	utime = get_paca()->user_time;
	get_paca()->user_time = 0;
	account_user_time(tsk, utime);
}

static void account_process_time(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	account_process_vtime(current);
	run_local_timers();
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_mode(regs));
	scheduler_tick();
	run_posix_cpu_timers(current);
}

#ifdef CONFIG_PPC_SPLPAR
/*
 * Stuff for accounting stolen time.
 */
struct cpu_purr_data {
	int	initialized;		/* thread is running */
	u64	tb0;			/* timebase at origin time */
	u64	purr0;			/* PURR at origin time */
	u64	tb;			/* last TB value read */
	u64	purr;			/* last PURR value read */
	u64	stolen;			/* stolen time so far */
	spinlock_t lock;
};

static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);

static void snapshot_tb_and_purr(void *data)
{
	struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);

	p->tb0 = mftb();
	p->purr0 = mfspr(SPRN_PURR);
	p->tb = p->tb0;
	p->purr = 0;
	wmb();
	p->initialized = 1;
}

/*
 * Called during boot when all cpus have come up.
 */
void snapshot_timebases(void)
{
	int cpu;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu(cpu_purr_data, cpu).lock);
	on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
}

void calculate_steal_time(void)
{
	u64 tb, purr, t0;
	s64 stolen;
	struct cpu_purr_data *p0, *pme, *phim;
	int cpu;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	cpu = smp_processor_id();
	pme = &per_cpu(cpu_purr_data, cpu);
	if (!pme->initialized)
		return;		/* this can happen in early boot */
	p0 = &per_cpu(cpu_purr_data, cpu & ~1);
	phim = &per_cpu(cpu_purr_data, cpu ^ 1);
	spin_lock(&p0->lock);
	tb = mftb();
	purr = mfspr(SPRN_PURR) - pme->purr0;
	if (!phim->initialized || !cpu_online(cpu ^ 1)) {
		stolen = (tb - pme->tb) - (purr - pme->purr);
	} else {
		t0 = pme->tb0;
		if (phim->tb0 < t0)
			t0 = phim->tb0;
		stolen = phim->tb - t0 - phim->purr - purr - p0->stolen;
	}
	if (stolen > 0) {
		account_steal_time(current, stolen);
		p0->stolen += stolen;
	}
	pme->tb = tb;
	pme->purr = purr;
	spin_unlock(&p0->lock);
}
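
/*
 * A rough sketch of the accounting above (illustrative, somewhat
 * simplified): on an SMT pair the two threads' PURR counts sum to the
 * timebase ticks that elapse while the partition is actually
 * dispatched, so
 *
 *	stolen = elapsed_tb - (purr_this_thread + purr_sibling)
 *		 - previously_accounted_stolen
 *
 * is the newly observed time taken by the hypervisor.  When the
 * sibling thread is offline or not yet initialized, this thread gets
 * the whole processor and the simpler (delta_tb - delta_purr) form
 * is used instead.
 */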
/*
 * Must be called before the cpu is added to the online map when
 * a cpu is being brought up at runtime.
 */
static void snapshot_purr(void)
{
	int cpu;
	u64 purr;
	struct cpu_purr_data *p0, *pme, *phim;
	unsigned long flags;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	cpu = smp_processor_id();
	pme = &per_cpu(cpu_purr_data, cpu);
	p0 = &per_cpu(cpu_purr_data, cpu & ~1);
	phim = &per_cpu(cpu_purr_data, cpu ^ 1);
	spin_lock_irqsave(&p0->lock, flags);
	pme->tb = pme->tb0 = mftb();
	purr = mfspr(SPRN_PURR);
	if (!phim->initialized) {
		pme->purr = 0;
		pme->purr0 = purr;
	} else {
		/* set p->purr and p->purr0 for no change in p0->stolen */
		pme->purr = phim->tb - phim->tb0 - phim->purr - p0->stolen;
		pme->purr0 = purr - pme->purr;
	}
	pme->initialized = 1;
	spin_unlock_irqrestore(&p0->lock, flags);
}

#endif /* CONFIG_PPC_SPLPAR */

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#define account_process_time(regs)	update_process_times(user_mode(regs))
#define calculate_steal_time()		do { } while (0)
#endif

#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
#define snapshot_purr()			do { } while (0)
#endif

/*
 * Called when a cpu comes up after the system has finished booting,
 * i.e. as a result of a hotplug cpu action.
 */
void snapshot_timebase(void)
{
	__get_cpu_var(last_jiffy) = get_tb();
	snapshot_purr();
}

void __delay(unsigned long loops)
{
	unsigned long start;
	int diff;

	if (__USE_RTC()) {
		start = get_rtcl();
		do {
			/* the RTCL register wraps at 1000000000 */
			diff = get_rtcl() - start;
			if (diff < 0)
				diff += 1000000000;
		} while (diff < loops);
	} else {
		start = get_tbl();
		while (get_tbl() - start < loops)
			HMT_low();
		HMT_medium();
	}
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);
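
/*
 * Example (illustrative, assuming a 512 MHz timebase): time_init()
 * would set tb_ticks_per_usec = 512, so udelay(100) busy-waits until
 * the timebase has advanced by 51200 ticks, i.e. ~100us.  HMT_low()
 * drops SMT thread priority while spinning; HMT_medium() restores it.
 */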
static __inline__ void timer_check_rtc(void)
{
	/*
	 * Update the RTC when needed; this should be performed on the
	 * right fraction of a second.  Half or full second?
	 * Full second works on mk48t59 clocks, others need testing.
	 * Note that this update is basically only used through
	 * the adjtimex system calls.  Setting the HW clock in
	 * any other way is a /dev/rtc and userland business.
	 * This is still wrong by -0.5/+1.5 jiffies because of the
	 * timer interrupt resolution and possible delay, but here we
	 * hit a quantization limit which can only be solved by higher
	 * resolution timers and decoupling time management from timer
	 * interrupts.  This is also wrong on the clocks
	 * which require being written at the half second boundary.
	 * We should have an rtc call that only sets the minutes and
	 * seconds like on Intel to avoid problems with non-UTC clocks.
	 * (The 659 below gives the traditional ~11-minute interval
	 * between RTC updates while NTP-synchronized.)
	 */
	if (ppc_md.set_rtc_time && ntp_synced() &&
	    xtime.tv_sec - last_rtc_update >= 659 &&
	    abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ) {
		struct rtc_time tm;
		to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);
		tm.tm_year -= 1900;
		tm.tm_mon -= 1;
		if (ppc_md.set_rtc_time(&tm) == 0)
			last_rtc_update = xtime.tv_sec + 1;
		else
			/* Try again one minute later */
			last_rtc_update += 60;
	}
}
/*
 * This version of gettimeofday has microsecond resolution.
 */
static inline void __do_gettimeofday(struct timeval *tv)
{
	unsigned long sec, usec;
	u64 tb_ticks, xsec;
	struct gettimeofday_vars *temp_varp;
	u64 temp_tb_to_xs, temp_stamp_xsec;

	/*
	 * These calculations are faster (they get rid of divides)
	 * if done in units of 1/2^20 rather than microseconds.
	 * The conversion to microseconds at the end is done
	 * without a divide (and in fact, without a multiply)
	 */
	temp_varp = do_gtod.varp;

	/* Sampling the time base must be done after loading
	 * do_gtod.varp in order to avoid racing with update_gtod.
	 */
	data_barrier(temp_varp);
	tb_ticks = get_tb() - temp_varp->tb_orig_stamp;
	temp_tb_to_xs = temp_varp->tb_to_xs;
	temp_stamp_xsec = temp_varp->stamp_xsec;
	xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs);
	sec = xsec / XSEC_PER_SEC;
	usec = (unsigned long)xsec & (XSEC_PER_SEC - 1);
	usec = SCALE_XSEC(usec, 1000000);

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}

void do_gettimeofday(struct timeval *tv)
{
	if (__USE_RTC()) {
		/* do this the old way */
		unsigned long flags, seq;
		unsigned int sec, nsec, usec;

		do {
			seq = read_seqbegin_irqsave(&xtime_lock, flags);
			sec = xtime.tv_sec;
			nsec = xtime.tv_nsec + tb_ticks_since(tb_last_jiffy);
		} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
		usec = nsec / 1000;
		while (usec >= 1000000) {
			usec -= 1000000;
			++sec;
		}
		tv->tv_sec = sec;
		tv->tv_usec = usec;
		return;
	}
	__do_gettimeofday(tv);
}
EXPORT_SYMBOL(do_gettimeofday);

/*
 * There are two copies of tb_to_xs and stamp_xsec so that no
 * lock is needed to access and use these values in
 * do_gettimeofday.  We alternate the copies and as long as a
 * reasonable time elapses between changes, there will never
 * be inconsistent values.  ntpd has a minimum of one minute
 * between updates.
 */
static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
			       u64 new_tb_to_xs)
{
	unsigned temp_idx;
	struct gettimeofday_vars *temp_varp;

	temp_idx = (do_gtod.var_idx == 0);
	temp_varp = &do_gtod.vars[temp_idx];

	temp_varp->tb_to_xs = new_tb_to_xs;
	temp_varp->tb_orig_stamp = new_tb_stamp;
	temp_varp->stamp_xsec = new_stamp_xsec;
	smp_mb();
	do_gtod.varp = temp_varp;
	do_gtod.var_idx = temp_idx;

	/*
	 * tb_update_count is used to allow the userspace gettimeofday code
	 * to assure itself that it sees a consistent view of the tb_to_xs and
	 * stamp_xsec variables.  It reads the tb_update_count, then reads
	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
	 * the two values of tb_update_count match and are even then the
	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
	 * loops back and reads them again until this criterion is met.
	 * We expect the caller to have done the first increment of
	 * vdso_data->tb_update_count already.
	 */
	vdso_data->tb_orig_stamp = new_tb_stamp;
	vdso_data->stamp_xsec = new_stamp_xsec;
	vdso_data->tb_to_xs = new_tb_to_xs;
	vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
	vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
	smp_wmb();
	++(vdso_data->tb_update_count);
}
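
/*
 * Reader-side sketch of the protocol described above (illustrative
 * only; the real reader is the vdso code, not this C):
 *
 *	u32 count1, count2;
 *	do {
 *		count1 = vdso_data->tb_update_count;
 *		smp_rmb();
 *		... read tb_orig_stamp, stamp_xsec, tb_to_xs ...
 *		smp_rmb();
 *		count2 = vdso_data->tb_update_count;
 *	} while (count1 != count2 || (count1 & 1));
 */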
/*
 * When the timebase - tb_orig_stamp gets too big, we do a manipulation
 * between tb_orig_stamp and stamp_xsec.  The goal here is to keep the
 * difference tb - tb_orig_stamp small enough to always fit inside a
 * 32-bit number.  This is a requirement of our fast 32-bit userland
 * implementation in the vdso.  If we "miss" a call to this function
 * (interrupt latency, CPU locked in a spinlock, ...) and we end up
 * with a too big difference, then the vdso will fall back to calling
 * the syscall.
 */
static __inline__ void timer_recalc_offset(u64 cur_tb)
{
	unsigned long offset;
	u64 new_stamp_xsec;
	u64 tlen, t2x;
	u64 tb, xsec_old, xsec_new;
	struct gettimeofday_vars *varp;

	if (__USE_RTC())
		return;
	tlen = current_tick_length();
	offset = cur_tb - do_gtod.varp->tb_orig_stamp;
	if (tlen == last_tick_len && offset < 0x80000000u)
		return;
	if (tlen != last_tick_len) {
		t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs);
		last_tick_len = tlen;
	} else
		t2x = do_gtod.varp->tb_to_xs;
	new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
	do_div(new_stamp_xsec, 1000000000);
	new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;

	++vdso_data->tb_update_count;
	smp_mb();

	/*
	 * Make sure time doesn't go backwards for userspace gettimeofday.
	 */
	tb = get_tb();
	varp = do_gtod.varp;
	xsec_old = mulhdu(tb - varp->tb_orig_stamp, varp->tb_to_xs)
		+ varp->stamp_xsec;
	xsec_new = mulhdu(tb - cur_tb, t2x) + new_stamp_xsec;
	if (xsec_new < xsec_old)
		new_stamp_xsec += xsec_old - xsec_new;

	update_gtod(cur_tb, new_stamp_xsec, t2x);
}
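
/*
 * Scale of the 0x80000000 threshold above (illustrative, assuming a
 * 512 MHz timebase): 2^31 ticks is about 4.2 seconds.  Since this
 * function runs from timer_interrupt() every jiffy, the offset only
 * approaches 32 bits if updates are missed for several seconds.
 */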
#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_PPC_ISERIES

/*
 * This function recalibrates the timebase based on the 49-bit time-of-day
 * value in the Titan chip.  The Titan is much more accurate than the value
 * returned by the service processor for the timebase frequency.
 */
static void iSeries_tb_recal(void)
{
	struct div_result divres;
	unsigned long titan, tb;

	tb = get_tb();
	titan = HvCallXm_loadTod();
	if ( iSeries_recal_titan ) {
		unsigned long tb_ticks = tb - iSeries_recal_tb;
		unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
		unsigned long new_tb_ticks_per_sec   = (tb_ticks * USEC_PER_SEC)/titan_usec;
		unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;
		long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
		char sign = '+';

		/* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
		new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;

		if ( tick_diff < 0 ) {
			tick_diff = -tick_diff;
			sign = '-';
		}
		if ( tick_diff ) {
			if ( tick_diff < tb_ticks_per_jiffy/25 ) {
				printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
						new_tb_ticks_per_jiffy, sign, tick_diff );
				tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
				tb_ticks_per_sec   = new_tb_ticks_per_sec;
				calc_cputime_factors();
				div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
				do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
				tb_to_xs = divres.result_low;
				do_gtod.varp->tb_to_xs = tb_to_xs;
				vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
				vdso_data->tb_to_xs = tb_to_xs;
			}
			else {
				printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
					"    new tb_ticks_per_jiffy = %lu\n"
					"    old tb_ticks_per_jiffy = %lu\n",
					new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
			}
		}
	}
	iSeries_recal_titan = titan;
	iSeries_recal_tb = tb;
}
#endif
/*
 * For iSeries shared processors, we have to let the hypervisor
 * set the hardware decrementer.  We set a virtual decrementer
 * in the lppaca and call the hypervisor if the virtual
 * decrementer is less than the current value in the hardware
 * decrementer.  (Almost always the new decrementer value will
 * be greater than the current hardware decrementer, so the
 * hypervisor call will not be needed.)
 */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs * regs)
{
	struct pt_regs *old_regs;
	int next_dec;
	int cpu = smp_processor_id();
	unsigned long ticks;
	u64 tb_next_jiffy;

#ifdef CONFIG_PPC32
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	old_regs = set_irq_regs(regs);
	irq_enter();

	profile_tick(CPU_PROFILING);
	calculate_steal_time();

#ifdef CONFIG_PPC_ISERIES
	get_lppaca()->int_dword.fields.decr_int = 0;
#endif

	while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
	       >= tb_ticks_per_jiffy) {
		/* Update last_jiffy */
		per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy;
		/* Handle RTCL overflow on 601 */
		if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000)
			per_cpu(last_jiffy, cpu) -= 1000000000;

		/*
		 * We cannot disable the decrementer, so in the period
		 * between this cpu's being marked offline in cpu_online_map
		 * and calling stop-self, it is taking timer interrupts.
		 * Avoid calling into the scheduler rebalancing code if this
		 * is the case.
		 */
		if (!cpu_is_offline(cpu))
			account_process_time(regs);

		/*
		 * No need to check whether cpu is offline here; boot_cpuid
		 * should have been fixed up by now.
		 */
		if (cpu != boot_cpuid)
			continue;

		write_seqlock(&xtime_lock);
		tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy;
		if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) {
			tb_last_jiffy = tb_next_jiffy;
			do_timer(1);
			timer_recalc_offset(tb_last_jiffy);
			timer_check_rtc();
		}
		write_sequnlock(&xtime_lock);
	}

	next_dec = tb_ticks_per_jiffy - ticks;
	set_dec(next_dec);

#ifdef CONFIG_PPC_ISERIES
	if (hvlpevent_is_pending())
		process_hvlpevents();
#endif

#ifdef CONFIG_PPC64
	/* collect purr register values often, for accurate calculations */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		cu->current_tb = mfspr(SPRN_PURR);
	}
#endif

	irq_exit();
	set_irq_regs(old_regs);
}

void wakeup_decrementer(void)
{
	unsigned long ticks;

	/*
	 * The timebase gets saved on sleep and restored on wakeup,
	 * so all we need to do is to reset the decrementer.
	 */
	ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
	if (ticks < tb_ticks_per_jiffy)
		ticks = tb_ticks_per_jiffy - ticks;
	else
		ticks = 1;
	set_dec(ticks);
}

#ifdef CONFIG_SMP
void __init smp_space_timers(unsigned int max_cpus)
{
	int i;
	unsigned long half = tb_ticks_per_jiffy / 2;
	unsigned long offset = tb_ticks_per_jiffy / max_cpus;
	u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);

	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
	previous_tb -= tb_ticks_per_jiffy;
	/*
	 * The stolen time calculation for POWER5 shared-processor LPAR
	 * systems works better if the two threads' timebase interrupts
	 * are staggered by half a jiffy with respect to each other.
	 */
	for_each_possible_cpu(i) {
		if (i == boot_cpuid)
			continue;
		if (i == (boot_cpuid ^ 1))
			per_cpu(last_jiffy, i) =
				per_cpu(last_jiffy, boot_cpuid) - half;
		else if (i & 1)
			per_cpu(last_jiffy, i) =
				per_cpu(last_jiffy, i ^ 1) + half;
		else {
			previous_tb += offset;
			per_cpu(last_jiffy, i) = previous_tb;
		}
	}
}
#endif

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
	if (__USE_RTC())
		return get_rtc();
	return mulhdu(get_tb(), tb_to_ns_scale) << tb_to_ns_shift;
}
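
/*
 * Worked example (illustrative): for tb_ticks_per_sec = 512000000,
 * time_init() computes 1e9 * 2^64 / 512e6 ~= 1.953 * 2^64, which is
 * shifted right once to get below 1.0, giving tb_to_ns_shift = 1 and
 * tb_to_ns_scale ~= 0.977 * 2^64.  Then
 *	mulhdu(tb, tb_to_ns_scale) << 1  ==  tb * 1.953... ns,
 * matching the 1.953125 ns period of a 512 MHz timebase.
 */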
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, new_sec = tv->tv_sec;
	long wtm_nsec, new_nsec = tv->tv_nsec;
	unsigned long flags;
	u64 new_xsec;
	unsigned long tb_delta;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	/*
	 * Updating the RTC is not the job of this code.  If the time is
	 * stepped under NTP, the RTC will be updated after STA_UNSYNC
	 * is cleared.  Tools like clock/hwclock either copy the RTC
	 * to the system time, in which case there is no point in writing
	 * to the RTC again, or write to the RTC but then they don't call
	 * settimeofday to perform this operation.
	 */
#ifdef CONFIG_PPC_ISERIES
	if (first_settimeofday) {
		iSeries_tb_recal();
		first_settimeofday = 0;
	}
#endif

	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_mb();

	/*
	 * Subtract off the number of nanoseconds since the
	 * beginning of the last tick.
	 */
	tb_delta = tb_ticks_since(tb_last_jiffy);
	tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */
	new_nsec -= SCALE_XSEC(tb_delta, 1000000000);

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);

	set_normalized_timespec(&xtime, new_sec, new_nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* In case of a large backwards jump in time with NTP, we want the
	 * clock to be updated as soon as the PLL is again in lock.
	 */
	last_rtc_update = new_sec - 658;

	ntp_clear();

	new_xsec = xtime.tv_nsec;
	if (new_xsec != 0) {
		new_xsec *= XSEC_PER_SEC;
		do_div(new_xsec, NSEC_PER_SEC);
	}
	new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC;
	update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);

	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;

	write_sequnlock_irqrestore(&xtime_lock, flags);
	clock_was_set();
	return 0;
}
EXPORT_SYMBOL(do_settimeofday);
static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const unsigned int *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		fp = get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = of_read_ulong(fp, cells);
		}

		of_node_put(cpu);
	}

	return found;
}

void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}

#ifdef CONFIG_BOOKE
	/* Set the time base to zero */
	mtspr(SPRN_TBWL, 0);
	mtspr(SPRN_TBWU, 0);

	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	/* Enable decrementer interrupt */
	mtspr(SPRN_TCR, TCR_DIE);
#endif
}
unsigned long get_boot_time(void)
{
	struct rtc_time tm;

	if (ppc_md.get_boot_time)
		return ppc_md.get_boot_time();
	if (!ppc_md.get_rtc_time)
		return 0;
	ppc_md.get_rtc_time(&tm);
	return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
		      tm.tm_hour, tm.tm_min, tm.tm_sec);
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
	unsigned long flags;
	unsigned long tm = 0;
	struct div_result res;
	u64 scale, x;
	unsigned shift;

	if (ppc_md.time_init != NULL)
		timezone_offset = ppc_md.time_init();

	if (__USE_RTC()) {
		/* 601 processor: dec counts down by 128 every 128ns */
		ppc_tb_freq = 1000000000;
		tb_last_jiffy = get_rtcl();
	} else {
		/* Normal PowerPC with timebase register */
		ppc_md.calibrate_decr();
		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
		printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
		tb_last_jiffy = get_tb();
	}

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
	calc_cputime_factors();

	/*
	 * Calculate the length of each tick in ns.  It will not be
	 * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
	 * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
	 * rounded up.
	 */
	x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
	do_div(x, ppc_tb_freq);
	tick_nsec = x;
	last_tick_len = x << TICKLEN_SCALE;

	/*
	 * Compute ticklen_to_xs, which is a factor which gets multiplied
	 * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
	 * It is computed as:
	 *   ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
	 * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
	 * which turns out to be N = 51 - SHIFT_HZ.
	 * This gives the result as a 0.64 fixed-point fraction.
	 * That value is reduced by an offset amounting to 1 xsec per
	 * 2^31 timebase ticks to avoid problems with time going backwards
	 * by 1 xsec when we do timer_recalc_offset due to losing the
	 * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
	 * since there are 2^20 xsec in a second.
	 */
	div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
		     tb_ticks_per_jiffy << SHIFT_HZ, &res);
	div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
	ticklen_to_xs = res.result_low;

	/* Compute tb_to_xs from tick_nsec */
	tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;

	tm = get_boot_time();

	write_seqlock_irqsave(&xtime_lock, flags);

	/* If platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
		tm -= timezone_offset;
	}

	xtime.tv_sec = tm;
	xtime.tv_nsec = 0;
	do_gtod.varp = &do_gtod.vars[0];
	do_gtod.var_idx = 0;
	do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
	__get_cpu_var(last_jiffy) = tb_last_jiffy;
	do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
	do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
	do_gtod.varp->tb_to_xs = tb_to_xs;
	do_gtod.tb_to_us = tb_to_us;

	vdso_data->tb_orig_stamp = tb_last_jiffy;
	vdso_data->tb_update_count = 0;
	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
	vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
	vdso_data->tb_to_xs = tb_to_xs;

	time_freq = 0;

	last_rtc_update = xtime.tv_sec;
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);
	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* Not exact, but the timer interrupt takes care of this */
	set_dec(tb_ticks_per_jiffy);
}
#ifdef CONFIG_RTC_CLASS
static int set_rtc_class_time(struct rtc_time *tm)
{
	int err;
	struct class_device *class_dev =
		rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);

	if (class_dev == NULL)
		return -ENODEV;

	err = rtc_set_time(class_dev, tm);

	rtc_class_close(class_dev);

	/* propagate any error from rtc_set_time rather than dropping it */
	return err;
}

static void get_rtc_class_time(struct rtc_time *tm)
{
	int err;
	struct class_device *class_dev =
		rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);

	if (class_dev == NULL)
		return;

	err = rtc_read_time(class_dev, tm);

	rtc_class_close(class_dev);

	return;
}

int __init rtc_class_hookup(void)
{
	ppc_md.get_rtc_time = get_rtc_class_time;
	ppc_md.set_rtc_time = set_rtc_class_time;

	return 0;
}
#endif /* CONFIG_RTC_CLASS */
#define FEBRUARY	2
#define STARTOFTIME	1970
#define SECDAY		86400L
#define SECYR		(SECDAY * 365)
#define leapyear(year)		((year) % 4 == 0 && \
				 ((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a)		(leapyear(a) ? 366 : 365)
#define days_in_month(a)	(month_days[(a) - 1])

static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time * tm)
{
	int leapsToDate;
	int lastYear;
	int day;
	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

	lastYear = tm->tm_year - 1;

	/*
	 * Number of leap corrections to apply up to end of last year
	 */
	leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

	/*
	 * This year is a leap year if it is divisible by 4 except when it is
	 * divisible by 100 unless it is divisible by 400
	 *
	 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
	 */
	day = tm->tm_mon > 2 && leapyear(tm->tm_year);

	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
		   tm->tm_mday;

	tm->tm_wday = day % 7;
}

void to_tm(int tim, struct rtc_time * tm)
{
	register int i;
	register long hms, day;

	day = tim / SECDAY;
	hms = tim % SECDAY;

	/* Hours, minutes, seconds are easy */
	tm->tm_hour = hms / 3600;
	tm->tm_min = (hms % 3600) / 60;
	tm->tm_sec = (hms % 3600) % 60;

	/* Number of years in days */
	for (i = STARTOFTIME; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	tm->tm_year = i;

	/* Number of months in days left */
	if (leapyear(tm->tm_year))
		days_in_month(FEBRUARY) = 29;
	for (i = 1; day >= days_in_month(i); i++)
		day -= days_in_month(i);
	days_in_month(FEBRUARY) = 28;
	tm->tm_mon = i;

	/* Days are what is left over (+1) from all that. */
	tm->tm_mday = day + 1;

	/*
	 * Determine the day of week
	 */
	GregorianDay(tm);
}
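
/*
 * Usage note (illustrative): to_tm() produces a full four-digit
 * tm_year and a 1-based tm_mon, unlike struct tm, which is why
 * timer_check_rtc() above subtracts 1900 and 1 before handing the
 * result to ppc_md.set_rtc_time().  For example, to_tm(0, &tm)
 * gives 1970-01-01 00:00:00 with tm_wday == 4 (Thursday).
 */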
/* Auxiliary function to compute scaling factors */

/* Actually the choice of a timebase running at 1/4 of the bus
 * frequency, giving a resolution of a few tens of nanoseconds, is quite
 * nice.  It makes this computation very precise (27-28 bits typically),
 * which is optimistic considering the stability of most processor clock
 * oscillators and the precision with which the timebase frequency
 * is measured, but it does no harm.
 */
unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
{
	unsigned mlt=0, tmp, err;

	/* No concern for performance, it's done once: use a stupid
	 * but safe and compact method to find the multiplier.
	 */
	for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
		if (mulhwu(inscale, mlt|tmp) < outscale)
			mlt |= tmp;
	}

	/* We might still be off by 1 for the best approximation.
	 * A side effect of this is that if outscale is too large
	 * the returned value will be zero.
	 * Many corner cases have been checked and seem to work,
	 * some might have been forgotten in the test however.
	 */
	err = inscale * (mlt+1);
	if (err <= inscale/2)
		mlt++;
	return mlt;
}
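
/*
 * Worked example (illustrative): the result approximates
 * outscale * 2^32 / inscale, so time_init()'s
 * mulhwu_scale_factor(ppc_tb_freq, 1000000) with a 512 MHz timebase
 * yields about 1000000 * 2^32 / 512000000 == 0x800000, and
 * mulhwu(ticks, tb_to_us) then converts timebase ticks straight
 * to microseconds with one multiply-high.
 */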
/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low  = ((u64)y << 32) + z;
}
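
/*
 * Usage example (illustrative): iSeries_tb_recal() above computes the
 * 0.64 fixed-point "xsec per timebase tick" fraction with
 *
 *	div128_by_32(XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres);
 *	tb_to_xs = divres.result_low;
 *
 * i.e. the low 64 bits of (2^20 * 2^64) / tb_ticks_per_sec, so that
 * mulhdu(tb_ticks, tb_to_xs) gives elapsed time in 1/2^20 s units.
 */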