/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time.
 * - for astronomical applications: add a new function to get
 *   non ambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/irq_work.h>
#include <asm/trace.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/cputime.h>

/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/clocksource.h>
static cycle_t rtc_read(struct clocksource *);
static struct clocksource clocksource_rtc = {
	.name         = "rtc",
	.rating       = 400,
	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask         = CLOCKSOURCE_MASK(64),
	.read         = rtc_read,
};

static cycle_t timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
	.name         = "timebase",
	.rating       = 400,
	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask         = CLOCKSOURCE_MASK(64),
	.read         = timebase_read,
};

#define DECREMENTER_MAX	0x7fffffff

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev);
static void decrementer_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *dev);

static struct clock_event_device decrementer_clockevent = {
	.name           = "decrementer",
	.rating         = 200,
	.irq            = 0,
	.set_next_event = decrementer_set_next_event,
	.set_mode       = decrementer_set_mode,
	.features       = CLOCK_EVT_FEAT_ONESHOT,
};

DEFINE_PER_CPU(u64, decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);
#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif
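/*
 * Worked example (illustrative numbers): an xsec is 1/XSEC_PER_SEC
 * (i.e. 1/2^20) of a second, so SCALE_XSEC(xsec, max) computes
 * xsec * max / 2^20. On 32-bit, (xsec << 12) turns the 20-bit xsec
 * fraction into a 0.32 binary fraction of a second, and mulhwu()
 * returns the high word of the 32x32 product, i.e. the >> 32.
 * Either way, SCALE_XSEC(XSEC_PER_SEC / 2, 1000) comes out as 500.
 */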
unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static u64 boot_tb __read_mostly;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_usec_factor;
EXPORT_SYMBOL(__cputime_usec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);

DEFINE_PER_CPU(unsigned long, cputime_last_delta);
DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);

cputime_t cputime_one_jiffy;

void (*dtl_consumer)(struct dtl_entry *, u64);
static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
	__cputime_jiffies_factor = res.result_low;
	div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
	__cputime_usec_factor = res.result_low;
	div128_by_32(1, 0, tb_ticks_per_sec, &res);
	__cputime_sec_factor = res.result_low;
	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
	__cputime_clockt_factor = res.result_low;
}
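/*
 * Worked example (illustrative numbers, not a real calibration): with
 * tb_ticks_per_sec = 512000000, div128_by_32(1000000, 0, 512000000, &res)
 * computes (10^6 * 2^64) / (512 * 10^6), so __cputime_usec_factor becomes
 * the 0.64 fixed-point representation of 1/512. Multiplying a tick count
 * by it and keeping the high 64 bits of the product then yields
 * microseconds, which is how these factors are meant to be used.
 */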
/*
 * Read the SPURR on systems that have it, otherwise the PURR,
 * or if that doesn't exist return the timebase value passed in.
 */
static u64 read_spurr(u64 tb)
{
	if (cpu_has_feature(CPU_FTR_SPURR))
		return mfspr(SPRN_SPURR);
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return tb;
}

#ifdef CONFIG_PPC_SPLPAR

/*
 * Scan the dispatch trace log and count up the stolen time.
 * Should be called with interrupts disabled.
 */
static u64 scan_dispatch_log(u64 stop_tb)
{
	u64 i = local_paca->dtl_ridx;
	struct dtl_entry *dtl = local_paca->dtl_curr;
	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
	struct lppaca *vpa = local_paca->lppaca_ptr;
	u64 tb_delta;
	u64 stolen = 0;
	u64 dtb;

	if (!dtl)
		return 0;

	if (i == vpa->dtl_idx)
		return 0;

	while (i < vpa->dtl_idx) {
		if (dtl_consumer)
			dtl_consumer(dtl, i);
		dtb = dtl->timebase;
		tb_delta = dtl->enqueue_to_dispatch_time +
			dtl->ready_to_enqueue_time;
		barrier();
		if (i + N_DISPATCH_LOG < vpa->dtl_idx) {
			/* buffer has overflowed */
			i = vpa->dtl_idx - N_DISPATCH_LOG;
			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
			continue;
		}
		if (dtb > stop_tb)
			break;
		stolen += tb_delta;
		++i;
		++dtl;
		if (dtl == dtl_end)
			dtl = local_paca->dispatch_log;
	}
	local_paca->dtl_ridx = i;
	local_paca->dtl_curr = dtl;
	return stolen;
}
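/*
 * Note on the overflow check above: the hypervisor writes entries into
 * an N_DISPATCH_LOG-deep ring while we consume at index i. The entry
 * fields are read first, and barrier() (a compiler barrier) keeps those
 * reads from being reordered past the re-read of vpa->dtl_idx; if the
 * producer has meanwhile advanced more than N_DISPATCH_LOG entries past
 * i, the slot we just read may have been overwritten, so the value is
 * discarded and i is resynced to the oldest entry still guaranteed valid.
 */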
/*
 * Accumulate stolen time by scanning the dispatch trace log.
 * Called on entry from user mode.
 */
void accumulate_stolen_time(void)
{
	u64 sst, ust;
	u8 save_soft_enabled = local_paca->soft_enabled;

	/* We are called early in the exception entry, before
	 * soft/hard_enabled are sync'ed to the expected state
	 * for the exception. We are hard disabled but the PACA
	 * needs to reflect that so various debug stuff doesn't
	 * complain
	 */
	local_paca->soft_enabled = 0;

	sst = scan_dispatch_log(local_paca->starttime_user);
	ust = scan_dispatch_log(local_paca->starttime);
	local_paca->system_time -= sst;
	local_paca->user_time -= ust;
	local_paca->stolen_time += ust + sst;

	local_paca->soft_enabled = save_soft_enabled;
}

static inline u64 calculate_stolen_time(u64 stop_tb)
{
	u64 stolen = 0;

	if (get_paca()->dtl_ridx != get_paca()->lppaca_ptr->dtl_idx) {
		stolen = scan_dispatch_log(stop_tb);
		get_paca()->system_time -= stolen;
	}

	stolen += get_paca()->stolen_time;
	get_paca()->stolen_time = 0;
	return stolen;
}

#else /* CONFIG_PPC_SPLPAR */
static inline u64 calculate_stolen_time(u64 stop_tb)
{
	return 0;
}

#endif /* CONFIG_PPC_SPLPAR */
/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
void account_system_vtime(struct task_struct *tsk)
{
	u64 now, nowscaled, delta, deltascaled;
	unsigned long flags;
	u64 stolen, udelta, sys_scaled, user_scaled;

	local_irq_save(flags);
	now = mftb();
	nowscaled = read_spurr(now);
	get_paca()->system_time += now - get_paca()->starttime;
	get_paca()->starttime = now;
	deltascaled = nowscaled - get_paca()->startspurr;
	get_paca()->startspurr = nowscaled;

	stolen = calculate_stolen_time(now);

	delta = get_paca()->system_time;
	get_paca()->system_time = 0;
	udelta = get_paca()->user_time - get_paca()->utime_sspurr;
	get_paca()->utime_sspurr = get_paca()->user_time;

	/*
	 * Because we don't read the SPURR on every kernel entry/exit,
	 * deltascaled includes both user and system SPURR ticks.
	 * Apportion these ticks to system SPURR ticks and user
	 * SPURR ticks in the same ratio as the system time (delta)
	 * and user time (udelta) values obtained from the timebase
	 * over the same interval. The system ticks get accounted here;
	 * the user ticks get saved up in paca->user_time_scaled to be
	 * used by account_process_tick.
	 */
	sys_scaled = delta;
	user_scaled = udelta;
	if (deltascaled != delta + udelta) {
		if (udelta) {
			sys_scaled = deltascaled * delta / (delta + udelta);
			user_scaled = deltascaled - sys_scaled;
		} else {
			sys_scaled = deltascaled;
		}
	}
	get_paca()->user_time_scaled += user_scaled;

	if (in_interrupt() || idle_task(smp_processor_id()) != tsk) {
		account_system_time(tsk, 0, delta, sys_scaled);
		if (stolen)
			account_steal_time(stolen);
	} else {
		account_idle_time(delta + stolen);
	}
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(account_system_vtime);
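/*
 * Worked example of the apportioning above (illustrative numbers): if
 * the timebase says delta = 300 and udelta = 100 ticks, but the SPURR
 * advanced by deltascaled = 1000 over the same interval, then
 * sys_scaled = 1000 * 300 / 400 = 750 and user_scaled = 250, i.e. the
 * SPURR ticks are split 3:1, in the same ratio as the timebase ticks.
 */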
/*
 * Transfer the user and system times accumulated in the paca
 * by the exception entry and exit code to the generic process
 * user and system time records.
 * Must be called with interrupts disabled.
 * Assumes that account_system_vtime() has been called recently
 * (i.e. since the last entry from usermode) so that
 * get_paca()->user_time_scaled is up to date.
 */
void account_process_tick(struct task_struct *tsk, int user_tick)
{
	cputime_t utime, utimescaled;

	utime = get_paca()->user_time;
	utimescaled = get_paca()->user_time_scaled;
	get_paca()->user_time = 0;
	get_paca()->user_time_scaled = 0;
	get_paca()->utime_sspurr = 0;
	account_user_time(tsk, utime, utimescaled);
}

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#endif

void __delay(unsigned long loops)
{
	unsigned long start;
	int diff;

	if (__USE_RTC()) {
		start = get_rtcl();
		do {
			/* the RTCL register wraps at 1000000000 */
			diff = get_rtcl() - start;
			if (diff < 0)
				diff += 1000000000;
		} while (diff < loops);
	} else {
		start = get_tbl();
		while (get_tbl() - start < loops)
			HMT_low();
		HMT_medium();
	}
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif
#ifdef CONFIG_IRQ_WORK

/*
 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
 */
#ifdef CONFIG_PPC64
static inline unsigned long test_irq_work_pending(void)
{
	unsigned long x;

	asm volatile("lbz %0,%1(13)"
	: "=r" (x)
	: "i" (offsetof(struct paca_struct, irq_work_pending)));
	return x;
}

static inline void set_irq_work_pending_flag(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (1),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}

static inline void clear_irq_work_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (0),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}

#else /* 32-bit */

DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()	__get_cpu_var(irq_work_pending) = 1
#define test_irq_work_pending()		__get_cpu_var(irq_work_pending)
#define clear_irq_work_pending()	__get_cpu_var(irq_work_pending) = 0

#endif /* 32 vs 64 bit */
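/*
 * Note: on 64-bit, r13 always holds this CPU's PACA pointer, so the
 * lbz/stb instructions above read and write the irq_work_pending byte
 * at a constant offset from r13 in a single instruction, without going
 * through the generic per-cpu accessors.
 */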
void arch_irq_work_raise(void)
{
	preempt_disable();
	set_irq_work_pending_flag();
	set_dec(1);
	preempt_enable();
}

#else  /* CONFIG_IRQ_WORK */

#define test_irq_work_pending()	0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */
/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs * regs)
{
	struct pt_regs *old_regs;
	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
	struct clock_event_device *evt = &__get_cpu_var(decrementers);

	/* Ensure a positive value is written to the decrementer, or else
	 * some CPUs will continue to take decrementer exceptions.
	 */
	set_dec(DECREMENTER_MAX);

	/* Some implementations of hotplug will get timer interrupts while
	 * offline, just ignore these
	 */
	if (!cpu_online(smp_processor_id()))
		return;

	/* Conditionally hard-enable interrupts now that the DEC has been
	 * bumped to its maximum value
	 */
	may_hard_irq_enable();

	trace_timer_interrupt_entry(regs);

	__get_cpu_var(irq_stat).timer_irqs++;

#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	old_regs = set_irq_regs(regs);
	irq_enter();

	if (test_irq_work_pending()) {
		clear_irq_work_pending();
		irq_work_run();
	}

	*next_tb = ~(u64)0;
	if (evt->event_handler)
		evt->event_handler(evt);

#ifdef CONFIG_PPC64
	/* collect purr register values often, for accurate calculations */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		cu->current_tb = mfspr(SPRN_PURR);
	}
#endif

	irq_exit();
	set_irq_regs(old_regs);
	trace_timer_interrupt_exit(regs);
}
#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
	/* Disable the decrementer, so that it doesn't interfere
	 * with suspending.
	 */

	set_dec(DECREMENTER_MAX);
	local_irq_disable();
	set_dec(DECREMENTER_MAX);
}

static void generic_suspend_enable_irqs(void)
{
	local_irq_enable();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
	if (ppc_md.suspend_disable_irqs)
		ppc_md.suspend_disable_irqs();
	generic_suspend_disable_irqs();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
	generic_suspend_enable_irqs();
	if (ppc_md.suspend_enable_irqs)
		ppc_md.suspend_enable_irqs();
}
#endif
/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
	if (__USE_RTC())
		return get_rtc();
	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}
static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const unsigned int *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		fp = of_get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = of_read_ulong(fp, cells);
		}

		of_node_put(cpu);
	}

	return found;
}

/* should become __cpuinit when secondary_cpu_time_init also is */
void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	/* Enable decrementer interrupt */
	mtspr(SPRN_TCR, TCR_DIE);
#endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */
}

void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {
		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {
		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}
}
int update_persistent_clock(struct timespec now)
{
	struct rtc_time tm;

	if (!ppc_md.set_rtc_time)
		return 0;

	to_tm(now.tv_sec + 1 + timezone_offset, &tm);
	tm.tm_year -= 1900;
	tm.tm_mon -= 1;

	return ppc_md.set_rtc_time(&tm);
}

static void __read_persistent_clock(struct timespec *ts)
{
	struct rtc_time tm;
	static int first = 1;

	ts->tv_nsec = 0;
	/* XXX this is a little fragile but will work okay in the short term */
	if (first) {
		first = 0;
		if (ppc_md.time_init)
			timezone_offset = ppc_md.time_init();

		/* get_boot_time() isn't guaranteed to be safe to call late */
		if (ppc_md.get_boot_time) {
			ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
			return;
		}
	}
	if (!ppc_md.get_rtc_time) {
		ts->tv_sec = 0;
		return;
	}
	ppc_md.get_rtc_time(&tm);

	ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
			    tm.tm_hour, tm.tm_min, tm.tm_sec);
}

void read_persistent_clock(struct timespec *ts)
{
	__read_persistent_clock(ts);

	/* Sanitize it in case real time clock is set below EPOCH */
	if (ts->tv_sec < 0) {
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}
}
/* clocksource code */
static cycle_t rtc_read(struct clocksource *cs)
{
	return (cycle_t)get_rtc();
}

static cycle_t timebase_read(struct clocksource *cs)
{
	return (cycle_t)get_tb();
}

void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
			struct clocksource *clock, u32 mult)
{
	u64 new_tb_to_xs, new_stamp_xsec;
	u32 frac_sec;

	if (clock != &clocksource_timebase)
		return;

	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_mb();

	/* 19342813113834067 ~= 2^(20+64) / 1e9 */
	new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift);
	new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
	do_div(new_stamp_xsec, 1000000000);
	new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;

	BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC);
	/* this is tv_nsec / 1e9 as a 0.32 fraction */
	frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32;

	/*
	 * tb_update_count is used to allow the userspace gettimeofday code
	 * to assure itself that it sees a consistent view of the tb_to_xs and
	 * stamp_xsec variables. It reads the tb_update_count, then reads
	 * tb_to_xs and stamp_xsec and then reads tb_update_count again. If
	 * the two values of tb_update_count match and are even then the
	 * tb_to_xs and stamp_xsec values are consistent. If not, then it
	 * loops back and reads them again until this criterion is met.
	 * We expect the caller to have done the first increment of
	 * vdso_data->tb_update_count already.
	 */
	vdso_data->tb_orig_stamp = clock->cycle_last;
	vdso_data->stamp_xsec = new_stamp_xsec;
	vdso_data->tb_to_xs = new_tb_to_xs;
	vdso_data->wtom_clock_sec = wtm->tv_sec;
	vdso_data->wtom_clock_nsec = wtm->tv_nsec;
	vdso_data->stamp_xtime = *wall_time;
	vdso_data->stamp_sec_fraction = frac_sec;
	smp_wmb();
	++(vdso_data->tb_update_count);
}
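/*
 * Reader side of the update protocol above, as a minimal sketch (the
 * real reader lives in the vDSO, in assembly):
 *
 *	do {
 *		seq = vdso_data->tb_update_count;
 *		smp_rmb();
 *		... read tb_orig_stamp, tb_to_xs, stamp_xsec ...
 *		smp_rmb();
 *	} while ((seq & 1) || seq != vdso_data->tb_update_count);
 *
 * i.e. the classic seqcount pattern: retry while an update is in
 * flight (odd count) or one completed during the reads.
 */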
void update_vsyscall_tz(void)
{
	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_mb();
	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
	smp_mb();
	++vdso_data->tb_update_count;
}

static void __init clocksource_init(void)
{
	struct clocksource *clock;

	if (__USE_RTC())
		clock = &clocksource_rtc;
	else
		clock = &clocksource_timebase;

	if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
		printk(KERN_ERR "clocksource: %s is already registered\n",
		       clock->name);
		return;
	}

	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
	       clock->name, clock->mult, clock->shift);
}
static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev)
{
	__get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
	set_dec(evt);
	return 0;
}

static void decrementer_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *dev)
{
	if (mode != CLOCK_EVT_MODE_ONESHOT)
		decrementer_set_next_event(DECREMENTER_MAX, dev);
}

static void register_decrementer_clockevent(int cpu)
{
	struct clock_event_device *dec = &per_cpu(decrementers, cpu);

	*dec = decrementer_clockevent;
	dec->cpumask = cpumask_of(cpu);

	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
		    dec->name, dec->mult, dec->shift, cpu);

	clockevents_register_device(dec);
}

static void __init init_decrementer_clockevent(void)
{
	int cpu = smp_processor_id();

	clockevents_calc_mult_shift(&decrementer_clockevent, ppc_tb_freq, 4);

	decrementer_clockevent.max_delta_ns =
		clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
	decrementer_clockevent.min_delta_ns =
		clockevent_delta2ns(2, &decrementer_clockevent);

	register_decrementer_clockevent(cpu);
}

void secondary_cpu_time_init(void)
{
	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* FIXME: Should make an unrelated change to move the
	 * snapshot_timebase call here! */
	register_decrementer_clockevent(smp_processor_id());
}
/* This function is only called on the boot processor */
void __init time_init(void)
{
	struct div_result res;
	u64 scale;
	unsigned shift;

	if (__USE_RTC()) {
		/* 601 processor: dec counts down by 128 every 128ns */
		ppc_tb_freq = 1000000000;
	} else {
		/* Normal PowerPC with timebase register */
		ppc_md.calibrate_decr();
		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
		printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
	}

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	calc_cputime_factors();
	setup_cputime_one_jiffy();

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;
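
	/*
	 * Worked example (illustrative numbers): with tb_ticks_per_sec =
	 * 512000000, 1e9 * 2^64 / tb_ticks_per_sec is 1.953125 * 2^64, so
	 * res.result_high = 1 and one shift is needed: tb_to_ns_scale
	 * becomes the 0.64 representation of 1.953125 / 2, and
	 * tb_to_ns_shift = 1. sched_clock() then computes
	 * mulhdu(ticks, scale) << 1, i.e. ticks * 1.953125, which is the
	 * right number of nanoseconds per 512 MHz timebase tick.
	 */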

	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
	boot_tb = get_tb_or_rtc();

	/* If platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
	}

	vdso_data->tb_update_count = 0;
	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;

	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* Register the clocksource */
	clocksource_init();

	init_decrementer_clockevent();
}
#define FEBRUARY	2
#define STARTOFTIME	1970
#define SECDAY		86400L
#define SECYR		(SECDAY * 365)
#define leapyear(year)		((year) % 4 == 0 && \
				 ((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a)		(leapyear(a) ? 366 : 365)
#define days_in_month(a)	(month_days[(a) - 1])

static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time * tm)
{
	int leapsToDate;
	int lastYear;
	int day;
	int MonthOffset[] = { 0, 31, 59, 90, 120, 151,
			      181, 212, 243, 273, 304, 334 };

	lastYear = tm->tm_year - 1;

	/*
	 * Number of leap corrections to apply up to end of last year
	 */
	leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

	/*
	 * This year is a leap year if it is divisible by 4 except when it is
	 * divisible by 100 unless it is divisible by 400
	 *
	 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
	 */
	day = tm->tm_mon > 2 && leapyear(tm->tm_year);

	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
		   tm->tm_mday;

	tm->tm_wday = day % 7;
}
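/*
 * Worked example (illustrative date): for 1 Jan 2000, lastYear = 1999,
 * leapsToDate = 499 - 19 + 4 = 484, and day = 0 + 1999*365 + 484 + 0 + 1
 * = 730120; 730120 % 7 = 6, i.e. Saturday (tm_wday counts 0 = Sunday),
 * which is indeed what 1 Jan 2000 was.
 */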
void to_tm(int tim, struct rtc_time * tm)
{
	register int i;
	register long hms, day;

	day = tim / SECDAY;
	hms = tim % SECDAY;

	/* Hours, minutes, seconds are easy */
	tm->tm_hour = hms / 3600;
	tm->tm_min = (hms % 3600) / 60;
	tm->tm_sec = (hms % 3600) % 60;

	/* Number of years in days */
	for (i = STARTOFTIME; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	tm->tm_year = i;

	/* Number of months in days left */
	if (leapyear(tm->tm_year))
		days_in_month(FEBRUARY) = 29;
	for (i = 1; day >= days_in_month(i); i++)
		day -= days_in_month(i);
	days_in_month(FEBRUARY) = 28;
	tm->tm_mon = i;

	/* Days are what is left over (+1) from all that. */
	tm->tm_mday = day + 1;

	/*
	 * Determine the day of week
	 */
	GregorianDay(tm);
}
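/*
 * Worked example: to_tm(0, &tm) yields 1 Jan 1970 00:00:00 with
 * tm_wday = 4 (Thursday). Note that tm_year here is the full year and
 * tm_mon is 1-based, unlike struct tm; update_persistent_clock() above
 * compensates with its -= 1900 and -= 1 adjustments.
 */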
/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low  = ((u64)y << 32) + z;
}
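/*
 * div128_by_32() above is schoolbook long division in base 2^32: the
 * dividend is split into four 32-bit digits (a, b, c, d) and divided
 * digit by digit, with do_div() returning each remainder to carry into
 * the next step. Worked example: div128_by_32(1, 0, 3, &dr), i.e.
 * 2^64 / 3, gives dr->result_high = 0 and
 * dr->result_low = 0x5555555555555555.
 */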
/* We don't need to calibrate delay, we use the CPU timebase for that */
void calibrate_delay(void)
{
	/* Some generic code (such as spinlock debug) use loops_per_jiffy
	 * as the number of __delay(1) in a jiffy, so make it so
	 */
	loops_per_jiffy = tb_ticks_per_jiffy;
}

static int __init rtc_init(void)
{
	struct platform_device *pdev;

	if (!ppc_md.get_rtc_time)
		return -ENODEV;

	pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	return 0;
}

module_init(rtc_init);