/* $Id: time.c,v 1.42 2002/01/23 14:33:55 davem Exp $
 * time.c: UltraSparc timer and TOD clock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 *
 * Based largely on code which is:
 *
 * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/mc146818rtc.h>
#include <linux/delay.h>
#include <linux/profile.h>
#include <linux/bcd.h>
#include <linux/jiffies.h>
#include <linux/cpufreq.h>
#include <linux/percpu.h>
#include <linux/miscdevice.h>
#include <linux/rtc.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>

#include <asm/oplib.h>
#include <asm/mostek.h>
#include <asm/timer.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/of_device.h>
#include <asm/starfire.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/cpudata.h>
#include <asm/uaccess.h>
#include <asm/irq_regs.h>

DEFINE_SPINLOCK(mostek_lock);
DEFINE_SPINLOCK(rtc_lock);

void __iomem *mstk48t02_regs = NULL;
#ifdef CONFIG_PCI
unsigned long ds1287_regs = 0UL;
#endif

static void __iomem *mstk48t08_regs;
static void __iomem *mstk48t59_regs;

static int set_rtc_mmss(unsigned long);

#define TICK_PRIV_BIT	(1UL << 63)
#define TICKCMP_IRQ_BIT	(1UL << 63)

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->u_regs[UREG_RETPC];
	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif
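
/* Three flavors of timer hardware are handled below, each supplying a
 * sparc64_tick_ops vector: the privileged %tick register, the %stick
 * register (accessed as %asr24/%asr25), and the Hummingbird
 * (Ultra-IIe) STICK, which lives in I/O space.
 */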
static void tick_disable_protection(void)
{
	/* Set things up so user can access tick register for profiling
	 * purposes.  Also workaround BB_ERRATA_1 by doing a dummy
	 * read back of %tick after writing it.
	 */
	__asm__ __volatile__(
	" ba,pt %%xcc, 1f\n"
	" nop\n"
	" .align 64\n"
	"1: rd %%tick, %%g2\n"
	" add %%g2, 6, %%g2\n"
	" andn %%g2, %0, %%g2\n"
	" wrpr %%g2, 0, %%tick\n"
	" rdpr %%tick, %%g0"
	: /* no outputs */
	: "r" (TICK_PRIV_BIT)
	: "g2");
}

static void tick_disable_irq(void)
{
	__asm__ __volatile__(
	" ba,pt %%xcc, 1f\n"
	" nop\n"
	" .align 64\n"
	"1: wr %0, 0x0, %%tick_cmpr\n"
	" rd %%tick_cmpr, %%g0"
	: /* no outputs */
	: "r" (TICKCMP_IRQ_BIT));
}

static void tick_init_tick(void)
{
	tick_disable_protection();
	tick_disable_irq();
}

static unsigned long tick_get_tick(void)
{
	unsigned long ret;

	__asm__ __volatile__("rd %%tick, %0\n\t"
			     "mov %0, %0"
			     : "=r" (ret));

	return ret & ~TICK_PRIV_BIT;
}

static int tick_add_compare(unsigned long adj)
{
	unsigned long orig_tick, new_tick, new_compare;

	__asm__ __volatile__("rd %%tick, %0"
			     : "=r" (orig_tick));

	orig_tick &= ~TICKCMP_IRQ_BIT;

	/* Workaround for Spitfire Errata (#54 I think??), I discovered
	 * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
	 * number 103640.
	 *
	 * On Blackbird writes to %tick_cmpr can fail, the
	 * workaround seems to be to execute the wr instruction
	 * at the start of an I-cache line, and perform a dummy
	 * read back from %tick_cmpr right after writing to it. -DaveM
	 */
	__asm__ __volatile__("ba,pt %%xcc, 1f\n\t"
			     " add %1, %2, %0\n\t"
			     ".align 64\n"
			     "1:\n\t"
			     "wr %0, 0, %%tick_cmpr\n\t"
			     "rd %%tick_cmpr, %%g0\n\t"
			     : "=r" (new_compare)
			     : "r" (orig_tick), "r" (adj));

	__asm__ __volatile__("rd %%tick, %0"
			     : "=r" (new_tick));
	new_tick &= ~TICKCMP_IRQ_BIT;

	return ((long)(new_tick - (orig_tick+adj))) > 0L;
}

static unsigned long tick_add_tick(unsigned long adj)
{
	unsigned long new_tick;

	/* Also need to handle Blackbird bug here too. */
	__asm__ __volatile__("rd %%tick, %0\n\t"
			     "add %0, %1, %0\n\t"
			     "wrpr %0, 0, %%tick\n\t"
			     : "=&r" (new_tick)
			     : "r" (adj));

	return new_tick;
}

static struct sparc64_tick_ops tick_operations __read_mostly = {
	.name		= "tick",
	.init_tick	= tick_init_tick,
	.disable_irq	= tick_disable_irq,
	.get_tick	= tick_get_tick,
	.add_tick	= tick_add_tick,
	.add_compare	= tick_add_compare,
	.softint_mask	= 1UL << 0,
};

struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations;
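
/* %stick and %stick_cmpr are accessed via %asr24 and %asr25. */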
static void stick_disable_irq(void)
{
	__asm__ __volatile__(
	"wr %0, 0x0, %%asr25"
	: /* no outputs */
	: "r" (TICKCMP_IRQ_BIT));
}

static void stick_init_tick(void)
{
	/* Writes to the %tick and %stick register are not
	 * allowed on sun4v.  The Hypervisor controls that
	 * bit, per-strand.
	 */
	if (tlb_type != hypervisor) {
		tick_disable_protection();
		tick_disable_irq();

		/* Let the user get at STICK too. */
		__asm__ __volatile__(
		" rd %%asr24, %%g2\n"
		" andn %%g2, %0, %%g2\n"
		" wr %%g2, 0, %%asr24"
		: /* no outputs */
		: "r" (TICK_PRIV_BIT)
		: "g1", "g2");
	}

	stick_disable_irq();
}

static unsigned long stick_get_tick(void)
{
	unsigned long ret;

	__asm__ __volatile__("rd %%asr24, %0"
			     : "=r" (ret));

	return ret & ~TICK_PRIV_BIT;
}

static unsigned long stick_add_tick(unsigned long adj)
{
	unsigned long new_tick;

	__asm__ __volatile__("rd %%asr24, %0\n\t"
			     "add %0, %1, %0\n\t"
			     "wr %0, 0, %%asr24\n\t"
			     : "=&r" (new_tick)
			     : "r" (adj));

	return new_tick;
}

static int stick_add_compare(unsigned long adj)
{
	unsigned long orig_tick, new_tick;

	__asm__ __volatile__("rd %%asr24, %0"
			     : "=r" (orig_tick));
	orig_tick &= ~TICKCMP_IRQ_BIT;

	__asm__ __volatile__("wr %0, 0, %%asr25"
			     : /* no outputs */
			     : "r" (orig_tick + adj));

	__asm__ __volatile__("rd %%asr24, %0"
			     : "=r" (new_tick));
	new_tick &= ~TICKCMP_IRQ_BIT;

	return ((long)(new_tick - (orig_tick+adj))) > 0L;
}

static struct sparc64_tick_ops stick_operations __read_mostly = {
	.name		= "stick",
	.init_tick	= stick_init_tick,
	.disable_irq	= stick_disable_irq,
	.get_tick	= stick_get_tick,
	.add_tick	= stick_add_tick,
	.add_compare	= stick_add_compare,
	.softint_mask	= 1UL << 16,
};

/* On Hummingbird the STICK/STICK_CMPR register is implemented
 * in I/O space.  There are two 64-bit registers each, the
 * first holds the low 32-bits of the value and the second holds
 * the high 32-bits.
 *
 * Since STICK is constantly updating, we have to access it carefully.
 *
 * The sequence we use to read is:
 * 1) read high
 * 2) read low
 * 3) read high again, if it rolled re-read both low and high again.
 *
 * Writing STICK safely is also tricky:
 * 1) write low to zero
 * 2) write high
 * 3) write low
 */
#define HBIRD_STICKCMP_ADDR	0x1fe0000f060UL
#define HBIRD_STICK_ADDR	0x1fe0000f070UL

static unsigned long __hbird_read_stick(void)
{
	unsigned long ret, tmp1, tmp2, tmp3;
	unsigned long addr = HBIRD_STICK_ADDR+8;

	__asm__ __volatile__("ldxa [%1] %5, %2\n"
			     "1:\n\t"
			     "sub %1, 0x8, %1\n\t"
			     "ldxa [%1] %5, %3\n\t"
			     "add %1, 0x8, %1\n\t"
			     "ldxa [%1] %5, %4\n\t"
			     "cmp %4, %2\n\t"
			     "bne,a,pn %%xcc, 1b\n\t"
			     " mov %4, %2\n\t"
			     "sllx %4, 32, %4\n\t"
			     "or %3, %4, %0\n\t"
			     : "=&r" (ret), "=&r" (addr),
			       "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3)
			     : "i" (ASI_PHYS_BYPASS_EC_E), "1" (addr));

	return ret;
}

static void __hbird_write_stick(unsigned long val)
{
	unsigned long low = (val & 0xffffffffUL);
	unsigned long high = (val >> 32UL);
	unsigned long addr = HBIRD_STICK_ADDR;

	__asm__ __volatile__("stxa %%g0, [%0] %4\n\t"
			     "add %0, 0x8, %0\n\t"
			     "stxa %3, [%0] %4\n\t"
			     "sub %0, 0x8, %0\n\t"
			     "stxa %2, [%0] %4"
			     : "=&r" (addr)
			     : "0" (addr), "r" (low), "r" (high),
			       "i" (ASI_PHYS_BYPASS_EC_E));
}

static void __hbird_write_compare(unsigned long val)
{
	unsigned long low = (val & 0xffffffffUL);
	unsigned long high = (val >> 32UL);
	unsigned long addr = HBIRD_STICKCMP_ADDR + 0x8UL;

	__asm__ __volatile__("stxa %3, [%0] %4\n\t"
			     "sub %0, 0x8, %0\n\t"
			     "stxa %2, [%0] %4"
			     : "=&r" (addr)
			     : "0" (addr), "r" (low), "r" (high),
			       "i" (ASI_PHYS_BYPASS_EC_E));
}

static void hbtick_disable_irq(void)
{
	__hbird_write_compare(TICKCMP_IRQ_BIT);
}

static void hbtick_init_tick(void)
{
	tick_disable_protection();

	/* XXX This seems to be necessary to 'jumpstart' Hummingbird
	 * XXX into actually sending STICK interrupts.  I think because
	 * XXX of how we store %tick_cmpr in head.S this somehow resets the
	 * XXX {TICK + STICK} interrupt mux.  -DaveM
	 */
	__hbird_write_stick(__hbird_read_stick());

	hbtick_disable_irq();
}

static unsigned long hbtick_get_tick(void)
{
	return __hbird_read_stick() & ~TICK_PRIV_BIT;
}

static unsigned long hbtick_add_tick(unsigned long adj)
{
	unsigned long val;

	val = __hbird_read_stick() + adj;
	__hbird_write_stick(val);

	return val;
}

static int hbtick_add_compare(unsigned long adj)
{
	unsigned long val = __hbird_read_stick();
	unsigned long val2;

	val &= ~TICKCMP_IRQ_BIT;
	val += adj;
	__hbird_write_compare(val);

	val2 = __hbird_read_stick() & ~TICKCMP_IRQ_BIT;

	return ((long)(val2 - val)) > 0L;
}

static struct sparc64_tick_ops hbtick_operations __read_mostly = {
	.name		= "hbtick",
	.init_tick	= hbtick_init_tick,
	.disable_irq	= hbtick_disable_irq,
	.get_tick	= hbtick_get_tick,
	.add_tick	= hbtick_add_tick,
	.add_compare	= hbtick_add_compare,
	.softint_mask	= 1UL << 0,
};
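
/* Scaling factor used by sched_clock() to convert raw tick counts to
 * nanoseconds; computed from the probed clock rate in time_init().
 */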
static unsigned long timer_ticks_per_nsec_quotient __read_mostly;

#define TICK_SIZE (tick_nsec / 1000)

#define USEC_AFTER	500000
#define USEC_BEFORE	500000

static void sync_cmos_clock(unsigned long dummy);

static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);

static void sync_cmos_clock(unsigned long dummy)
{
	struct timeval now, next;
	int fail = 1;

	/*
	 * If we have an externally synchronized Linux clock, then update
	 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
	 * called as close as possible to 500 ms before the new second starts.
	 * This code is run on a timer.  If the clock is set, that timer
	 * may not expire at the correct time.  Thus, we adjust...
	 */
	if (!ntp_synced())
		/*
		 * Not synced, exit, do not restart a timer (if one is
		 * running, let it run out).
		 */
		return;

	do_gettimeofday(&now);
	if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
	    now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
		fail = set_rtc_mmss(now.tv_sec);

	next.tv_usec = USEC_AFTER - now.tv_usec;
	if (next.tv_usec <= 0)
		next.tv_usec += USEC_PER_SEC;

	if (!fail)
		next.tv_sec = 659;
	else
		next.tv_sec = 0;

	if (next.tv_usec >= USEC_PER_SEC) {
		next.tv_sec++;
		next.tv_usec -= USEC_PER_SEC;
	}
	mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
}

void notify_arch_cmos_timer(void)
{
	mod_timer(&sync_cmos_timer, jiffies + 1);
}

/* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */
static void __init kick_start_clock(void)
{
	void __iomem *regs = mstk48t02_regs;
	u8 sec, tmp;
	int i, count;

	prom_printf("CLOCK: Clock was stopped. Kick start ");

	spin_lock_irq(&mostek_lock);

	/* Turn on the kick start bit to start the oscillator. */
	tmp = mostek_read(regs + MOSTEK_CREG);
	tmp |= MSTK_CREG_WRITE;
	mostek_write(regs + MOSTEK_CREG, tmp);
	tmp = mostek_read(regs + MOSTEK_SEC);
	tmp &= ~MSTK_STOP;
	mostek_write(regs + MOSTEK_SEC, tmp);
	tmp = mostek_read(regs + MOSTEK_HOUR);
	tmp |= MSTK_KICK_START;
	mostek_write(regs + MOSTEK_HOUR, tmp);
	tmp = mostek_read(regs + MOSTEK_CREG);
	tmp &= ~MSTK_CREG_WRITE;
	mostek_write(regs + MOSTEK_CREG, tmp);

	spin_unlock_irq(&mostek_lock);

	/* Delay to allow the clock oscillator to start. */
	sec = MSTK_REG_SEC(regs);
	for (i = 0; i < 3; i++) {
		while (sec == MSTK_REG_SEC(regs))
			for (count = 0; count < 100000; count++)
				/* nothing */ ;
		prom_printf(".");
		sec = MSTK_REG_SEC(regs);
	}
	prom_printf("\n");

	spin_lock_irq(&mostek_lock);

	/* Turn off kick start and set a "valid" time and date. */
	tmp = mostek_read(regs + MOSTEK_CREG);
	tmp |= MSTK_CREG_WRITE;
	mostek_write(regs + MOSTEK_CREG, tmp);
	tmp = mostek_read(regs + MOSTEK_HOUR);
	tmp &= ~MSTK_KICK_START;
	mostek_write(regs + MOSTEK_HOUR, tmp);
	MSTK_SET_REG_SEC(regs,0);
	MSTK_SET_REG_MIN(regs,0);
	MSTK_SET_REG_HOUR(regs,0);
	MSTK_SET_REG_DOW(regs,5);
	MSTK_SET_REG_DOM(regs,1);
	MSTK_SET_REG_MONTH(regs,8);
	MSTK_SET_REG_YEAR(regs,1996 - MSTK_YEAR_ZERO);
	tmp = mostek_read(regs + MOSTEK_CREG);
	tmp &= ~MSTK_CREG_WRITE;
	mostek_write(regs + MOSTEK_CREG, tmp);

	spin_unlock_irq(&mostek_lock);

	/* Ensure the kick start bit is off. If it isn't, turn it off. */
	while (mostek_read(regs + MOSTEK_HOUR) & MSTK_KICK_START) {
		prom_printf("CLOCK: Kick start still on!\n");

		spin_lock_irq(&mostek_lock);

		tmp = mostek_read(regs + MOSTEK_CREG);
		tmp |= MSTK_CREG_WRITE;
		mostek_write(regs + MOSTEK_CREG, tmp);

		tmp = mostek_read(regs + MOSTEK_HOUR);
		tmp &= ~MSTK_KICK_START;
		mostek_write(regs + MOSTEK_HOUR, tmp);

		tmp = mostek_read(regs + MOSTEK_CREG);
		tmp &= ~MSTK_CREG_WRITE;
		mostek_write(regs + MOSTEK_CREG, tmp);

		spin_unlock_irq(&mostek_lock);
	}

	prom_printf("CLOCK: Kick start procedure successful.\n");
}

/* Return nonzero if the clock chip battery is low. */
static int __init has_low_battery(void)
{
	void __iomem *regs = mstk48t02_regs;
	u8 data1, data2;

	spin_lock_irq(&mostek_lock);

	data1 = mostek_read(regs + MOSTEK_EEPROM);	/* Read some data. */
	mostek_write(regs + MOSTEK_EEPROM, ~data1);	/* Write back the complement. */
	data2 = mostek_read(regs + MOSTEK_EEPROM);	/* Read back the complement. */
	mostek_write(regs + MOSTEK_EEPROM, data1);	/* Restore original value. */

	spin_unlock_irq(&mostek_lock);

	return (data1 == data2);	/* Was the write blocked? */
}

/* Read the hardware TOD clock and set the kernel's wall clock time. */
static void __init set_system_time(void)
{
	unsigned int year, mon, day, hour, min, sec;
	void __iomem *mregs = mstk48t02_regs;
#ifdef CONFIG_PCI
	unsigned long dregs = ds1287_regs;
#else
	unsigned long dregs = 0UL;
#endif
	u8 tmp;

	if (!mregs && !dregs) {
		prom_printf("Something wrong, clock regs not mapped yet.\n");
		prom_halt();
	}

	if (mregs) {
		spin_lock_irq(&mostek_lock);

		/* Traditional Mostek chip. */
		tmp = mostek_read(mregs + MOSTEK_CREG);
		tmp |= MSTK_CREG_READ;
		mostek_write(mregs + MOSTEK_CREG, tmp);

		sec = MSTK_REG_SEC(mregs);
		min = MSTK_REG_MIN(mregs);
		hour = MSTK_REG_HOUR(mregs);
		day = MSTK_REG_DOM(mregs);
		mon = MSTK_REG_MONTH(mregs);
		year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
	} else {
		/* Dallas 12887 RTC chip. */
		do {
			sec = CMOS_READ(RTC_SECONDS);
			min = CMOS_READ(RTC_MINUTES);
			hour = CMOS_READ(RTC_HOURS);
			day = CMOS_READ(RTC_DAY_OF_MONTH);
			mon = CMOS_READ(RTC_MONTH);
			year = CMOS_READ(RTC_YEAR);
		} while (sec != CMOS_READ(RTC_SECONDS));

		if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
			BCD_TO_BIN(sec);
			BCD_TO_BIN(min);
			BCD_TO_BIN(hour);
			BCD_TO_BIN(day);
			BCD_TO_BIN(mon);
			BCD_TO_BIN(year);
		}

		if ((year += 1900) < 1970)
			year += 100;
	}

	xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
	xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);

	if (mregs) {
		tmp = mostek_read(mregs + MOSTEK_CREG);
		tmp &= ~MSTK_CREG_READ;
		mostek_write(mregs + MOSTEK_CREG, tmp);

		spin_unlock_irq(&mostek_lock);
	}
}
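
/* Starfire has no directly accessible TOD chip; the time of day is
 * obtained by evaluating an OBP forth snippet.  On sun4v it is read
 * and written through hypervisor tod_get/tod_set calls.
 */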
/* davem suggests we keep this within the 4M locked kernel image */
static u32 starfire_get_time(void)
{
	static char obp_gettod[32];
	static u32 unix_tod;

	sprintf(obp_gettod, "h# %08x unix-gettod",
		(unsigned int) (long) &unix_tod);
	prom_feval(obp_gettod);

	return unix_tod;
}

static int starfire_set_time(u32 val)
{
	/* Do nothing, time is set using the service processor
	 * console on this platform.
	 */
	return 0;
}

static u32 hypervisor_get_time(void)
{
	register unsigned long func asm("%o5");
	register unsigned long arg0 asm("%o0");
	register unsigned long arg1 asm("%o1");
	int retries = 10000;

retry:
	func = HV_FAST_TOD_GET;
	arg0 = 0;
	arg1 = 0;
	__asm__ __volatile__("ta %6"
			     : "=&r" (func), "=&r" (arg0), "=&r" (arg1)
			     : "0" (func), "1" (arg0), "2" (arg1),
			       "i" (HV_FAST_TRAP));
	if (arg0 == HV_EOK)
		return arg1;
	if (arg0 == HV_EWOULDBLOCK) {
		if (--retries > 0) {
			udelay(100);
			goto retry;
		}
		printk(KERN_WARNING "SUN4V: tod_get() timed out.\n");
		return 0;
	}
	printk(KERN_WARNING "SUN4V: tod_get() not supported.\n");
	return 0;
}

static int hypervisor_set_time(u32 secs)
{
	register unsigned long func asm("%o5");
	register unsigned long arg0 asm("%o0");
	int retries = 10000;

retry:
	func = HV_FAST_TOD_SET;
	arg0 = secs;
	__asm__ __volatile__("ta %4"
			     : "=&r" (func), "=&r" (arg0)
			     : "0" (func), "1" (arg0),
			       "i" (HV_FAST_TRAP));
	if (arg0 == HV_EOK)
		return 0;
	if (arg0 == HV_EWOULDBLOCK) {
		if (--retries > 0) {
			udelay(100);
			goto retry;
		}
		printk(KERN_WARNING "SUN4V: tod_set() timed out.\n");
		return -EAGAIN;
	}
	printk(KERN_WARNING "SUN4V: tod_set() not supported.\n");
	return -EOPNOTSUPP;
}
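
/* TOD clock chip models recognized by the OF clock driver below. */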
static int __init clock_model_matches(const char *model)
{
	if (strcmp(model, "mk48t02") &&
	    strcmp(model, "mk48t08") &&
	    strcmp(model, "mk48t59") &&
	    strcmp(model, "m5819") &&
	    strcmp(model, "m5819p") &&
	    strcmp(model, "m5823") &&
	    strcmp(model, "ds1287"))
		return 0;

	return 1;
}

static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match)
{
	struct device_node *dp = op->node;
	const char *model = of_get_property(dp, "model", NULL);
	unsigned long size, flags;
	void __iomem *regs;

	if (!model || !clock_model_matches(model))
		return -ENODEV;

	/* On an Enterprise system there can be multiple mostek clocks.
	 * We should only match the one that is on the central FHC bus.
	 */
	if (!strcmp(dp->parent->name, "fhc") &&
	    strcmp(dp->parent->parent->name, "central") != 0)
		return -ENODEV;

	size = (op->resource[0].end - op->resource[0].start) + 1;
	regs = of_ioremap(&op->resource[0], 0, size, "clock");
	if (!regs)
		return -ENOMEM;

#ifdef CONFIG_PCI
	if (!strcmp(model, "ds1287") ||
	    !strcmp(model, "m5819") ||
	    !strcmp(model, "m5819p") ||
	    !strcmp(model, "m5823")) {
		ds1287_regs = (unsigned long) regs;
	} else
#endif
	if (model[5] == '0' && model[6] == '2') {
		mstk48t02_regs = regs;
	} else if (model[5] == '0' && model[6] == '8') {
		mstk48t08_regs = regs;
		mstk48t02_regs = mstk48t08_regs + MOSTEK_48T08_48T02;
	} else {
		mstk48t59_regs = regs;
		mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02;
	}

	printk(KERN_INFO "%s: Clock regs at %p\n", dp->full_name, regs);

	local_irq_save(flags);

	if (mstk48t02_regs != NULL) {
		/* Report a low battery voltage condition. */
		if (has_low_battery())
			prom_printf("NVRAM: Low battery voltage!\n");

		/* Kick start the clock if it is completely stopped. */
		if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP)
			kick_start_clock();
	}

	set_system_time();

	local_irq_restore(flags);

	return 0;
}

static struct of_device_id clock_match[] = {
	{
		.name = "eeprom",
	},
	{
		.name = "rtc",
	},
	{},
};

static struct of_platform_driver clock_driver = {
	.name		= "clock",
	.match_table	= clock_match,
	.probe		= clock_probe,
};

static int __init clock_init(void)
{
	if (this_is_starfire) {
		xtime.tv_sec = starfire_get_time();
		xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
		set_normalized_timespec(&wall_to_monotonic,
					-xtime.tv_sec, -xtime.tv_nsec);
		return 0;
	}
	if (tlb_type == hypervisor) {
		xtime.tv_sec = hypervisor_get_time();
		xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
		set_normalized_timespec(&wall_to_monotonic,
					-xtime.tv_sec, -xtime.tv_nsec);
		return 0;
	}

	return of_register_driver(&clock_driver, &of_bus_type);
}

/* Must be after subsys_initcall() so that busses are probed.  Must
 * be before device_initcall() because things like the RTC driver
 * need to see the clock registers.
 */
fs_initcall(clock_init);

/* This gets the master TICK_INT timer going. */
static unsigned long sparc64_init_timers(void)
{
	struct device_node *dp;
	struct property *prop;
	unsigned long clock;
#ifdef CONFIG_SMP
	extern void smp_tick_init(void);
#endif

	dp = of_find_node_by_path("/");
	if (tlb_type == spitfire) {
		unsigned long ver, manuf, impl;

		__asm__ __volatile__ ("rdpr %%ver, %0"
				      : "=&r" (ver));
		manuf = ((ver >> 48) & 0xffff);
		impl = ((ver >> 32) & 0xffff);
		if (manuf == 0x17 && impl == 0x13) {
			/* Hummingbird, aka Ultra-IIe */
			tick_ops = &hbtick_operations;
			prop = of_find_property(dp, "stick-frequency", NULL);
		} else {
			tick_ops = &tick_operations;
			cpu_find_by_instance(0, &dp, NULL);
			prop = of_find_property(dp, "clock-frequency", NULL);
		}
	} else {
		tick_ops = &stick_operations;
		prop = of_find_property(dp, "stick-frequency", NULL);
	}
	clock = *(unsigned int *) prop->value;

#ifdef CONFIG_SMP
	smp_tick_init();
#endif

	return clock;
}
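
/* cpufreq support: remember a reference clock_tick value per cpu so
 * that cpu_data(cpu).clock_tick can be rescaled when the cpu
 * frequency changes.
 */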
struct freq_table {
	unsigned long clock_tick_ref;
	unsigned int ref_freq;
};
static DEFINE_PER_CPU(struct freq_table, sparc64_freq_table) = { 0, 0 };

unsigned long sparc64_get_clock_tick(unsigned int cpu)
{
	struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);

	if (ft->clock_tick_ref)
		return ft->clock_tick_ref;
	return cpu_data(cpu).clock_tick;
}

#ifdef CONFIG_CPU_FREQ

static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				    void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned int cpu = freq->cpu;
	struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);

	if (!ft->ref_freq) {
		ft->ref_freq = freq->old;
		ft->clock_tick_ref = cpu_data(cpu).clock_tick;
	}
	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		cpu_data(cpu).clock_tick =
			cpufreq_scale(ft->clock_tick_ref,
				      ft->ref_freq,
				      freq->new);
	}

	return 0;
}

static struct notifier_block sparc64_cpufreq_notifier_block = {
	.notifier_call	= sparc64_cpufreq_notifier
};

#endif /* CONFIG_CPU_FREQ */
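
/* clockevents glue: program the compare register for the next event.
 * add_compare() returns nonzero if the deadline had already passed by
 * the time the compare value was written.
 */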
static int sparc64_next_event(unsigned long delta,
			      struct clock_event_device *evt)
{
	return tick_ops->add_compare(delta) ? -ETIME : 0;
}

static void sparc64_timer_setup(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_ONESHOT:
		break;

	case CLOCK_EVT_MODE_SHUTDOWN:
		tick_ops->disable_irq();
		break;

	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_UNUSED:
		WARN_ON(1);
		break;
	}
}

static struct clock_event_device sparc64_clockevent = {
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= sparc64_timer_setup,
	.set_next_event	= sparc64_next_event,
	.rating		= 100,
	.shift		= 30,
	.irq		= -1,
};
static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
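
/* Timer softint handler: acknowledge the softint and hand off to the
 * per-cpu clockevent device's event handler.
 */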
void timer_interrupt(int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long tick_mask = tick_ops->softint_mask;
	int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(sparc64_events, cpu);

	clear_softint(tick_mask);

	irq_enter();

	kstat_this_cpu.irqs[0]++;

	if (unlikely(!evt->event_handler)) {
		printk(KERN_WARNING
		       "Spurious SPARC64 timer interrupt on cpu %d\n", cpu);
	} else
		evt->event_handler(evt);

	irq_exit();

	set_irq_regs(old_regs);
}

void __devinit setup_sparc64_timer(void)
{
	struct clock_event_device *sevt;
	unsigned long pstate;

	/* Guarantee that the following sequences execute
	 * uninterrupted.
	 */
	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	tick_ops->init_tick();

	/* Restore PSTATE_IE. */
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : /* no outputs */
			     : "r" (pstate));

	sevt = &__get_cpu_var(sparc64_events);

	memcpy(sevt, &sparc64_clockevent, sizeof(*sevt));
	sevt->cpumask = cpumask_of_cpu(smp_processor_id());

	clockevents_register_device(sevt);
}
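
/* SPARC64_NSEC_PER_CYC_SHIFT is the fixed-point shift used by
 * sched_clock().  The free-running counter is also exported as a
 * clocksource; its read hook and mult are filled in by time_init()
 * once the clock rate is known.
 */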
#define SPARC64_NSEC_PER_CYC_SHIFT	32UL

static struct clocksource clocksource_tick = {
	.rating		= 100,
	.mask		= CLOCKSOURCE_MASK(64),
	.shift		= 16,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static void __init setup_clockevent_multiplier(unsigned long hz)
{
	unsigned long mult, shift = 32;

	while (1) {
		mult = div_sc(hz, NSEC_PER_SEC, shift);
		if (mult && (mult >> 32UL) == 0UL)
			break;

		shift--;
	}

	sparc64_clockevent.shift = shift;
	sparc64_clockevent.mult = mult;
}

void __init time_init(void)
{
	unsigned long clock = sparc64_init_timers();

	timer_ticks_per_nsec_quotient =
		clocksource_hz2mult(clock, SPARC64_NSEC_PER_CYC_SHIFT);

	clocksource_tick.name = tick_ops->name;
	clocksource_tick.mult =
		clocksource_hz2mult(clock,
				    clocksource_tick.shift);
	clocksource_tick.read = tick_ops->get_tick;

	printk("clocksource: mult[%x] shift[%d]\n",
	       clocksource_tick.mult, clocksource_tick.shift);

	clocksource_register(&clocksource_tick);

	sparc64_clockevent.name = tick_ops->name;

	setup_clockevent_multiplier(clock);

	sparc64_clockevent.max_delta_ns =
		clockevent_delta2ns(0x7fffffffffffffff, &sparc64_clockevent);
	sparc64_clockevent.min_delta_ns =
		clockevent_delta2ns(0xF, &sparc64_clockevent);

	printk("clockevent: mult[%lx] shift[%d]\n",
	       sparc64_clockevent.mult, sparc64_clockevent.shift);

	setup_sparc64_timer();

#ifdef CONFIG_CPU_FREQ
	cpufreq_register_notifier(&sparc64_cpufreq_notifier_block,
				  CPUFREQ_TRANSITION_NOTIFIER);
#endif
}

unsigned long long sched_clock(void)
{
	unsigned long ticks = tick_ops->get_tick();

	return (ticks * timer_ticks_per_nsec_quotient)
		>> SPARC64_NSEC_PER_CYC_SHIFT;
}
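
/* Write the current minutes/seconds back into the TOD chip.  Used by
 * the ~11 minute CMOS sync above; returns 0 on success, -1 otherwise.
 */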
static int set_rtc_mmss(unsigned long nowtime)
{
	int real_seconds, real_minutes, chip_minutes;
	void __iomem *mregs = mstk48t02_regs;
#ifdef CONFIG_PCI
	unsigned long dregs = ds1287_regs;
#else
	unsigned long dregs = 0UL;
#endif
	unsigned long flags;
	u8 tmp;

	/*
	 * Not having a register set can lead to trouble.
	 * Also starfire doesn't have a tod clock.
	 */
	if (!mregs && !dregs)
		return -1;

	if (mregs) {
		spin_lock_irqsave(&mostek_lock, flags);

		/* Read the current RTC minutes. */
		tmp = mostek_read(mregs + MOSTEK_CREG);
		tmp |= MSTK_CREG_READ;
		mostek_write(mregs + MOSTEK_CREG, tmp);

		chip_minutes = MSTK_REG_MIN(mregs);

		tmp = mostek_read(mregs + MOSTEK_CREG);
		tmp &= ~MSTK_CREG_READ;
		mostek_write(mregs + MOSTEK_CREG, tmp);

		/*
		 * since we're only adjusting minutes and seconds,
		 * don't interfere with hour overflow. This avoids
		 * messing with unknown time zones but requires your
		 * RTC not to be off by more than 15 minutes
		 */
		real_seconds = nowtime % 60;
		real_minutes = nowtime / 60;
		if (((abs(real_minutes - chip_minutes) + 15)/30) & 1)
			real_minutes += 30;	/* correct for half hour time zone */
		real_minutes %= 60;

		if (abs(real_minutes - chip_minutes) < 30) {
			tmp = mostek_read(mregs + MOSTEK_CREG);
			tmp |= MSTK_CREG_WRITE;
			mostek_write(mregs + MOSTEK_CREG, tmp);

			MSTK_SET_REG_SEC(mregs,real_seconds);
			MSTK_SET_REG_MIN(mregs,real_minutes);

			tmp = mostek_read(mregs + MOSTEK_CREG);
			tmp &= ~MSTK_CREG_WRITE;
			mostek_write(mregs + MOSTEK_CREG, tmp);

			spin_unlock_irqrestore(&mostek_lock, flags);

			return 0;
		} else {
			spin_unlock_irqrestore(&mostek_lock, flags);

			return -1;
		}
	} else {
		int retval = 0;
		unsigned char save_control, save_freq_select;

		/* Stolen from arch/i386/kernel/time.c, see there for
		 * credits and descriptive comments.
		 */
		spin_lock_irqsave(&rtc_lock, flags);
		save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
		CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);

		save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
		CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);

		chip_minutes = CMOS_READ(RTC_MINUTES);
		if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
			BCD_TO_BIN(chip_minutes);
		real_seconds = nowtime % 60;
		real_minutes = nowtime / 60;
		if (((abs(real_minutes - chip_minutes) + 15)/30) & 1)
			real_minutes += 30;
		real_minutes %= 60;

		if (abs(real_minutes - chip_minutes) < 30) {
			if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
				BIN_TO_BCD(real_seconds);
				BIN_TO_BCD(real_minutes);
			}
			CMOS_WRITE(real_seconds,RTC_SECONDS);
			CMOS_WRITE(real_minutes,RTC_MINUTES);
		} else {
			printk(KERN_WARNING
			       "set_rtc_mmss: can't update from %d to %d\n",
			       chip_minutes, real_minutes);
			retval = -1;
		}

		CMOS_WRITE(save_control, RTC_CONTROL);
		CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);

		spin_unlock_irqrestore(&rtc_lock, flags);

		return retval;
	}
}
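
/* Minimal /dev/rtc (misc device) implementation for platforms whose
 * time-of-day is only reachable through OBP (Starfire) or the sun4v
 * hypervisor.
 */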
#define RTC_IS_OPEN		0x01	/* means /dev/rtc is in use */

static unsigned char mini_rtc_status;	/* bitmapped status byte. */

/* months start at 0 now */
static unsigned char days_in_mo[] =
{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};

#define FEBRUARY		2
#define STARTOFTIME		1970
#define SECDAY			86400L
#define SECYR			(SECDAY * 365)
#define leapyear(year)		((year) % 4 == 0 && \
				 ((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a)		(leapyear(a) ? 366 : 365)
#define days_in_month(a)	(month_days[(a) - 1])

static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
static void GregorianDay(struct rtc_time * tm)
{
	int leapsToDate;
	int lastYear;
	int day;
	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

	lastYear = tm->tm_year - 1;

	/*
	 * Number of leap corrections to apply up to end of last year
	 */
	leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

	/*
	 * This year is a leap year if it is divisible by 4 except when it is
	 * divisible by 100 unless it is divisible by 400
	 *
	 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
	 */
	day = tm->tm_mon > 2 && leapyear(tm->tm_year);

	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
		tm->tm_mday;

	tm->tm_wday = day % 7;
}

static void to_tm(int tim, struct rtc_time *tm)
{
	register int i;
	register long hms, day;

	day = tim / SECDAY;
	hms = tim % SECDAY;

	/* Hours, minutes, seconds are easy */
	tm->tm_hour = hms / 3600;
	tm->tm_min = (hms % 3600) / 60;
	tm->tm_sec = (hms % 3600) % 60;

	/* Number of years in days */
	for (i = STARTOFTIME; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	tm->tm_year = i;

	/* Number of months in days left */
	if (leapyear(tm->tm_year))
		days_in_month(FEBRUARY) = 29;
	for (i = 1; day >= days_in_month(i); i++)
		day -= days_in_month(i);
	days_in_month(FEBRUARY) = 28;
	tm->tm_mon = i;

	/* Days are what is left over (+1) from all that. */
	tm->tm_mday = day + 1;

	/*
	 * Determine the day of week
	 */
	GregorianDay(tm);
}

/* Both Starfire and SUN4V give us seconds since Jan 1st, 1970,
 * aka Unix time.  So we have to convert to/from rtc_time.
 */
static inline void mini_get_rtc_time(struct rtc_time *time)
{
	unsigned long flags;
	u32 seconds;

	spin_lock_irqsave(&rtc_lock, flags);
	seconds = 0;
	if (this_is_starfire)
		seconds = starfire_get_time();
	else if (tlb_type == hypervisor)
		seconds = hypervisor_get_time();
	spin_unlock_irqrestore(&rtc_lock, flags);

	to_tm(seconds, time);
	time->tm_year -= 1900;
	time->tm_mon -= 1;
}

static inline int mini_set_rtc_time(struct rtc_time *time)
{
	u32 seconds = mktime(time->tm_year + 1900, time->tm_mon + 1,
			     time->tm_mday, time->tm_hour,
			     time->tm_min, time->tm_sec);
	unsigned long flags;
	int err;

	spin_lock_irqsave(&rtc_lock, flags);
	err = -ENODEV;
	if (this_is_starfire)
		err = starfire_set_time(seconds);
	else if (tlb_type == hypervisor)
		err = hypervisor_set_time(seconds);
	spin_unlock_irqrestore(&rtc_lock, flags);

	return err;
}

static int mini_rtc_ioctl(struct inode *inode, struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct rtc_time wtime;
	void __user *argp = (void __user *)arg;

	switch (cmd) {

	case RTC_PLL_GET:
		return -EINVAL;

	case RTC_PLL_SET:
		return -EINVAL;

	case RTC_UIE_OFF:	/* disable ints from RTC updates. */
		return 0;

	case RTC_UIE_ON:	/* enable ints for RTC updates. */
		return -EINVAL;

	case RTC_RD_TIME:	/* Read the time/date from RTC */
		/* this doesn't get week-day, who cares */
		memset(&wtime, 0, sizeof(wtime));
		mini_get_rtc_time(&wtime);

		return copy_to_user(argp, &wtime, sizeof(wtime)) ? -EFAULT : 0;

	case RTC_SET_TIME:	/* Set the RTC */
	    {
		int year;
		unsigned char leap_yr;

		if (!capable(CAP_SYS_TIME))
			return -EACCES;

		if (copy_from_user(&wtime, argp, sizeof(wtime)))
			return -EFAULT;

		year = wtime.tm_year + 1900;
		leap_yr = ((!(year % 4) && (year % 100)) ||
			   !(year % 400));

		if ((wtime.tm_mon < 0 || wtime.tm_mon > 11) || (wtime.tm_mday < 1))
			return -EINVAL;

		if (wtime.tm_mday < 0 || wtime.tm_mday >
		    (days_in_mo[wtime.tm_mon] + ((wtime.tm_mon == 1) && leap_yr)))
			return -EINVAL;

		if (wtime.tm_hour < 0 || wtime.tm_hour >= 24 ||
		    wtime.tm_min < 0 || wtime.tm_min >= 60 ||
		    wtime.tm_sec < 0 || wtime.tm_sec >= 60)
			return -EINVAL;

		return mini_set_rtc_time(&wtime);
	    }
	}

	return -EINVAL;
}

static int mini_rtc_open(struct inode *inode, struct file *file)
{
	if (mini_rtc_status & RTC_IS_OPEN)
		return -EBUSY;

	mini_rtc_status |= RTC_IS_OPEN;

	return 0;
}

static int mini_rtc_release(struct inode *inode, struct file *file)
{
	mini_rtc_status &= ~RTC_IS_OPEN;
	return 0;
}

static const struct file_operations mini_rtc_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= mini_rtc_ioctl,
	.open		= mini_rtc_open,
	.release	= mini_rtc_release,
};

static struct miscdevice rtc_mini_dev =
{
	.minor		= RTC_MINOR,
	.name		= "rtc",
	.fops		= &mini_rtc_fops,
};

static int __init rtc_mini_init(void)
{
	int retval;

	if (tlb_type != hypervisor && !this_is_starfire)
		return -ENODEV;

	printk(KERN_INFO "Mini RTC Driver\n");

	retval = misc_register(&rtc_mini_dev);
	if (retval < 0)
		return retval;

	return 0;
}

static void __exit rtc_mini_exit(void)
{
	misc_deregister(&rtc_mini_dev);
}

module_init(rtc_mini_init);
module_exit(rtc_mini_exit);