time.c

/* $Id: time.c,v 1.42 2002/01/23 14:33:55 davem Exp $
 * time.c: UltraSparc timer and TOD clock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 *
 * Based largely on code which is:
 *
 * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/mc146818rtc.h>
#include <linux/delay.h>
#include <linux/profile.h>
#include <linux/bcd.h>
#include <linux/jiffies.h>
#include <linux/cpufreq.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/miscdevice.h>
#include <linux/rtc.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>

#include <asm/oplib.h>
#include <asm/mostek.h>
#include <asm/timer.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/of_device.h>
#include <asm/starfire.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/cpudata.h>
#include <asm/uaccess.h>
#include <asm/prom.h>
#include <asm/irq_regs.h>

DEFINE_SPINLOCK(mostek_lock);
DEFINE_SPINLOCK(rtc_lock);

void __iomem *mstk48t02_regs = NULL;
#ifdef CONFIG_PCI
unsigned long ds1287_regs = 0UL;
#endif

static void __iomem *mstk48t08_regs;
static void __iomem *mstk48t59_regs;

static int set_rtc_mmss(unsigned long);

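/* Bit 63 of %tick is the privileged-access (NPT) bit that keeps user mode
 * from reading the register; bit 63 of %tick_cmpr disables the tick-compare
 * interrupt.
 */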
#define TICK_PRIV_BIT (1UL << 63)
#define TICKCMP_IRQ_BIT (1UL << 63)

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
        unsigned long pc = instruction_pointer(regs);

        if (in_lock_functions(pc))
                return regs->u_regs[UREG_RETPC];
        return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

static void tick_disable_protection(void)
{
        /* Set things up so user can access tick register for profiling
         * purposes.  Also workaround BB_ERRATA_1 by doing a dummy
         * read back of %tick after writing it.
         */
        __asm__ __volatile__(
        " ba,pt %%xcc, 1f\n"
        " nop\n"
        " .align 64\n"
        "1: rd %%tick, %%g2\n"
        " add %%g2, 6, %%g2\n"
        " andn %%g2, %0, %%g2\n"
        " wrpr %%g2, 0, %%tick\n"
        " rdpr %%tick, %%g0"
        : /* no outputs */
        : "r" (TICK_PRIV_BIT)
        : "g2");
}

static void tick_disable_irq(void)
{
        __asm__ __volatile__(
        " ba,pt %%xcc, 1f\n"
        " nop\n"
        " .align 64\n"
        "1: wr %0, 0x0, %%tick_cmpr\n"
        " rd %%tick_cmpr, %%g0"
        : /* no outputs */
        : "r" (TICKCMP_IRQ_BIT));
}

static void tick_init_tick(void)
{
        tick_disable_protection();
        tick_disable_irq();
}

static unsigned long tick_get_tick(void)
{
        unsigned long ret;

        __asm__ __volatile__("rd %%tick, %0\n\t"
                "mov %0, %0"
                : "=r" (ret));

        return ret & ~TICK_PRIV_BIT;
}

static int tick_add_compare(unsigned long adj)
{
        unsigned long orig_tick, new_tick, new_compare;

        __asm__ __volatile__("rd %%tick, %0"
                : "=r" (orig_tick));

        orig_tick &= ~TICKCMP_IRQ_BIT;

        /* Workaround for Spitfire Errata (#54 I think??), I discovered
         * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
         * number 103640.
         *
         * On Blackbird writes to %tick_cmpr can fail, the
         * workaround seems to be to execute the wr instruction
         * at the start of an I-cache line, and perform a dummy
         * read back from %tick_cmpr right after writing to it. -DaveM
         */
        __asm__ __volatile__("ba,pt %%xcc, 1f\n\t"
                " add %1, %2, %0\n\t"
                ".align 64\n"
                "1:\n\t"
                "wr %0, 0, %%tick_cmpr\n\t"
                "rd %%tick_cmpr, %%g0\n\t"
                : "=r" (new_compare)
                : "r" (orig_tick), "r" (adj));

        __asm__ __volatile__("rd %%tick, %0"
                : "=r" (new_tick));
        new_tick &= ~TICKCMP_IRQ_BIT;

        return ((long)(new_tick - (orig_tick+adj))) > 0L;
}

static unsigned long tick_add_tick(unsigned long adj)
{
        unsigned long new_tick;

        /* Also need to handle Blackbird bug here too. */
        __asm__ __volatile__("rd %%tick, %0\n\t"
                "add %0, %1, %0\n\t"
                "wrpr %0, 0, %%tick\n\t"
                : "=&r" (new_tick)
                : "r" (adj));

        return new_tick;
}

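/* Timer ops that drive the per-cpu %tick counter and its %tick_cmpr
 * compare register directly.
 */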
static struct sparc64_tick_ops tick_operations __read_mostly = {
        .name = "tick",
        .init_tick = tick_init_tick,
        .disable_irq = tick_disable_irq,
        .get_tick = tick_get_tick,
        .add_tick = tick_add_tick,
        .add_compare = tick_add_compare,
        .softint_mask = 1UL << 0,
};

struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations;

static void stick_disable_irq(void)
{
        __asm__ __volatile__(
        "wr %0, 0x0, %%asr25"
        : /* no outputs */
        : "r" (TICKCMP_IRQ_BIT));
}

static void stick_init_tick(void)
{
        /* Writes to the %tick and %stick registers are not
         * allowed on sun4v.  The Hypervisor controls that
         * bit, per-strand.
         */
        if (tlb_type != hypervisor) {
                tick_disable_protection();
                tick_disable_irq();

                /* Let the user get at STICK too. */
                __asm__ __volatile__(
                " rd %%asr24, %%g2\n"
                " andn %%g2, %0, %%g2\n"
                " wr %%g2, 0, %%asr24"
                : /* no outputs */
                : "r" (TICK_PRIV_BIT)
                : "g1", "g2");
        }

        stick_disable_irq();
}

static unsigned long stick_get_tick(void)
{
        unsigned long ret;

        __asm__ __volatile__("rd %%asr24, %0"
                : "=r" (ret));

        return ret & ~TICK_PRIV_BIT;
}

static unsigned long stick_add_tick(unsigned long adj)
{
        unsigned long new_tick;

        __asm__ __volatile__("rd %%asr24, %0\n\t"
                "add %0, %1, %0\n\t"
                "wr %0, 0, %%asr24\n\t"
                : "=&r" (new_tick)
                : "r" (adj));

        return new_tick;
}

static int stick_add_compare(unsigned long adj)
{
        unsigned long orig_tick, new_tick;

        __asm__ __volatile__("rd %%asr24, %0"
                : "=r" (orig_tick));
        orig_tick &= ~TICKCMP_IRQ_BIT;

        __asm__ __volatile__("wr %0, 0, %%asr25"
                : /* no outputs */
                : "r" (orig_tick + adj));

        __asm__ __volatile__("rd %%asr24, %0"
                : "=r" (new_tick));
        new_tick &= ~TICKCMP_IRQ_BIT;

        return ((long)(new_tick - (orig_tick+adj))) > 0L;
}

static struct sparc64_tick_ops stick_operations __read_mostly = {
        .name = "stick",
        .init_tick = stick_init_tick,
        .disable_irq = stick_disable_irq,
        .get_tick = stick_get_tick,
        .add_tick = stick_add_tick,
        .add_compare = stick_add_compare,
        .softint_mask = 1UL << 16,
};

/* On Hummingbird the STICK/STICK_CMPR register is implemented
 * in I/O space.  There are two 64-bit registers each, the
 * first holds the low 32-bits of the value and the second holds
 * the high 32-bits.
 *
 * Since STICK is constantly updating, we have to access it carefully.
 *
 * The sequence we use to read is:
 * 1) read high
 * 2) read low
 * 3) read high again; if it rolled over, re-read both low and high again.
 *
 * Writing STICK safely is also tricky:
 * 1) write low to zero
 * 2) write high
 * 3) write low
 */
#define HBIRD_STICKCMP_ADDR 0x1fe0000f060UL
#define HBIRD_STICK_ADDR 0x1fe0000f070UL

static unsigned long __hbird_read_stick(void)
{
        unsigned long ret, tmp1, tmp2, tmp3;
        unsigned long addr = HBIRD_STICK_ADDR+8;

        __asm__ __volatile__("ldxa [%1] %5, %2\n"
                "1:\n\t"
                "sub %1, 0x8, %1\n\t"
                "ldxa [%1] %5, %3\n\t"
                "add %1, 0x8, %1\n\t"
                "ldxa [%1] %5, %4\n\t"
                "cmp %4, %2\n\t"
                "bne,a,pn %%xcc, 1b\n\t"
                " mov %4, %2\n\t"
                "sllx %4, 32, %4\n\t"
                "or %3, %4, %0\n\t"
                : "=&r" (ret), "=&r" (addr),
                  "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3)
                : "i" (ASI_PHYS_BYPASS_EC_E), "1" (addr));

        return ret;
}

static void __hbird_write_stick(unsigned long val)
{
        unsigned long low = (val & 0xffffffffUL);
        unsigned long high = (val >> 32UL);
        unsigned long addr = HBIRD_STICK_ADDR;

        __asm__ __volatile__("stxa %%g0, [%0] %4\n\t"
                "add %0, 0x8, %0\n\t"
                "stxa %3, [%0] %4\n\t"
                "sub %0, 0x8, %0\n\t"
                "stxa %2, [%0] %4"
                : "=&r" (addr)
                : "0" (addr), "r" (low), "r" (high),
                  "i" (ASI_PHYS_BYPASS_EC_E));
}

static void __hbird_write_compare(unsigned long val)
{
        unsigned long low = (val & 0xffffffffUL);
        unsigned long high = (val >> 32UL);
        unsigned long addr = HBIRD_STICKCMP_ADDR + 0x8UL;

        __asm__ __volatile__("stxa %3, [%0] %4\n\t"
                "sub %0, 0x8, %0\n\t"
                "stxa %2, [%0] %4"
                : "=&r" (addr)
                : "0" (addr), "r" (low), "r" (high),
                  "i" (ASI_PHYS_BYPASS_EC_E));
}

static void hbtick_disable_irq(void)
{
        __hbird_write_compare(TICKCMP_IRQ_BIT);
}

static void hbtick_init_tick(void)
{
        tick_disable_protection();

        /* XXX This seems to be necessary to 'jumpstart' Hummingbird
         * XXX into actually sending STICK interrupts.  I think because
         * XXX of how we store %tick_cmpr in head.S this somehow resets the
         * XXX {TICK + STICK} interrupt mux.  -DaveM
         */
        __hbird_write_stick(__hbird_read_stick());

        hbtick_disable_irq();
}

static unsigned long hbtick_get_tick(void)
{
        return __hbird_read_stick() & ~TICK_PRIV_BIT;
}

static unsigned long hbtick_add_tick(unsigned long adj)
{
        unsigned long val;

        val = __hbird_read_stick() + adj;
        __hbird_write_stick(val);

        return val;
}

static int hbtick_add_compare(unsigned long adj)
{
        unsigned long val = __hbird_read_stick();
        unsigned long val2;

        val &= ~TICKCMP_IRQ_BIT;
        val += adj;
        __hbird_write_compare(val);

        val2 = __hbird_read_stick() & ~TICKCMP_IRQ_BIT;

        return ((long)(val2 - val)) > 0L;
}

static struct sparc64_tick_ops hbtick_operations __read_mostly = {
        .name = "hbtick",
        .init_tick = hbtick_init_tick,
        .disable_irq = hbtick_disable_irq,
        .get_tick = hbtick_get_tick,
        .add_tick = hbtick_add_tick,
        .add_compare = hbtick_add_compare,
        .softint_mask = 1UL << 0,
};

static unsigned long timer_ticks_per_nsec_quotient __read_mostly;

#define TICK_SIZE (tick_nsec / 1000)

#define USEC_AFTER 500000
#define USEC_BEFORE 500000

static void sync_cmos_clock(unsigned long dummy);

static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);

static void sync_cmos_clock(unsigned long dummy)
{
        struct timeval now, next;
        int fail = 1;

        /*
         * If we have an externally synchronized Linux clock, then update
         * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
         * called as close as possible to 500 ms before the new second starts.
         * This code is run on a timer.  If the clock is set, that timer
         * may not expire at the correct time.  Thus, we adjust...
         */
        if (!ntp_synced())
                /*
                 * Not synced, exit, do not restart a timer (if one is
                 * running, let it run out).
                 */
                return;

        do_gettimeofday(&now);
        if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
            now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
                fail = set_rtc_mmss(now.tv_sec);

        next.tv_usec = USEC_AFTER - now.tv_usec;
        if (next.tv_usec <= 0)
                next.tv_usec += USEC_PER_SEC;

        if (!fail)
                next.tv_sec = 659;
        else
                next.tv_sec = 0;

        if (next.tv_usec >= USEC_PER_SEC) {
                next.tv_sec++;
                next.tv_usec -= USEC_PER_SEC;
        }
        mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
}

void notify_arch_cmos_timer(void)
{
        mod_timer(&sync_cmos_timer, jiffies + 1);
}

/* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */
static void __init kick_start_clock(void)
{
        void __iomem *regs = mstk48t02_regs;
        u8 sec, tmp;
        int i, count;

        prom_printf("CLOCK: Clock was stopped. Kick start ");

        spin_lock_irq(&mostek_lock);

        /* Turn on the kick start bit to start the oscillator. */
        tmp = mostek_read(regs + MOSTEK_CREG);
        tmp |= MSTK_CREG_WRITE;
        mostek_write(regs + MOSTEK_CREG, tmp);
        tmp = mostek_read(regs + MOSTEK_SEC);
        tmp &= ~MSTK_STOP;
        mostek_write(regs + MOSTEK_SEC, tmp);
        tmp = mostek_read(regs + MOSTEK_HOUR);
        tmp |= MSTK_KICK_START;
        mostek_write(regs + MOSTEK_HOUR, tmp);
        tmp = mostek_read(regs + MOSTEK_CREG);
        tmp &= ~MSTK_CREG_WRITE;
        mostek_write(regs + MOSTEK_CREG, tmp);

        spin_unlock_irq(&mostek_lock);

        /* Delay to allow the clock oscillator to start. */
        sec = MSTK_REG_SEC(regs);
        for (i = 0; i < 3; i++) {
                while (sec == MSTK_REG_SEC(regs))
                        for (count = 0; count < 100000; count++)
                                /* nothing */ ;
                prom_printf(".");
                sec = MSTK_REG_SEC(regs);
        }
        prom_printf("\n");

        spin_lock_irq(&mostek_lock);

        /* Turn off kick start and set a "valid" time and date. */
        tmp = mostek_read(regs + MOSTEK_CREG);
        tmp |= MSTK_CREG_WRITE;
        mostek_write(regs + MOSTEK_CREG, tmp);
        tmp = mostek_read(regs + MOSTEK_HOUR);
        tmp &= ~MSTK_KICK_START;
        mostek_write(regs + MOSTEK_HOUR, tmp);
        MSTK_SET_REG_SEC(regs, 0);
        MSTK_SET_REG_MIN(regs, 0);
        MSTK_SET_REG_HOUR(regs, 0);
        MSTK_SET_REG_DOW(regs, 5);
        MSTK_SET_REG_DOM(regs, 1);
        MSTK_SET_REG_MONTH(regs, 8);
        MSTK_SET_REG_YEAR(regs, 1996 - MSTK_YEAR_ZERO);
        tmp = mostek_read(regs + MOSTEK_CREG);
        tmp &= ~MSTK_CREG_WRITE;
        mostek_write(regs + MOSTEK_CREG, tmp);

        spin_unlock_irq(&mostek_lock);

        /* Ensure the kick start bit is off. If it isn't, turn it off. */
        while (mostek_read(regs + MOSTEK_HOUR) & MSTK_KICK_START) {
                prom_printf("CLOCK: Kick start still on!\n");

                spin_lock_irq(&mostek_lock);

                tmp = mostek_read(regs + MOSTEK_CREG);
                tmp |= MSTK_CREG_WRITE;
                mostek_write(regs + MOSTEK_CREG, tmp);

                tmp = mostek_read(regs + MOSTEK_HOUR);
                tmp &= ~MSTK_KICK_START;
                mostek_write(regs + MOSTEK_HOUR, tmp);

                tmp = mostek_read(regs + MOSTEK_CREG);
                tmp &= ~MSTK_CREG_WRITE;
                mostek_write(regs + MOSTEK_CREG, tmp);

                spin_unlock_irq(&mostek_lock);
        }

        prom_printf("CLOCK: Kick start procedure successful.\n");
}

/* Return nonzero if the clock chip battery is low. */
static int __init has_low_battery(void)
{
        void __iomem *regs = mstk48t02_regs;
        u8 data1, data2;

        spin_lock_irq(&mostek_lock);

        data1 = mostek_read(regs + MOSTEK_EEPROM);      /* Read some data. */
        mostek_write(regs + MOSTEK_EEPROM, ~data1);     /* Write back the complement. */
        data2 = mostek_read(regs + MOSTEK_EEPROM);      /* Read back the complement. */
        mostek_write(regs + MOSTEK_EEPROM, data1);      /* Restore original value. */

        spin_unlock_irq(&mostek_lock);

        return (data1 == data2);        /* Was the write blocked? */
}

/* Probe for the real time clock chip. */
static void __init set_system_time(void)
{
        unsigned int year, mon, day, hour, min, sec;
        void __iomem *mregs = mstk48t02_regs;
#ifdef CONFIG_PCI
        unsigned long dregs = ds1287_regs;
#else
        unsigned long dregs = 0UL;
#endif
        u8 tmp;

        if (!mregs && !dregs) {
                prom_printf("Something wrong, clock regs not mapped yet.\n");
                prom_halt();
        }

        if (mregs) {
                spin_lock_irq(&mostek_lock);

                /* Traditional Mostek chip. */
                tmp = mostek_read(mregs + MOSTEK_CREG);
                tmp |= MSTK_CREG_READ;
                mostek_write(mregs + MOSTEK_CREG, tmp);

                sec = MSTK_REG_SEC(mregs);
                min = MSTK_REG_MIN(mregs);
                hour = MSTK_REG_HOUR(mregs);
                day = MSTK_REG_DOM(mregs);
                mon = MSTK_REG_MONTH(mregs);
                year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
        } else {
                /* Dallas 12887 RTC chip. */
                do {
                        sec = CMOS_READ(RTC_SECONDS);
                        min = CMOS_READ(RTC_MINUTES);
                        hour = CMOS_READ(RTC_HOURS);
                        day = CMOS_READ(RTC_DAY_OF_MONTH);
                        mon = CMOS_READ(RTC_MONTH);
                        year = CMOS_READ(RTC_YEAR);
                } while (sec != CMOS_READ(RTC_SECONDS));

                if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
                        BCD_TO_BIN(sec);
                        BCD_TO_BIN(min);
                        BCD_TO_BIN(hour);
                        BCD_TO_BIN(day);
                        BCD_TO_BIN(mon);
                        BCD_TO_BIN(year);
                }
                if ((year += 1900) < 1970)
                        year += 100;
        }

        xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
        xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
        set_normalized_timespec(&wall_to_monotonic,
                -xtime.tv_sec, -xtime.tv_nsec);

        if (mregs) {
                tmp = mostek_read(mregs + MOSTEK_CREG);
                tmp &= ~MSTK_CREG_READ;
                mostek_write(mregs + MOSTEK_CREG, tmp);

                spin_unlock_irq(&mostek_lock);
        }
}

/* davem suggests we keep this within the 4M locked kernel image */
static u32 starfire_get_time(void)
{
        static char obp_gettod[32];
        static u32 unix_tod;

        sprintf(obp_gettod, "h# %08x unix-gettod",
                (unsigned int) (long) &unix_tod);
        prom_feval(obp_gettod);

        return unix_tod;
}

static int starfire_set_time(u32 val)
{
        /* Do nothing, time is set using the service processor
         * console on this platform.
         */
        return 0;
}

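/* On sun4v the TOD clock is reached through the HV_FAST_TOD_GET and
 * HV_FAST_TOD_SET hypervisor fast traps.  HV_EWOULDBLOCK means the
 * hypervisor is busy, so retry a bounded number of times before giving up.
 */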
static u32 hypervisor_get_time(void)
{
        register unsigned long func asm("%o5");
        register unsigned long arg0 asm("%o0");
        register unsigned long arg1 asm("%o1");
        int retries = 10000;

retry:
        func = HV_FAST_TOD_GET;
        arg0 = 0;
        arg1 = 0;
        __asm__ __volatile__("ta %6"
                : "=&r" (func), "=&r" (arg0), "=&r" (arg1)
                : "0" (func), "1" (arg0), "2" (arg1),
                  "i" (HV_FAST_TRAP));
        if (arg0 == HV_EOK)
                return arg1;
        if (arg0 == HV_EWOULDBLOCK) {
                if (--retries > 0) {
                        udelay(100);
                        goto retry;
                }
                printk(KERN_WARNING "SUN4V: tod_get() timed out.\n");
                return 0;
        }
        printk(KERN_WARNING "SUN4V: tod_get() not supported.\n");
        return 0;
}

static int hypervisor_set_time(u32 secs)
{
        register unsigned long func asm("%o5");
        register unsigned long arg0 asm("%o0");
        int retries = 10000;

retry:
        func = HV_FAST_TOD_SET;
        arg0 = secs;
        __asm__ __volatile__("ta %4"
                : "=&r" (func), "=&r" (arg0)
                : "0" (func), "1" (arg0),
                  "i" (HV_FAST_TRAP));
        if (arg0 == HV_EOK)
                return 0;
        if (arg0 == HV_EWOULDBLOCK) {
                if (--retries > 0) {
                        udelay(100);
                        goto retry;
                }
                printk(KERN_WARNING "SUN4V: tod_set() timed out.\n");
                return -EAGAIN;
        }
        printk(KERN_WARNING "SUN4V: tod_set() not supported.\n");
        return -EOPNOTSUPP;
}

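/* Accept only the TOD chip "model" strings this driver knows how to handle. */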
static int __init clock_model_matches(const char *model)
{
        if (strcmp(model, "mk48t02") &&
            strcmp(model, "mk48t08") &&
            strcmp(model, "mk48t59") &&
            strcmp(model, "m5819") &&
            strcmp(model, "m5819p") &&
            strcmp(model, "m5823") &&
            strcmp(model, "ds1287"))
                return 0;

        return 1;
}

static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match)
{
        struct device_node *dp = op->node;
        const char *model = of_get_property(dp, "model", NULL);
        unsigned long size, flags;
        void __iomem *regs;

        if (!model || !clock_model_matches(model))
                return -ENODEV;

        /* On an Enterprise system there can be multiple mostek clocks.
         * We should only match the one that is on the central FHC bus.
         */
        if (!strcmp(dp->parent->name, "fhc") &&
            strcmp(dp->parent->parent->name, "central") != 0)
                return -ENODEV;

        size = (op->resource[0].end - op->resource[0].start) + 1;
        regs = of_ioremap(&op->resource[0], 0, size, "clock");
        if (!regs)
                return -ENOMEM;

#ifdef CONFIG_PCI
        if (!strcmp(model, "ds1287") ||
            !strcmp(model, "m5819") ||
            !strcmp(model, "m5819p") ||
            !strcmp(model, "m5823")) {
                ds1287_regs = (unsigned long) regs;
        } else
#endif
        if (model[5] == '0' && model[6] == '2') {
                mstk48t02_regs = regs;
        } else if (model[5] == '0' && model[6] == '8') {
                mstk48t08_regs = regs;
                mstk48t02_regs = mstk48t08_regs + MOSTEK_48T08_48T02;
        } else {
                mstk48t59_regs = regs;
                mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02;
        }

        printk(KERN_INFO "%s: Clock regs at %p\n", dp->full_name, regs);

        local_irq_save(flags);

        if (mstk48t02_regs != NULL) {
                /* Report a low battery voltage condition. */
                if (has_low_battery())
                        prom_printf("NVRAM: Low battery voltage!\n");

                /* Kick start the clock if it is completely stopped. */
                if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP)
                        kick_start_clock();
        }

        set_system_time();

        local_irq_restore(flags);

        return 0;
}

static struct of_device_id clock_match[] = {
        {
                .name = "eeprom",
        },
        {
                .name = "rtc",
        },
        {},
};

static struct of_platform_driver clock_driver = {
        .name = "clock",
        .match_table = clock_match,
        .probe = clock_probe,
};

static int __init clock_init(void)
{
        if (this_is_starfire) {
                xtime.tv_sec = starfire_get_time();
                xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
                set_normalized_timespec(&wall_to_monotonic,
                        -xtime.tv_sec, -xtime.tv_nsec);
                return 0;
        }
        if (tlb_type == hypervisor) {
                xtime.tv_sec = hypervisor_get_time();
                xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
                set_normalized_timespec(&wall_to_monotonic,
                        -xtime.tv_sec, -xtime.tv_nsec);
                return 0;
        }

        return of_register_driver(&clock_driver, &of_bus_type);
}

/* Must be after subsys_initcall() so that busses are probed.  Must
 * be before device_initcall() because things like the RTC driver
 * need to see the clock registers.
 */
fs_initcall(clock_init);

/* This gets the master TICK_INT timer going. */
static unsigned long sparc64_init_timers(void)
{
        struct device_node *dp;
        struct property *prop;
        unsigned long clock;
#ifdef CONFIG_SMP
        extern void smp_tick_init(void);
#endif

        dp = of_find_node_by_path("/");
        if (tlb_type == spitfire) {
                unsigned long ver, manuf, impl;

                __asm__ __volatile__ ("rdpr %%ver, %0"
                        : "=&r" (ver));
                manuf = ((ver >> 48) & 0xffff);
                impl = ((ver >> 32) & 0xffff);
                if (manuf == 0x17 && impl == 0x13) {
                        /* Hummingbird, aka Ultra-IIe */
                        tick_ops = &hbtick_operations;
                        prop = of_find_property(dp, "stick-frequency", NULL);
                } else {
                        tick_ops = &tick_operations;
                        cpu_find_by_instance(0, &dp, NULL);
                        prop = of_find_property(dp, "clock-frequency", NULL);
                }
        } else {
                tick_ops = &stick_operations;
                prop = of_find_property(dp, "stick-frequency", NULL);
        }
        clock = *(unsigned int *) prop->value;

#ifdef CONFIG_SMP
        smp_tick_init();
#endif

        return clock;
}

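/* cpufreq can change a cpu's clock rate at run time; remember the boot-time
 * clock_tick so it can be rescaled against the reference frequency.
 */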
struct freq_table {
        unsigned long clock_tick_ref;
        unsigned int ref_freq;
};
static DEFINE_PER_CPU(struct freq_table, sparc64_freq_table) = { 0, 0 };

unsigned long sparc64_get_clock_tick(unsigned int cpu)
{
        struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);

        if (ft->clock_tick_ref)
                return ft->clock_tick_ref;
        return cpu_data(cpu).clock_tick;
}

#ifdef CONFIG_CPU_FREQ

static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
                                    void *data)
{
        struct cpufreq_freqs *freq = data;
        unsigned int cpu = freq->cpu;
        struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);

        if (!ft->ref_freq) {
                ft->ref_freq = freq->old;
                ft->clock_tick_ref = cpu_data(cpu).clock_tick;
        }
        if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
            (val == CPUFREQ_RESUMECHANGE)) {
                cpu_data(cpu).clock_tick =
                        cpufreq_scale(ft->clock_tick_ref,
                                      ft->ref_freq,
                                      freq->new);
        }

        return 0;
}

static struct notifier_block sparc64_cpufreq_notifier_block = {
        .notifier_call = sparc64_cpufreq_notifier
};

#endif /* CONFIG_CPU_FREQ */

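/* clockevents callback: program the next tick-compare interrupt 'delta'
 * ticks from now.  add_compare() returns nonzero if that deadline has
 * already passed, which we report back as -ETIME.
 */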
static int sparc64_next_event(unsigned long delta,
                              struct clock_event_device *evt)
{
        return tick_ops->add_compare(delta) ? -ETIME : 0;
}

static void sparc64_timer_setup(enum clock_event_mode mode,
                                struct clock_event_device *evt)
{
        switch (mode) {
        case CLOCK_EVT_MODE_ONESHOT:
                break;

        case CLOCK_EVT_MODE_SHUTDOWN:
                tick_ops->disable_irq();
                break;

        case CLOCK_EVT_MODE_PERIODIC:
        case CLOCK_EVT_MODE_UNUSED:
                WARN_ON(1);
                break;
        }
}

static struct clock_event_device sparc64_clockevent = {
        .features = CLOCK_EVT_FEAT_ONESHOT,
        .set_mode = sparc64_timer_setup,
        .set_next_event = sparc64_next_event,
        .rating = 100,
        .shift = 30,
        .irq = -1,
};
static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);

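/* Per-cpu timer interrupt: acknowledge the timer softint and hand the event
 * off to the clockevent handler registered for this cpu.
 */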
void timer_interrupt(int irq, struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned long tick_mask = tick_ops->softint_mask;
        int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(sparc64_events, cpu);

        clear_softint(tick_mask);

        irq_enter();

        kstat_this_cpu.irqs[0]++;

        if (unlikely(!evt->event_handler)) {
                printk(KERN_WARNING
                       "Spurious SPARC64 timer interrupt on cpu %d\n", cpu);
        } else
                evt->event_handler(evt);

        irq_exit();

        set_irq_regs(old_regs);
}

void __devinit setup_sparc64_timer(void)
{
        struct clock_event_device *sevt;
        unsigned long pstate;

        /* Guarantee that the following sequences execute
         * uninterrupted.
         */
        __asm__ __volatile__("rdpr %%pstate, %0\n\t"
                "wrpr %0, %1, %%pstate"
                : "=r" (pstate)
                : "i" (PSTATE_IE));

        tick_ops->init_tick();

        /* Restore PSTATE_IE. */
        __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                : /* no outputs */
                : "r" (pstate));

        sevt = &__get_cpu_var(sparc64_events);

        memcpy(sevt, &sparc64_clockevent, sizeof(*sevt));
        sevt->cpumask = cpumask_of_cpu(smp_processor_id());

        clockevents_register_device(sevt);
}

#define SPARC64_NSEC_PER_CYC_SHIFT 32UL

static struct clocksource clocksource_tick = {
        .rating = 100,
        .mask = CLOCKSOURCE_MASK(64),
        .shift = 16,
        .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

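/* Pick the largest shift for which the nanoseconds-to-ticks multiplier,
 * (hz << shift) / NSEC_PER_SEC, is nonzero and still fits in 32 bits.
 */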
static void __init setup_clockevent_multiplier(unsigned long hz)
{
        unsigned long mult, shift = 32;

        while (1) {
                mult = div_sc(hz, NSEC_PER_SEC, shift);
                if (mult && (mult >> 32UL) == 0UL)
                        break;

                shift--;
        }

        sparc64_clockevent.shift = shift;
        sparc64_clockevent.mult = mult;
}

void __init time_init(void)
{
        unsigned long clock = sparc64_init_timers();

        timer_ticks_per_nsec_quotient =
                clocksource_hz2mult(clock, SPARC64_NSEC_PER_CYC_SHIFT);

        clocksource_tick.name = tick_ops->name;
        clocksource_tick.mult =
                clocksource_hz2mult(clock,
                                    clocksource_tick.shift);
        clocksource_tick.read = tick_ops->get_tick;

        printk("clocksource: mult[%x] shift[%d]\n",
               clocksource_tick.mult, clocksource_tick.shift);

        clocksource_register(&clocksource_tick);

        sparc64_clockevent.name = tick_ops->name;
        setup_clockevent_multiplier(clock);

        sparc64_clockevent.max_delta_ns =
                clockevent_delta2ns(0x7fffffffffffffff, &sparc64_clockevent);
        sparc64_clockevent.min_delta_ns =
                clockevent_delta2ns(0xF, &sparc64_clockevent);

        printk("clockevent: mult[%lx] shift[%d]\n",
               sparc64_clockevent.mult, sparc64_clockevent.shift);

        setup_sparc64_timer();

#ifdef CONFIG_CPU_FREQ
        cpufreq_register_notifier(&sparc64_cpufreq_notifier_block,
                                  CPUFREQ_TRANSITION_NOTIFIER);
#endif
}

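/* Convert the free-running tick counter to nanoseconds using the scaled
 * nanoseconds-per-tick factor computed in time_init().
 */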
unsigned long long sched_clock(void)
{
        unsigned long ticks = tick_ops->get_tick();

        return (ticks * timer_ticks_per_nsec_quotient)
                >> SPARC64_NSEC_PER_CYC_SHIFT;
}

static int set_rtc_mmss(unsigned long nowtime)
{
        int real_seconds, real_minutes, chip_minutes;
        void __iomem *mregs = mstk48t02_regs;
#ifdef CONFIG_PCI
        unsigned long dregs = ds1287_regs;
#else
        unsigned long dregs = 0UL;
#endif
        unsigned long flags;
        u8 tmp;

        /*
         * Not having a register set can lead to trouble.
         * Also starfire doesn't have a tod clock.
         */
        if (!mregs && !dregs)
                return -1;

        if (mregs) {
                spin_lock_irqsave(&mostek_lock, flags);

                /* Read the current RTC minutes. */
                tmp = mostek_read(mregs + MOSTEK_CREG);
                tmp |= MSTK_CREG_READ;
                mostek_write(mregs + MOSTEK_CREG, tmp);

                chip_minutes = MSTK_REG_MIN(mregs);

                tmp = mostek_read(mregs + MOSTEK_CREG);
                tmp &= ~MSTK_CREG_READ;
                mostek_write(mregs + MOSTEK_CREG, tmp);

                /*
                 * since we're only adjusting minutes and seconds,
                 * don't interfere with hour overflow. This avoids
                 * messing with unknown time zones but requires your
                 * RTC not to be off by more than 15 minutes
                 */
                real_seconds = nowtime % 60;
                real_minutes = nowtime / 60;
                if (((abs(real_minutes - chip_minutes) + 15)/30) & 1)
                        real_minutes += 30;     /* correct for half hour time zone */
                real_minutes %= 60;

                if (abs(real_minutes - chip_minutes) < 30) {
                        tmp = mostek_read(mregs + MOSTEK_CREG);
                        tmp |= MSTK_CREG_WRITE;
                        mostek_write(mregs + MOSTEK_CREG, tmp);

                        MSTK_SET_REG_SEC(mregs, real_seconds);
                        MSTK_SET_REG_MIN(mregs, real_minutes);

                        tmp = mostek_read(mregs + MOSTEK_CREG);
                        tmp &= ~MSTK_CREG_WRITE;
                        mostek_write(mregs + MOSTEK_CREG, tmp);

                        spin_unlock_irqrestore(&mostek_lock, flags);
                        return 0;
                } else {
                        spin_unlock_irqrestore(&mostek_lock, flags);
                        return -1;
                }
        } else {
                int retval = 0;
                unsigned char save_control, save_freq_select;

                /* Stolen from arch/i386/kernel/time.c, see there for
                 * credits and descriptive comments.
                 */
                spin_lock_irqsave(&rtc_lock, flags);
                save_control = CMOS_READ(RTC_CONTROL);  /* tell the clock it's being set */
                CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);

                save_freq_select = CMOS_READ(RTC_FREQ_SELECT);  /* stop and reset prescaler */
                CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);

                chip_minutes = CMOS_READ(RTC_MINUTES);
                if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
                        BCD_TO_BIN(chip_minutes);
                real_seconds = nowtime % 60;
                real_minutes = nowtime / 60;
                if (((abs(real_minutes - chip_minutes) + 15)/30) & 1)
                        real_minutes += 30;
                real_minutes %= 60;

                if (abs(real_minutes - chip_minutes) < 30) {
                        if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
                                BIN_TO_BCD(real_seconds);
                                BIN_TO_BCD(real_minutes);
                        }
                        CMOS_WRITE(real_seconds, RTC_SECONDS);
                        CMOS_WRITE(real_minutes, RTC_MINUTES);
                } else {
                        printk(KERN_WARNING
                               "set_rtc_mmss: can't update from %d to %d\n",
                               chip_minutes, real_minutes);
                        retval = -1;
                }

                CMOS_WRITE(save_control, RTC_CONTROL);
                CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
                spin_unlock_irqrestore(&rtc_lock, flags);

                return retval;
        }
}

#define RTC_IS_OPEN 0x01        /* means /dev/rtc is in use */
static unsigned char mini_rtc_status;   /* bitmapped status byte. */

#define FEBRUARY 2
#define STARTOFTIME 1970
#define SECDAY 86400L
#define SECYR (SECDAY * 365)
#define leapyear(year) ((year) % 4 == 0 && \
                        ((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a) (leapyear(a) ? 366 : 365)
#define days_in_month(a) (month_days[(a) - 1])

static int month_days[12] = {
        31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
static void GregorianDay(struct rtc_time * tm)
{
        int leapsToDate;
        int lastYear;
        int day;
        int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

        lastYear = tm->tm_year - 1;

        /*
         * Number of leap corrections to apply up to end of last year
         */
        leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

        /*
         * This year is a leap year if it is divisible by 4 except when it is
         * divisible by 100 unless it is divisible by 400
         *
         * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
         */
        day = tm->tm_mon > 2 && leapyear(tm->tm_year);

        day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
               tm->tm_mday;

        tm->tm_wday = day % 7;
}

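/* Convert seconds since 1970-01-01 into a broken-down rtc_time.  Note that
 * tm_year is left as the absolute year and tm_mon runs 1-12 here; the
 * callers below adjust them to the usual rtc_time conventions.
 */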
static void to_tm(int tim, struct rtc_time *tm)
{
        register int i;
        register long hms, day;

        day = tim / SECDAY;
        hms = tim % SECDAY;

        /* Hours, minutes, seconds are easy */
        tm->tm_hour = hms / 3600;
        tm->tm_min = (hms % 3600) / 60;
        tm->tm_sec = (hms % 3600) % 60;

        /* Number of years in days */
        for (i = STARTOFTIME; day >= days_in_year(i); i++)
                day -= days_in_year(i);
        tm->tm_year = i;

        /* Number of months in days left */
        if (leapyear(tm->tm_year))
                days_in_month(FEBRUARY) = 29;
        for (i = 1; day >= days_in_month(i); i++)
                day -= days_in_month(i);
        days_in_month(FEBRUARY) = 28;
        tm->tm_mon = i;

        /* Days are what is left over (+1) from all that. */
        tm->tm_mday = day + 1;

        /*
         * Determine the day of week
         */
        GregorianDay(tm);
}

/* Both Starfire and SUN4V give us seconds since Jan 1st, 1970,
 * aka Unix time.  So we have to convert to/from rtc_time.
 */
static inline void mini_get_rtc_time(struct rtc_time *time)
{
        unsigned long flags;
        u32 seconds;

        spin_lock_irqsave(&rtc_lock, flags);
        seconds = 0;
        if (this_is_starfire)
                seconds = starfire_get_time();
        else if (tlb_type == hypervisor)
                seconds = hypervisor_get_time();
        spin_unlock_irqrestore(&rtc_lock, flags);

        to_tm(seconds, time);
        time->tm_year -= 1900;
        time->tm_mon -= 1;
}

static inline int mini_set_rtc_time(struct rtc_time *time)
{
        u32 seconds = mktime(time->tm_year + 1900, time->tm_mon + 1,
                             time->tm_mday, time->tm_hour,
                             time->tm_min, time->tm_sec);
        unsigned long flags;
        int err;

        spin_lock_irqsave(&rtc_lock, flags);
        err = -ENODEV;
        if (this_is_starfire)
                err = starfire_set_time(seconds);
        else if (tlb_type == hypervisor)
                err = hypervisor_set_time(seconds);
        spin_unlock_irqrestore(&rtc_lock, flags);

        return err;
}

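/* Minimal /dev/rtc ioctl interface.  Only reading and setting the time are
 * supported; PLL adjustment and update interrupts are not.
 */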
static int mini_rtc_ioctl(struct inode *inode, struct file *file,
                          unsigned int cmd, unsigned long arg)
{
        struct rtc_time wtime;
        void __user *argp = (void __user *)arg;

        switch (cmd) {

        case RTC_PLL_GET:
                return -EINVAL;

        case RTC_PLL_SET:
                return -EINVAL;

        case RTC_UIE_OFF:       /* disable ints from RTC updates. */
                return 0;

        case RTC_UIE_ON:        /* enable ints for RTC updates. */
                return -EINVAL;

        case RTC_RD_TIME:       /* Read the time/date from RTC */
                /* this doesn't get week-day, who cares */
                memset(&wtime, 0, sizeof(wtime));
                mini_get_rtc_time(&wtime);

                return copy_to_user(argp, &wtime, sizeof(wtime)) ? -EFAULT : 0;

        case RTC_SET_TIME:      /* Set the RTC */
        {
                int year, days;

                if (!capable(CAP_SYS_TIME))
                        return -EACCES;

                if (copy_from_user(&wtime, argp, sizeof(wtime)))
                        return -EFAULT;

                year = wtime.tm_year + 1900;
                days = month_days[wtime.tm_mon] +
                       ((wtime.tm_mon == 1) && leapyear(year));

                if ((wtime.tm_mon < 0 || wtime.tm_mon > 11) ||
                    (wtime.tm_mday < 1))
                        return -EINVAL;

                if (wtime.tm_mday < 0 || wtime.tm_mday > days)
                        return -EINVAL;

                if (wtime.tm_hour < 0 || wtime.tm_hour >= 24 ||
                    wtime.tm_min < 0 || wtime.tm_min >= 60 ||
                    wtime.tm_sec < 0 || wtime.tm_sec >= 60)
                        return -EINVAL;

                return mini_set_rtc_time(&wtime);
        }
        }

        return -EINVAL;
}

static int mini_rtc_open(struct inode *inode, struct file *file)
{
        if (mini_rtc_status & RTC_IS_OPEN)
                return -EBUSY;

        mini_rtc_status |= RTC_IS_OPEN;

        return 0;
}

static int mini_rtc_release(struct inode *inode, struct file *file)
{
        mini_rtc_status &= ~RTC_IS_OPEN;
        return 0;
}

static const struct file_operations mini_rtc_fops = {
        .owner = THIS_MODULE,
        .ioctl = mini_rtc_ioctl,
        .open = mini_rtc_open,
        .release = mini_rtc_release,
};

static struct miscdevice rtc_mini_dev =
{
        .minor = RTC_MINOR,
        .name = "rtc",
        .fops = &mini_rtc_fops,
};

static int __init rtc_mini_init(void)
{
        int retval;

        if (tlb_type != hypervisor && !this_is_starfire)
                return -ENODEV;

        printk(KERN_INFO "Mini RTC Driver\n");

        retval = misc_register(&rtc_mini_dev);
        if (retval < 0)
                return retval;

        return 0;
}

static void __exit rtc_mini_exit(void)
{
        misc_deregister(&rtc_mini_dev);
}

module_init(rtc_mini_init);
module_exit(rtc_mini_exit);