clocksource.c 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681
  1. /*
  2. * linux/kernel/time/clocksource.c
  3. *
  4. * This file contains the functions which manage clocksource drivers.
  5. *
  6. * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  21. *
  22. * TODO WishList:
  23. * o Allow clocksource drivers to be unregistered
  24. */
  25. #include <linux/clocksource.h>
  26. #include <linux/sysdev.h>
  27. #include <linux/init.h>
  28. #include <linux/module.h>
  29. #include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
  30. #include <linux/tick.h>
  31. void timecounter_init(struct timecounter *tc,
  32. const struct cyclecounter *cc,
  33. u64 start_tstamp)
  34. {
  35. tc->cc = cc;
  36. tc->cycle_last = cc->read(cc);
  37. tc->nsec = start_tstamp;
  38. }
  39. EXPORT_SYMBOL(timecounter_init);
  40. /**
  41. * timecounter_read_delta - get nanoseconds since last call of this function
  42. * @tc: Pointer to time counter
  43. *
  44. * When the underlying cycle counter runs over, this will be handled
  45. * correctly as long as it does not run over more than once between
  46. * calls.
  47. *
  48. * The first call to this function for a new time counter initializes
  49. * the time tracking and returns an undefined result.
  50. */
  51. static u64 timecounter_read_delta(struct timecounter *tc)
  52. {
  53. cycle_t cycle_now, cycle_delta;
  54. u64 ns_offset;
  55. /* read cycle counter: */
  56. cycle_now = tc->cc->read(tc->cc);
  57. /* calculate the delta since the last timecounter_read_delta(): */
  58. cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;
  59. /* convert to nanoseconds: */
  60. ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta);
  61. /* update time stamp of timecounter_read_delta() call: */
  62. tc->cycle_last = cycle_now;
  63. return ns_offset;
  64. }
  65. u64 timecounter_read(struct timecounter *tc)
  66. {
  67. u64 nsec;
  68. /* increment time by nanoseconds since last call */
  69. nsec = timecounter_read_delta(tc);
  70. nsec += tc->nsec;
  71. tc->nsec = nsec;
  72. return nsec;
  73. }
  74. EXPORT_SYMBOL(timecounter_read);
  75. u64 timecounter_cyc2time(struct timecounter *tc,
  76. cycle_t cycle_tstamp)
  77. {
  78. u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
  79. u64 nsec;
  80. /*
  81. * Instead of always treating cycle_tstamp as more recent
  82. * than tc->cycle_last, detect when it is too far in the
  83. * future and treat it as old time stamp instead.
  84. */
  85. if (cycle_delta > tc->cc->mask / 2) {
  86. cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
  87. nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta);
  88. } else {
  89. nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec;
  90. }
  91. return nsec;
  92. }
  93. EXPORT_SYMBOL(timecounter_cyc2time);
/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * next_clocksource:
 *	pending next selected clocksource.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_lock:
 *	protects manipulations to curr_clocksource and next_clocksource
 *	and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static struct clocksource *next_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_SPINLOCK(clocksource_lock);
static char override_name[32];	/* set via clocksource=/clock= or sysfs */
static int finished_booting;	/* set by fs_initcall; gates switching in
				 * clocksource_get_next() */
/* clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	/* From now on clocksource_get_next() may switch clocksources. */
	finished_booting = 1;
	return 0;
}
fs_initcall(clocksource_done_booting);
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG

/* Watchdog state; all of it is protected by watchdog_lock. */
static LIST_HEAD(watchdog_list);		/* clocksources under verification */
static struct clocksource *watchdog;		/* reference clocksource */
static struct timer_list watchdog_timer;	/* periodic verification timer */
static struct work_struct watchdog_work;	/* demotes unstable clocksources */
static DEFINE_SPINLOCK(watchdog_lock);
static cycle_t watchdog_last;			/* reference readout at last tick */
static int watchdog_running;			/* is watchdog_timer armed? */

static void clocksource_watchdog_work(struct work_struct *work);

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
  139. static void clocksource_unstable(struct clocksource *cs, int64_t delta)
  140. {
  141. printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
  142. cs->name, delta);
  143. cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
  144. cs->flags |= CLOCK_SOURCE_UNSTABLE;
  145. schedule_work(&watchdog_work);
  146. }
/*
 * clocksource_watchdog - periodic timer callback verifying clocksources
 *
 * Compares each watched clocksource's advance over the last interval
 * against the watchdog reference; a deviation above WATCHDOG_THRESHOLD
 * marks the clocksource unstable. Re-arms itself on the next online CPU
 * so the check also cycles across CPUs.
 */
static void clocksource_watchdog(unsigned long data)
{
	struct clocksource *cs;
	cycle_t csnow, wdnow;
	int64_t wd_nsec, cs_nsec;
	int next_cpu;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	/* Nanoseconds elapsed on the reference since the last tick. */
	wdnow = watchdog->read(watchdog);
	wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
	watchdog_last = wdnow;

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE)
			continue;

		csnow = cs->read(cs);

		/* Clocksource initialized ? First tick only latches wd_last. */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = csnow;
			continue;
		}

		/* Check the deviation from the watchdog clocksource. */
		cs_nsec = cyc2ns(cs, (csnow - cs->wd_last) & cs->mask);
		cs->wd_last = csnow;
		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
			clocksource_unstable(cs, cs_nsec - wd_nsec);
			continue;
		}

		/* Promote to highres once both cs and watchdog are continuous. */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
			/*
			 * We just marked the clocksource as highres-capable,
			 * notify the rest of the system as well so that we
			 * transition into high-res mode:
			 */
			tick_clock_notify();
		}
	}

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);
	watchdog_timer.expires += WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, next_cpu);
out:
	spin_unlock(&watchdog_lock);
}
/*
 * Arm the watchdog timer if it is not yet running and we have both a
 * reference clocksource and at least one clocksource to verify.
 * Caller must hold watchdog_lock.
 */
static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	INIT_WORK(&watchdog_work, clocksource_watchdog_work);
	init_timer(&watchdog_timer);
	watchdog_timer.function = clocksource_watchdog;
	/* Latch the reference so the first interval starts now. */
	watchdog_last = watchdog->read(watchdog);
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}
  213. static inline void clocksource_stop_watchdog(void)
  214. {
  215. if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
  216. return;
  217. del_timer(&watchdog_timer);
  218. watchdog_running = 0;
  219. }
/*
 * Force re-initialization of every watched clocksource: clearing
 * CLOCK_SOURCE_WATCHDOG makes the next watchdog tick re-latch wd_last
 * instead of comparing against a stale value.
 * Caller must hold watchdog_lock.
 */
static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}
/*
 * Reset watchdog tracking after a period in which clocks may have
 * drifted without ticks (resume, kgdb), so the next comparison
 * interval starts fresh instead of tripping the watchdog.
 */
static void clocksource_resume_watchdog(void)
{
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	clocksource_reset_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
  233. static void clocksource_enqueue_watchdog(struct clocksource *cs)
  234. {
  235. unsigned long flags;
  236. spin_lock_irqsave(&watchdog_lock, flags);
  237. if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
  238. /* cs is a clocksource to be watched. */
  239. list_add(&cs->wd_list, &watchdog_list);
  240. cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
  241. } else {
  242. /* cs is a watchdog. */
  243. if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
  244. cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
  245. /* Pick the best watchdog. */
  246. if (!watchdog || cs->rating > watchdog->rating) {
  247. watchdog = cs;
  248. /* Reset watchdog cycles */
  249. clocksource_reset_watchdog();
  250. }
  251. }
  252. /* Check if the watchdog timer needs to be started. */
  253. clocksource_start_watchdog();
  254. spin_unlock_irqrestore(&watchdog_lock, flags);
  255. }
/*
 * Remove @cs from the watchdog machinery. If it was the reference
 * watchdog itself, pick the best remaining non-verified clocksource
 * as the new reference.
 */
static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	struct clocksource *tmp;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a watched clocksource. */
		list_del_init(&cs->wd_list);
	} else if (cs == watchdog) {
		/* Reset watchdog cycles */
		clocksource_reset_watchdog();
		/* Current watchdog is removed. Find an alternative. */
		watchdog = NULL;
		/*
		 * NOTE(review): this walks clocksource_list while holding
		 * only watchdog_lock; clocksource_list is elsewhere guarded
		 * by clocksource_lock — confirm callers serialize this.
		 */
		list_for_each_entry(tmp, &clocksource_list, list) {
			if (tmp == cs || tmp->flags & CLOCK_SOURCE_MUST_VERIFY)
				continue;
			if (!watchdog || tmp->rating > watchdog->rating)
				watchdog = tmp;
		}
	}
	cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}
  281. static void clocksource_watchdog_work(struct work_struct *work)
  282. {
  283. struct clocksource *cs, *tmp;
  284. unsigned long flags;
  285. spin_lock_irqsave(&watchdog_lock, flags);
  286. list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list)
  287. if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
  288. list_del_init(&cs->wd_list);
  289. clocksource_change_rating(cs, 0);
  290. }
  291. /* Check if the watchdog timer needs to be stopped. */
  292. clocksource_stop_watchdog();
  293. spin_unlock(&watchdog_lock);
  294. }
  295. #else /* CONFIG_CLOCKSOURCE_WATCHDOG */
  296. static void clocksource_enqueue_watchdog(struct clocksource *cs)
  297. {
  298. if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
  299. cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
  300. }
  301. static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
  302. static inline void clocksource_resume_watchdog(void) { }
  303. #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
  304. /**
  305. * clocksource_resume - resume the clocksource(s)
  306. */
  307. void clocksource_resume(void)
  308. {
  309. struct clocksource *cs;
  310. unsigned long flags;
  311. spin_lock_irqsave(&clocksource_lock, flags);
  312. list_for_each_entry(cs, &clocksource_list, list) {
  313. if (cs->resume)
  314. cs->resume();
  315. }
  316. clocksource_resume_watchdog();
  317. spin_unlock_irqrestore(&clocksource_lock, flags);
  318. }
/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog.
 */
void clocksource_touch_watchdog(void)
{
	/* Same reset as resume: discard the stale comparison interval. */
	clocksource_resume_watchdog();
}
  330. #ifdef CONFIG_GENERIC_TIME
  331. /**
  332. * clocksource_get_next - Returns the selected clocksource
  333. *
  334. */
  335. struct clocksource *clocksource_get_next(void)
  336. {
  337. unsigned long flags;
  338. spin_lock_irqsave(&clocksource_lock, flags);
  339. if (next_clocksource && finished_booting) {
  340. curr_clocksource = next_clocksource;
  341. next_clocksource = NULL;
  342. }
  343. spin_unlock_irqrestore(&clocksource_lock, flags);
  344. return curr_clocksource;
  345. }
  346. /**
  347. * clocksource_select - Select the best clocksource available
  348. *
  349. * Private function. Must hold clocksource_lock when called.
  350. *
  351. * Select the clocksource with the best rating, or the clocksource,
  352. * which is selected by userspace override.
  353. */
  354. static void clocksource_select(void)
  355. {
  356. struct clocksource *best, *cs;
  357. if (list_empty(&clocksource_list))
  358. return;
  359. /* First clocksource on the list has the best rating. */
  360. best = list_first_entry(&clocksource_list, struct clocksource, list);
  361. /* Check for the override clocksource. */
  362. list_for_each_entry(cs, &clocksource_list, list) {
  363. if (strcmp(cs->name, override_name) != 0)
  364. continue;
  365. /*
  366. * Check to make sure we don't switch to a non-highres
  367. * capable clocksource if the tick code is in oneshot
  368. * mode (highres or nohz)
  369. */
  370. if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
  371. tick_oneshot_mode_active()) {
  372. /* Override clocksource cannot be used. */
  373. printk(KERN_WARNING "Override clocksource %s is not "
  374. "HRT compatible. Cannot switch while in "
  375. "HRT/NOHZ mode\n", cs->name);
  376. override_name[0] = 0;
  377. } else
  378. /* Override clocksource can be used. */
  379. best = cs;
  380. break;
  381. }
  382. if (curr_clocksource != best)
  383. next_clocksource = best;
  384. }
#else /* CONFIG_GENERIC_TIME */
/* Without generic time there is nothing to select. */
static void clocksource_select(void) { }
#endif
  388. /*
  389. * Enqueue the clocksource sorted by rating
  390. */
  391. static void clocksource_enqueue(struct clocksource *cs)
  392. {
  393. struct list_head *entry = &clocksource_list;
  394. struct clocksource *tmp;
  395. list_for_each_entry(tmp, &clocksource_list, list)
  396. /* Keep track of the place, where to insert */
  397. if (tmp->rating >= cs->rating)
  398. entry = &tmp->list;
  399. list_add(&cs->list, entry);
  400. }
/**
 * clocksource_register - Used to install new clocksources
 * @cs: clocksource to be registered
 *
 * Returns 0; registration currently cannot fail (the -EBUSY case
 * mentioned by older documentation no longer exists).
 */
int clocksource_register(struct clocksource *cs)
{
	unsigned long flags;

	spin_lock_irqsave(&clocksource_lock, flags);
	clocksource_enqueue(cs);
	clocksource_select();
	spin_unlock_irqrestore(&clocksource_lock, flags);
	/* Watchdog registration takes watchdog_lock; do it outside. */
	clocksource_enqueue_watchdog(cs);
	return 0;
}
EXPORT_SYMBOL(clocksource_register);
/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 * @cs:     clocksource to re-rate
 * @rating: new rating
 *
 * Re-inserts @cs into the rating-sorted list and re-runs selection so
 * a better (or now worse) clocksource can take effect.
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	unsigned long flags;

	spin_lock_irqsave(&clocksource_lock, flags);
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
	clocksource_select();
	spin_unlock_irqrestore(&clocksource_lock, flags);
}
EXPORT_SYMBOL(clocksource_change_rating);
/**
 * clocksource_unregister - remove a registered clocksource
 * @cs: clocksource to be unregistered
 */
void clocksource_unregister(struct clocksource *cs)
{
	unsigned long flags;

	/* Detach from the watchdog first (takes watchdog_lock). */
	clocksource_dequeue_watchdog(cs);
	spin_lock_irqsave(&clocksource_lock, flags);
	list_del(&cs->list);
	/* Re-select in case @cs was the current/pending clocksource. */
	clocksource_select();
	spin_unlock_irqrestore(&clocksource_lock, flags);
}
EXPORT_SYMBOL(clocksource_unregister);
  445. #ifdef CONFIG_SYSFS
  446. /**
  447. * sysfs_show_current_clocksources - sysfs interface for current clocksource
  448. * @dev: unused
  449. * @buf: char buffer to be filled with clocksource list
  450. *
  451. * Provides sysfs interface for listing current clocksource.
  452. */
  453. static ssize_t
  454. sysfs_show_current_clocksources(struct sys_device *dev,
  455. struct sysdev_attribute *attr, char *buf)
  456. {
  457. ssize_t count = 0;
  458. spin_lock_irq(&clocksource_lock);
  459. count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
  460. spin_unlock_irq(&clocksource_lock);
  461. return count;
  462. }
  463. /**
  464. * sysfs_override_clocksource - interface for manually overriding clocksource
  465. * @dev: unused
  466. * @buf: name of override clocksource
  467. * @count: length of buffer
  468. *
  469. * Takes input from sysfs interface for manually overriding the default
  470. * clocksource selction.
  471. */
  472. static ssize_t sysfs_override_clocksource(struct sys_device *dev,
  473. struct sysdev_attribute *attr,
  474. const char *buf, size_t count)
  475. {
  476. size_t ret = count;
  477. /* strings from sysfs write are not 0 terminated! */
  478. if (count >= sizeof(override_name))
  479. return -EINVAL;
  480. /* strip of \n: */
  481. if (buf[count-1] == '\n')
  482. count--;
  483. spin_lock_irq(&clocksource_lock);
  484. if (count > 0)
  485. memcpy(override_name, buf, count);
  486. override_name[count] = 0;
  487. clocksource_select();
  488. spin_unlock_irq(&clocksource_lock);
  489. return ret;
  490. }
/**
 * sysfs_show_available_clocksources - sysfs interface for listing clocksource
 * @dev:  unused
 * @attr: unused
 * @buf:  char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t
sysfs_show_available_clocksources(struct sys_device *dev,
				  struct sysdev_attribute *attr,
				  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	spin_lock_irq(&clocksource_lock);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			/* max() clamps the remaining size at 0 so snprintf
			 * never sees a negative length once buf is full. */
			count += snprintf(buf + count,
				max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
				"%s ", src->name);
	}
	spin_unlock_irq(&clocksource_lock);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}
/*
 * Sysfs setup bits:
 */
/* /sys/devices/system/clocksource/clocksource0/current_clocksource (rw) */
static SYSDEV_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
		   sysfs_override_clocksource);

/* /sys/devices/system/clocksource/clocksource0/available_clocksource (ro) */
static SYSDEV_ATTR(available_clocksource, 0444,
		   sysfs_show_available_clocksources, NULL);

static struct sysdev_class clocksource_sysclass = {
	.name = "clocksource",
};

static struct sys_device device_clocksource = {
	.id	= 0,
	.cls	= &clocksource_sysclass,
};
  536. static int __init init_clocksource_sysfs(void)
  537. {
  538. int error = sysdev_class_register(&clocksource_sysclass);
  539. if (!error)
  540. error = sysdev_register(&device_clocksource);
  541. if (!error)
  542. error = sysdev_create_file(
  543. &device_clocksource,
  544. &attr_current_clocksource);
  545. if (!error)
  546. error = sysdev_create_file(
  547. &device_clocksource,
  548. &attr_available_clocksource);
  549. return error;
  550. }
  551. device_initcall(init_clocksource_sysfs);
  552. #endif /* CONFIG_SYSFS */
/**
 * boot_override_clocksource - boot clock override
 * @str: override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char* str)
{
	unsigned long flags;

	spin_lock_irqsave(&clocksource_lock, flags);
	if (str)
		strlcpy(override_name, str, sizeof(override_name));
	spin_unlock_irqrestore(&clocksource_lock, flags);
	/* __setup handlers return 1 to mark the option as consumed. */
	return 1;
}
__setup("clocksource=", boot_override_clocksource);
  570. /**
  571. * boot_override_clock - Compatibility layer for deprecated boot option
  572. * @str: override name
  573. *
  574. * DEPRECATED! Takes a clock= boot argument and uses it
  575. * as the clocksource override name
  576. */
  577. static int __init boot_override_clock(char* str)
  578. {
  579. if (!strcmp(str, "pmtmr")) {
  580. printk("Warning: clock=pmtmr is deprecated. "
  581. "Use clocksource=acpi_pm.\n");
  582. return boot_override_clocksource("acpi_pm");
  583. }
  584. printk("Warning! clock= boot option is deprecated. "
  585. "Use clocksource=xyz\n");
  586. return boot_override_clocksource(str);
  587. }
  588. __setup("clock=", boot_override_clock);