time.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403
  1. /*
  2. * arch/s390/kernel/time.c
  3. * Time of day based timer functions.
  4. *
  5. * S390 version
  6. * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
  7. * Author(s): Hartmut Penner (hp@de.ibm.com),
  8. * Martin Schwidefsky (schwidefsky@de.ibm.com),
  9. * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
  10. *
  11. * Derived from "arch/i386/kernel/time.c"
  12. * Copyright (C) 1991, 1992, 1995 Linus Torvalds
  13. */
  14. #include <linux/errno.h>
  15. #include <linux/module.h>
  16. #include <linux/sched.h>
  17. #include <linux/kernel.h>
  18. #include <linux/param.h>
  19. #include <linux/string.h>
  20. #include <linux/mm.h>
  21. #include <linux/interrupt.h>
  22. #include <linux/time.h>
  23. #include <linux/sysdev.h>
  24. #include <linux/delay.h>
  25. #include <linux/init.h>
  26. #include <linux/smp.h>
  27. #include <linux/types.h>
  28. #include <linux/profile.h>
  29. #include <linux/timex.h>
  30. #include <linux/notifier.h>
  31. #include <linux/clocksource.h>
  32. #include <asm/uaccess.h>
  33. #include <asm/delay.h>
  34. #include <asm/s390_ext.h>
  35. #include <asm/div64.h>
  36. #include <asm/irq.h>
  37. #include <asm/irq_regs.h>
  38. #include <asm/timer.h>
  39. #include <asm/etr.h>
  40. /* change this if you have some constant time drift */
  41. #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
  42. #define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
  43. /* The value of the TOD clock for 1.1.1970. */
  44. #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
  45. /*
  46. * Create a small time difference between the timer interrupts
  47. * on the different cpus to avoid lock contention.
  48. */
  49. #define CPU_DEVIATION (smp_processor_id() << 12)
  50. #define TICK_SIZE tick
  51. static ext_int_info_t ext_int_info_cc;
  52. static ext_int_info_t ext_int_etr_cc;
  53. static u64 init_timer_cc;
  54. static u64 jiffies_timer_cc;
  55. static u64 xtime_cc;
  56. /*
  57. * Scheduler clock - returns current time in nanosec units.
  58. */
  59. unsigned long long sched_clock(void)
  60. {
  61. return ((get_clock() - jiffies_timer_cc) * 125) >> 9;
  62. }
/*
 * Monotonic_clock - returns # of nanoseconds passed since time_init().
 * Simply delegates to sched_clock, which uses the same TOD delta.
 */
unsigned long long monotonic_clock(void)
{
	return sched_clock();
}
EXPORT_SYMBOL(monotonic_clock);
/*
 * Convert a TOD clock value (units of 2**-12 microseconds) into a
 * timespec of seconds and nanoseconds.
 */
void tod_to_timeval(__u64 todval, struct timespec *xtime)
{
	unsigned long long sec;

	sec = todval >> 12;		/* TOD units -> microseconds */
	do_div(sec, 1000000);		/* microseconds -> whole seconds */
	xtime->tv_sec = sec;
	/* Strip the whole seconds, convert the remainder to nanoseconds. */
	todval -= (sec * 1000000) << 12;
	xtime->tv_nsec = ((todval * 1000) >> 12);
}
  80. #ifdef CONFIG_PROFILING
  81. #define s390_do_profile() profile_tick(CPU_PROFILING)
  82. #else
  83. #define s390_do_profile() do { ; } while(0)
  84. #endif /* CONFIG_PROFILING */
/*
 * Advance the per cpu tick counter up to the time given with the
 * "time" argument. The per cpu update consists of accounting
 * the virtual cpu time, calling update_process_times and calling
 * the profiling hook. If xtime is before time it is advanced as well.
 */
void account_ticks(u64 time)
{
	__u32 ticks;
	__u64 tmp;

	/* Calculate how many ticks have passed. */
	if (time < S390_lowcore.jiffy_timer)
		return;
	tmp = time - S390_lowcore.jiffy_timer;
	if (tmp >= 2*CLK_TICKS_PER_JIFFY) {  /* more than two ticks ? */
		ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1;
		S390_lowcore.jiffy_timer +=
			CLK_TICKS_PER_JIFFY * (__u64) ticks;
	} else if (tmp >= CLK_TICKS_PER_JIFFY) {
		ticks = 2;
		S390_lowcore.jiffy_timer += 2*CLK_TICKS_PER_JIFFY;
	} else {
		ticks = 1;
		S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
	}

#ifdef CONFIG_SMP
	/*
	 * Do not rely on the boot cpu to do the calls to do_timer.
	 * Spread it over all cpus instead.
	 */
	write_seqlock(&xtime_lock);
	if (S390_lowcore.jiffy_timer > xtime_cc) {
		__u32 xticks;
		/* Advance xtime_cc by whole jiffies and feed them to do_timer. */
		tmp = S390_lowcore.jiffy_timer - xtime_cc;
		if (tmp >= 2*CLK_TICKS_PER_JIFFY) {
			xticks = __div(tmp, CLK_TICKS_PER_JIFFY);
			xtime_cc += (__u64) xticks * CLK_TICKS_PER_JIFFY;
		} else {
			xticks = 1;
			xtime_cc += CLK_TICKS_PER_JIFFY;
		}
		do_timer(xticks);
	}
	write_sequnlock(&xtime_lock);
#else
	do_timer(ticks);
#endif

	/* Account each elapsed tick to the interrupted context. */
	while (ticks--)
		update_process_times(user_mode(get_irq_regs()));
	s390_do_profile();
}
  136. #ifdef CONFIG_NO_IDLE_HZ
  137. #ifdef CONFIG_NO_IDLE_HZ_INIT
  138. int sysctl_hz_timer = 0;
  139. #else
  140. int sysctl_hz_timer = 1;
  141. #endif
/*
 * Stop the HZ tick on the current CPU.
 * Only cpu_idle may call this function.
 */
static void stop_hz_timer(void)
{
	unsigned long flags;
	unsigned long seq, next;
	__u64 timer, todval;
	int cpu = smp_processor_id();

	/* nohz disabled via sysctl: keep the periodic tick running. */
	if (sysctl_hz_timer != 0)
		return;

	cpu_set(cpu, nohz_cpu_mask);

	/*
	 * Leave the clock comparator set up for the next timer
	 * tick if either rcu or a softirq is pending.
	 */
	if (rcu_needs_cpu(cpu) || local_softirq_pending()) {
		cpu_clear(cpu, nohz_cpu_mask);
		return;
	}

	/*
	 * This cpu is going really idle. Set up the clock comparator
	 * for the next event.
	 */
	next = next_timer_interrupt();
	do {
		/* Read jiffies/jiffies_64 consistently under the xtime seqlock. */
		seq = read_seqbegin_irqsave(&xtime_lock, flags);
		timer = ((__u64) next) - ((__u64) jiffies) + jiffies_64;
	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
	todval = -1ULL;
	/* Be careful about overflows. */
	if (timer < (-1ULL / CLK_TICKS_PER_JIFFY)) {
		timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY;
		if (timer >= jiffies_timer_cc)
			todval = timer;
	}
	set_clock_comparator(todval);
}
  181. /*
  182. * Start the HZ tick on the current CPU.
  183. * Only cpu_idle may call this function.
  184. */
  185. static void start_hz_timer(void)
  186. {
  187. if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
  188. return;
  189. account_ticks(get_clock());
  190. set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
  191. cpu_clear(smp_processor_id(), nohz_cpu_mask);
  192. }
  193. static int nohz_idle_notify(struct notifier_block *self,
  194. unsigned long action, void *hcpu)
  195. {
  196. switch (action) {
  197. case S390_CPU_IDLE:
  198. stop_hz_timer();
  199. break;
  200. case S390_CPU_NOT_IDLE:
  201. start_hz_timer();
  202. break;
  203. }
  204. return NOTIFY_OK;
  205. }
/* Notifier block hooked into the s390 idle notification chain. */
static struct notifier_block nohz_idle_nb = {
	.notifier_call = nohz_idle_notify,
};

/* Register the nohz idle notifier; boot-time only. */
static void __init nohz_init(void)
{
	if (register_idle_notifier(&nohz_idle_nb))
		panic("Couldn't register idle notifier");
}
  214. #endif
/*
 * Set up per cpu jiffy timer and set the clock comparator.
 */
static void setup_jiffy_timer(void)
{
	/* Set up clock comparator to next jiffy. */
	S390_lowcore.jiffy_timer =
		jiffies_timer_cc + (jiffies_64 + 1) * CLK_TICKS_PER_JIFFY;
	/* CPU_DEVIATION staggers the interrupt per cpu (see its definition). */
	set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
}
/*
 * Set up lowcore and control register of the current cpu to
 * enable TOD clock and clock comparator interrupts.
 */
void init_cpu_timer(void)
{
	setup_jiffy_timer();

	/* Enable clock comparator timer interrupt. */
	__ctl_set_bit(0,11);

	/* Always allow ETR external interrupts, even without an ETR. */
	__ctl_set_bit(0, 4);
}
/* External interrupt handler for the clock comparator (code 0x1004). */
static void clock_comparator_interrupt(__u16 code)
{
	/* set clock comparator for next tick */
	set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
}
/* Forward declarations for the ETR code further down. */
static void etr_reset(void);
static void etr_ext_handler(__u16);

/*
 * Get the TOD clock running.
 */
static u64 __init reset_tod_clock(void)
{
	u64 time;

	etr_reset();
	/* If the TOD clock is already running, just return its value. */
	if (store_clock(&time) == 0)
		return time;
	/* TOD clock not running. Set the clock to Unix Epoch. */
	if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0)
		panic("TOD clock not operational.");
	return TOD_UNIX_EPOCH;
}
/* Clocksource read callback: returns the raw TOD clock value. */
static cycle_t read_tod_clock(void)
{
	return get_clock();
}
/*
 * TOD clocksource. mult=1000/shift=12 converts TOD units
 * (2**-12 microseconds) to nanoseconds.
 */
static struct clocksource clocksource_tod = {
	.name		= "tod",
	.rating		= 400,
	.read		= read_tod_clock,
	.mask		= -1ULL,
	.mult		= 1000,
	.shift		= 12,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
/*
 * Initialize the TOD clock and the CPU timer of
 * the boot cpu.
 */
void __init time_init(void)
{
	init_timer_cc = reset_tod_clock();
	/* xtime accounting starts one jiffy after the initial clock value. */
	xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY;
	/* Base value so the TOD delta maps onto jiffies_64 (see sched_clock). */
	jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY;

	/* set xtime */
	tod_to_timeval(init_timer_cc - TOD_UNIX_EPOCH, &xtime);
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);

	/* request the clock comparator external interrupt */
	if (register_early_external_interrupt(0x1004,
					      clock_comparator_interrupt,
					      &ext_int_info_cc) != 0)
		panic("Couldn't request external interrupt 0x1004");
	if (clocksource_register(&clocksource_tod) != 0)
		panic("Could not register TOD clock source");
	/* request the etr external interrupt */
	if (register_early_external_interrupt(0x1406, etr_ext_handler,
					      &ext_int_etr_cc) != 0)
		panic("Couldn't request external interrupt 0x1406");

	/* Enable TOD clock interrupts on the boot cpu. */
	init_cpu_timer();

#ifdef CONFIG_NO_IDLE_HZ
	nohz_init();
#endif

#ifdef CONFIG_VIRT_TIMER
	vtime_init();
#endif
}
/*
 * External Time Reference (ETR) code.
 */
/* Requested online state of the two ETR ports (set by the "etr=" parameter). */
static int etr_port0_online;
static int etr_port1_online;
  309. static int __init early_parse_etr(char *p)
  310. {
  311. if (strncmp(p, "off", 3) == 0)
  312. etr_port0_online = etr_port1_online = 0;
  313. else if (strncmp(p, "port0", 5) == 0)
  314. etr_port0_online = 1;
  315. else if (strncmp(p, "port1", 5) == 0)
  316. etr_port1_online = 1;
  317. else if (strncmp(p, "on", 2) == 0)
  318. etr_port0_online = etr_port1_online = 1;
  319. return 0;
  320. }
  321. early_param("etr", early_parse_etr);
/* Events processed by etr_work_fn; values are bit numbers in etr_events. */
enum etr_event {
	ETR_EVENT_PORT0_CHANGE,
	ETR_EVENT_PORT1_CHANGE,
	ETR_EVENT_PORT_ALERT,
	ETR_EVENT_SYNC_CHECK,
	ETR_EVENT_SWITCH_LOCAL,
	ETR_EVENT_UPDATE,
};
/* State flags, bit numbers in etr_flags (reported via get_sync_clock). */
enum etr_flags {
	ETR_FLAG_ENOSYS,	/* no usable ETR attachment (etr_setr failed) */
	ETR_FLAG_EACCES,	/* neither port was requested online */
	ETR_FLAG_STEAI,		/* machine has the steai instruction */
};
  335. /*
  336. * Valid bit combinations of the eacr register are (x = don't care):
  337. * e0 e1 dp p0 p1 ea es sl
  338. * 0 0 x 0 0 0 0 0 initial, disabled state
  339. * 0 0 x 0 1 1 0 0 port 1 online
  340. * 0 0 x 1 0 1 0 0 port 0 online
  341. * 0 0 x 1 1 1 0 0 both ports online
  342. * 0 1 x 0 1 1 0 0 port 1 online and usable, ETR or PPS mode
  343. * 0 1 x 0 1 1 0 1 port 1 online, usable and ETR mode
  344. * 0 1 x 0 1 1 1 0 port 1 online, usable, PPS mode, in-sync
  345. * 0 1 x 0 1 1 1 1 port 1 online, usable, ETR mode, in-sync
  346. * 0 1 x 1 1 1 0 0 both ports online, port 1 usable
  347. * 0 1 x 1 1 1 1 0 both ports online, port 1 usable, PPS mode, in-sync
  348. * 0 1 x 1 1 1 1 1 both ports online, port 1 usable, ETR mode, in-sync
  349. * 1 0 x 1 0 1 0 0 port 0 online and usable, ETR or PPS mode
  350. * 1 0 x 1 0 1 0 1 port 0 online, usable and ETR mode
  351. * 1 0 x 1 0 1 1 0 port 0 online, usable, PPS mode, in-sync
  352. * 1 0 x 1 0 1 1 1 port 0 online, usable, ETR mode, in-sync
  353. * 1 0 x 1 1 1 0 0 both ports online, port 0 usable
  354. * 1 0 x 1 1 1 1 0 both ports online, port 0 usable, PPS mode, in-sync
  355. * 1 0 x 1 1 1 1 1 both ports online, port 0 usable, ETR mode, in-sync
  356. * 1 1 x 1 1 1 1 0 both ports online & usable, ETR, in-sync
  357. * 1 1 x 1 1 1 1 1 both ports online & usable, ETR, in-sync
  358. */
/* Shadow copy of the ETR attachment control register contents. */
static struct etr_eacr etr_eacr;
static u64 etr_tolec;			/* time of last eacr update */
static unsigned long etr_flags;		/* ETR_FLAG_* bits */
static struct etr_aib etr_port0;	/* last aib stored for port 0 */
static int etr_port0_uptodate;
static struct etr_aib etr_port1;	/* last aib stored for port 1 */
static int etr_port1_uptodate;
static unsigned long etr_events;	/* pending ETR_EVENT_* bits */
static struct timer_list etr_timer;	/* timeout driving etr_work_fn */
static DEFINE_PER_CPU(atomic_t, etr_sync_word);

static void etr_timeout(unsigned long dummy);
static void etr_work_fn(struct work_struct *work);
static DECLARE_WORK(etr_work, etr_work_fn);
  372. /*
  373. * The etr get_clock function. It will write the current clock value
  374. * to the clock pointer and return 0 if the clock is in sync with the
  375. * external time source. If the clock mode is local it will return
  376. * -ENOSYS and -EAGAIN if the clock is not in sync with the external
  377. * reference. This function is what ETR is all about..
  378. */
  379. int get_sync_clock(unsigned long long *clock)
  380. {
  381. atomic_t *sw_ptr;
  382. unsigned int sw0, sw1;
  383. sw_ptr = &get_cpu_var(etr_sync_word);
  384. sw0 = atomic_read(sw_ptr);
  385. *clock = get_clock();
  386. sw1 = atomic_read(sw_ptr);
  387. put_cpu_var(etr_sync_sync);
  388. if (sw0 == sw1 && (sw0 & 0x80000000U))
  389. /* Success: time is in sync. */
  390. return 0;
  391. if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
  392. return -ENOSYS;
  393. if (test_bit(ETR_FLAG_EACCES, &etr_flags))
  394. return -EACCES;
  395. return -EAGAIN;
  396. }
  397. EXPORT_SYMBOL(get_sync_clock);
/*
 * Make get_sync_clock return -EAGAIN.
 */
static void etr_disable_sync_clock(void *dummy)
{
	atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word);

	/*
	 * Clear the in-sync bit 2^31. All get_sync_clock calls will
	 * fail until the sync bit is turned back on. In addition
	 * increase the "sequence" counter to avoid the race of an
	 * etr event and the complete recovery against get_sync_clock.
	 */
	atomic_clear_mask(0x80000000, sw_ptr);
	atomic_inc(sw_ptr);
}
/*
 * Make get_sync_clock return 0 again.
 * Needs to be called from a context disabled for preemption.
 */
static void etr_enable_sync_clock(void)
{
	atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word);

	/* Set the in-sync bit 2^31 (see etr_disable_sync_clock). */
	atomic_set_mask(0x80000000, sw_ptr);
}
  422. /*
  423. * Reset ETR attachment.
  424. */
  425. static void etr_reset(void)
  426. {
  427. etr_eacr = (struct etr_eacr) {
  428. .e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0,
  429. .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0,
  430. .es = 0, .sl = 0 };
  431. if (etr_setr(&etr_eacr) == 0)
  432. etr_tolec = get_clock();
  433. else {
  434. set_bit(ETR_FLAG_ENOSYS, &etr_flags);
  435. if (etr_port0_online || etr_port1_online) {
  436. printk(KERN_WARNING "Running on non ETR capable "
  437. "machine, only local mode available.\n");
  438. etr_port0_online = etr_port1_online = 0;
  439. }
  440. }
  441. }
  442. static int __init etr_init(void)
  443. {
  444. struct etr_aib aib;
  445. if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
  446. return 0;
  447. /* Check if this machine has the steai instruction. */
  448. if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0)
  449. set_bit(ETR_FLAG_STEAI, &etr_flags);
  450. setup_timer(&etr_timer, etr_timeout, 0UL);
  451. if (!etr_port0_online && !etr_port1_online)
  452. set_bit(ETR_FLAG_EACCES, &etr_flags);
  453. if (etr_port0_online) {
  454. set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
  455. schedule_work(&etr_work);
  456. }
  457. if (etr_port1_online) {
  458. set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
  459. schedule_work(&etr_work);
  460. }
  461. return 0;
  462. }
  463. arch_initcall(etr_init);
  464. /*
  465. * Two sorts of ETR machine checks. The architecture reads:
466. * "When a machine-check interruption occurs and if a switch-to-local or
  467. * ETR-sync-check interrupt request is pending but disabled, this pending
  468. * disabled interruption request is indicated and is cleared".
  469. * Which means that we can get etr_switch_to_local events from the machine
  470. * check handler although the interruption condition is disabled. Lovely..
  471. */
  472. /*
  473. * Switch to local machine check. This is called when the last usable
  474. * ETR port goes inactive. After switch to local the clock is not in sync.
  475. */
  476. void etr_switch_to_local(void)
  477. {
  478. if (!etr_eacr.sl)
  479. return;
  480. etr_disable_sync_clock(NULL);
  481. set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
  482. schedule_work(&etr_work);
  483. }
  484. /*
  485. * ETR sync check machine check. This is called when the ETR OTE and the
  486. * local clock OTE are farther apart than the ETR sync check tolerance.
  487. * After a ETR sync check the clock is not in sync. The machine check
  488. * is broadcasted to all cpus at the same time.
  489. */
  490. void etr_sync_check(void)
  491. {
  492. if (!etr_eacr.es)
  493. return;
  494. etr_disable_sync_clock(NULL);
  495. set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
  496. schedule_work(&etr_work);
  497. }
/*
 * ETR external interrupt. There are two causes:
 * 1) port state change, check the usability of the port
 * 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the
 *    sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3)
 *    or ETR-data word 4 (edf4) has changed.
 */
static void etr_ext_handler(__u16 code)
{
	/* The interruption parameter lives in the lowcore ext_params word. */
	struct etr_interruption_parameter *intparm =
		(struct etr_interruption_parameter *) &S390_lowcore.ext_params;

	if (intparm->pc0)
		/* ETR port 0 state change. */
		set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
	if (intparm->pc1)
		/* ETR port 1 state change. */
		set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
	if (intparm->eai)
		/*
		 * ETR port alert on either port 0, 1 or both.
		 * Both ports are not up-to-date now.
		 */
		set_bit(ETR_EVENT_PORT_ALERT, &etr_events);
	/* Defer the real processing to etr_work_fn. */
	schedule_work(&etr_work);
}
/* etr_timer callback: request an update pass from etr_work_fn. */
static void etr_timeout(unsigned long dummy)
{
	set_bit(ETR_EVENT_UPDATE, &etr_events);
	schedule_work(&etr_work);
}
/*
 * Check if the etr mode is pps: sync check enabled,
 * switch-to-local disabled.
 */
static inline int etr_mode_is_pps(struct etr_eacr eacr)
{
	return eacr.es && !eacr.sl;
}
/*
 * Check if the etr mode is etr: both sync check and
 * switch-to-local enabled.
 */
static inline int etr_mode_is_etr(struct etr_eacr eacr)
{
	return eacr.es && eacr.sl;
}
  542. /*
  543. * Check if the port can be used for TOD synchronization.
  544. * For PPS mode the port has to receive OTEs. For ETR mode
  545. * the port has to receive OTEs, the ETR stepping bit has to
  546. * be zero and the validity bits for data frame 1, 2, and 3
  547. * have to be 1.
  548. */
  549. static int etr_port_valid(struct etr_aib *aib, int port)
  550. {
  551. unsigned int psc;
  552. /* Check that this port is receiving OTEs. */
  553. if (aib->tsp == 0)
  554. return 0;
  555. psc = port ? aib->esw.psc1 : aib->esw.psc0;
  556. if (psc == etr_lpsc_pps_mode)
  557. return 1;
  558. if (psc == etr_lpsc_operational_step)
  559. return !aib->esw.y && aib->slsw.v1 &&
  560. aib->slsw.v2 && aib->slsw.v3;
  561. return 0;
  562. }
/*
 * Check if two ports are on the same network.
 */
static int etr_compare_network(struct etr_aib *aib1, struct etr_aib *aib2)
{
	// FIXME: any other fields we have to compare?
	return aib1->edf1.net_id == aib2->edf1.net_id;
}
  571. /*
  572. * Wrapper for etr_stei that converts physical port states
  573. * to logical port states to be consistent with the output
  574. * of stetr (see etr_psc vs. etr_lpsc).
  575. */
  576. static void etr_steai_cv(struct etr_aib *aib, unsigned int func)
  577. {
  578. BUG_ON(etr_steai(aib, func) != 0);
  579. /* Convert port state to logical port state. */
  580. if (aib->esw.psc0 == 1)
  581. aib->esw.psc0 = 2;
  582. else if (aib->esw.psc0 == 0 && aib->esw.p == 0)
  583. aib->esw.psc0 = 1;
  584. if (aib->esw.psc1 == 1)
  585. aib->esw.psc1 = 2;
  586. else if (aib->esw.psc1 == 0 && aib->esw.p == 1)
  587. aib->esw.psc1 = 1;
  588. }
/*
 * Check if the aib a2 is still connected to the same attachment as
 * aib a1, the etv values differ by one and a2 is valid.
 */
static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p)
{
	int state_a1, state_a2;

	/* Paranoia check: e0/e1 should better be the same. */
	if (a1->esw.eacr.e0 != a2->esw.eacr.e0 ||
	    a1->esw.eacr.e1 != a2->esw.eacr.e1)
		return 0;

	/* Still connected to the same etr ? */
	state_a1 = p ? a1->esw.psc1 : a1->esw.psc0;
	state_a2 = p ? a2->esw.psc1 : a2->esw.psc0;
	if (state_a1 == etr_lpsc_operational_step) {
		/* ETR mode: network id, etr id and port number must match. */
		if (state_a2 != etr_lpsc_operational_step ||
		    a1->edf1.net_id != a2->edf1.net_id ||
		    a1->edf1.etr_id != a2->edf1.etr_id ||
		    a1->edf1.etr_pn != a2->edf1.etr_pn)
			return 0;
	} else if (state_a2 != etr_lpsc_pps_mode)
		return 0;

	/* The ETV value of a2 needs to be ETV of a1 + 1. */
	if (a1->edf2.etv + 1 != a2->edf2.etv)
		return 0;
	if (!etr_port_valid(a2, p))
		return 0;
	return 1;
}
/*
 * The time is "clock". xtime is what we think the time is.
 * Adjust the value by a multiple of jiffies and add the delta to ntp.
 * "delay" is an approximation how long the synchronization took. If
 * the time correction is positive, then "delay" is subtracted from
 * the time difference and only the remaining part is passed to ntp.
 */
static void etr_adjust_time(unsigned long long clock, unsigned long long delay)
{
	unsigned long long delta, ticks;
	struct timex adjust;

	/*
	 * We don't have to take the xtime lock because the cpu
	 * executing etr_adjust_time is running disabled in
	 * tasklet context and all other cpus are looping in
	 * etr_sync_cpu_start.
	 */
	if (clock > xtime_cc) {
		/* It is later than we thought. */
		delta = ticks = clock - xtime_cc;
		/* Deduct the sync delay from a positive correction. */
		delta = ticks = (delta < delay) ? 0 : delta - delay;
		/* Split into whole jiffies (ticks) and sub-jiffy rest (delta). */
		delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
		init_timer_cc = init_timer_cc + delta;
		jiffies_timer_cc = jiffies_timer_cc + delta;
		xtime_cc = xtime_cc + delta;
		adjust.offset = ticks * (1000000 / HZ);
	} else {
		/* It is earlier than we thought. */
		delta = ticks = xtime_cc - clock;
		delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
		init_timer_cc = init_timer_cc - delta;
		jiffies_timer_cc = jiffies_timer_cc - delta;
		xtime_cc = xtime_cc - delta;
		adjust.offset = -ticks * (1000000 / HZ);
	}
	if (adjust.offset != 0) {
		/* Hand the whole-jiffy offset (in microseconds) to ntp. */
		printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n",
		       adjust.offset);
		adjust.modes = ADJ_OFFSET_SINGLESHOT;
		do_adjtimex(&adjust);
	}
}
/*
 * Called on all other cpus while the boot cpu adjusts the TOD clock.
 * "dummy" points to the shared in_sync flag (see etr_sync_clock).
 */
static void etr_sync_cpu_start(void *dummy)
{
	int *in_sync = dummy;

	etr_enable_sync_clock();
	/*
	 * This looks like a busy wait loop but it isn't. etr_sync_cpus
	 * is called on all other cpus while the TOD clocks is stopped.
	 * __udelay will stop the cpu on an enabled wait psw until the
	 * TOD is running again.
	 */
	while (*in_sync == 0) {
		__udelay(1);
		/*
		 * A different cpu changes *in_sync. Therefore use
		 * barrier() to force memory access.
		 */
		barrier();
	}
	if (*in_sync != 1)
		/* Didn't work. Clear per-cpu in sync bit again. */
		etr_disable_sync_clock(NULL);
	/*
	 * This round of TOD syncing is done. Set the clock comparator
	 * to the next tick and let the processor continue.
	 */
	setup_jiffy_timer();
}
/* Rendezvous stub called on all cpus once the TOD sync has finished. */
static void etr_sync_cpu_end(void *dummy)
{
}
/*
 * Sync the TOD clock using the port referred to by aibp. This port
 * has to be enabled and the other port has to be disabled. The
 * last eacr update has to be more than 1.6 seconds in the past.
 */
static int etr_sync_clock(struct etr_aib *aib, int port)
{
	struct etr_aib *sync_port;
	unsigned long long clock, delay;
	int in_sync, follows;
	int rc;

	/* Check if the current aib is adjacent to the sync port aib. */
	sync_port = (port == 0) ? &etr_port0 : &etr_port1;
	follows = etr_aib_follows(sync_port, aib, port);
	memcpy(sync_port, aib, sizeof(*aib));
	if (!follows)
		return -EAGAIN;

	/*
	 * Catch all other cpus and make them wait until we have
	 * successfully synced the clock. smp_call_function will
	 * return after all other cpus are in etr_sync_cpu_start.
	 */
	in_sync = 0;
	preempt_disable();
	smp_call_function(etr_sync_cpu_start,&in_sync,0,0);
	local_irq_disable();
	etr_enable_sync_clock();

	/* Set clock to next OTE. */
	__ctl_set_bit(14, 21);
	__ctl_set_bit(0, 29);
	clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32;
	if (set_clock(clock) == 0) {
		__udelay(1);	/* Wait for the clock to start. */
		__ctl_clear_bit(0, 29);
		__ctl_clear_bit(14, 21);
		etr_stetr(aib);
		/* Adjust Linux timing variables. */
		delay = (unsigned long long)
			(aib->edf2.etv - sync_port->edf2.etv) << 32;
		etr_adjust_time(clock, delay);
		setup_jiffy_timer();
		/* Verify that the clock is properly set. */
		if (!etr_aib_follows(sync_port, aib, port)) {
			/* Didn't work. */
			etr_disable_sync_clock(NULL);
			in_sync = -EAGAIN;
			rc = -EAGAIN;
		} else {
			in_sync = 1;
			rc = 0;
		}
	} else {
		/* Could not set the clock ?!? */
		__ctl_clear_bit(0, 29);
		__ctl_clear_bit(14, 21);
		etr_disable_sync_clock(NULL);
		in_sync = -EAGAIN;
		rc = -EAGAIN;
	}
	/* Release the waiting cpus; in_sync tells them the outcome. */
	local_irq_enable();
	smp_call_function(etr_sync_cpu_end,NULL,0,0);
	preempt_enable();
	return rc;
}
/*
 * Handle the immediate effects of the different events.
 * The port change event is used for online/offline changes.
 */
static struct etr_eacr etr_handle_events(struct etr_eacr eacr)
{
	/* After a sync check the clock can no longer be assumed in sync. */
	if (test_and_clear_bit(ETR_EVENT_SYNC_CHECK, &etr_events))
		eacr.es = 0;
	if (test_and_clear_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events))
		eacr.es = eacr.sl = 0;
	/* A port alert invalidates the cached aib data of both ports. */
	if (test_and_clear_bit(ETR_EVENT_PORT_ALERT, &etr_events))
		etr_port0_uptodate = etr_port1_uptodate = 0;

	if (test_and_clear_bit(ETR_EVENT_PORT0_CHANGE, &etr_events)) {
		if (eacr.e0)
			/*
			 * Port change of an enabled port. We have to
			 * assume that this can have caused an stepping
			 * port switch.
			 */
			etr_tolec = get_clock();
		eacr.p0 = etr_port0_online;
		if (!eacr.p0)
			eacr.e0 = 0;
		etr_port0_uptodate = 0;
	}
	if (test_and_clear_bit(ETR_EVENT_PORT1_CHANGE, &etr_events)) {
		if (eacr.e1)
			/*
			 * Port change of an enabled port. We have to
			 * assume that this can have caused an stepping
			 * port switch.
			 */
			etr_tolec = get_clock();
		eacr.p1 = etr_port1_online;
		if (!eacr.p1)
			eacr.e1 = 0;
		etr_port1_uptodate = 0;
	}
	/* ETR_EVENT_UPDATE just triggers the work fn; nothing to do here. */
	clear_bit(ETR_EVENT_UPDATE, &etr_events);
	return eacr;
}
  795. /*
  796. * Set up a timer that expires after the etr_tolec + 1.6 seconds if
  797. * one of the ports needs an update.
  798. */
  799. static void etr_set_tolec_timeout(unsigned long long now)
  800. {
  801. unsigned long micros;
  802. if ((!etr_eacr.p0 || etr_port0_uptodate) &&
  803. (!etr_eacr.p1 || etr_port1_uptodate))
  804. return;
  805. micros = (now > etr_tolec) ? ((now - etr_tolec) >> 12) : 0;
  806. micros = (micros > 1600000) ? 0 : 1600000 - micros;
  807. mod_timer(&etr_timer, jiffies + (micros * HZ) / 1000000 + 1);
  808. }
  809. /*
  810. * Set up a time that expires after 1/2 second.
  811. */
  812. static void etr_set_sync_timeout(void)
  813. {
  814. mod_timer(&etr_timer, jiffies + HZ/2);
  815. }
  816. /*
  817. * Update the aib information for one or both ports.
  818. */
  819. static struct etr_eacr etr_handle_update(struct etr_aib *aib,
  820. struct etr_eacr eacr)
  821. {
  822. /* With both ports disabled the aib information is useless. */
  823. if (!eacr.e0 && !eacr.e1)
  824. return eacr;
  825. /* Update port0 or port1 with aib stored in etr_work_fn. */
  826. if (aib->esw.q == 0) {
  827. /* Information for port 0 stored. */
  828. if (eacr.p0 && !etr_port0_uptodate) {
  829. etr_port0 = *aib;
  830. if (etr_port0_online)
  831. etr_port0_uptodate = 1;
  832. }
  833. } else {
  834. /* Information for port 1 stored. */
  835. if (eacr.p1 && !etr_port1_uptodate) {
  836. etr_port1 = *aib;
  837. if (etr_port0_online)
  838. etr_port1_uptodate = 1;
  839. }
  840. }
  841. /*
  842. * Do not try to get the alternate port aib if the clock
  843. * is not in sync yet.
  844. */
  845. if (!eacr.es)
  846. return eacr;
  847. /*
  848. * If steai is available we can get the information about
  849. * the other port immediately. If only stetr is available the
  850. * data-port bit toggle has to be used.
  851. */
  852. if (test_bit(ETR_FLAG_STEAI, &etr_flags)) {
  853. if (eacr.p0 && !etr_port0_uptodate) {
  854. etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0);
  855. etr_port0_uptodate = 1;
  856. }
  857. if (eacr.p1 && !etr_port1_uptodate) {
  858. etr_steai_cv(&etr_port1, ETR_STEAI_PORT_1);
  859. etr_port1_uptodate = 1;
  860. }
  861. } else {
  862. /*
  863. * One port was updated above, if the other
  864. * port is not uptodate toggle dp bit.
  865. */
  866. if ((eacr.p0 && !etr_port0_uptodate) ||
  867. (eacr.p1 && !etr_port1_uptodate))
  868. eacr.dp ^= 1;
  869. else
  870. eacr.dp = 0;
  871. }
  872. return eacr;
  873. }
  874. /*
  875. * Write new etr control register if it differs from the current one.
  876. * Return 1 if etr_tolec has been updated as well.
  877. */
  878. static void etr_update_eacr(struct etr_eacr eacr)
  879. {
  880. int dp_changed;
  881. if (memcmp(&etr_eacr, &eacr, sizeof(eacr)) == 0)
  882. /* No change, return. */
  883. return;
  884. /*
  885. * The disable of an active port of the change of the data port
  886. * bit can/will cause a change in the data port.
  887. */
  888. dp_changed = etr_eacr.e0 > eacr.e0 || etr_eacr.e1 > eacr.e1 ||
  889. (etr_eacr.dp ^ eacr.dp) != 0;
  890. etr_eacr = eacr;
  891. etr_setr(&etr_eacr);
  892. if (dp_changed)
  893. etr_tolec = get_clock();
  894. }
  895. /*
  896. * ETR tasklet. In this function you'll find the main logic. In
  897. * particular this is the only function that calls etr_update_eacr(),
  898. * it "controls" the etr control register.
  899. */
static void etr_work_fn(struct work_struct *work)
{
	unsigned long long now;
	struct etr_eacr eacr;
	struct etr_aib aib;
	int sync_port;

	/* Create working copy of etr_eacr. */
	eacr = etr_eacr;

	/* Check for the different events and their immediate effects. */
	eacr = etr_handle_events(eacr);

	/* Check if ETR is supposed to be active. */
	eacr.ea = eacr.p0 || eacr.p1;
	if (!eacr.ea) {
		/* Both ports offline. Reset everything. */
		eacr.dp = eacr.es = eacr.sl = 0;
		on_each_cpu(etr_disable_sync_clock, NULL, 0, 1);
		del_timer_sync(&etr_timer);
		etr_update_eacr(eacr);
		set_bit(ETR_FLAG_EACCES, &etr_flags);
		return;
	}

	/* Store aib to get the current ETR status word. */
	BUG_ON(etr_stetr(&aib) != 0);
	etr_port0.esw = etr_port1.esw = aib.esw; /* Copy status word. */
	now = get_clock();

	/*
	 * Update the port information if the last stepping port change
	 * or data port change is older than 1.6 seconds.
	 * (1600000 microseconds << 12 converts to TOD clock units,
	 * the inverse of the >> 12 in etr_set_tolec_timeout.)
	 */
	if (now >= etr_tolec + (1600000 << 12))
		eacr = etr_handle_update(&aib, eacr);

	/*
	 * Select ports to enable. The preferred synchronization mode is PPS.
	 * If a port can be enabled depends on a number of things:
	 * 1) The port needs to be online and uptodate. A port is not
	 *    disabled just because it is not uptodate, but it is only
	 *    enabled if it is uptodate.
	 * 2) The port needs to have the same mode (pps / etr).
	 * 3) The port needs to be usable -> etr_port_valid() == 1
	 * 4) To enable the second port the clock needs to be in sync.
	 * 5) If both ports are usable and are ETR ports, the network id
	 *    has to be the same.
	 * The eacr.sl bit is used to indicate etr mode vs. pps mode.
	 */
	if (eacr.p0 && aib.esw.psc0 == etr_lpsc_pps_mode) {
		/* Port 0 is in PPS mode: it becomes the stepping port. */
		eacr.sl = 0;
		eacr.e0 = 1;
		/* Leaving etr mode invalidates the current sync state. */
		if (!etr_mode_is_pps(etr_eacr))
			eacr.es = 0;
		if (!eacr.es || !eacr.p1 || aib.esw.psc1 != etr_lpsc_pps_mode)
			eacr.e1 = 0;
		// FIXME: uptodate checks ?
		else if (etr_port0_uptodate && etr_port1_uptodate)
			eacr.e1 = 1;
		sync_port = (etr_port0_uptodate &&
			     etr_port_valid(&etr_port0, 0)) ? 0 : -1;
		clear_bit(ETR_FLAG_EACCES, &etr_flags);
	} else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) {
		/* Only port 1 is in PPS mode. */
		eacr.sl = 0;
		eacr.e0 = 0;
		eacr.e1 = 1;
		if (!etr_mode_is_pps(etr_eacr))
			eacr.es = 0;
		sync_port = (etr_port1_uptodate &&
			     etr_port_valid(&etr_port1, 1)) ? 1 : -1;
		clear_bit(ETR_FLAG_EACCES, &etr_flags);
	} else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) {
		/* Port 0 is operational in ETR mode. */
		eacr.sl = 1;
		eacr.e0 = 1;
		if (!etr_mode_is_etr(etr_eacr))
			eacr.es = 0;
		/* Second port only with sync, matching mode and network. */
		if (!eacr.es || !eacr.p1 ||
		    aib.esw.psc1 != etr_lpsc_operational_alt)
			eacr.e1 = 0;
		else if (etr_port0_uptodate && etr_port1_uptodate &&
			 etr_compare_network(&etr_port0, &etr_port1))
			eacr.e1 = 1;
		sync_port = (etr_port0_uptodate &&
			     etr_port_valid(&etr_port0, 0)) ? 0 : -1;
		clear_bit(ETR_FLAG_EACCES, &etr_flags);
	} else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) {
		/* Only port 1 is operational in ETR mode. */
		eacr.sl = 1;
		eacr.e0 = 0;
		eacr.e1 = 1;
		if (!etr_mode_is_etr(etr_eacr))
			eacr.es = 0;
		sync_port = (etr_port1_uptodate &&
			     etr_port_valid(&etr_port1, 1)) ? 1 : -1;
		clear_bit(ETR_FLAG_EACCES, &etr_flags);
	} else {
		/* Both ports not usable. */
		eacr.es = eacr.sl = 0;
		sync_port = -1;
		set_bit(ETR_FLAG_EACCES, &etr_flags);
	}

	/*
	 * If the clock is in sync just update the eacr and return.
	 * If there is no valid sync port wait for a port update.
	 */
	if (eacr.es || sync_port < 0) {
		etr_update_eacr(eacr);
		etr_set_tolec_timeout(now);
		return;
	}

	/*
	 * Prepare control register for clock syncing
	 * (reset data port bit, set sync check control).
	 */
	eacr.dp = 0;
	eacr.es = 1;

	/*
	 * Update eacr and try to synchronize the clock. If the update
	 * of eacr caused a stepping port switch (or if we have to
	 * assume that a stepping port switch has occurred) or the
	 * clock syncing failed, reset the sync check control bit
	 * and set up a timer to try again after 0.5 seconds.
	 */
	etr_update_eacr(eacr);
	if (now < etr_tolec + (1600000 << 12) ||
	    etr_sync_clock(&aib, sync_port) != 0) {
		/* Sync failed. Try again in 1/2 second. */
		eacr.es = 0;
		etr_update_eacr(eacr);
		etr_set_sync_timeout();
	} else
		etr_set_tolec_timeout(now);
}
  1027. /*
  1028. * Sysfs interface functions
  1029. */
/* Sysfs class for the ETR interface. */
static struct sysdev_class etr_sysclass = {
	.name = "etr",
};

/* Sysdev node for ETR port 0. */
static struct sys_device etr_port0_dev = {
	.id = 0,
	.cls = &etr_sysclass,
};

/* Sysdev node for ETR port 1. */
static struct sys_device etr_port1_dev = {
	.id = 1,
	.cls = &etr_sysclass,
};
  1041. /*
  1042. * ETR class attributes
  1043. */
/* Show the stepping port bit from the cached ETR status word. */
static ssize_t etr_stepping_port_show(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%i\n", etr_port0.esw.p);
}

static SYSDEV_CLASS_ATTR(stepping_port, 0400, etr_stepping_port_show, NULL);
  1049. static ssize_t etr_stepping_mode_show(struct sysdev_class *class, char *buf)
  1050. {
  1051. char *mode_str;
  1052. if (etr_mode_is_pps(etr_eacr))
  1053. mode_str = "pps";
  1054. else if (etr_mode_is_etr(etr_eacr))
  1055. mode_str = "etr";
  1056. else
  1057. mode_str = "local";
  1058. return sprintf(buf, "%s\n", mode_str);
  1059. }
  1060. static SYSDEV_CLASS_ATTR(stepping_mode, 0400, etr_stepping_mode_show, NULL);
  1061. /*
  1062. * ETR port attributes
  1063. */
  1064. static inline struct etr_aib *etr_aib_from_dev(struct sys_device *dev)
  1065. {
  1066. if (dev == &etr_port0_dev)
  1067. return etr_port0_online ? &etr_port0 : NULL;
  1068. else
  1069. return etr_port1_online ? &etr_port1 : NULL;
  1070. }
  1071. static ssize_t etr_online_show(struct sys_device *dev, char *buf)
  1072. {
  1073. unsigned int online;
  1074. online = (dev == &etr_port0_dev) ? etr_port0_online : etr_port1_online;
  1075. return sprintf(buf, "%i\n", online);
  1076. }
  1077. static ssize_t etr_online_store(struct sys_device *dev,
  1078. const char *buf, size_t count)
  1079. {
  1080. unsigned int value;
  1081. value = simple_strtoul(buf, NULL, 0);
  1082. if (value != 0 && value != 1)
  1083. return -EINVAL;
  1084. if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
  1085. return -ENOSYS;
  1086. if (dev == &etr_port0_dev) {
  1087. if (etr_port0_online == value)
  1088. return count; /* Nothing to do. */
  1089. etr_port0_online = value;
  1090. set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
  1091. schedule_work(&etr_work);
  1092. } else {
  1093. if (etr_port1_online == value)
  1094. return count; /* Nothing to do. */
  1095. etr_port1_online = value;
  1096. set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
  1097. schedule_work(&etr_work);
  1098. }
  1099. return count;
  1100. }
  1101. static SYSDEV_ATTR(online, 0600, etr_online_show, etr_online_store);
  1102. static ssize_t etr_stepping_control_show(struct sys_device *dev, char *buf)
  1103. {
  1104. return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ?
  1105. etr_eacr.e0 : etr_eacr.e1);
  1106. }
  1107. static SYSDEV_ATTR(stepping_control, 0400, etr_stepping_control_show, NULL);
  1108. static ssize_t etr_mode_code_show(struct sys_device *dev, char *buf)
  1109. {
  1110. if (!etr_port0_online && !etr_port1_online)
  1111. /* Status word is not uptodate if both ports are offline. */
  1112. return -ENODATA;
  1113. return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ?
  1114. etr_port0.esw.psc0 : etr_port0.esw.psc1);
  1115. }
  1116. static SYSDEV_ATTR(state_code, 0400, etr_mode_code_show, NULL);
  1117. static ssize_t etr_untuned_show(struct sys_device *dev, char *buf)
  1118. {
  1119. struct etr_aib *aib = etr_aib_from_dev(dev);
  1120. if (!aib || !aib->slsw.v1)
  1121. return -ENODATA;
  1122. return sprintf(buf, "%i\n", aib->edf1.u);
  1123. }
  1124. static SYSDEV_ATTR(untuned, 0400, etr_untuned_show, NULL);
  1125. static ssize_t etr_network_id_show(struct sys_device *dev, char *buf)
  1126. {
  1127. struct etr_aib *aib = etr_aib_from_dev(dev);
  1128. if (!aib || !aib->slsw.v1)
  1129. return -ENODATA;
  1130. return sprintf(buf, "%i\n", aib->edf1.net_id);
  1131. }
  1132. static SYSDEV_ATTR(network, 0400, etr_network_id_show, NULL);
  1133. static ssize_t etr_id_show(struct sys_device *dev, char *buf)
  1134. {
  1135. struct etr_aib *aib = etr_aib_from_dev(dev);
  1136. if (!aib || !aib->slsw.v1)
  1137. return -ENODATA;
  1138. return sprintf(buf, "%i\n", aib->edf1.etr_id);
  1139. }
  1140. static SYSDEV_ATTR(id, 0400, etr_id_show, NULL);
  1141. static ssize_t etr_port_number_show(struct sys_device *dev, char *buf)
  1142. {
  1143. struct etr_aib *aib = etr_aib_from_dev(dev);
  1144. if (!aib || !aib->slsw.v1)
  1145. return -ENODATA;
  1146. return sprintf(buf, "%i\n", aib->edf1.etr_pn);
  1147. }
  1148. static SYSDEV_ATTR(port, 0400, etr_port_number_show, NULL);
  1149. static ssize_t etr_coupled_show(struct sys_device *dev, char *buf)
  1150. {
  1151. struct etr_aib *aib = etr_aib_from_dev(dev);
  1152. if (!aib || !aib->slsw.v3)
  1153. return -ENODATA;
  1154. return sprintf(buf, "%i\n", aib->edf3.c);
  1155. }
  1156. static SYSDEV_ATTR(coupled, 0400, etr_coupled_show, NULL);
  1157. static ssize_t etr_local_time_show(struct sys_device *dev, char *buf)
  1158. {
  1159. struct etr_aib *aib = etr_aib_from_dev(dev);
  1160. if (!aib || !aib->slsw.v3)
  1161. return -ENODATA;
  1162. return sprintf(buf, "%i\n", aib->edf3.blto);
  1163. }
  1164. static SYSDEV_ATTR(local_time, 0400, etr_local_time_show, NULL);
  1165. static ssize_t etr_utc_offset_show(struct sys_device *dev, char *buf)
  1166. {
  1167. struct etr_aib *aib = etr_aib_from_dev(dev);
  1168. if (!aib || !aib->slsw.v3)
  1169. return -ENODATA;
  1170. return sprintf(buf, "%i\n", aib->edf3.buo);
  1171. }
  1172. static SYSDEV_ATTR(utc_offset, 0400, etr_utc_offset_show, NULL);
/* Per-port sysfs attribute files, NULL-terminated. */
static struct sysdev_attribute *etr_port_attributes[] = {
	&attr_online,
	&attr_stepping_control,
	&attr_state_code,
	&attr_untuned,
	&attr_network,
	&attr_id,
	&attr_port,
	&attr_coupled,
	&attr_local_time,
	&attr_utc_offset,
	NULL
};
  1186. static int __init etr_register_port(struct sys_device *dev)
  1187. {
  1188. struct sysdev_attribute **attr;
  1189. int rc;
  1190. rc = sysdev_register(dev);
  1191. if (rc)
  1192. goto out;
  1193. for (attr = etr_port_attributes; *attr; attr++) {
  1194. rc = sysdev_create_file(dev, *attr);
  1195. if (rc)
  1196. goto out_unreg;
  1197. }
  1198. return 0;
  1199. out_unreg:
  1200. for (; attr >= etr_port_attributes; attr--)
  1201. sysdev_remove_file(dev, *attr);
  1202. sysdev_unregister(dev);
  1203. out:
  1204. return rc;
  1205. }
  1206. static void __init etr_unregister_port(struct sys_device *dev)
  1207. {
  1208. struct sysdev_attribute **attr;
  1209. for (attr = etr_port_attributes; *attr; attr++)
  1210. sysdev_remove_file(dev, *attr);
  1211. sysdev_unregister(dev);
  1212. }
/*
 * Create the ETR sysfs hierarchy: the "etr" class with its two class
 * attributes, then both port devices with their attribute files.
 * On any failure everything registered so far is torn down again.
 */
static int __init etr_init_sysfs(void)
{
	int rc;

	rc = sysdev_class_register(&etr_sysclass);
	if (rc)
		goto out;
	rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_port);
	if (rc)
		goto out_unreg_class;
	rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_mode);
	if (rc)
		goto out_remove_stepping_port;
	rc = etr_register_port(&etr_port0_dev);
	if (rc)
		goto out_remove_stepping_mode;
	rc = etr_register_port(&etr_port1_dev);
	if (rc)
		goto out_remove_port0;
	return 0;

	/* Unwind in reverse order of registration. */
out_remove_port0:
	etr_unregister_port(&etr_port0_dev);
out_remove_stepping_mode:
	sysdev_class_remove_file(&etr_sysclass, &attr_stepping_mode);
out_remove_stepping_port:
	sysdev_class_remove_file(&etr_sysclass, &attr_stepping_port);
out_unreg_class:
	sysdev_class_unregister(&etr_sysclass);
out:
	return rc;
}

device_initcall(etr_init_sysfs);