/*
 * Virtual cpu timer based timer functions.
 *
 * Copyright IBM Corp. 2004, 2012
 * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <asm/irq_regs.h>
#include <asm/cputime.h>
#include <asm/vtimer.h>
#include <asm/vtime.h>
#include <asm/irq.h>

#include "entry.h"

static void virt_timer_expire(void);

DEFINE_PER_CPU(struct s390_idle_data, s390_idle);

static LIST_HEAD(virt_timer_list);
static DEFINE_SPINLOCK(virt_timer_lock);
static atomic64_t virt_timer_current;
static atomic64_t virt_timer_elapsed;

static inline u64 get_vtimer(void)
{
        u64 timer;

        asm volatile("stpt %0" : "=m" (timer));
        return timer;
}

static inline void set_vtimer(u64 expires)
{
        u64 timer;

        asm volatile(
                "       stpt    %0\n"   /* Store current cpu timer value */
                "       spt     %1"     /* Set new value immediately afterwards */
                : "=m" (timer) : "m" (expires));
        S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
        S390_lowcore.last_update_timer = expires;
}

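/*
 * Advance the common elapsed-time base of all virtual timers and
 * report whether the earliest pending timer is now due.
 */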
static inline int virt_timer_forward(u64 elapsed)
{
        BUG_ON(!irqs_disabled());

        if (list_empty(&virt_timer_list))
                return 0;
        elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
        return elapsed >= atomic64_read(&virt_timer_current);
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_timer.
 */
static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
        struct thread_info *ti = task_thread_info(tsk);
        u64 timer, clock, user, system, steal;

        timer = S390_lowcore.last_update_timer;
        clock = S390_lowcore.last_update_clock;
        asm volatile(
                "       stpt    %0\n"   /* Store current cpu timer value */
                "       stck    %1"     /* Store current tod clock value */
                : "=m" (S390_lowcore.last_update_timer),
                  "=m" (S390_lowcore.last_update_clock));
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
        S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;

        user = S390_lowcore.user_timer - ti->user_timer;
        S390_lowcore.steal_timer -= user;
        ti->user_timer = S390_lowcore.user_timer;
        account_user_time(tsk, user, user);

        system = S390_lowcore.system_timer - ti->system_timer;
        S390_lowcore.steal_timer -= system;
        ti->system_timer = S390_lowcore.system_timer;
        account_system_time(tsk, hardirq_offset, system, system);

        /* Whatever is left of the wall-clock delta is stolen time. */
        steal = S390_lowcore.steal_timer;
        if ((s64) steal > 0) {
                S390_lowcore.steal_timer = 0;
                account_steal_time(steal);
        }

        return virt_timer_forward(user + system);
}

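/*
 * Flush the accumulated times of the previous task to its thread_info
 * and switch the lowcore accounting fields over to the incoming task.
 */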
void vtime_task_switch(struct task_struct *prev)
{
        struct thread_info *ti;

        do_account_vtime(prev, 0);
        ti = task_thread_info(prev);
        ti->user_timer = S390_lowcore.user_timer;
        ti->system_timer = S390_lowcore.system_timer;
        ti = task_thread_info(current);
        S390_lowcore.user_timer = ti->user_timer;
        S390_lowcore.system_timer = ti->system_timer;
}

/*
 * On s390, accounting pending user time also implies accounting
 * pending system time, so that stolen time can be computed correctly.
 */
void vtime_account_user(struct task_struct *tsk)
{
        if (do_account_vtime(tsk, HARDIRQ_OFFSET))
                virt_timer_expire();
}

/*
 * Account the system time that has elapsed since the last update and
 * forward the virtual timers. Called with interrupts disabled on
 * interrupt entry.
 */
void vtime_account_irq_enter(struct task_struct *tsk)
{
        struct thread_info *ti = task_thread_info(tsk);
        u64 timer, system;

        WARN_ON_ONCE(!irqs_disabled());

        timer = S390_lowcore.last_update_timer;
        S390_lowcore.last_update_timer = get_vtimer();
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

        system = S390_lowcore.system_timer - ti->system_timer;
        S390_lowcore.steal_timer -= system;
        ti->system_timer = S390_lowcore.system_timer;
        account_system_time(tsk, 0, system, system);

        virt_timer_forward(system);
}

EXPORT_SYMBOL_GPL(vtime_account_irq_enter);

void vtime_account_system(struct task_struct *tsk)
__attribute__((alias("vtime_account_irq_enter")));
EXPORT_SYMBOL_GPL(vtime_account_system);

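/*
 * Enter an enabled wait state and account the time spent there as idle
 * time. The sequence counter allows s390_get_idle_time() to read a
 * consistent snapshot without taking a lock.
 */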
void __kprobes vtime_stop_cpu(void)
{
        struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
        unsigned long long idle_time;
        unsigned long psw_mask;

        trace_hardirqs_on();

        /* Wait for external, I/O or machine check interrupt. */
        psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT |
                PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
        idle->nohz_delay = 0;

        /* Call the assembler magic in entry.S */
        psw_idle(idle, psw_mask);

        /* Account time spent with enabled wait psw loaded as idle time. */
        idle->sequence++;
        smp_wmb();
        idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
        idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
        idle->idle_time += idle_time;
        idle->idle_count++;
        account_idle_time(idle_time);
        smp_wmb();
        idle->sequence++;
}

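/*
 * Return the length of the current (or just completed) idle period of
 * the given cpu, or 0 if the cpu has not been idle.
 */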
cputime64_t s390_get_idle_time(int cpu)
{
        struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
        unsigned long long now, idle_enter, idle_exit;
        unsigned int sequence;

        /* Retry while an update is in progress (odd sequence count). */
        do {
                now = get_tod_clock();
                sequence = ACCESS_ONCE(idle->sequence);
                idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
                idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
        } while ((sequence & 1) || (idle->sequence != sequence));

        return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
}

/*
 * Sorted add to a list. The list is searched linearly until the first
 * element with a later expiry is found.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
        struct vtimer_list *tmp;

        list_for_each_entry(tmp, head, entry) {
                if (tmp->expires > timer->expires) {
                        list_add_tail(&timer->entry, &tmp->entry);
                        return;
                }
        }
        list_add_tail(&timer->entry, head);
}

/*
 * Handler for expired virtual CPU timer.
 */
static void virt_timer_expire(void)
{
        struct vtimer_list *timer, *tmp;
        unsigned long elapsed;
        LIST_HEAD(cb_list);

        /* walk timer list, fire all expired timers */
        spin_lock(&virt_timer_lock);
        elapsed = atomic64_read(&virt_timer_elapsed);
        list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) {
                if (timer->expires < elapsed)
                        /* move expired timer to the callback queue */
                        list_move_tail(&timer->entry, &cb_list);
                else
                        timer->expires -= elapsed;
        }
        if (!list_empty(&virt_timer_list)) {
                timer = list_first_entry(&virt_timer_list,
                                         struct vtimer_list, entry);
                atomic64_set(&virt_timer_current, timer->expires);
        }
        atomic64_sub(elapsed, &virt_timer_elapsed);
        spin_unlock(&virt_timer_lock);

        /* Do callbacks and recharge periodic timers */
        list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
                list_del_init(&timer->entry);
                timer->function(timer->data);
                if (timer->interval) {
                        /* Recharge interval timer */
                        timer->expires = timer->interval +
                                atomic64_read(&virt_timer_elapsed);
                        spin_lock(&virt_timer_lock);
                        list_add_sorted(timer, &virt_timer_list);
                        spin_unlock(&virt_timer_lock);
                }
        }
}

void init_virt_timer(struct vtimer_list *timer)
{
        timer->function = NULL;
        INIT_LIST_HEAD(&timer->entry);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int vtimer_pending(struct vtimer_list *timer)
{
        return !list_empty(&timer->entry);
}

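/* Caller must hold virt_timer_lock. */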
static void internal_add_vtimer(struct vtimer_list *timer)
{
        if (list_empty(&virt_timer_list)) {
                /* First timer, just program it. */
                atomic64_set(&virt_timer_current, timer->expires);
                atomic64_set(&virt_timer_elapsed, 0);
                list_add(&timer->entry, &virt_timer_list);
        } else {
                /* Update timer against current base. */
                timer->expires += atomic64_read(&virt_timer_elapsed);
                if (likely((s64) timer->expires <
                           (s64) atomic64_read(&virt_timer_current)))
                        /* The new timer expires before the current timer. */
                        atomic64_set(&virt_timer_current, timer->expires);
                /* Insert new timer into the list. */
                list_add_sorted(timer, &virt_timer_list);
        }
}

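/* Arm a one-shot (periodic == 0) or periodic virtual timer. */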
static void __add_vtimer(struct vtimer_list *timer, int periodic)
{
        unsigned long flags;

        timer->interval = periodic ? timer->expires : 0;
        spin_lock_irqsave(&virt_timer_lock, flags);
        internal_add_vtimer(timer);
        spin_unlock_irqrestore(&virt_timer_lock, flags);
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(struct vtimer_list *timer)
{
        __add_vtimer(timer, 0);
}
EXPORT_SYMBOL(add_virt_timer);

/*
 * add_virt_timer_periodic - add a periodic virtual CPU timer
 */
void add_virt_timer_periodic(struct vtimer_list *timer)
{
        __add_vtimer(timer, 1);
}
EXPORT_SYMBOL(add_virt_timer_periodic);

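/*
 * A minimal usage sketch (hypothetical caller, not part of this file;
 * the callback, its data value and the expiry variable are illustrative
 * only). Note that expires/interval are measured in cpu timer units,
 * i.e. in consumed cpu time, not in wall-clock time:
 *
 *	static void my_vtimer_fn(unsigned long data)
 *	{
 *		...
 *	}
 *
 *	static struct vtimer_list my_vtimer;
 *
 *	init_virt_timer(&my_vtimer);
 *	my_vtimer.function = my_vtimer_fn;
 *	my_vtimer.data = 0;
 *	my_vtimer.expires = expiry_in_cpu_timer_units;
 *	add_virt_timer(&my_vtimer);
 */
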
static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic)
{
        unsigned long flags;
        int rc;

        BUG_ON(!timer->function);

        if (timer->expires == expires && vtimer_pending(timer))
                return 1;
        spin_lock_irqsave(&virt_timer_lock, flags);
        rc = vtimer_pending(timer);
        if (rc)
                list_del_init(&timer->entry);
        timer->interval = periodic ? expires : 0;
        timer->expires = expires;
        internal_add_vtimer(timer);
        spin_unlock_irqrestore(&virt_timer_lock, flags);
        return rc;
}

/*
 * Returns 1 if the timer was pending and has been modified, 0 otherwise.
 */
int mod_virt_timer(struct vtimer_list *timer, u64 expires)
{
        return __mod_vtimer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_virt_timer);

/*
 * Returns 1 if the timer was pending and has been modified, 0 otherwise.
 */
int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires)
{
        return __mod_vtimer(timer, expires, 1);
}
EXPORT_SYMBOL(mod_virt_timer_periodic);

/*
 * Delete a virtual timer.
 *
 * Returns 1 if the deleted timer was pending, 0 otherwise.
 */
int del_virt_timer(struct vtimer_list *timer)
{
        unsigned long flags;

        if (!vtimer_pending(timer))
                return 0;
        spin_lock_irqsave(&virt_timer_lock, flags);
        list_del_init(&timer->entry);
        spin_unlock_irqrestore(&virt_timer_lock, flags);
        return 1;
}
EXPORT_SYMBOL(del_virt_timer);

/*
 * Start the virtual CPU timer on the current CPU.
 */
void init_cpu_vtimer(void)
{
        /* set initial cpu timer */
        set_vtimer(VTIMER_MAX_SLICE);
}

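/*
 * CPU hotplug notifier: make sure a dying cpu no longer delays going
 * tickless.
 */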
static int s390_nohz_notify(struct notifier_block *self, unsigned long action,
                            void *hcpu)
{
        struct s390_idle_data *idle;
        long cpu = (long) hcpu;

        idle = &per_cpu(s390_idle, cpu);
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DYING:
                idle->nohz_delay = 0;
                /* fall through */
        default:
                break;
        }
        return NOTIFY_OK;
}

void __init vtime_init(void)
{
        /* Enable cpu timer interrupts on the boot cpu. */
        init_cpu_vtimer();
        cpu_notifier(s390_nohz_notify, 0);
}