posix-cpu-timers.c

/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/random.h>
#include <linux/tick.h>
#include <linux/workqueue.h>

/*
 * Called after updating RLIMIT_CPU to run cpu timer and update
 * tsk->signal->cputime_expires expiration cache if necessary. Needs
 * siglock protection since other code may update expiration cache as
 * well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
    cputime_t cputime = secs_to_cputime(rlim_new);

    spin_lock_irq(&task->sighand->siglock);
    set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
    spin_unlock_irq(&task->sighand->siglock);
}

static int check_clock(const clockid_t which_clock)
{
    int error = 0;
    struct task_struct *p;
    const pid_t pid = CPUCLOCK_PID(which_clock);

    if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
        return -EINVAL;

    if (pid == 0)
        return 0;

    rcu_read_lock();
    p = find_task_by_vpid(pid);
    if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
            same_thread_group(p, current) : has_group_leader_pid(p))) {
        error = -EINVAL;
    }
    rcu_read_unlock();

    return error;
}
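/*
 * For reference, the clockid encoding that the CPUCLOCK_* macros decode
 * (as defined in <linux/posix-timers.h>; shown here only as a sketch):
 *
 *     clockid = (~pid << 3) | (per_thread ? 4 : 0) | which;
 *
 *     CPUCLOCK_PID(c)        ~(c >> 3)
 *     CPUCLOCK_PERTHREAD(c)  (c & 4) != 0
 *     CPUCLOCK_WHICH(c)      c & 3      (PROF, VIRT or SCHED)
 *
 * A pid of 0 therefore always means "the calling task itself".
 */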
static inline unsigned long long
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
    unsigned long long ret;

    ret = 0;        /* high half always zero when .cpu used */
    if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
        ret = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
    } else {
        ret = cputime_to_expires(timespec_to_cputime(tp));
    }
    return ret;
}

static void sample_to_timespec(const clockid_t which_clock,
                   unsigned long long expires,
                   struct timespec *tp)
{
    if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
        *tp = ns_to_timespec(expires);
    else
        cputime_to_timespec((__force cputime_t)expires, tp);
}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer,
               unsigned long long now)
{
    int i;
    unsigned long long delta, incr;

    if (timer->it.cpu.incr == 0)
        return;

    if (now < timer->it.cpu.expires)
        return;

    incr = timer->it.cpu.incr;
    delta = now + incr - timer->it.cpu.expires;

    /* Don't use (incr*2 < delta), incr*2 might overflow. */
    for (i = 0; incr < delta - incr; i++)
        incr = incr << 1;

    for (; i >= 0; incr >>= 1, i--) {
        if (delta < incr)
            continue;

        timer->it.cpu.expires += incr;
        timer->it_overrun += 1 << i;
        delta -= incr;
    }
}
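/*
 * Worked example of the doubling scheme above, with made-up numbers:
 * take expires = 10, incr = 3, now = 20, so delta = 13.  The first
 * loop doubles incr to 12 and leaves i = 2.  The second loop then adds
 * 12 to expires (counting 1 << 2 = 4 overruns, leaving delta = 1) and
 * skips the 6 and 3 steps, ending with expires = 22 > now and
 * it_overrun raised by 4 -- one for each expiry at 10, 13, 16 and 19.
 */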
/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
    if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
        return 1;
    return 0;
}

static inline unsigned long long prof_ticks(struct task_struct *p)
{
    cputime_t utime, stime;

    task_cputime(p, &utime, &stime);

    return cputime_to_expires(utime + stime);
}
static inline unsigned long long virt_ticks(struct task_struct *p)
{
    cputime_t utime;

    task_cputime(p, &utime, NULL);

    return cputime_to_expires(utime);
}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
    int error = check_clock(which_clock);
    if (!error) {
        tp->tv_sec = 0;
        tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
            /*
             * If sched_clock is using a cycle counter, its true
             * resolution isn't exported to us, but it is certainly
             * much finer than 1s/HZ.
             */
            tp->tv_nsec = 1;
        }
    }
    return error;
}
static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
    /*
     * You can never reset a CPU clock, but we check for other errors
     * in the call before failing with EPERM.
     */
    int error = check_clock(which_clock);
    if (error == 0) {
        error = -EPERM;
    }
    return error;
}

/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
                unsigned long long *sample)
{
    switch (CPUCLOCK_WHICH(which_clock)) {
    default:
        return -EINVAL;
    case CPUCLOCK_PROF:
        *sample = prof_ticks(p);
        break;
    case CPUCLOCK_VIRT:
        *sample = virt_ticks(p);
        break;
    case CPUCLOCK_SCHED:
        *sample = task_sched_runtime(p);
        break;
    }
    return 0;
}

static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
{
    if (b->utime > a->utime)
        a->utime = b->utime;

    if (b->stime > a->stime)
        a->stime = b->stime;

    if (b->sum_exec_runtime > a->sum_exec_runtime)
        a->sum_exec_runtime = b->sum_exec_runtime;
}

void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
    struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
    struct task_cputime sum;
    unsigned long flags;

    if (!cputimer->running) {
        /*
         * The POSIX timer interface allows for absolute time expiry
         * values through the TIMER_ABSTIME flag, therefore we have
         * to synchronize the timer to the clock every time we start
         * it.
         */
        thread_group_cputime(tsk, &sum);
        raw_spin_lock_irqsave(&cputimer->lock, flags);
        cputimer->running = 1;
        update_gt_cputime(&cputimer->cputime, &sum);
    } else
        raw_spin_lock_irqsave(&cputimer->lock, flags);
    *times = cputimer->cputime;
    raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}
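/*
 * Note on update_gt_cputime() above: while cputimer->running is 0 the
 * cached cputimer->cputime is no longer maintained, so on restart the
 * fresh thread_group_cputime() sum is merged in field by field, taking
 * the maximum of each, so that readers never see the cached group clock
 * jump backwards.
 */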
/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
                  struct task_struct *p,
                  unsigned long long *sample)
{
    struct task_cputime cputime;

    switch (CPUCLOCK_WHICH(which_clock)) {
    default:
        return -EINVAL;
    case CPUCLOCK_PROF:
        thread_group_cputime(p, &cputime);
        *sample = cputime_to_expires(cputime.utime + cputime.stime);
        break;
    case CPUCLOCK_VIRT:
        thread_group_cputime(p, &cputime);
        *sample = cputime_to_expires(cputime.utime);
        break;
    case CPUCLOCK_SCHED:
        thread_group_cputime(p, &cputime);
        *sample = cputime.sum_exec_runtime;
        break;
    }
    return 0;
}

static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
    const pid_t pid = CPUCLOCK_PID(which_clock);
    int error = -EINVAL;
    unsigned long long rtn;

    if (pid == 0) {
        /*
         * Special case constant value for our own clocks.
         * We don't have to do any lookup to find ourselves.
         */
        if (CPUCLOCK_PERTHREAD(which_clock)) {
            /*
             * Sampling just ourselves we can do with no locking.
             */
            error = cpu_clock_sample(which_clock,
                         current, &rtn);
        } else {
            read_lock(&tasklist_lock);
            error = cpu_clock_sample_group(which_clock,
                               current, &rtn);
            read_unlock(&tasklist_lock);
        }
    } else {
        /*
         * Find the given PID, and validate that the caller
         * should be able to see it.
         */
        struct task_struct *p;
        rcu_read_lock();
        p = find_task_by_vpid(pid);
        if (p) {
            if (CPUCLOCK_PERTHREAD(which_clock)) {
                if (same_thread_group(p, current)) {
                    error = cpu_clock_sample(which_clock,
                                 p, &rtn);
                }
            } else {
                read_lock(&tasklist_lock);
                if (thread_group_leader(p) && p->sighand) {
                    error =
                        cpu_clock_sample_group(which_clock,
                                       p, &rtn);
                }
                read_unlock(&tasklist_lock);
            }
        }
        rcu_read_unlock();
    }

    if (error)
        return error;
    sample_to_timespec(which_clock, rtn, tp);
    return 0;
}
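/*
 * Userspace reaches posix_cpu_clock_get() via clock_gettime() with a
 * CPU clockid.  An illustrative (userspace) sketch, error handling
 * omitted:
 *
 *     clockid_t cid;
 *     struct timespec ts;
 *
 *     clock_getcpuclockid(getpid(), &cid);  // or pthread_getcpuclockid()
 *     clock_gettime(cid, &ts);
 *
 * The predefined CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID
 * constants take the pid == 0 fast path above.
 */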
/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
    int ret = 0;
    const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
    struct task_struct *p;

    if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
        return -EINVAL;

    INIT_LIST_HEAD(&new_timer->it.cpu.entry);

    rcu_read_lock();
    if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
        if (pid == 0) {
            p = current;
        } else {
            p = find_task_by_vpid(pid);
            if (p && !same_thread_group(p, current))
                p = NULL;
        }
    } else {
        if (pid == 0) {
            p = current->group_leader;
        } else {
            p = find_task_by_vpid(pid);
            if (p && !has_group_leader_pid(p))
                p = NULL;
        }
    }
    new_timer->it.cpu.task = p;
    if (p) {
        get_task_struct(p);
    } else {
        ret = -EINVAL;
    }
    rcu_read_unlock();

    return ret;
}

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
    struct task_struct *p = timer->it.cpu.task;
    int ret = 0;

    if (likely(p != NULL)) {
        read_lock(&tasklist_lock);
        if (unlikely(p->sighand == NULL)) {
            /*
             * We raced with the reaping of the task.
             * The deletion should have cleared us off the list.
             */
            BUG_ON(!list_empty(&timer->it.cpu.entry));
        } else {
            spin_lock(&p->sighand->siglock);
            if (timer->it.cpu.firing)
                ret = TIMER_RETRY;
            else
                list_del(&timer->it.cpu.entry);
            spin_unlock(&p->sighand->siglock);
        }
        read_unlock(&tasklist_lock);

        if (!ret)
            put_task_struct(p);
    }

    return ret;
}

static void cleanup_timers_list(struct list_head *head,
                unsigned long long curr)
{
    struct cpu_timer_list *timer, *next;

    list_for_each_entry_safe(timer, next, head, entry) {
        list_del_init(&timer->entry);
        if (timer->expires < curr) {
            timer->expires = 0;
        } else {
            timer->expires -= curr;
        }
    }
}

/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head,
               cputime_t utime, cputime_t stime,
               unsigned long long sum_exec_runtime)
{
    cputime_t ptime = utime + stime;

    cleanup_timers_list(head, cputime_to_expires(ptime));
    cleanup_timers_list(++head, cputime_to_expires(utime));
    cleanup_timers_list(++head, sum_exec_runtime);
}
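/*
 * The ++head stepping above relies on the cpu_timers field being an
 * array of three list_heads, indexed in CPUCLOCK_PROF, CPUCLOCK_VIRT,
 * CPUCLOCK_SCHED order, so the three calls clean each list against the
 * matching sample: profiling time, user time and sched runtime.
 */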
/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
    cputime_t utime, stime;

    add_device_randomness((const void *) &tsk->se.sum_exec_runtime,
                  sizeof(unsigned long long));
    task_cputime(tsk, &utime, &stime);
    cleanup_timers(tsk->cpu_timers,
               utime, stime, tsk->se.sum_exec_runtime);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
    struct signal_struct *const sig = tsk->signal;
    cputime_t utime, stime;

    task_cputime(tsk, &utime, &stime);
    cleanup_timers(tsk->signal->cpu_timers,
               utime + sig->utime, stime + sig->stime,
               tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
}

static void clear_dead_task(struct k_itimer *timer, unsigned long long now)
{
    /*
     * That's all for this thread or process.
     * We leave our residual in expires to be reported.
     */
    put_task_struct(timer->it.cpu.task);
    timer->it.cpu.task = NULL;
    timer->it.cpu.expires -= now;
}

static inline int expires_gt(cputime_t expires, cputime_t new_exp)
{
    return expires == 0 || expires > new_exp;
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the tasklist_lock held
 * for reading, interrupts disabled and p->sighand->siglock taken.
 */
static void arm_timer(struct k_itimer *timer)
{
    struct task_struct *p = timer->it.cpu.task;
    struct list_head *head, *listpos;
    struct task_cputime *cputime_expires;
    struct cpu_timer_list *const nt = &timer->it.cpu;
    struct cpu_timer_list *next;

    if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
        head = p->cpu_timers;
        cputime_expires = &p->cputime_expires;
    } else {
        head = p->signal->cpu_timers;
        cputime_expires = &p->signal->cputime_expires;
    }
    head += CPUCLOCK_WHICH(timer->it_clock);

    listpos = head;
    list_for_each_entry(next, head, entry) {
        if (nt->expires < next->expires)
            break;
        listpos = &next->entry;
    }
    list_add(&nt->entry, listpos);

    if (listpos == head) {
        unsigned long long exp = nt->expires;

        /*
         * We are the new earliest-expiring POSIX 1.b timer, hence
         * need to update expiration cache. Take into account that
         * for process timers we share expiration cache with itimers
         * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
         */
        switch (CPUCLOCK_WHICH(timer->it_clock)) {
        case CPUCLOCK_PROF:
            if (expires_gt(cputime_expires->prof_exp, expires_to_cputime(exp)))
                cputime_expires->prof_exp = expires_to_cputime(exp);
            break;
        case CPUCLOCK_VIRT:
            if (expires_gt(cputime_expires->virt_exp, expires_to_cputime(exp)))
                cputime_expires->virt_exp = expires_to_cputime(exp);
            break;
        case CPUCLOCK_SCHED:
            if (cputime_expires->sched_exp == 0 ||
                cputime_expires->sched_exp > exp)
                cputime_expires->sched_exp = exp;
            break;
        }
    }
}
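/*
 * The cputime_expires cache written above is what fastpath_timer_check()
 * consults on every tick.  For example, if an ITIMER_PROF is already due
 * in 1s of CPU time and a new POSIX timer is armed 2s ahead, prof_exp
 * keeps the earlier 1s value and the new timer just queues behind it;
 * only a new earliest expiry lowers the cache.
 */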
/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
    if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
        /*
         * The user doesn't want any signal.
         */
        timer->it.cpu.expires = 0;
    } else if (unlikely(timer->sigq == NULL)) {
        /*
         * This is a special case for clock_nanosleep,
         * not a normal timer from sys_timer_create.
         */
        wake_up_process(timer->it_process);
        timer->it.cpu.expires = 0;
    } else if (timer->it.cpu.incr == 0) {
        /*
         * One-shot timer.  Clear it as soon as it's fired.
         */
        posix_timer_event(timer, 0);
        timer->it.cpu.expires = 0;
    } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
        /*
         * The signal did not get queued because the signal
         * was ignored, so we won't get any callback to
         * reload the timer.  But we need to keep it
         * ticking in case the signal is deliverable next time.
         */
        posix_cpu_timer_schedule(timer);
    }
}
/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
                  struct task_struct *p,
                  unsigned long long *sample)
{
    struct task_cputime cputime;

    thread_group_cputimer(p, &cputime);
    switch (CPUCLOCK_WHICH(which_clock)) {
    default:
        return -EINVAL;
    case CPUCLOCK_PROF:
        *sample = cputime_to_expires(cputime.utime + cputime.stime);
        break;
    case CPUCLOCK_VIRT:
        *sample = cputime_to_expires(cputime.utime);
        break;
    case CPUCLOCK_SCHED:
        *sample = cputime.sum_exec_runtime + task_delta_exec(p);
        break;
    }
    return 0;
}

#ifdef CONFIG_NO_HZ_FULL
static void nohz_kick_work_fn(struct work_struct *work)
{
    tick_nohz_full_kick_all();
}

static DECLARE_WORK(nohz_kick_work, nohz_kick_work_fn);

/*
 * We need the IPIs to be sent from sane process context.
 * The posix cpu timers are always set with irqs disabled.
 */
static void posix_cpu_timer_kick_nohz(void)
{
    schedule_work(&nohz_kick_work);
}

bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk)
{
    if (!task_cputime_zero(&tsk->cputime_expires))
        return false;

    if (tsk->signal->cputimer.running)
        return false;

    return true;
}
#else
static inline void posix_cpu_timer_kick_nohz(void) { }
#endif
/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                   struct itimerspec *new, struct itimerspec *old)
{
    struct task_struct *p = timer->it.cpu.task;
    unsigned long long old_expires, new_expires, old_incr, val;
    int ret;

    if (unlikely(p == NULL)) {
        /*
         * Timer refers to a dead task's clock.
         */
        return -ESRCH;
    }

    new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

    read_lock(&tasklist_lock);
    /*
     * We need the tasklist_lock to protect against reaping that
     * clears p->sighand.  If p has just been reaped, we can no
     * longer get any information about it at all.
     */
    if (unlikely(p->sighand == NULL)) {
        read_unlock(&tasklist_lock);
        put_task_struct(p);
        timer->it.cpu.task = NULL;
        return -ESRCH;
    }

    /*
     * Disarm any old timer after extracting its expiry time.
     */
    BUG_ON(!irqs_disabled());

    ret = 0;
    old_incr = timer->it.cpu.incr;
    spin_lock(&p->sighand->siglock);
    old_expires = timer->it.cpu.expires;
    if (unlikely(timer->it.cpu.firing)) {
        timer->it.cpu.firing = -1;
        ret = TIMER_RETRY;
    } else
        list_del_init(&timer->it.cpu.entry);
    /*
     * We need to sample the current clock value: to convert the new
     * value from relative to absolute, to convert the old value from
     * absolute to relative, and, when setting a process timer, to
     * balance the thread expiry times (in arm_timer).  With an
     * absolute time, we must check whether it has already passed.
     * In short, we need a sample.
     */
    if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
        cpu_clock_sample(timer->it_clock, p, &val);
    } else {
        cpu_timer_sample_group(timer->it_clock, p, &val);
    }

    if (old) {
        if (old_expires == 0) {
            old->it_value.tv_sec = 0;
            old->it_value.tv_nsec = 0;
        } else {
            /*
             * Update the timer in case it has
             * overrun already.  If it has,
             * we'll report it as having overrun
             * and with the next reloaded timer
             * already ticking, though we are
             * swallowing that pending
             * notification here to install the
             * new setting.
             */
            bump_cpu_timer(timer, val);
            if (val < timer->it.cpu.expires) {
                old_expires = timer->it.cpu.expires - val;
                sample_to_timespec(timer->it_clock,
                           old_expires,
                           &old->it_value);
            } else {
                old->it_value.tv_nsec = 1;
                old->it_value.tv_sec = 0;
            }
        }
    }

    if (unlikely(ret)) {
        /*
         * We are colliding with the timer actually firing.
         * Punt after filling in the timer's old value, and
         * disable this firing since we are already reporting
         * it as an overrun (thanks to bump_cpu_timer above).
         */
        spin_unlock(&p->sighand->siglock);
        read_unlock(&tasklist_lock);
        goto out;
    }

    if (new_expires != 0 && !(flags & TIMER_ABSTIME)) {
        new_expires += val;
    }

    /*
     * Install the new expiry time (or zero).
     * For a timer with no notification action, we don't actually
     * arm the timer (we'll just fake it for timer_gettime).
     */
    timer->it.cpu.expires = new_expires;
    if (new_expires != 0 && val < new_expires) {
        arm_timer(timer);
    }

    spin_unlock(&p->sighand->siglock);
    read_unlock(&tasklist_lock);

    /*
     * Install the new reload setting, and
     * set up the signal and overrun bookkeeping.
     */
    timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
                        &new->it_interval);

    /*
     * This acts as a modification timestamp for the timer,
     * so any automatic reload attempt will punt on seeing
     * that we have reset the timer manually.
     */
    timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
        ~REQUEUE_PENDING;
    timer->it_overrun_last = 0;
    timer->it_overrun = -1;

    if (new_expires != 0 && !(val < new_expires)) {
        /*
         * The designated time already passed, so we notify
         * immediately, even if the thread never runs to
         * accumulate more time on this clock.
         */
        cpu_timer_fire(timer);
    }

    ret = 0;
out:
    if (old) {
        sample_to_timespec(timer->it_clock,
                   old_incr, &old->it_interval);
    }
    if (!ret)
        posix_cpu_timer_kick_nohz();
    return ret;
}
static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
    unsigned long long now;
    struct task_struct *p = timer->it.cpu.task;
    int clear_dead;

    /*
     * Easy part: convert the reload time.
     */
    sample_to_timespec(timer->it_clock,
               timer->it.cpu.incr, &itp->it_interval);

    if (timer->it.cpu.expires == 0) {	/* Timer not armed at all.  */
        itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
        return;
    }

    if (unlikely(p == NULL)) {
        /*
         * This task already died and the timer will never fire.
         * In this case, expires is actually the dead value.
         */
    dead:
        sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
                   &itp->it_value);
        return;
    }

    /*
     * Sample the clock to take the difference with the expiry time.
     */
    if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
        cpu_clock_sample(timer->it_clock, p, &now);
        clear_dead = p->exit_state;
    } else {
        read_lock(&tasklist_lock);
        if (unlikely(p->sighand == NULL)) {
            /*
             * The process has been reaped.
             * We can't even collect a sample any more.
             * Call the timer disarmed, nothing else to do.
             */
            put_task_struct(p);
            timer->it.cpu.task = NULL;
            timer->it.cpu.expires = 0;
            read_unlock(&tasklist_lock);
            goto dead;
        } else {
            cpu_timer_sample_group(timer->it_clock, p, &now);
            clear_dead = (unlikely(p->exit_state) &&
                      thread_group_empty(p));
        }
        read_unlock(&tasklist_lock);
    }

    if (unlikely(clear_dead)) {
        /*
         * We've noticed that the thread is dead, but
         * not yet reaped.  Take this opportunity to
         * drop our task ref.
         */
        clear_dead_task(timer, now);
        goto dead;
    }

    if (now < timer->it.cpu.expires) {
        sample_to_timespec(timer->it_clock,
                   timer->it.cpu.expires - now,
                   &itp->it_value);
    } else {
        /*
         * The timer should have expired already, but the firing
         * hasn't taken place yet.  Say it's just about to expire.
         */
        itp->it_value.tv_nsec = 1;
        itp->it_value.tv_sec = 0;
    }
}
/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
                struct list_head *firing)
{
    int maxfire;
    struct list_head *timers = tsk->cpu_timers;
    struct signal_struct *const sig = tsk->signal;
    unsigned long soft;

    maxfire = 20;
    tsk->cputime_expires.prof_exp = 0;
    while (!list_empty(timers)) {
        struct cpu_timer_list *t = list_first_entry(timers,
                            struct cpu_timer_list,
                            entry);
        if (!--maxfire || prof_ticks(tsk) < t->expires) {
            tsk->cputime_expires.prof_exp = expires_to_cputime(t->expires);
            break;
        }
        t->firing = 1;
        list_move_tail(&t->entry, firing);
    }

    ++timers;
    maxfire = 20;
    tsk->cputime_expires.virt_exp = 0;
    while (!list_empty(timers)) {
        struct cpu_timer_list *t = list_first_entry(timers,
                            struct cpu_timer_list,
                            entry);
        if (!--maxfire || virt_ticks(tsk) < t->expires) {
            tsk->cputime_expires.virt_exp = expires_to_cputime(t->expires);
            break;
        }
        t->firing = 1;
        list_move_tail(&t->entry, firing);
    }

    ++timers;
    maxfire = 20;
    tsk->cputime_expires.sched_exp = 0;
    while (!list_empty(timers)) {
        struct cpu_timer_list *t = list_first_entry(timers,
                            struct cpu_timer_list,
                            entry);
        if (!--maxfire || tsk->se.sum_exec_runtime < t->expires) {
            tsk->cputime_expires.sched_exp = t->expires;
            break;
        }
        t->firing = 1;
        list_move_tail(&t->entry, firing);
    }

    /*
     * Check for the special case thread timers.
     */
    soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
    if (soft != RLIM_INFINITY) {
        unsigned long hard =
            ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);

        if (hard != RLIM_INFINITY &&
            tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
            /*
             * At the hard limit, we just die.
             * No need to calculate anything else now.
             */
            __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
            return;
        }
        if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
            /*
             * At the soft limit, send a SIGXCPU every second.
             */
            if (soft < hard) {
                soft += USEC_PER_SEC;
                sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
            }
            printk(KERN_INFO
                   "RT Watchdog Timeout: %s[%d]\n",
                   tsk->comm, task_pid_nr(tsk));
            __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
        }
    }
}
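/*
 * Unit note for the RLIMIT_RTTIME checks above: the rlimit values are
 * in microseconds while tsk->rt.timeout counts scheduler ticks, hence
 * the DIV_ROUND_UP(limit, USEC_PER_SEC / HZ) conversions.  Bumping the
 * soft limit by USEC_PER_SEC each time is what paces the SIGXCPU to
 * roughly once per second of consumed RT time.
 */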
static void stop_process_timers(struct signal_struct *sig)
{
    struct thread_group_cputimer *cputimer = &sig->cputimer;
    unsigned long flags;

    raw_spin_lock_irqsave(&cputimer->lock, flags);
    cputimer->running = 0;
    raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}

static u32 onecputick;

static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
                 unsigned long long *expires,
                 unsigned long long cur_time, int signo)
{
    if (!it->expires)
        return;

    if (cur_time >= it->expires) {
        if (it->incr) {
            it->expires += it->incr;
            it->error += it->incr_error;
            if (it->error >= onecputick) {
                it->expires -= cputime_one_jiffy;
                it->error -= onecputick;
            }
        } else {
            it->expires = 0;
        }

        trace_itimer_expire(signo == SIGPROF ?
                    ITIMER_PROF : ITIMER_VIRTUAL,
                    tsk->signal->leader_pid, cur_time);
        __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
    }

    if (it->expires && (!*expires || it->expires < *expires)) {
        *expires = it->expires;
    }
}
/*
 * Check for any per-process CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers list onto the firing list.  Per-thread
 * timers have already been taken off by check_thread_timers().
 */
static void check_process_timers(struct task_struct *tsk,
                 struct list_head *firing)
{
    int maxfire;
    struct signal_struct *const sig = tsk->signal;
    unsigned long long utime, ptime, virt_expires, prof_expires;
    unsigned long long sum_sched_runtime, sched_expires;
    struct list_head *timers = sig->cpu_timers;
    struct task_cputime cputime;
    unsigned long soft;

    /*
     * Collect the current process totals.
     */
    thread_group_cputimer(tsk, &cputime);
    utime = cputime_to_expires(cputime.utime);
    ptime = utime + cputime_to_expires(cputime.stime);
    sum_sched_runtime = cputime.sum_exec_runtime;
    maxfire = 20;
    prof_expires = 0;
    while (!list_empty(timers)) {
        struct cpu_timer_list *tl = list_first_entry(timers,
                             struct cpu_timer_list,
                             entry);
        if (!--maxfire || ptime < tl->expires) {
            prof_expires = tl->expires;
            break;
        }
        tl->firing = 1;
        list_move_tail(&tl->entry, firing);
    }

    ++timers;
    maxfire = 20;
    virt_expires = 0;
    while (!list_empty(timers)) {
        struct cpu_timer_list *tl = list_first_entry(timers,
                             struct cpu_timer_list,
                             entry);
        if (!--maxfire || utime < tl->expires) {
            virt_expires = tl->expires;
            break;
        }
        tl->firing = 1;
        list_move_tail(&tl->entry, firing);
    }

    ++timers;
    maxfire = 20;
    sched_expires = 0;
    while (!list_empty(timers)) {
        struct cpu_timer_list *tl = list_first_entry(timers,
                             struct cpu_timer_list,
                             entry);
        if (!--maxfire || sum_sched_runtime < tl->expires) {
            sched_expires = tl->expires;
            break;
        }
        tl->firing = 1;
        list_move_tail(&tl->entry, firing);
    }

    /*
     * Check for the special case process timers.
     */
    check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
             SIGPROF);
    check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
             SIGVTALRM);
    soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
    if (soft != RLIM_INFINITY) {
        unsigned long psecs = cputime_to_secs(ptime);
        unsigned long hard =
            ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
        cputime_t x;
        if (psecs >= hard) {
            /*
             * At the hard limit, we just die.
             * No need to calculate anything else now.
             */
            __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
            return;
        }
        if (psecs >= soft) {
            /*
             * At the soft limit, send a SIGXCPU every second.
             */
            __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
            if (soft < hard) {
                soft++;
                sig->rlim[RLIMIT_CPU].rlim_cur = soft;
            }
        }
        x = secs_to_cputime(soft);
        if (!prof_expires || x < prof_expires) {
            prof_expires = x;
        }
    }

    sig->cputime_expires.prof_exp = expires_to_cputime(prof_expires);
    sig->cputime_expires.virt_exp = expires_to_cputime(virt_expires);
    sig->cputime_expires.sched_exp = sched_expires;
    if (task_cputime_zero(&sig->cputime_expires))
        stop_process_timers(sig);
}
/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
    struct task_struct *p = timer->it.cpu.task;
    unsigned long long now;

    if (unlikely(p == NULL))
        /*
         * The task was cleaned up already, no future firings.
         */
        goto out;

    /*
     * Fetch the current sample and update the timer's expiry time.
     */
    if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
        cpu_clock_sample(timer->it_clock, p, &now);
        bump_cpu_timer(timer, now);
        if (unlikely(p->exit_state)) {
            clear_dead_task(timer, now);
            goto out;
        }
        read_lock(&tasklist_lock); /* arm_timer needs it.  */
        spin_lock(&p->sighand->siglock);
    } else {
        read_lock(&tasklist_lock);
        if (unlikely(p->sighand == NULL)) {
            /*
             * The process has been reaped.
             * We can't even collect a sample any more.
             */
            put_task_struct(p);
            timer->it.cpu.task = p = NULL;
            timer->it.cpu.expires = 0;
            goto out_unlock;
        } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
            /*
             * We've noticed that the thread is dead, but
             * not yet reaped.  Sample the clock first ("now"
             * has not been sampled yet on this branch), then
             * take the opportunity to drop our task ref.
             */
            cpu_timer_sample_group(timer->it_clock, p, &now);
            clear_dead_task(timer, now);
            goto out_unlock;
        }
        spin_lock(&p->sighand->siglock);
        cpu_timer_sample_group(timer->it_clock, p, &now);
        bump_cpu_timer(timer, now);
        /* Leave the tasklist_lock locked for the call below.  */
    }

    /*
     * Now re-arm for the new expiry time.
     */
    BUG_ON(!irqs_disabled());
    arm_timer(timer);
    spin_unlock(&p->sighand->siglock);

out_unlock:
    read_unlock(&tasklist_lock);

out:
    timer->it_overrun_last = timer->it_overrun;
    timer->it_overrun = -1;
    ++timer->it_requeue_pending;
}
/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of the former is greater than the corresponding
 * field of the latter if the latter field is set.  Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
                       const struct task_cputime *expires)
{
    if (expires->utime && sample->utime >= expires->utime)
        return 1;
    if (expires->stime && sample->utime + sample->stime >= expires->stime)
        return 1;
    if (expires->sum_exec_runtime != 0 &&
        sample->sum_exec_runtime >= expires->sum_exec_runtime)
        return 1;
    return 0;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
    struct signal_struct *sig;
    cputime_t utime, stime;

    task_cputime(tsk, &utime, &stime);

    if (!task_cputime_zero(&tsk->cputime_expires)) {
        struct task_cputime task_sample = {
            .utime = utime,
            .stime = stime,
            .sum_exec_runtime = tsk->se.sum_exec_runtime
        };

        if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
            return 1;
    }

    sig = tsk->signal;
    if (sig->cputimer.running) {
        struct task_cputime group_sample;

        raw_spin_lock(&sig->cputimer.lock);
        group_sample = sig->cputimer.cputime;
        raw_spin_unlock(&sig->cputimer.lock);

        if (task_cputime_expired(&group_sample, &sig->cputime_expires))
            return 1;
    }

    return 0;
}
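/*
 * Note that fastpath_timer_check() deliberately avoids sighand->siglock:
 * the per-task expiry fields are written by the task itself, and the
 * group sample needs only cputimer->lock.  A stale read can at worst
 * postpone firing to a later tick; since the counters only move
 * forward, an expired timer is never missed for good.
 */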
/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
    LIST_HEAD(firing);
    struct k_itimer *timer, *next;
    unsigned long flags;

    BUG_ON(!irqs_disabled());

    /*
     * The fast path checks that there are no expired thread or thread
     * group timers.  If that's so, just return.
     */
    if (!fastpath_timer_check(tsk))
        return;

    if (!lock_task_sighand(tsk, &flags))
        return;
    /*
     * Here we take off tsk->signal->cpu_timers[N] and
     * tsk->cpu_timers[N] all the timers that are firing, and
     * put them on the firing list.
     */
    check_thread_timers(tsk, &firing);
    /*
     * If there are any active process wide timers (POSIX 1.b, itimers,
     * RLIMIT_CPU) cputimer must be running.
     */
    if (tsk->signal->cputimer.running)
        check_process_timers(tsk, &firing);

    /*
     * We must release these locks before taking any timer's lock.
     * There is a potential race with timer deletion here, as the
     * siglock now protects our private firing list.  We have set
     * the firing flag in each timer, so that a deletion attempt
     * that gets the timer lock before we do will give it up and
     * spin until we've taken care of that timer below.
     */
    unlock_task_sighand(tsk, &flags);

    /*
     * Now that all the timers on our list have the firing flag,
     * no one will touch their list entries but us.  We'll take
     * each timer's lock before clearing its firing flag, so no
     * timer call will interfere.
     */
    list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
        int cpu_firing;

        spin_lock(&timer->it_lock);
        list_del_init(&timer->it.cpu.entry);
        cpu_firing = timer->it.cpu.firing;
        timer->it.cpu.firing = 0;
        /*
         * The firing flag is -1 if we collided with a reset
         * of the timer, which already reported this
         * almost-firing as an overrun.  So don't generate an event.
         */
        if (likely(cpu_firing >= 0))
            cpu_timer_fire(timer);
        spin_unlock(&timer->it_lock);
    }

    /*
     * In case some timers were rescheduled after the queue got emptied,
     * wake up full dynticks CPUs.
     */
    if (tsk->signal->cputimer.running)
        posix_cpu_timer_kick_nohz();
}
/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
               cputime_t *newval, cputime_t *oldval)
{
    unsigned long long now;

    BUG_ON(clock_idx == CPUCLOCK_SCHED);
    cpu_timer_sample_group(clock_idx, tsk, &now);

    if (oldval) {
        /*
         * We are setting an itimer.  The *oldval is absolute and we
         * update it to be relative; the *newval argument is relative
         * and we update it to be absolute.
         */
        if (*oldval) {
            if (*oldval <= now) {
                /* Just about to fire. */
                *oldval = cputime_one_jiffy;
            } else {
                *oldval -= now;
            }
        }

        if (!*newval)
            goto out;
        *newval += now;
    }

    /*
     * Update the expiration cache if we are the earliest timer, or if
     * the RLIMIT_CPU limit is earlier than the prof_exp cpu timer expiry.
     */
    switch (clock_idx) {
    case CPUCLOCK_PROF:
        if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
            tsk->signal->cputime_expires.prof_exp = *newval;
        break;
    case CPUCLOCK_VIRT:
        if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
            tsk->signal->cputime_expires.virt_exp = *newval;
        break;
    }
out:
    posix_cpu_timer_kick_nohz();
}
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
                struct timespec *rqtp, struct itimerspec *it)
{
    struct k_itimer timer;
    int error;

    /*
     * Set up a temporary timer and then wait for it to go off.
     */
    memset(&timer, 0, sizeof timer);
    spin_lock_init(&timer.it_lock);
    timer.it_clock = which_clock;
    timer.it_overrun = -1;
    error = posix_cpu_timer_create(&timer);
    timer.it_process = current;
    if (!error) {
        static struct itimerspec zero_it;

        memset(it, 0, sizeof *it);
        it->it_value = *rqtp;

        spin_lock_irq(&timer.it_lock);
        error = posix_cpu_timer_set(&timer, flags, it, NULL);
        if (error) {
            spin_unlock_irq(&timer.it_lock);
            return error;
        }

        while (!signal_pending(current)) {
            if (timer.it.cpu.expires == 0) {
                /*
                 * Our timer fired and was reset; the
                 * deletion below cannot fail.
                 */
                posix_cpu_timer_del(&timer);
                spin_unlock_irq(&timer.it_lock);
                return 0;
            }

            /*
             * Block until cpu_timer_fire (or a signal) wakes us.
             */
            __set_current_state(TASK_INTERRUPTIBLE);
            spin_unlock_irq(&timer.it_lock);
            schedule();
            spin_lock_irq(&timer.it_lock);
        }

        /*
         * We were interrupted by a signal.
         */
        sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
        error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
        if (!error) {
            /*
             * The timer is now unarmed; the deletion cannot fail.
             */
            posix_cpu_timer_del(&timer);
        }
        spin_unlock_irq(&timer.it_lock);

        while (error == TIMER_RETRY) {
            /*
             * We need to handle the case where the timer was or
             * is in the middle of firing.  In all other cases the
             * resources were already freed above.
             */
            spin_lock_irq(&timer.it_lock);
            error = posix_cpu_timer_del(&timer);
            spin_unlock_irq(&timer.it_lock);
        }

        if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
            /*
             * It actually did fire already.
             */
            return 0;
        }

        error = -ERESTART_RESTARTBLOCK;
    }

    return error;
}
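/*
 * do_cpu_nanosleep() is the backend for clock_nanosleep() on CPU-time
 * clocks.  An illustrative (userspace) call that blocks until the
 * process has consumed one more second of CPU time:
 *
 *     struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *     clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &ts, NULL);
 *
 * Because the temporary timer's sigq stays NULL, cpu_timer_fire() takes
 * its wake_up_process() branch instead of queueing a signal.
 */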
static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
                struct timespec *rqtp, struct timespec __user *rmtp)
{
    struct restart_block *restart_block =
        &current_thread_info()->restart_block;
    struct itimerspec it;
    int error;

    /*
     * Diagnose required errors first.
     */
    if (CPUCLOCK_PERTHREAD(which_clock) &&
        (CPUCLOCK_PID(which_clock) == 0 ||
         CPUCLOCK_PID(which_clock) == current->pid))
        return -EINVAL;

    error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

    if (error == -ERESTART_RESTARTBLOCK) {

        if (flags & TIMER_ABSTIME)
            return -ERESTARTNOHAND;
        /*
         * Report back to the user the time still remaining.
         */
        if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
            return -EFAULT;

        restart_block->fn = posix_cpu_nsleep_restart;
        restart_block->nanosleep.clockid = which_clock;
        restart_block->nanosleep.rmtp = rmtp;
        restart_block->nanosleep.expires = timespec_to_ns(rqtp);
    }
    return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
    clockid_t which_clock = restart_block->nanosleep.clockid;
    struct timespec t;
    struct itimerspec it;
    int error;

    t = ns_to_timespec(restart_block->nanosleep.expires);

    error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

    if (error == -ERESTART_RESTARTBLOCK) {
        struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
        /*
         * Report back to the user the time still remaining.
         */
        if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
            return -EFAULT;

        restart_block->nanosleep.expires = timespec_to_ns(&t);
    }
    return error;
}
#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
                    struct timespec *tp)
{
    return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
                 struct timespec *tp)
{
    return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
    timer->it_clock = PROCESS_CLOCK;
    return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
                  struct timespec *rqtp,
                  struct timespec __user *rmtp)
{
    return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
    return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
                   struct timespec *tp)
{
    return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
                struct timespec *tp)
{
    return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
    timer->it_clock = THREAD_CLOCK;
    return posix_cpu_timer_create(timer);
}

struct k_clock clock_posix_cpu = {
    .clock_getres	= posix_cpu_clock_getres,
    .clock_set	= posix_cpu_clock_set,
    .clock_get	= posix_cpu_clock_get,
    .timer_create	= posix_cpu_timer_create,
    .nsleep		= posix_cpu_nsleep,
    .nsleep_restart	= posix_cpu_nsleep_restart,
    .timer_set	= posix_cpu_timer_set,
    .timer_del	= posix_cpu_timer_del,
    .timer_get	= posix_cpu_timer_get,
};
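/*
 * clock_posix_cpu is the k_clock backing dynamically encoded CPU
 * clockids (the ones built from a pid).  A hypothetical userspace
 * sketch that ends up in posix_cpu_timer_create()/posix_cpu_timer_set()
 * above:
 *
 *     timer_t tid;
 *     struct sigevent ev = { .sigev_notify = SIGEV_SIGNAL,
 *                            .sigev_signo  = SIGALRM };
 *     struct itimerspec its = { .it_value = { .tv_sec = 5 } };
 *
 *     timer_create(CLOCK_PROCESS_CPUTIME_ID, &ev, &tid);
 *     timer_settime(tid, 0, &its, NULL);  // fires after 5s of CPU time
 */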
static __init int init_posix_cpu_timers(void)
{
    struct k_clock process = {
        .clock_getres	= process_cpu_clock_getres,
        .clock_get	= process_cpu_clock_get,
        .timer_create	= process_cpu_timer_create,
        .nsleep		= process_cpu_nsleep,
        .nsleep_restart	= process_cpu_nsleep_restart,
    };
    struct k_clock thread = {
        .clock_getres	= thread_cpu_clock_getres,
        .clock_get	= thread_cpu_clock_get,
        .timer_create	= thread_cpu_timer_create,
    };
    struct timespec ts;

    posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
    posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

    cputime_to_timespec(cputime_one_jiffy, &ts);
    onecputick = ts.tv_nsec;
    WARN_ON(ts.tv_sec != 0);

    return 0;
}
__initcall(init_posix_cpu_timers);