posix-cpu-timers.c
/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <asm/uaccess.h>
#include <linux/errno.h>

static int check_clock(clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;
	if (pid == 0)
		return 0;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (!p || (CPUCLOCK_PERTHREAD(which_clock) ?
		   p->tgid != current->tgid : p->tgid != pid)) {
		error = -EINVAL;
	}
	read_unlock(&tasklist_lock);

	return error;
}
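/*
 * Note (expository, not load-bearing): a CPU-clock clockid_t encodes
 * both a task and a clock type.  The decoding macros used above come
 * from <linux/posix-timers.h>; roughly, the low bits select the clock
 * (CPUCLOCK_PROF, CPUCLOCK_VIRT or CPUCLOCK_SCHED) and the per-thread
 * flag, while the remaining bits carry the PID, with pid 0 meaning the
 * calling task itself.  Only the macros, not the exact bit layout, are
 * relied on in this file.
 *
 * Likewise, union cpu_time_count (declared elsewhere) is one word seen
 * two ways: .cpu is a cputime_t for the tick-based PROF/VIRT clocks,
 * and .sched is a plain nanosecond count for the SCHED clock.  Because
 * the union is always zeroed through .sched, tests such as
 * "expires.sched == 0" below double as a cheap "timer not armed / no
 * increment" check for every clock type.
 */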
static inline union cpu_time_count
timespec_to_sample(clockid_t which_clock, const struct timespec *tp)
{
	union cpu_time_count ret;
	ret.sched = 0;		/* high half always zero when .cpu used */
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		ret.sched = tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
	} else {
		ret.cpu = timespec_to_cputime(tp);
	}
	return ret;
}

static void sample_to_timespec(clockid_t which_clock,
			       union cpu_time_count cpu,
			       struct timespec *tp)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		tp->tv_sec = div_long_long_rem(cpu.sched,
					       NSEC_PER_SEC, &tp->tv_nsec);
	} else {
		cputime_to_timespec(cpu.cpu, tp);
	}
}

static inline int cpu_time_before(clockid_t which_clock,
				  union cpu_time_count now,
				  union cpu_time_count then)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		return now.sched < then.sched;
	} else {
		return cputime_lt(now.cpu, then.cpu);
	}
}

static inline void cpu_time_add(clockid_t which_clock,
				union cpu_time_count *acc,
				union cpu_time_count val)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		acc->sched += val.sched;
	} else {
		acc->cpu = cputime_add(acc->cpu, val.cpu);
	}
}

static inline union cpu_time_count cpu_time_sub(clockid_t which_clock,
						union cpu_time_count a,
						union cpu_time_count b)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		a.sched -= b.sched;
	} else {
		a.cpu = cputime_sub(a.cpu, b.cpu);
	}
	return a;
}
/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static inline void bump_cpu_timer(struct k_itimer *timer,
				  union cpu_time_count now)
{
	int i;

	if (timer->it.cpu.incr.sched == 0)
		return;

	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
		unsigned long long delta, incr;

		if (now.sched < timer->it.cpu.expires.sched)
			return;
		incr = timer->it.cpu.incr.sched;
		delta = now.sched + incr - timer->it.cpu.expires.sched;
		/* Don't use (incr*2 < delta), incr*2 might overflow. */
		for (i = 0; incr < delta - incr; i++)
			incr = incr << 1;
		for (; i >= 0; incr >>= 1, i--) {
			if (delta <= incr)
				continue;
			timer->it.cpu.expires.sched += incr;
			timer->it_overrun += 1 << i;
			delta -= incr;
		}
	} else {
		cputime_t delta, incr;

		if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
			return;
		incr = timer->it.cpu.incr.cpu;
		delta = cputime_sub(cputime_add(now.cpu, incr),
				    timer->it.cpu.expires.cpu);
		/* Don't use (incr*2 < delta), incr*2 might overflow. */
		for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
			incr = cputime_add(incr, incr);
		for (; i >= 0; incr = cputime_halve(incr), i--) {
			if (cputime_le(delta, incr))
				continue;
			timer->it.cpu.expires.cpu =
				cputime_add(timer->it.cpu.expires.cpu, incr);
			timer->it_overrun += 1 << i;
			delta = cputime_sub(delta, incr);
		}
	}
}
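/*
 * Worked example of bump_cpu_timer (illustrative): with expires = 10,
 * incr = 3 and now = 20, delta = 20 + 3 - 10 = 13.  The first loop
 * doubles incr up to 12 (i = 2); the second loop walks back down,
 * taking each power-of-two multiple of the period that fits: 12 is
 * added (it_overrun += 1 << 2, delta becomes 1), then 6 and 3 are
 * skipped.  The result is expires = 22 with 4 overruns recorded: the
 * missed firings at 10, 13, 16 and 19 are accounted for, and the next
 * expiry is the first multiple of the period past now.  The cost is
 * logarithmic in the number of missed periods rather than linear.
 */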
static inline cputime_t prof_ticks(struct task_struct *p)
{
	return cputime_add(p->utime, p->stime);
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
	return p->utime;
}
static inline unsigned long long sched_ns(struct task_struct *p)
{
	return (p == current) ? current_sched_time(p) : p->sched_time;
}
int posix_cpu_clock_getres(clockid_t which_clock, struct timespec *tp)
{
	int error = check_clock(which_clock);
	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, its
			 * true resolution is not exported anywhere we
			 * can see, but it is much finer than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}

int posix_cpu_clock_set(clockid_t which_clock, const struct timespec *tp)
{
	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	int error = check_clock(which_clock);
	if (error == 0) {
		error = -EPERM;
	}
	return error;
}
/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(clockid_t which_clock, struct task_struct *p,
			    union cpu_time_count *cpu)
{
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		cpu->cpu = prof_ticks(p);
		break;
	case CPUCLOCK_VIRT:
		cpu->cpu = virt_ticks(p);
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = sched_ns(p);
		break;
	}
	return 0;
}
/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading and with
 * p->sighand->siglock held.
 */
static int cpu_clock_sample_group_locked(unsigned int clock_idx,
					 struct task_struct *p,
					 union cpu_time_count *cpu)
{
	struct task_struct *t = p;
	switch (clock_idx) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		cpu->cpu = cputime_add(p->signal->utime, p->signal->stime);
		do {
			cpu->cpu = cputime_add(cpu->cpu, prof_ticks(t));
			t = next_thread(t);
		} while (t != p);
		break;
	case CPUCLOCK_VIRT:
		cpu->cpu = p->signal->utime;
		do {
			cpu->cpu = cputime_add(cpu->cpu, virt_ticks(t));
			t = next_thread(t);
		} while (t != p);
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = p->signal->sched_time;
		/* Add in each other live thread. */
		while ((t = next_thread(t)) != p) {
			cpu->sched += t->sched_time;
		}
		if (p->tgid == current->tgid) {
			/*
			 * We're sampling ourselves, so include the
			 * cycles not yet banked.  We still omit
			 * other threads running on other CPUs,
			 * so the total can always be behind as
			 * much as max(nthreads-1,ncpus) * (NSEC_PER_SEC/HZ).
			 */
			cpu->sched += current_sched_time(current);
		} else {
			cpu->sched += p->sched_time;
		}
		break;
	}
	return 0;
}
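/*
 * Note: the signal_struct totals (p->signal->utime/stime/sched_time)
 * accumulate the times of threads that have already exited and been
 * reaped, so the loops above add each still-live thread on top of that
 * baseline to form the whole-process total.  Holding tasklist_lock and
 * the siglock keeps the thread list and the accumulated totals
 * consistent while we sum them.
 */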
/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(clockid_t which_clock,
				  struct task_struct *p,
				  union cpu_time_count *cpu)
{
	int ret;
	unsigned long flags;
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = cpu_clock_sample_group_locked(CPUCLOCK_WHICH(which_clock), p,
					    cpu);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	return ret;
}
int posix_cpu_clock_get(clockid_t which_clock, struct timespec *tp)
{
	const pid_t pid = CPUCLOCK_PID(which_clock);
	int error = -EINVAL;
	union cpu_time_count rtn;

	if (pid == 0) {
		/*
		 * Special case constant value for our own clocks.
		 * We don't have to do any lookup to find ourselves.
		 */
		if (CPUCLOCK_PERTHREAD(which_clock)) {
			/*
			 * Sampling just ourselves we can do with no locking.
			 */
			error = cpu_clock_sample(which_clock,
						 current, &rtn);
		} else {
			read_lock(&tasklist_lock);
			error = cpu_clock_sample_group(which_clock,
						       current, &rtn);
			read_unlock(&tasklist_lock);
		}
	} else {
		/*
		 * Find the given PID, and validate that the caller
		 * should be able to see it.
		 */
		struct task_struct *p;
		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);
		if (p) {
			if (CPUCLOCK_PERTHREAD(which_clock)) {
				if (p->tgid == current->tgid) {
					error = cpu_clock_sample(which_clock,
								 p, &rtn);
				}
			} else if (p->tgid == pid && p->signal) {
				error = cpu_clock_sample_group(which_clock,
							       p, &rtn);
			}
		}
		read_unlock(&tasklist_lock);
	}

	if (error)
		return error;
	sample_to_timespec(which_clock, rtn, tp);
	return 0;
}
/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create with the new timer already locked.
 */
int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);
	new_timer->it.cpu.incr.sched = 0;
	new_timer->it.cpu.expires.sched = 0;

	read_lock(&tasklist_lock);
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_pid(pid);
			if (p && p->tgid != current->tgid)
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_pid(pid);
			if (p && p->tgid != pid)
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
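/*
 * Illustrative userspace view (not part of this file): this path is
 * reached via timer_create(2) with a CPU clock, e.g.
 *
 *	timer_t tid;
 *	struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
 *				.sigev_signo  = SIGPROF };
 *	timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid);
 *
 * A clockid naming another task (e.g. obtained from
 * clock_getcpuclockid()) works too, subject to the visibility checks
 * above.
 */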
/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_del(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	int ret = 0;

	if (likely(p != NULL)) {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * We raced with the reaping of the task.
			 * The deletion should have cleared us off the list.
			 */
			BUG_ON(!list_empty(&timer->it.cpu.entry));
		} else {
			spin_lock(&p->sighand->siglock);
			if (timer->it.cpu.firing)
				ret = TIMER_RETRY;
			else
				list_del(&timer->it.cpu.entry);
			spin_unlock(&p->sighand->siglock);
		}
		read_unlock(&tasklist_lock);

		if (!ret)
			put_task_struct(p);
	}
	return ret;
}
/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head,
			   cputime_t utime, cputime_t stime,
			   unsigned long long sched_time)
{
	struct cpu_timer_list *timer, *next;
	cputime_t ptime = cputime_add(utime, stime);

	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (cputime_lt(timer->expires.cpu, ptime)) {
			timer->expires.cpu = cputime_zero;
		} else {
			timer->expires.cpu = cputime_sub(timer->expires.cpu,
							 ptime);
		}
	}

	++head;
	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (cputime_lt(timer->expires.cpu, utime)) {
			timer->expires.cpu = cputime_zero;
		} else {
			timer->expires.cpu = cputime_sub(timer->expires.cpu,
							 utime);
		}
	}

	++head;
	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (timer->expires.sched < sched_time) {
			timer->expires.sched = 0;
		} else {
			timer->expires.sched -= sched_time;
		}
	}
}
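/*
 * Note: the head argument points at a three-entry array of timer lists,
 * indexed in CPUCLOCK_PROF, CPUCLOCK_VIRT, CPUCLOCK_SCHED order (the
 * same layout arm_timer indexes with CPUCLOCK_WHICH).  The two ++head
 * steps above walk from the profiling list to the virtual list to the
 * scheduling list, clipping each timer against the matching accumulated
 * time: utime+stime, utime, and sched_time respectively.
 */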
/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cleanup_timers(tsk->cpu_timers,
		       tsk->utime, tsk->stime, tsk->sched_time);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	cleanup_timers(tsk->signal->cpu_timers,
		       cputime_add(tsk->utime, tsk->signal->utime),
		       cputime_add(tsk->stime, tsk->signal->stime),
		       tsk->sched_time + tsk->signal->sched_time);
}
/*
 * Set the expiry times of all the threads in the process so one of them
 * will go off before the process cumulative expiry total is reached.
 */
static void process_timer_rebalance(struct task_struct *p,
				    unsigned int clock_idx,
				    union cpu_time_count expires,
				    union cpu_time_count val)
{
	cputime_t ticks, left;
	unsigned long long ns, nsleft;
	struct task_struct *t = p;
	unsigned int nthreads = atomic_read(&p->signal->live);

	if (!nthreads)
		return;

	switch (clock_idx) {
	default:
		BUG();
		break;
	case CPUCLOCK_PROF:
		left = cputime_div(cputime_sub(expires.cpu, val.cpu),
				   nthreads);
		do {
			if (!unlikely(t->exit_state)) {
				ticks = cputime_add(prof_ticks(t), left);
				if (cputime_eq(t->it_prof_expires,
					       cputime_zero) ||
				    cputime_gt(t->it_prof_expires, ticks)) {
					t->it_prof_expires = ticks;
				}
			}
			t = next_thread(t);
		} while (t != p);
		break;
	case CPUCLOCK_VIRT:
		left = cputime_div(cputime_sub(expires.cpu, val.cpu),
				   nthreads);
		do {
			if (!unlikely(t->exit_state)) {
				ticks = cputime_add(virt_ticks(t), left);
				if (cputime_eq(t->it_virt_expires,
					       cputime_zero) ||
				    cputime_gt(t->it_virt_expires, ticks)) {
					t->it_virt_expires = ticks;
				}
			}
			t = next_thread(t);
		} while (t != p);
		break;
	case CPUCLOCK_SCHED:
		nsleft = expires.sched - val.sched;
		do_div(nsleft, nthreads);
		do {
			if (!unlikely(t->exit_state)) {
				ns = t->sched_time + nsleft;
				if (t->it_sched_expires == 0 ||
				    t->it_sched_expires > ns) {
					t->it_sched_expires = ns;
				}
			}
			t = next_thread(t);
		} while (t != p);
		break;
	}
}
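/*
 * Example of the split (illustrative): for a process-wide PROF timer,
 * each of the nthreads live threads gets a per-thread expiry of its own
 * current ticks plus (expires - val) / nthreads.  If the process has
 * 12ms of budget left across 3 threads, each thread is told to trip
 * after 4ms more of its own CPU time; whichever thread crosses its
 * share first re-runs the process-wide check from the tick path, so
 * some thread timer always goes off no later than the point where the
 * process total is reached.
 */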
static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
	/*
	 * That's all for this thread or process.
	 * We leave our residual in expires to be reported.
	 */
	put_task_struct(timer->it.cpu.task);
	timer->it.cpu.task = NULL;
	timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
					     timer->it.cpu.expires,
					     now);
}
/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the tasklist_lock held
 * for reading, and interrupts disabled.
 */
static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;
	unsigned long i;

	head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
		p->cpu_timers : p->signal->cpu_timers);
	head += CPUCLOCK_WHICH(timer->it_clock);

	BUG_ON(!irqs_disabled());
	spin_lock(&p->sighand->siglock);

	/*
	 * Advance listpos past every timer that expires no later than
	 * the new one, so inserting after listpos keeps the list sorted
	 * by expiry and listpos == head means we are the new earliest.
	 */
	listpos = head;
	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
		list_for_each_entry(next, head, entry) {
			if (next->expires.sched > nt->expires.sched)
				break;
			listpos = &next->entry;
		}
	} else {
		list_for_each_entry(next, head, entry) {
			if (cputime_gt(next->expires.cpu, nt->expires.cpu))
				break;
			listpos = &next->entry;
		}
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		/*
		 * We are the new earliest-expiring timer.
		 * If we are a thread timer, there can always
		 * be a process timer telling us to stop earlier.
		 */
		if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
			switch (CPUCLOCK_WHICH(timer->it_clock)) {
			default:
				BUG();
			case CPUCLOCK_PROF:
				if (cputime_eq(p->it_prof_expires,
					       cputime_zero) ||
				    cputime_gt(p->it_prof_expires,
					       nt->expires.cpu))
					p->it_prof_expires = nt->expires.cpu;
				break;
			case CPUCLOCK_VIRT:
				if (cputime_eq(p->it_virt_expires,
					       cputime_zero) ||
				    cputime_gt(p->it_virt_expires,
					       nt->expires.cpu))
					p->it_virt_expires = nt->expires.cpu;
				break;
			case CPUCLOCK_SCHED:
				if (p->it_sched_expires == 0 ||
				    p->it_sched_expires > nt->expires.sched)
					p->it_sched_expires = nt->expires.sched;
				break;
			}
		} else {
			/*
			 * For a process timer, we must balance
			 * all the live threads' expirations.
			 */
			switch (CPUCLOCK_WHICH(timer->it_clock)) {
			default:
				BUG();
			case CPUCLOCK_VIRT:
				if (!cputime_eq(p->signal->it_virt_expires,
						cputime_zero) &&
				    cputime_lt(p->signal->it_virt_expires,
					       timer->it.cpu.expires.cpu))
					break;
				goto rebalance;
			case CPUCLOCK_PROF:
				if (!cputime_eq(p->signal->it_prof_expires,
						cputime_zero) &&
				    cputime_lt(p->signal->it_prof_expires,
					       timer->it.cpu.expires.cpu))
					break;
				i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
				if (i != RLIM_INFINITY &&
				    i <= cputime_to_secs(timer->it.cpu.expires.cpu))
					break;
				goto rebalance;
			case CPUCLOCK_SCHED:
			rebalance:
				process_timer_rebalance(
					timer->it.cpu.task,
					CPUCLOCK_WHICH(timer->it_clock),
					timer->it.cpu.expires, now);
				break;
			}
		}
	}

	spin_unlock(&p->sighand->siglock);
}
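/*
 * Note: each cpu_timers list is kept sorted soonest-first, so the tick
 * path only ever has to look at the head of each list.  After the
 * insertion above, "listpos == head" is precisely the case where the
 * new timer became the soonest on its list and the cached
 * it_*_expires limits may need to be lowered to match.
 */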
/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires.sched = 0;
	} else if (timer->it.cpu.incr.sched == 0) {
		/*
		 * One-shot timer.  Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires.sched = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer.  But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_schedule(timer);
	}
}
/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_set(struct k_itimer *timer, int flags,
			struct itimerspec *new, struct itimerspec *old)
{
	struct task_struct *p = timer->it.cpu.task;
	union cpu_time_count old_expires, new_expires, val;
	int ret;

	if (unlikely(p == NULL)) {
		/*
		 * Timer refers to a dead task's clock.
		 */
		return -ESRCH;
	}

	new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

	read_lock(&tasklist_lock);
	/*
	 * We need the tasklist_lock to protect against reaping that
	 * clears p->signal.  If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(p->signal == NULL)) {
		read_unlock(&tasklist_lock);
		put_task_struct(p);
		timer->it.cpu.task = NULL;
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	BUG_ON(!irqs_disabled());
	spin_lock(&p->sighand->siglock);
	old_expires = timer->it.cpu.expires;
	list_del_init(&timer->it.cpu.entry);
	spin_unlock(&p->sighand->siglock);

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the old
	 * value from absolute to relative.  To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer).  With an absolute time, we must
	 * check if it's already passed.  In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &val);
	} else {
		cpu_clock_sample_group(timer->it_clock, p, &val);
	}

	if (old) {
		if (old_expires.sched == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has
			 * overrun already.  If it has,
			 * we'll report it as having overrun
			 * and with the next reloaded timer
			 * already ticking, though we are
			 * swallowing that pending
			 * notification here to install the
			 * new setting.
			 */
			bump_cpu_timer(timer, val);
			if (cpu_time_before(timer->it_clock, val,
					    timer->it.cpu.expires)) {
				old_expires = cpu_time_sub(
					timer->it_clock,
					timer->it.cpu.expires, val);
				sample_to_timespec(timer->it_clock,
						   old_expires,
						   &old->it_value);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(timer->it.cpu.firing)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		read_unlock(&tasklist_lock);
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
		goto out;
	}

	if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
		cpu_time_add(timer->it_clock, &new_expires, val);
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires.sched != 0 &&
	    (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
	    cpu_time_before(timer->it_clock, val, new_expires)) {
		arm_timer(timer, val);
	}

	read_unlock(&tasklist_lock);

	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
						&new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires.sched != 0 &&
	    (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
	    !cpu_time_before(timer->it_clock, val, new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
 out:
	if (old) {
		sample_to_timespec(timer->it_clock,
				   timer->it.cpu.incr, &old->it_interval);
	}
	return ret;
}
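/*
 * Illustrative userspace view (not part of this file): timer_settime(2)
 * lands here.  For example, arming the timer from the earlier example
 * for 500ms of process CPU time, reloading every 500ms:
 *
 *	struct itimerspec its = {
 *		.it_value    = { .tv_sec = 0, .tv_nsec = 500000000 },
 *		.it_interval = { .tv_sec = 0, .tv_nsec = 500000000 },
 *	};
 *	timer_settime(tid, 0, &its, NULL);
 *
 * Passing TIMER_ABSTIME as the flags argument makes it_value an
 * absolute reading of the clock instead of an offset from the current
 * sample, matching the flags test above.
 */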
void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
	union cpu_time_count now;
	struct task_struct *p = timer->it.cpu.task;
	int clear_dead;

	/*
	 * Easy part: convert the reload time.
	 */
	sample_to_timespec(timer->it_clock,
			   timer->it.cpu.incr, &itp->it_interval);

	if (timer->it.cpu.expires.sched == 0) {	/* Timer not armed at all.  */
		itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
		return;
	}

	if (unlikely(p == NULL)) {
		/*
		 * This task already died and the timer will never fire.
		 * In this case, expires is actually the dead value.
		 */
	dead:
		sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
				   &itp->it_value);
		return;
	}

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		clear_dead = p->exit_state;
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 */
			put_task_struct(p);
			timer->it.cpu.task = NULL;
			timer->it.cpu.expires.sched = 0;
			read_unlock(&tasklist_lock);
			goto dead;
		} else {
			cpu_clock_sample_group(timer->it_clock, p, &now);
			clear_dead = (unlikely(p->exit_state) &&
				      thread_group_empty(p));
		}
		read_unlock(&tasklist_lock);
	}

	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		if (timer->it.cpu.incr.sched == 0 &&
		    cpu_time_before(timer->it_clock,
				    timer->it.cpu.expires, now)) {
			/*
			 * Do-nothing timer expired and has no reload,
			 * so it's as if it was never set.
			 */
			timer->it.cpu.expires.sched = 0;
			itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
			return;
		}
		/*
		 * Account for any expirations and reloads that should
		 * have happened.
		 */
		bump_cpu_timer(timer, now);
	}

	if (unlikely(clear_dead)) {
		/*
		 * We've noticed that the thread is dead, but
		 * not yet reaped.  Take this opportunity to
		 * drop our task ref.
		 */
		clear_dead_task(timer, now);
		goto dead;
	}

	if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
		sample_to_timespec(timer->it_clock,
				   cpu_time_sub(timer->it_clock,
						timer->it.cpu.expires, now),
				   &itp->it_value);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet.  Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}
/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	int maxfire;
	struct list_head *timers = tsk->cpu_timers;

	maxfire = 20;
	tsk->it_prof_expires = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_entry(timers->next,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
			tsk->it_prof_expires = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	tsk->it_virt_expires = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_entry(timers->next,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
			tsk->it_virt_expires = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	tsk->it_sched_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_entry(timers->next,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || tsk->sched_time < t->expires.sched) {
			tsk->it_sched_expires = t->expires.sched;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}
}
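/*
 * Note: maxfire caps each pass at 20 expired timers per list.  When the
 * cap trips, it_*_expires is left pointing at an already-passed expiry,
 * so the quick test in run_posix_cpu_timers fails again on the next
 * tick and the remaining timers get drained then.  This bounds the
 * work done in any one interrupts-disabled tick without losing firings.
 */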
/*
 * Check for any process-wide CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers[N] lists onto the firing list.  The
 * per-thread timers have already been taken off by check_thread_timers.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	int maxfire;
	struct signal_struct *const sig = tsk->signal;
	cputime_t utime, stime, ptime, virt_expires, prof_expires;
	unsigned long long sched_time, sched_expires;
	struct task_struct *t;
	struct list_head *timers = sig->cpu_timers;

	/*
	 * Don't sample the current process CPU clocks if there are no timers.
	 */
	if (list_empty(&timers[CPUCLOCK_PROF]) &&
	    cputime_eq(sig->it_prof_expires, cputime_zero) &&
	    sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
	    list_empty(&timers[CPUCLOCK_VIRT]) &&
	    cputime_eq(sig->it_virt_expires, cputime_zero) &&
	    list_empty(&timers[CPUCLOCK_SCHED]))
		return;

	/*
	 * Collect the current process totals.
	 */
	utime = sig->utime;
	stime = sig->stime;
	sched_time = sig->sched_time;
	t = tsk;
	do {
		utime = cputime_add(utime, t->utime);
		stime = cputime_add(stime, t->stime);
		sched_time += t->sched_time;
		t = next_thread(t);
	} while (t != tsk);
	ptime = cputime_add(utime, stime);

	maxfire = 20;
	prof_expires = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_entry(timers->next,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(ptime, t->expires.cpu)) {
			prof_expires = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	virt_expires = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_entry(timers->next,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(utime, t->expires.cpu)) {
			virt_expires = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	sched_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_entry(timers->next,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || sched_time < t->expires.sched) {
			sched_expires = t->expires.sched;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	/*
	 * Check for the special case process timers.
	 */
	if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
		if (cputime_ge(ptime, sig->it_prof_expires)) {
			/* ITIMER_PROF fires and reloads.  */
			sig->it_prof_expires = sig->it_prof_incr;
			if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
				sig->it_prof_expires = cputime_add(
					sig->it_prof_expires, ptime);
			}
			__group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
		}
		if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
		    (cputime_eq(prof_expires, cputime_zero) ||
		     cputime_lt(sig->it_prof_expires, prof_expires))) {
			prof_expires = sig->it_prof_expires;
		}
	}
	if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
		if (cputime_ge(utime, sig->it_virt_expires)) {
			/* ITIMER_VIRTUAL fires and reloads.  */
			sig->it_virt_expires = sig->it_virt_incr;
			if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
				sig->it_virt_expires = cputime_add(
					sig->it_virt_expires, utime);
			}
			__group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
		}
		if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
		    (cputime_eq(virt_expires, cputime_zero) ||
		     cputime_lt(sig->it_virt_expires, virt_expires))) {
			virt_expires = sig->it_virt_expires;
		}
	}
	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
		unsigned long psecs = cputime_to_secs(ptime);
		cputime_t x;
		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (sig->rlim[RLIMIT_CPU].rlim_cur
			    < sig->rlim[RLIMIT_CPU].rlim_max) {
				sig->rlim[RLIMIT_CPU].rlim_cur++;
			}
		}
		x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
		if (cputime_eq(prof_expires, cputime_zero) ||
		    cputime_lt(x, prof_expires)) {
			prof_expires = x;
		}
	}

	if (!cputime_eq(prof_expires, cputime_zero) ||
	    !cputime_eq(virt_expires, cputime_zero) ||
	    sched_expires != 0) {
		/*
		 * Rebalance the threads' expiry times for the remaining
		 * process CPU timers.
		 */
		cputime_t prof_left, virt_left, ticks;
		unsigned long long sched_left, sched;
		const unsigned int nthreads = atomic_read(&sig->live);

		if (!nthreads)
			return;

		prof_left = cputime_sub(prof_expires, utime);
		prof_left = cputime_sub(prof_left, stime);
		prof_left = cputime_div(prof_left, nthreads);
		virt_left = cputime_sub(virt_expires, utime);
		virt_left = cputime_div(virt_left, nthreads);
		if (sched_expires) {
			sched_left = sched_expires - sched_time;
			do_div(sched_left, nthreads);
		} else {
			sched_left = 0;
		}
		t = tsk;
		do {
			ticks = cputime_add(cputime_add(t->utime, t->stime),
					    prof_left);
			if (!cputime_eq(prof_expires, cputime_zero) &&
			    (cputime_eq(t->it_prof_expires, cputime_zero) ||
			     cputime_gt(t->it_prof_expires, ticks))) {
				t->it_prof_expires = ticks;
			}
			ticks = cputime_add(t->utime, virt_left);
			if (!cputime_eq(virt_expires, cputime_zero) &&
			    (cputime_eq(t->it_virt_expires, cputime_zero) ||
			     cputime_gt(t->it_virt_expires, ticks))) {
				t->it_virt_expires = ticks;
			}
			sched = t->sched_time + sched_left;
			if (sched_expires && (t->it_sched_expires == 0 ||
					      t->it_sched_expires > sched)) {
				t->it_sched_expires = sched;
			}
			do {
				t = next_thread(t);
			} while (unlikely(t->exit_state));
		} while (t != tsk);
	}
}
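/*
 * Note on the RLIMIT_CPU handling above: bumping rlim_cur by one second
 * while it is still below rlim_max implements the traditional behaviour
 * of one SIGXCPU per second past the soft limit, escalating to SIGKILL
 * once the hard limit is reached.
 */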
/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	union cpu_time_count now;

	if (unlikely(p == NULL))
		/*
		 * The task was cleaned up already, no future firings.
		 */
		return;

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state)) {
			clear_dead_task(timer, now);
			return;
		}
		read_lock(&tasklist_lock); /* arm_timer needs it.  */
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			put_task_struct(p);
			timer->it.cpu.task = p = NULL;
			timer->it.cpu.expires.sched = 0;
			read_unlock(&tasklist_lock);
			return;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			/*
			 * We've noticed that the thread is dead, but
			 * not yet reaped.  Take this opportunity to
			 * drop our task ref.
			 */
			clear_dead_task(timer, now);
			read_unlock(&tasklist_lock);
			return;
		}
		cpu_clock_sample_group(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		/* Leave the tasklist_lock locked for the call below.  */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	arm_timer(timer, now);
	read_unlock(&tasklist_lock);
}
/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;

	BUG_ON(!irqs_disabled());

#define UNEXPIRED(clock) \
	(cputime_eq(tsk->it_##clock##_expires, cputime_zero) || \
	 cputime_lt(clock##_ticks(tsk), tsk->it_##clock##_expires))

	if (UNEXPIRED(prof) && UNEXPIRED(virt) &&
	    (tsk->it_sched_expires == 0 ||
	     tsk->sched_time < tsk->it_sched_expires))
		return;

#undef UNEXPIRED

	/*
	 * Double-check with locks held.
	 */
	read_lock(&tasklist_lock);
	if (likely(tsk->signal != NULL)) {
		spin_lock(&tsk->sighand->siglock);

		/*
		 * Here we take all the timers that are firing off the
		 * tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
		 * lists and put them on the firing list.
		 */
		check_thread_timers(tsk, &firing);
		check_process_timers(tsk, &firing);

		/*
		 * We must release these locks before taking any timer's lock.
		 * There is a potential race with timer deletion here, as the
		 * siglock now protects our private firing list.  We have set
		 * the firing flag in each timer, so that a deletion attempt
		 * that gets the timer lock before we do will give it up and
		 * spin until we've taken care of that timer below.
		 */
		spin_unlock(&tsk->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us.  We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int firing;
		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun.  So don't generate an event.
		 */
		if (likely(firing >= 0)) {
			cpu_timer_fire(timer);
		}
		spin_unlock(&timer->it_lock);
	}
}
/*
 * Set one of the process-wide special case CPU timers.
 * The tasklist_lock and tsk->sighand->siglock must be held by the caller.
 * The oldval argument is null for the RLIMIT_CPU timer, where *newval is
 * absolute; non-null for ITIMER_*, where *newval is relative and we update
 * it to be absolute, *oldval is absolute and we update it to be relative.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   cputime_t *newval, cputime_t *oldval)
{
	union cpu_time_count now;
	struct list_head *head;

	BUG_ON(clock_idx == CPUCLOCK_SCHED);
	cpu_clock_sample_group_locked(clock_idx, tsk, &now);

	if (oldval) {
		if (!cputime_eq(*oldval, cputime_zero)) {
			if (cputime_le(*oldval, now.cpu)) {
				/* Just about to fire.  */
				*oldval = jiffies_to_cputime(1);
			} else {
				*oldval = cputime_sub(*oldval, now.cpu);
			}
		}

		if (cputime_eq(*newval, cputime_zero))
			return;
		*newval = cputime_add(*newval, now.cpu);

		/*
		 * If the RLIMIT_CPU timer will expire before the
		 * ITIMER_PROF timer, we have nothing else to do.
		 */
		if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
		    < cputime_to_secs(*newval))
			return;
	}

	/*
	 * Check whether there are any process timers already set to fire
	 * before this one.  If so, we don't have anything more to do.
	 */
	head = &tsk->signal->cpu_timers[clock_idx];
	if (list_empty(head) ||
	    cputime_ge(list_entry(head->next,
				  struct cpu_timer_list, entry)->expires.cpu,
		       *newval)) {
		/*
		 * Rejigger each thread's expiry time so that one will
		 * notice before we hit the process-cumulative expiry time.
		 */
		union cpu_time_count expires = { .sched = 0 };
		expires.cpu = *newval;
		process_timer_rebalance(tsk, clock_idx, expires, now);
	}
}
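/*
 * Illustrative call sites (a sketch of this era's callers, not code in
 * this file): setitimer(2) with ITIMER_PROF or ITIMER_VIRTUAL reaches
 * this with a non-null oldval, and setrlimit(2) updating RLIMIT_CPU
 * reaches it with a null oldval, conceptually:
 *
 *	cputime_t limit = secs_to_cputime(new_rlim.rlim_cur);
 *	set_process_cpu_timer(current, CPUCLOCK_PROF, &limit, NULL);
 */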
static long posix_cpu_clock_nanosleep_restart(struct restart_block *);

int posix_cpu_nsleep(clockid_t which_clock, int flags,
		     struct timespec *rqtp)
{
	struct restart_block *restart_block =
		&current_thread_info()->restart_block;
	struct k_itimer timer;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == current->pid))
		return -EINVAL;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;
	if (!error) {
		struct timespec __user *rmtp;
		static struct itimerspec zero_it;
		struct itimerspec it = { .it_value = *rqtp,
					 .it_interval = {} };

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, &it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (timer.it.cpu.expires.sched == 0) {
				/*
				 * Our timer fired and was reset.
				 */
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
		posix_cpu_timer_set(&timer, 0, &zero_it, &it);
		spin_unlock_irq(&timer.it_lock);

		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		/*
		 * Report back to the user the time still remaining.
		 */
		rmtp = (struct timespec __user *) restart_block->arg1;
		if (rmtp != NULL && !(flags & TIMER_ABSTIME) &&
		    copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->fn = posix_cpu_clock_nanosleep_restart;
		/* Caller already set restart_block->arg1 */
		restart_block->arg0 = which_clock;
		restart_block->arg2 = rqtp->tv_sec;
		restart_block->arg3 = rqtp->tv_nsec;
		error = -ERESTART_RESTARTBLOCK;
	}
	return error;
}
static long
posix_cpu_clock_nanosleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->arg0;
	struct timespec t = { .tv_sec = restart_block->arg2,
			      .tv_nsec = restart_block->arg3 };
	restart_block->fn = do_no_restart_syscall;
	return posix_cpu_nsleep(which_clock, TIMER_ABSTIME, &t);
}
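/*
 * Note: the restart deliberately passes TIMER_ABSTIME.  Before
 * returning -ERESTART_RESTARTBLOCK, posix_cpu_nsleep saved the timer's
 * absolute expiry back into rqtp, so a sleep that keeps being
 * interrupted by signals resumes toward the same deadline instead of
 * accumulating drift from repeated relative sleeps.
 */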
#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(clockid_t which_clock, struct timespec *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(clockid_t which_clock, struct timespec *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(clockid_t which_clock, int flags,
			      struct timespec *rqtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
}
static int thread_cpu_clock_getres(clockid_t which_clock, struct timespec *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(clockid_t which_clock, struct timespec *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int thread_cpu_nsleep(clockid_t which_clock, int flags,
			     struct timespec *rqtp)
{
	return -EINVAL;
}

static __init int init_posix_cpu_timers(void)
{
	struct k_clock process = {
		.clock_getres = process_cpu_clock_getres,
		.clock_get = process_cpu_clock_get,
		.clock_set = do_posix_clock_nosettime,
		.timer_create = process_cpu_timer_create,
		.nsleep = process_cpu_nsleep,
	};
	struct k_clock thread = {
		.clock_getres = thread_cpu_clock_getres,
		.clock_get = thread_cpu_clock_get,
		.clock_set = do_posix_clock_nosettime,
		.timer_create = thread_cpu_timer_create,
		.nsleep = thread_cpu_nsleep,
	};

	register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
	register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

	return 0;
}
__initcall(init_posix_cpu_timers);
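/*
 * Illustrative userspace view of the two clocks registered above
 * (not part of this file):
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);	// all threads
 *	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);	// calling thread only
 *
 * clock_settime() on either clock is rejected (do_posix_clock_nosettime),
 * and clock_nanosleep() is only supported on the process-wide clock;
 * thread_cpu_nsleep() above returns -EINVAL.
 */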