@@ -982,6 +982,7 @@ static void check_thread_timers(struct task_struct *tsk,
 	int maxfire;
 	struct list_head *timers = tsk->cpu_timers;
 	struct signal_struct *const sig = tsk->signal;
+	unsigned long soft;

 	maxfire = 20;
 	tsk->cputime_expires.prof_exp = cputime_zero;
@@ -1030,9 +1031,9 @@ static void check_thread_timers(struct task_struct *tsk,
 	/*
 	 * Check for the special case thread timers.
 	 */
-	if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
+	soft = sig->rlim[RLIMIT_RTTIME].rlim_cur;
+	if (soft != RLIM_INFINITY) {
 		unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
-		unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;

 		if (hard != RLIM_INFINITY &&
 		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
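A note on the arithmetic above: RLIMIT_RTTIME is denominated in microseconds while tsk->rt.timeout counts scheduler ticks, so the limit is converted with DIV_ROUND_UP(hard, USEC_PER_SEC/HZ), rounding up so a partially elapsed tick still counts against the limit. A minimal userspace sketch of the same conversion (the HZ value here is assumed for illustration; in the kernel it is a build-time configuration constant):

/* Userspace sketch of the tick conversion used above.
 * HZ = 1000 is assumed for illustration only. */
#include <stdio.h>

#define HZ		1000
#define USEC_PER_SEC	1000000L
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long hard_usec = 2500;	/* hypothetical RLIMIT_RTTIME hard limit */

	/* 2500 usec at 1000 usec per tick rounds up to 3 ticks. */
	printf("%lu usec -> %lu ticks\n",
	       hard_usec, DIV_ROUND_UP(hard_usec, USEC_PER_SEC / HZ));
	return 0;
}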
@@ -1043,14 +1044,13 @@ static void check_thread_timers(struct task_struct *tsk,
 			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
 			return;
 		}
-		if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
+		if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
 			/*
 			 * At the soft limit, send a SIGXCPU every second.
 			 */
-			if (sig->rlim[RLIMIT_RTTIME].rlim_cur
-			    < sig->rlim[RLIMIT_RTTIME].rlim_max) {
-				sig->rlim[RLIMIT_RTTIME].rlim_cur +=
-							USEC_PER_SEC;
+			if (soft < hard) {
+				soft += USEC_PER_SEC;
+				sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
 			}
 			printk(KERN_INFO
 				"RT Watchdog Timeout: %s[%d]\n",
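For context, the watchdog above fires for realtime tasks that run without blocking: past the soft limit it raises SIGXCPU roughly once per second (the soft limit is advanced by USEC_PER_SEC on each firing), and past the hard limit it sends SIGKILL. A minimal userspace sketch of provoking it, assuming the process is privileged enough to use SCHED_FIFO; the limit values and the busy loop are illustrative only:

/* Sketch: provoking the RT watchdog from userspace.
 * Assumes permission to set SCHED_FIFO; limits are illustrative. */
#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

static void on_sigxcpu(int sig)
{
	static const char msg[] = "SIGXCPU: past RLIMIT_RTTIME soft limit\n";
	(void)sig;
	write(STDERR_FILENO, msg, sizeof(msg) - 1);	/* async-signal-safe */
}

int main(void)
{
	struct sched_param sp = { .sched_priority = 1 };
	/* Soft limit 1 s, hard limit 3 s of unblocked RT CPU time (usecs). */
	struct rlimit rl = { .rlim_cur = 1000000, .rlim_max = 3000000 };

	signal(SIGXCPU, on_sigxcpu);
	if (setrlimit(RLIMIT_RTTIME, &rl) ||
	    sched_setscheduler(0, SCHED_FIFO, &sp)) {
		perror("setup");
		return 1;
	}
	for (;;)	/* never block: SIGXCPU each second past the soft   */
		;	/* limit, SIGKILL once the hard limit is exceeded   */
	return 0;
}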
@@ -1121,6 +1121,7 @@ static void check_process_timers(struct task_struct *tsk,
 	unsigned long long sum_sched_runtime, sched_expires;
 	struct list_head *timers = sig->cpu_timers;
 	struct task_cputime cputime;
+	unsigned long soft;

 	/*
 	 * Don't sample the current process CPU clocks if there are no timers.
@@ -1193,11 +1194,12 @@ static void check_process_timers(struct task_struct *tsk,
 			 SIGPROF);
 	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
 			 SIGVTALRM);
-
-	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
+	soft = sig->rlim[RLIMIT_CPU].rlim_cur;
+	if (soft != RLIM_INFINITY) {
 		unsigned long psecs = cputime_to_secs(ptime);
+		unsigned long hard = sig->rlim[RLIMIT_CPU].rlim_max;
 		cputime_t x;
-		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
+		if (psecs >= hard) {
 			/*
 			 * At the hard limit, we just die.
 			 * No need to calculate anything else now.
@@ -1205,17 +1207,17 @@ static void check_process_timers(struct task_struct *tsk,
 			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
 			return;
 		}
-		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
+		if (psecs >= soft) {
 			/*
 			 * At the soft limit, send a SIGXCPU every second.
 			 */
 			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
-			if (sig->rlim[RLIMIT_CPU].rlim_cur
-			    < sig->rlim[RLIMIT_CPU].rlim_max) {
-				sig->rlim[RLIMIT_CPU].rlim_cur++;
+			if (soft < hard) {
+				soft++;
+				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
 			}
 		}
-		x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
+		x = secs_to_cputime(soft);
 		if (cputime_eq(prof_expires, cputime_zero) ||
 		    cputime_lt(x, prof_expires)) {
 			prof_expires = x;
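The RLIMIT_CPU path mirrors the RT watchdog: at the soft limit the process gets SIGXCPU and the soft limit is advanced by one second, so the signal repeats for each further second of consumed CPU time until the hard limit delivers SIGKILL. A minimal userspace sketch, with illustrative limit values:

/* Sketch: the RLIMIT_CPU behavior above, observed from userspace.
 * Limit values are illustrative. */
#include <signal.h>
#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

static void on_sigxcpu(int sig)
{
	static const char msg[] = "SIGXCPU: past RLIMIT_CPU soft limit\n";
	(void)sig;
	write(STDERR_FILENO, msg, sizeof(msg) - 1);	/* async-signal-safe */
}

int main(void)
{
	/* Soft limit 2 s, hard limit 5 s of CPU time (seconds). */
	struct rlimit rl = { .rlim_cur = 2, .rlim_max = 5 };

	signal(SIGXCPU, on_sigxcpu);
	if (setrlimit(RLIMIT_CPU, &rl)) {
		perror("setrlimit");
		return 1;
	}
	for (;;)	/* burn CPU: one SIGXCPU per second from 2 s on,   */
		;	/* then SIGKILL once 5 s of CPU time is reached    */
	return 0;
}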