@@ -1272,10 +1272,6 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 {
 	struct signal_struct *sig;
 
-	/* tsk == current, ensure it is safe to use ->signal/sighand */
-	if (unlikely(tsk->exit_state))
-		return 0;
-
 	if (!task_cputime_zero(&tsk->cputime_expires)) {
 		struct task_cputime task_sample = {
 			.utime = tsk->utime,
@@ -1308,6 +1304,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 {
 	LIST_HEAD(firing);
 	struct k_itimer *timer, *next;
+	unsigned long flags;
 
 	BUG_ON(!irqs_disabled());
 
@@ -1318,7 +1315,8 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 	if (!fastpath_timer_check(tsk))
 		return;
 
-	spin_lock(&tsk->sighand->siglock);
+	if (!lock_task_sighand(tsk, &flags))
+		return;
 	/*
 	 * Here we take off tsk->signal->cpu_timers[N] and
 	 * tsk->cpu_timers[N] all the timers that are firing, and
@@ -1340,7 +1338,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 	 * that gets the timer lock before we do will give it up and
 	 * spin until we've taken care of that timer below.
 	 */
-	spin_unlock(&tsk->sighand->siglock);
+	unlock_task_sighand(tsk, &flags);
 
 	/*
 	 * Now that all the timers on our list have the firing flag,
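
For context, this is roughly the shape of run_posix_cpu_timers() once the hunks above are applied. It is a condensed sketch assembled from the diff, not the full function: the code that moves expired timers onto the local firing list and later fires them is elided, and the comments are explanatory additions rather than the kernel's own.

void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	unsigned long flags;

	BUG_ON(!irqs_disabled());

	/* Quick, lockless check: bail out unless some CPU timer may have expired. */
	if (!fastpath_timer_check(tsk))
		return;

	/*
	 * lock_task_sighand() returns NULL if ->sighand has already been
	 * detached by an exiting task, which is why the explicit
	 * ->exit_state test removed from fastpath_timer_check() is no
	 * longer needed.
	 */
	if (!lock_task_sighand(tsk, &flags))
		return;

	/* ... collect expired timers onto the local "firing" list ... */

	unlock_task_sighand(tsk, &flags);

	/* ... fire each collected timer, still with interrupts disabled ... */
}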