@@ -456,26 +456,50 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 {
 	int signr = __dequeue_signal(&tsk->pending, mask, info);
-	if (!signr)
+	if (!signr) {
 		signr = __dequeue_signal(&tsk->signal->shared_pending,
 					 mask, info);
+		/*
+		 * itimer signal ?
+		 *
+		 * itimers are process shared and we restart periodic
+		 * itimers in the signal delivery path to prevent DoS
+		 * attacks in the high resolution timer case. This is
+		 * compliant with the old way of self restarting
+		 * itimers, as the SIGALRM is a legacy signal and only
+		 * queued once. Changing the restart behaviour to
+		 * restart the timer in the signal dequeue path is
+		 * reducing the timer noise on heavy loaded !highres
+		 * systems too.
+		 */
+		if (unlikely(signr == SIGALRM)) {
+			struct hrtimer *tmr = &tsk->signal->real_timer;
+
+			if (!hrtimer_is_queued(tmr) &&
+			    tsk->signal->it_real_incr.tv64 != 0) {
+				hrtimer_forward(tmr, tmr->base->get_time(),
+						tsk->signal->it_real_incr);
+				hrtimer_restart(tmr);
+			}
+		}
+	}
 	recalc_sigpending_tsk(tsk);
-	if (signr && unlikely(sig_kernel_stop(signr))) {
-		/*
-		 * Set a marker that we have dequeued a stop signal. Our
-		 * caller might release the siglock and then the pending
-		 * stop signal it is about to process is no longer in the
-		 * pending bitmasks, but must still be cleared by a SIGCONT
-		 * (and overruled by a SIGKILL). So those cases clear this
-		 * shared flag after we've set it. Note that this flag may
-		 * remain set after the signal we return is ignored or
-		 * handled. That doesn't matter because its only purpose
-		 * is to alert stop-signal processing code when another
-		 * processor has come along and cleared the flag.
-		 */
-		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
-			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
-	}
+	if (signr && unlikely(sig_kernel_stop(signr))) {
+		/*
+		 * Set a marker that we have dequeued a stop signal. Our
+		 * caller might release the siglock and then the pending
+		 * stop signal it is about to process is no longer in the
+		 * pending bitmasks, but must still be cleared by a SIGCONT
+		 * (and overruled by a SIGKILL). So those cases clear this
+		 * shared flag after we've set it. Note that this flag may
+		 * remain set after the signal we return is ignored or
+		 * handled. That doesn't matter because its only purpose
+		 * is to alert stop-signal processing code when another
+		 * processor has come along and cleared the flag.
+		 */
+		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
+			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
+	}
 	if ( signr &&
 	     ((info->si_code & __SI_MASK) == __SI_TIMER) &&
 	     info->si_sys_private){
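For context on the path this hunk changes, here is a minimal user-space sketch (not part of the patch, and not kernel-internal) of the periodic ITIMER_REAL / SIGALRM usage whose hrtimer is now re-armed at signal-dequeue time rather than self-restarting at expiry. It only uses standard POSIX calls (sigaction, setitimer, pause); the timing values are arbitrary.

/*
 * User-space sketch: a periodic ITIMER_REAL itimer delivering SIGALRM.
 * With the hunk above, the kernel re-arms signal->real_timer when the
 * SIGALRM is dequeued, so a task that never dequeues the pending signal
 * cannot force back-to-back hrtimer expiries (the DoS case the patch
 * comment refers to). The non-zero it_interval below is the
 * it_real_incr != 0 case tested in the kernel code.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

static volatile sig_atomic_t ticks;

static void on_alarm(int sig)
{
	(void)sig;
	ticks++;		/* async-signal-safe: just count expiries */
}

int main(void)
{
	struct sigaction sa = { 0 };
	struct itimerval itv = { 0 };

	sa.sa_handler = on_alarm;
	sigaction(SIGALRM, &sa, NULL);

	/* first expiry after 100ms, then every 100ms */
	itv.it_value.tv_usec = 100 * 1000;
	itv.it_interval.tv_usec = 100 * 1000;
	setitimer(ITIMER_REAL, &itv, NULL);

	while (ticks < 5)
		pause();	/* each wakeup dequeues one SIGALRM */

	printf("got %d SIGALRM ticks\n", (int)ticks);
	return 0;
}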