@@ -3324,10 +3324,8 @@ int tty_ioctl(struct inode * inode, struct file * file,
  * Nasty bug: do_SAK is being called in interrupt context. This can
  * deadlock. We punt it up to process context. AKPM - 16Mar2001
  */
-static void __do_SAK(struct work_struct *work)
+void __do_SAK(struct tty_struct *tty)
 {
-	struct tty_struct *tty =
-		container_of(work, struct tty_struct, SAK_work);
 #ifdef TTY_SOFT_SAK
 	tty_hangup(tty);
 #else
@@ -3394,6 +3392,13 @@ static void __do_SAK(struct work_struct *work)
 #endif
 }
 
+static void do_SAK_work(struct work_struct *work)
+{
+	struct tty_struct *tty =
+		container_of(work, struct tty_struct, SAK_work);
+	__do_SAK(tty);
+}
+
 /*
  * The tq handling here is a little racy - tty->SAK_work may already be queued.
  * Fortunately we don't need to worry, because if ->SAK_work is already queued,
@@ -3404,7 +3409,7 @@ void do_SAK(struct tty_struct *tty)
 {
 	if (!tty)
 		return;
-	PREPARE_WORK(&tty->SAK_work, __do_SAK);
+	PREPARE_WORK(&tty->SAK_work, do_SAK_work);
 	schedule_work(&tty->SAK_work);
 }
 
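Note on the pattern (not part of the patch): the split works because the workqueue hands do_SAK_work() only a struct work_struct *, and container_of() recovers the enclosing tty_struct from the embedded SAK_work member, after which the now-exported __do_SAK() can be called with a plain tty pointer. A minimal userspace sketch of that recovery follows; the demo_tty and work_item types, demo_handler(), and the local container_of definition are stand-ins for illustration only, not the real kernel definitions.

#include <stddef.h>
#include <stdio.h>

/* Userspace analogue of the kernel's container_of(): given a pointer to a
 * member, recover a pointer to the structure that embeds it. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {		/* stand-in for struct work_struct */
	int pending;
};

struct demo_tty {		/* stand-in for struct tty_struct */
	int index;
	struct work_item SAK_work;
};

/* The handler sees only the embedded member, just as do_SAK_work() above
 * sees only &tty->SAK_work, and walks back to the enclosing structure. */
static void demo_handler(struct work_item *work)
{
	struct demo_tty *tty = container_of(work, struct demo_tty, SAK_work);
	printf("SAK requested on tty %d\n", tty->index);
}

int main(void)
{
	struct demo_tty tty = { .index = 4 };
	demo_handler(&tty.SAK_work);	/* prints "SAK requested on tty 4" */
	return 0;
}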