|
@@ -61,6 +61,133 @@ extern struct tty_driver *console_driver;
|
|
|
|
|
|
static void complete_change_console(struct vc_data *vc);
|
|
|
|
|
|
+/*
|
|
|
+ * User space VT_EVENT handlers
|
|
|
+ */
|
|
|
+
|
|
|
+struct vt_event_wait {
|
|
|
+ struct list_head list;
|
|
|
+ struct vt_event event;
|
|
|
+ int done;
|
|
|
+};
|
|
|
+
|
|
|
+static LIST_HEAD(vt_events);
|
|
|
+static DEFINE_SPINLOCK(vt_event_lock);
|
|
|
+static DECLARE_WAIT_QUEUE_HEAD(vt_event_waitqueue);
|
|
|
+
|
|
|
+/**
|
|
|
+ * vt_event_post
|
|
|
+ * @event: the event that occurred
|
|
|
+ * @old: old console
|
|
|
+ * @new: new console
|
|
|
+ *
|
|
|
+ * Post a VT event to interested VT handlers
|
|
|
+ */
|
|
|
+
|
|
|
+void vt_event_post(unsigned int event, unsigned int old, unsigned int new)
|
|
|
+{
|
|
|
+ struct list_head *pos, *head;
|
|
|
+ unsigned long flags;
|
|
|
+ int wake = 0;
|
|
|
+
|
|
|
+ spin_lock_irqsave(&vt_event_lock, flags);
|
|
|
+ head = &vt_events;
|
|
|
+
|
|
|
+ list_for_each(pos, head) {
|
|
|
+ struct vt_event_wait *ve = list_entry(pos,
|
|
|
+ struct vt_event_wait, list);
|
|
|
+ if (!(ve->event.event & event))
|
|
|
+ continue;
|
|
|
+ ve->event.event = event;
|
|
|
+ /* kernel view is consoles 0..n-1, user space view is
|
|
|
+ console 1..n with 0 meaning current, so we must bias */
|
|
|
+ ve->event.old = old + 1;
|
|
|
+ ve->event.new = new + 1;
|
|
|
+ wake = 1;
|
|
|
+ ve->done = 1;
|
|
|
+ }
|
|
|
+ spin_unlock_irqrestore(&vt_event_lock, flags);
|
|
|
+ if (wake)
|
|
|
+ wake_up_interruptible(&vt_event_waitqueue);
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * vt_event_wait - wait for an event
|
|
|
+ * @vw: our event
|
|
|
+ *
|
|
|
+ * Waits for an event to occur which completes our vt_event_wait
|
|
|
+ * structure. On return the structure has vw->done set to 1 for success
|
|
|
+ * or 0 if some event such as a signal ended the wait.
|
|
|
+ */
|
|
|
+
|
|
|
+static void vt_event_wait(struct vt_event_wait *vw)
|
|
|
+{
|
|
|
+ unsigned long flags;
|
|
|
+ /* Prepare the event */
|
|
|
+ INIT_LIST_HEAD(&vw->list);
|
|
|
+ vw->done = 0;
|
|
|
+ /* Queue our event */
|
|
|
+ spin_lock_irqsave(&vt_event_lock, flags);
|
|
|
+ list_add(&vw->list, &vt_events);
|
|
|
+ spin_unlock_irqrestore(&vt_event_lock, flags);
|
|
|
+ /* Wait for it to pass */
|
|
|
+ wait_event_interruptible(vt_event_waitqueue, vw->done);
|
|
|
+ /* Dequeue it */
|
|
|
+ spin_lock_irqsave(&vt_event_lock, flags);
|
|
|
+ list_del(&vw->list);
|
|
|
+ spin_unlock_irqrestore(&vt_event_lock, flags);
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * vt_event_wait_ioctl - event ioctl handler
|
|
|
+ * @arg: argument to ioctl
|
|
|
+ *
|
|
|
+ * Implement the VT_WAITEVENT ioctl using the VT event interface
|
|
|
+ */
|
|
|
+
|
|
|
+static int vt_event_wait_ioctl(struct vt_event __user *event)
|
|
|
+{
|
|
|
+ struct vt_event_wait vw;
|
|
|
+
|
|
|
+ if (copy_from_user(&vw.event, event, sizeof(struct vt_event)))
|
|
|
+ return -EFAULT;
|
|
|
+ /* Highest supported event for now */
|
|
|
+ if (vw.event.event & ~VT_MAX_EVENT)
|
|
|
+ return -EINVAL;
|
|
|
+
|
|
|
+ vt_event_wait(&vw);
|
|
|
+ /* If it occurred report it */
|
|
|
+ if (vw.done) {
|
|
|
+ if (copy_to_user(event, &vw.event, sizeof(struct vt_event)))
|
|
|
+ return -EFAULT;
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+ return -EINTR;
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * vt_waitactive - active console wait
|
|
|
+ * @n: console number to wait for, in user space numbering where
|
|
|
+ *     consoles are numbered 1..n
|
|
|
+ *
|
|
|
+ * Helper for event waits. Used to implement the legacy
|
|
|
+ * event waiting ioctls in terms of events
|
|
|
+ */
|
|
|
+
|
|
|
+int vt_waitactive(int n)
|
|
|
+{
|
|
|
+ struct vt_event_wait vw;
|
|
|
+ do {
|
|
|
+ if (n == fg_console + 1)
|
|
|
+ break;
|
|
|
+ vw.event.event = VT_EVENT_SWITCH;
|
|
|
+ vt_event_wait(&vw);
|
|
|
+ if (vw.done == 0)
|
|
|
+ return -EINTR;
|
|
|
+ } while (vw.event.new != n);
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
/*
|
|
|
* these are the valid i/o ports we're allowed to change. they map all the
|
|
|
* video ports
|
|
@@ -360,6 +487,8 @@ do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud, int perm, struct vc_
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
+
|
|
|
+
|
|
|
/*
|
|
|
* We handle the console-specific ioctl's here. We allow the
|
|
|
* capability to modify any console, not just the fg_console.
|
|
@@ -851,7 +980,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
|
|
|
if (arg == 0 || arg > MAX_NR_CONSOLES)
|
|
|
ret = -ENXIO;
|
|
|
else
|
|
|
- ret = vt_waitactive(arg - 1);
|
|
|
+ ret = vt_waitactive(arg);
|
|
|
break;
|
|
|
|
|
|
/*
|
|
@@ -1159,6 +1288,9 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
|
|
|
ret = put_user(vc->vc_hi_font_mask,
|
|
|
(unsigned short __user *)arg);
|
|
|
break;
|
|
|
+ case VT_WAITEVENT:
|
|
|
+ ret = vt_event_wait_ioctl((struct vt_event __user *)arg);
|
|
|
+ break;
|
|
|
default:
|
|
|
ret = -ENOIOCTLCMD;
|
|
|
}
|
|
@@ -1170,54 +1302,6 @@ eperm:
|
|
|
goto out;
|
|
|
}
|
|
|
|
|
|
-/*
|
|
|
- * Sometimes we want to wait until a particular VT has been activated. We
|
|
|
- * do it in a very simple manner. Everybody waits on a single queue and
|
|
|
- * get woken up at once. Those that are satisfied go on with their business,
|
|
|
- * while those not ready go back to sleep. Seems overkill to add a wait
|
|
|
- * to each vt just for this - usually this does nothing!
|
|
|
- */
|
|
|
-static DECLARE_WAIT_QUEUE_HEAD(vt_activate_queue);
|
|
|
-
|
|
|
-/*
|
|
|
- * Sleeps until a vt is activated, or the task is interrupted. Returns
|
|
|
- * 0 if activation, -EINTR if interrupted by a signal handler.
|
|
|
- */
|
|
|
-int vt_waitactive(int vt)
|
|
|
-{
|
|
|
- int retval;
|
|
|
- DECLARE_WAITQUEUE(wait, current);
|
|
|
-
|
|
|
- add_wait_queue(&vt_activate_queue, &wait);
|
|
|
- for (;;) {
|
|
|
- retval = 0;
|
|
|
-
|
|
|
- /*
|
|
|
- * Synchronize with redraw_screen(). By acquiring the console
|
|
|
- * semaphore we make sure that the console switch is completed
|
|
|
- * before we return. If we didn't wait for the semaphore, we
|
|
|
- * could return at a point where fg_console has already been
|
|
|
- * updated, but the console switch hasn't been completed.
|
|
|
- */
|
|
|
- acquire_console_sem();
|
|
|
- set_current_state(TASK_INTERRUPTIBLE);
|
|
|
- if (vt == fg_console) {
|
|
|
- release_console_sem();
|
|
|
- break;
|
|
|
- }
|
|
|
- release_console_sem();
|
|
|
- retval = -ERESTARTNOHAND;
|
|
|
- if (signal_pending(current))
|
|
|
- break;
|
|
|
- schedule();
|
|
|
- }
|
|
|
- remove_wait_queue(&vt_activate_queue, &wait);
|
|
|
- __set_current_state(TASK_RUNNING);
|
|
|
- return retval;
|
|
|
-}
|
|
|
-
|
|
|
-#define vt_wake_waitactive() wake_up(&vt_activate_queue)
|
|
|
-
|
|
|
void reset_vc(struct vc_data *vc)
|
|
|
{
|
|
|
vc->vc_mode = KD_TEXT;
|
|
@@ -1262,6 +1346,7 @@ void vc_SAK(struct work_struct *work)
|
|
|
static void complete_change_console(struct vc_data *vc)
|
|
|
{
|
|
|
unsigned char old_vc_mode;
|
|
|
+ int old = fg_console;
|
|
|
|
|
|
last_console = fg_console;
|
|
|
|
|
@@ -1325,7 +1410,7 @@ static void complete_change_console(struct vc_data *vc)
|
|
|
/*
|
|
|
* Wake anyone waiting for their VT to activate
|
|
|
*/
|
|
|
- vt_wake_waitactive();
|
|
|
+ vt_event_post(VT_EVENT_SWITCH, old, vc->vc_num);
|
|
|
return;
|
|
|
}
|
|
|
|