@@ -413,7 +413,8 @@ static void flush_to_ldisc(struct work_struct *work)
 	spin_lock_irqsave(&tty->buf.lock, flags);
 
 	if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
-		struct tty_buffer *head;
+		struct tty_buffer *head, *tail = tty->buf.tail;
+		int seen_tail = 0;
 		while ((head = tty->buf.head) != NULL) {
 			int count;
 			char *char_buf;
@@ -423,6 +424,15 @@ static void flush_to_ldisc(struct work_struct *work)
 			if (!count) {
 				if (head->next == NULL)
 					break;
+				/*
+				  There's a possibility tty might get new buffer
+				  added during the unlock window below. We could
+				  end up spinning in here forever hogging the CPU
+				  completely. To avoid this let's have a rest each
+				  time we processed the tail buffer.
+				*/
+				if (tail == head)
+					seen_tail = 1;
 				tty->buf.head = head->next;
 				tty_buffer_free(tty, head);
 				continue;
@@ -432,7 +442,7 @@ static void flush_to_ldisc(struct work_struct *work)
 			   line discipline as we want to empty the queue */
 			if (test_bit(TTY_FLUSHPENDING, &tty->flags))
 				break;
-			if (!tty->receive_room) {
+			if (!tty->receive_room || seen_tail) {
 				schedule_delayed_work(&tty->buf.work, 1);
 				break;
 			}
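
The idea behind the patch is to snapshot the queue tail on entry and stop once that
buffer has been processed, so any buffers queued during the unlock window are picked
up by a rescheduled pass rather than by an unbounded loop. The following is a minimal
user-space sketch of that pattern, not the kernel code: the names (struct buf, push,
drain_once, consume, reschedule) are invented for illustration, and locking plus the
real tty buffer handling are omitted.

#include <stdio.h>
#include <stdlib.h>

/* A queued data buffer, standing in for struct tty_buffer. */
struct buf {
	int data;
	struct buf *next;
};

struct queue {
	struct buf *head;
	struct buf *tail;
};

/* Append a buffer at the tail, as the producer side would. */
static void push(struct queue *q, int data)
{
	struct buf *b = malloc(sizeof(*b));

	b->data = data;
	b->next = NULL;
	if (q->tail)
		q->tail->next = b;
	else
		q->head = b;
	q->tail = b;
}

/* Stand-in for handing data to the consumer (the ldisc in the patch). */
static void consume(struct buf *b)
{
	printf("consumed %d\n", b->data);
}

/* Stand-in for schedule_delayed_work(): just note that we yielded. */
static void reschedule(void)
{
	printf("reached the old tail, rescheduling instead of spinning\n");
}

/*
 * Drain the queue, but only up to the tail that existed on entry.
 * Anything appended after that snapshot waits for the next pass,
 * so the drain loop cannot monopolise the CPU.
 */
static void drain_once(struct queue *q)
{
	struct buf *tail = q->tail;	/* snapshot, as the patch does */
	int seen_tail = 0;

	while (q->head && !seen_tail) {
		struct buf *b = q->head;

		if (b == tail)		/* this is the snapshot tail */
			seen_tail = 1;
		q->head = b->next;
		if (!q->head)
			q->tail = NULL;
		consume(b);
		free(b);
	}
	if (seen_tail)
		reschedule();
}

int main(void)
{
	struct queue q = { NULL, NULL };

	push(&q, 1);
	push(&q, 2);
	push(&q, 3);
	drain_once(&q);		/* consumes 1..3, then yields */
	return 0;
}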