tty_buffer.c

/*
 * Tty buffer allocation management
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/module.h>

/**
 * tty_buffer_free_all - free buffers used by a tty
 * @tty: tty to free from
 *
 * Remove all the buffers pending on a tty whether queued with data
 * or in the free ring. Must be called when the tty is no longer in use
 *
 * Locking: none
 */
void tty_buffer_free_all(struct tty_struct *tty)
{
	struct tty_buffer *thead;

	while ((thead = tty->buf.head) != NULL) {
		tty->buf.head = thead->next;
		kfree(thead);
	}
	while ((thead = tty->buf.free) != NULL) {
		tty->buf.free = thead->next;
		kfree(thead);
	}
	tty->buf.tail = NULL;
	tty->buf.memory_used = 0;
}

/**
 * tty_buffer_alloc - allocate a tty buffer
 * @tty: tty device
 * @size: desired size (characters)
 *
 * Allocate a new tty buffer to hold the desired number of characters.
 * Return NULL if out of memory or the allocation would exceed the
 * per device queue
 *
 * Locking: Caller must hold tty->buf.lock
 */
static struct tty_buffer *tty_buffer_alloc(struct tty_struct *tty, size_t size)
{
	struct tty_buffer *p;

	if (tty->buf.memory_used + size > 65536)
		return NULL;
	p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
	if (p == NULL)
		return NULL;
	p->used = 0;
	p->size = size;
	p->next = NULL;
	p->commit = 0;
	p->read = 0;
	p->char_buf_ptr = (char *)(p->data);
	p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size;
	tty->buf.memory_used += size;
	return p;
}

/**
 * tty_buffer_free - free a tty buffer
 * @tty: tty owning the buffer
 * @b: the buffer to free
 *
 * Free a tty buffer, or add it to the free list according to our
 * internal strategy
 *
 * Locking: Caller must hold tty->buf.lock
 */
static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b)
{
	/* Dumb strategy for now - should keep some stats */
	tty->buf.memory_used -= b->size;
	WARN_ON(tty->buf.memory_used < 0);

	if (b->size >= 512)
		kfree(b);
	else {
		b->next = tty->buf.free;
		tty->buf.free = b;
	}
}

/**
 * __tty_buffer_flush - flush full tty buffers
 * @tty: tty to flush
 *
 * flush all the buffers containing receive data. Caller must
 * hold the buffer lock and must have ensured no parallel flush to
 * ldisc is running.
 *
 * Locking: Caller must hold tty->buf.lock
 */
static void __tty_buffer_flush(struct tty_struct *tty)
{
	struct tty_buffer *thead;

	while ((thead = tty->buf.head) != NULL) {
		tty->buf.head = thead->next;
		tty_buffer_free(tty, thead);
	}
	tty->buf.tail = NULL;
}

/**
 * tty_buffer_flush - flush full tty buffers
 * @tty: tty to flush
 *
 * flush all the buffers containing receive data. If the buffer is
 * being processed by flush_to_ldisc then we defer the processing
 * to that function
 *
 * Locking: none
 */
void tty_buffer_flush(struct tty_struct *tty)
{
	unsigned long flags;

	spin_lock_irqsave(&tty->buf.lock, flags);

	/* If the data is being pushed to the tty layer then we can't
	   process it here. Instead set a flag and the flush_to_ldisc
	   path will process the flush request before it exits */
	if (test_bit(TTY_FLUSHING, &tty->flags)) {
		set_bit(TTY_FLUSHPENDING, &tty->flags);
		spin_unlock_irqrestore(&tty->buf.lock, flags);
		wait_event(tty->read_wait,
				test_bit(TTY_FLUSHPENDING, &tty->flags) == 0);
		return;
	} else
		__tty_buffer_flush(tty);
	spin_unlock_irqrestore(&tty->buf.lock, flags);
}

/**
 * tty_buffer_find - find a free tty buffer
 * @tty: tty owning the buffer
 * @size: characters wanted
 *
 * Locate an existing suitable tty buffer or if we are lacking one then
 * allocate a new one. We round our buffers off in 256 character chunks
 * to get better allocation behaviour.
 *
 * Locking: Caller must hold tty->buf.lock
 */
static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size)
{
	struct tty_buffer **tbh = &tty->buf.free;

	while ((*tbh) != NULL) {
		struct tty_buffer *t = *tbh;
		if (t->size >= size) {
			*tbh = t->next;
			t->next = NULL;
			t->used = 0;
			t->commit = 0;
			t->read = 0;
			tty->buf.memory_used += t->size;
			return t;
		}
		tbh = &((*tbh)->next);
	}
	/* Round the buffer size out */
	size = (size + 0xFF) & ~0xFF;
	return tty_buffer_alloc(tty, size);
	/* Should possibly check if this fails for the largest buffer we
	   have queued and recycle that ? */
}

/**
 * __tty_buffer_request_room - grow tty buffer if needed
 * @tty: tty structure
 * @size: size desired
 *
 * Make at least size bytes of linear space available for the tty
 * buffer. If we fail return the size we managed to find.
 *
 * Locking: Caller must hold tty->buf.lock
 */
static int __tty_buffer_request_room(struct tty_struct *tty, size_t size)
{
	struct tty_buffer *b, *n;
	int left;

	/* OPTIMISATION: We could keep a per tty "zero" sized buffer to
	   remove this conditional if its worth it. This would be invisible
	   to the callers */
	if ((b = tty->buf.tail) != NULL)
		left = b->size - b->used;
	else
		left = 0;

	if (left < size) {
		/* This is the slow path - looking for new buffers to use */
		if ((n = tty_buffer_find(tty, size)) != NULL) {
			if (b != NULL) {
				b->next = n;
				b->commit = b->used;
			} else
				tty->buf.head = n;
			tty->buf.tail = n;
		} else
			size = left;
	}
	return size;
}

/**
 * tty_buffer_request_room - grow tty buffer if needed
 * @tty: tty structure
 * @size: size desired
 *
 * Make at least size bytes of linear space available for the tty
 * buffer. If we fail return the size we managed to find.
 *
 * Locking: Takes tty->buf.lock
 */
int tty_buffer_request_room(struct tty_struct *tty, size_t size)
{
	unsigned long flags;
	int length;

	spin_lock_irqsave(&tty->buf.lock, flags);
	length = __tty_buffer_request_room(tty, size);
	spin_unlock_irqrestore(&tty->buf.lock, flags);
	return length;
}
EXPORT_SYMBOL_GPL(tty_buffer_request_room);
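
/*
 * Illustrative use of tty_buffer_request_room(): a minimal sketch of how a
 * serial driver's receive path might ask for space and then add bytes one
 * at a time. The "tty" and "port" pointers, "count" and my_uart_read_char()
 * are hypothetical driver-side names; tty_insert_flip_char() is the helper
 * declared in <linux/tty_flip.h>.
 *
 *	int i, room = tty_buffer_request_room(tty, count);
 *
 *	for (i = 0; i < room; i++)
 *		tty_insert_flip_char(tty, my_uart_read_char(port), TTY_NORMAL);
 *	tty_flip_buffer_push(tty);
 *
 * If less than "count" bytes of room could be found, the driver simply
 * queues fewer characters on this pass.
 */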

/**
 * tty_insert_flip_string_fixed_flag - Add characters to the tty buffer
 * @tty: tty structure
 * @chars: characters
 * @flag: flag value for each character
 * @size: size
 *
 * Queue a series of bytes to the tty buffering. All the characters
 * passed are marked with the supplied flag. Returns the number added.
 *
 * Locking: Called functions may take tty->buf.lock
 */
int tty_insert_flip_string_fixed_flag(struct tty_struct *tty,
		const unsigned char *chars, char flag, size_t size)
{
	int copied = 0;
	do {
		int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
		int space;
		unsigned long flags;
		struct tty_buffer *tb;

		spin_lock_irqsave(&tty->buf.lock, flags);
		space = __tty_buffer_request_room(tty, goal);
		tb = tty->buf.tail;
		/* If there is no space then tb may be NULL */
		if (unlikely(space == 0)) {
			spin_unlock_irqrestore(&tty->buf.lock, flags);
			break;
		}
		memcpy(tb->char_buf_ptr + tb->used, chars, space);
		memset(tb->flag_buf_ptr + tb->used, flag, space);
		tb->used += space;
		spin_unlock_irqrestore(&tty->buf.lock, flags);
		copied += space;
		chars += space;
		/* There is a small chance that we need to split the data over
		   several buffers. If this is the case we must loop */
	} while (unlikely(size > copied));
	return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);
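
/*
 * Illustrative use: queueing a whole block of received data where every
 * byte shares the same status. rx_buf and rx_len are hypothetical driver
 * variables; in this tree tty_insert_flip_string() in <linux/tty_flip.h>
 * is expected to be a thin wrapper calling this function with
 * flag == TTY_NORMAL.
 *
 *	int n = tty_insert_flip_string_fixed_flag(tty, rx_buf, TTY_NORMAL,
 *						  rx_len);
 *	if (n)
 *		tty_flip_buffer_push(tty);
 *
 * A return value smaller than rx_len means the per-tty memory limit was
 * hit and the remaining bytes were not queued.
 */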

/**
 * tty_insert_flip_string_flags - Add characters to the tty buffer
 * @tty: tty structure
 * @chars: characters
 * @flags: flag bytes
 * @size: size
 *
 * Queue a series of bytes to the tty buffering. For each character
 * the flags array indicates the status of the character. Returns the
 * number added.
 *
 * Locking: Called functions may take tty->buf.lock
 */
int tty_insert_flip_string_flags(struct tty_struct *tty,
		const unsigned char *chars, const char *flags, size_t size)
{
	int copied = 0;
	do {
		int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
		int space;
		unsigned long __flags;
		struct tty_buffer *tb;

		spin_lock_irqsave(&tty->buf.lock, __flags);
		space = __tty_buffer_request_room(tty, goal);
		tb = tty->buf.tail;
		/* If there is no space then tb may be NULL */
		if (unlikely(space == 0)) {
			spin_unlock_irqrestore(&tty->buf.lock, __flags);
			break;
		}
		memcpy(tb->char_buf_ptr + tb->used, chars, space);
		memcpy(tb->flag_buf_ptr + tb->used, flags, space);
		tb->used += space;
		spin_unlock_irqrestore(&tty->buf.lock, __flags);
		copied += space;
		chars += space;
		flags += space;
		/* There is a small chance that we need to split the data over
		   several buffers. If this is the case we must loop */
	} while (unlikely(size > copied));
	return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_flags);
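
/*
 * Illustrative use: queueing bytes with per-character status, for example
 * when the hardware reported a framing error on one byte. ch[] and fl[]
 * are hypothetical local arrays built by the driver.
 *
 *	unsigned char ch[3] = { 'a', 'b', 'c' };
 *	char fl[3] = { TTY_NORMAL, TTY_FRAME, TTY_NORMAL };
 *
 *	tty_insert_flip_string_flags(tty, ch, fl, 3);
 *	tty_flip_buffer_push(tty);
 */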

/**
 * tty_schedule_flip - push characters to ldisc
 * @tty: tty to push from
 *
 * Takes any pending buffers and transfers their ownership to the
 * ldisc side of the queue. It then schedules those characters for
 * processing by the line discipline.
 * Note that this function can only be used when the low_latency flag
 * is unset. Otherwise the workqueue won't be flushed.
 *
 * Locking: Takes tty->buf.lock
 */
void tty_schedule_flip(struct tty_struct *tty)
{
	unsigned long flags;

	spin_lock_irqsave(&tty->buf.lock, flags);
	if (tty->buf.tail != NULL)
		tty->buf.tail->commit = tty->buf.tail->used;
	spin_unlock_irqrestore(&tty->buf.lock, flags);
	schedule_work(&tty->buf.work);
}
EXPORT_SYMBOL(tty_schedule_flip);

/**
 * tty_prepare_flip_string - make room for characters
 * @tty: tty
 * @chars: return pointer for character write area
 * @size: desired size
 *
 * Prepare a block of space in the buffer for data. Returns the length
 * available and buffer pointer to the space which is now allocated and
 * accounted for as ready for normal characters. This is used for drivers
 * that need their own block copy routines into the buffer. There is no
 * guarantee the buffer is a DMA target!
 *
 * Locking: May call functions taking tty->buf.lock
 */
int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars,
		size_t size)
{
	int space;
	unsigned long flags;
	struct tty_buffer *tb;

	spin_lock_irqsave(&tty->buf.lock, flags);
	space = __tty_buffer_request_room(tty, size);
	tb = tty->buf.tail;
	if (likely(space)) {
		*chars = tb->char_buf_ptr + tb->used;
		memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space);
		tb->used += space;
	}
	spin_unlock_irqrestore(&tty->buf.lock, flags);
	return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
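
/*
 * Illustrative use: a driver that copies straight into the flip buffer
 * instead of going through a temporary array. hw_copy_rx() is a made-up
 * name standing in for the driver's own block-copy routine, and "want" is
 * a hypothetical byte count; note the kerneldoc above warns that the
 * buffer is not guaranteed to be a DMA target.
 *
 *	unsigned char *p;
 *	int len = tty_prepare_flip_string(tty, &p, want);
 *
 *	if (len > 0) {
 *		hw_copy_rx(port, p, len);
 *		tty_flip_buffer_push(tty);
 *	}
 */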

/**
 * tty_prepare_flip_string_flags - make room for characters
 * @tty: tty
 * @chars: return pointer for character write area
 * @flags: return pointer for status flag write area
 * @size: desired size
 *
 * Prepare a block of space in the buffer for data. Returns the length
 * available and buffer pointer to the space which is now allocated and
 * accounted for as ready for characters. This is used for drivers
 * that need their own block copy routines into the buffer. There is no
 * guarantee the buffer is a DMA target!
 *
 * Locking: May call functions taking tty->buf.lock
 */
int tty_prepare_flip_string_flags(struct tty_struct *tty,
		unsigned char **chars, char **flags, size_t size)
{
	int space;
	unsigned long __flags;
	struct tty_buffer *tb;

	spin_lock_irqsave(&tty->buf.lock, __flags);
	space = __tty_buffer_request_room(tty, size);
	tb = tty->buf.tail;
	if (likely(space)) {
		*chars = tb->char_buf_ptr + tb->used;
		*flags = tb->flag_buf_ptr + tb->used;
		tb->used += space;
	}
	spin_unlock_irqrestore(&tty->buf.lock, __flags);
	return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);

/**
 * flush_to_ldisc
 * @work: tty structure passed from work queue.
 *
 * This routine is called out of the software interrupt to flush data
 * from the buffer chain to the line discipline.
 *
 * Locking: holds tty->buf.lock to guard buffer list. Drops the lock
 * while invoking the line discipline receive_buf method. The
 * receive_buf method is single threaded for each tty instance.
 */
static void flush_to_ldisc(struct work_struct *work)
{
	struct tty_struct *tty =
		container_of(work, struct tty_struct, buf.work);
	unsigned long flags;
	struct tty_ldisc *disc;

	disc = tty_ldisc_ref(tty);
	if (disc == NULL)	/* !TTY_LDISC */
		return;

	spin_lock_irqsave(&tty->buf.lock, flags);

	if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
		struct tty_buffer *head;
		while ((head = tty->buf.head) != NULL) {
			int count;
			char *char_buf;
			unsigned char *flag_buf;

			count = head->commit - head->read;
			if (!count) {
				if (head->next == NULL)
					break;
				tty->buf.head = head->next;
				tty_buffer_free(tty, head);
				continue;
			}
			/* Ldisc or user is trying to flush the buffers
			   we are feeding to the ldisc, stop feeding the
			   line discipline as we want to empty the queue */
			if (test_bit(TTY_FLUSHPENDING, &tty->flags))
				break;
			if (!tty->receive_room)
				break;
			if (count > tty->receive_room)
				count = tty->receive_room;
			char_buf = head->char_buf_ptr + head->read;
			flag_buf = head->flag_buf_ptr + head->read;
			head->read += count;
			spin_unlock_irqrestore(&tty->buf.lock, flags);
			disc->ops->receive_buf(tty, char_buf,
						flag_buf, count);
			spin_lock_irqsave(&tty->buf.lock, flags);
		}
		clear_bit(TTY_FLUSHING, &tty->flags);
	}

	/* We may have a deferred request to flush the input buffer,
	   if so pull the chain under the lock and empty the queue */
	if (test_bit(TTY_FLUSHPENDING, &tty->flags)) {
		__tty_buffer_flush(tty);
		clear_bit(TTY_FLUSHPENDING, &tty->flags);
		wake_up(&tty->read_wait);
	}
	spin_unlock_irqrestore(&tty->buf.lock, flags);

	tty_ldisc_deref(disc);
}

/**
 * tty_flush_to_ldisc
 * @tty: tty to push
 *
 * Push the terminal flip buffers to the line discipline.
 *
 * Must not be called from IRQ context.
 */
void tty_flush_to_ldisc(struct tty_struct *tty)
{
	if (!tty->low_latency)
		flush_work(&tty->buf.work);
}

/**
 * tty_flip_buffer_push - push terminal flip buffers
 * @tty: tty to push
 *
 * Queue a push of the terminal flip buffers to the line discipline. This
 * function must not be called from IRQ context if tty->low_latency is set.
 *
 * In the event of the queue being busy for flipping the work will be
 * held off and retried later.
 *
 * Locking: tty buffer lock. Driver locks in low latency mode.
 */
void tty_flip_buffer_push(struct tty_struct *tty)
{
	unsigned long flags;

	spin_lock_irqsave(&tty->buf.lock, flags);
	if (tty->buf.tail != NULL)
		tty->buf.tail->commit = tty->buf.tail->used;
	spin_unlock_irqrestore(&tty->buf.lock, flags);

	if (tty->low_latency)
		flush_to_ldisc(&tty->buf.work);
	else
		schedule_work(&tty->buf.work);
}
EXPORT_SYMBOL(tty_flip_buffer_push);
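
/*
 * The two call patterns, in sketch form (rx_buf and rx_len are hypothetical
 * driver variables). With tty->low_latency clear - the default - the push
 * may be issued from interrupt context and the actual copy to the line
 * discipline is deferred to the workqueue:
 *
 *	tty_insert_flip_string(tty, rx_buf, rx_len);
 *	tty_flip_buffer_push(tty);
 *
 * With tty->low_latency set, the same tty_flip_buffer_push() call runs
 * flush_to_ldisc() directly in the caller's context, which is why it must
 * not then be made from IRQ context.
 */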

/**
 * tty_buffer_init - prepare a tty buffer structure
 * @tty: tty to initialise
 *
 * Set up the initial state of the buffer management for a tty device.
 * Must be called before the other tty buffer functions are used.
 *
 * Locking: none
 */
void tty_buffer_init(struct tty_struct *tty)
{
	spin_lock_init(&tty->buf.lock);
	tty->buf.head = NULL;
	tty->buf.tail = NULL;
	tty->buf.free = NULL;
	tty->buf.memory_used = 0;
	INIT_WORK(&tty->buf.work, flush_to_ldisc);
}