tty_buffer.c

/*
 * Tty buffer allocation management
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/module.h>

/**
 * tty_buffer_free_all - free buffers used by a tty
 * @tty: tty to free from
 *
 * Remove all the buffers pending on a tty whether queued with data
 * or in the free ring. Must be called when the tty is no longer in use
 *
 * Locking: none
 */
void tty_buffer_free_all(struct tty_struct *tty)
{
        struct tty_buffer *thead;

        while ((thead = tty->buf.head) != NULL) {
                tty->buf.head = thead->next;
                kfree(thead);
        }
        while ((thead = tty->buf.free) != NULL) {
                tty->buf.free = thead->next;
                kfree(thead);
        }
        tty->buf.tail = NULL;
        tty->buf.memory_used = 0;
}

/**
 * tty_buffer_alloc - allocate a tty buffer
 * @tty: tty device
 * @size: desired size (characters)
 *
 * Allocate a new tty buffer to hold the desired number of characters.
 * Return NULL if out of memory or the allocation would exceed the
 * per device queue
 *
 * Locking: Caller must hold tty->buf.lock
 */
static struct tty_buffer *tty_buffer_alloc(struct tty_struct *tty, size_t size)
{
        struct tty_buffer *p;

        if (tty->buf.memory_used + size > 65536)
                return NULL;
        p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
        if (p == NULL)
                return NULL;
        p->used = 0;
        p->size = size;
        p->next = NULL;
        p->commit = 0;
        p->read = 0;
        p->char_buf_ptr = (char *)(p->data);
        p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size;
        tty->buf.memory_used += size;
        return p;
}
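
/*
 * Layout note: the single kmalloc() above returns the struct tty_buffer
 * header followed by 'size' character bytes and then 'size' flag bytes,
 * which is why 2 * size extra bytes are requested and why flag_buf_ptr
 * is set to char_buf_ptr + size.
 */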

/**
 * tty_buffer_free - free a tty buffer
 * @tty: tty owning the buffer
 * @b: the buffer to free
 *
 * Free a tty buffer, or add it to the free list according to our
 * internal strategy
 *
 * Locking: Caller must hold tty->buf.lock
 */
static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b)
{
        /* Dumb strategy for now - should keep some stats */
        tty->buf.memory_used -= b->size;
        WARN_ON(tty->buf.memory_used < 0);

        if (b->size >= 512)
                kfree(b);
        else {
                b->next = tty->buf.free;
                tty->buf.free = b;
        }
}

/**
 * __tty_buffer_flush - flush full tty buffers
 * @tty: tty to flush
 *
 * flush all the buffers containing receive data. Caller must
 * hold the buffer lock and must have ensured no parallel flush to
 * ldisc is running.
 *
 * Locking: Caller must hold tty->buf.lock
 */
static void __tty_buffer_flush(struct tty_struct *tty)
{
        struct tty_buffer *thead;

        while ((thead = tty->buf.head) != NULL) {
                tty->buf.head = thead->next;
                tty_buffer_free(tty, thead);
        }
        tty->buf.tail = NULL;
}

/**
 * tty_buffer_flush - flush full tty buffers
 * @tty: tty to flush
 *
 * flush all the buffers containing receive data. If the buffer is
 * being processed by flush_to_ldisc then we defer the processing
 * to that function
 *
 * Locking: none
 */
void tty_buffer_flush(struct tty_struct *tty)
{
        unsigned long flags;

        spin_lock_irqsave(&tty->buf.lock, flags);

        /* If the data is being pushed to the tty layer then we can't
           process it here. Instead set a flag and the flush_to_ldisc
           path will process the flush request before it exits */
        if (test_bit(TTY_FLUSHING, &tty->flags)) {
                set_bit(TTY_FLUSHPENDING, &tty->flags);
                spin_unlock_irqrestore(&tty->buf.lock, flags);
                wait_event(tty->read_wait,
                                test_bit(TTY_FLUSHPENDING, &tty->flags) == 0);
                return;
        } else
                __tty_buffer_flush(tty);
        spin_unlock_irqrestore(&tty->buf.lock, flags);
}

/**
 * tty_buffer_find - find a free tty buffer
 * @tty: tty owning the buffer
 * @size: characters wanted
 *
 * Locate an existing suitable tty buffer or if we are lacking one then
 * allocate a new one. We round our buffers off in 256 character chunks
 * to get better allocation behaviour.
 *
 * Locking: Caller must hold tty->buf.lock
 */
static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size)
{
        struct tty_buffer **tbh = &tty->buf.free;

        while ((*tbh) != NULL) {
                struct tty_buffer *t = *tbh;
                if (t->size >= size) {
                        *tbh = t->next;
                        t->next = NULL;
                        t->used = 0;
                        t->commit = 0;
                        t->read = 0;
                        tty->buf.memory_used += t->size;
                        return t;
                }
                tbh = &((*tbh)->next);
        }
        /* Round the buffer size out */
        size = (size + 0xFF) & ~0xFF;
        return tty_buffer_alloc(tty, size);
        /* Should possibly check if this fails for the largest buffer
           we have queued and recycle that ? */
}
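
/*
 * Sizing note: the (size + 0xFF) & ~0xFF step in tty_buffer_find()
 * rounds the request up to the next multiple of 256, so a request for
 * e.g. 100 characters ends up allocating a 256 character buffer.
 */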

/**
 * tty_buffer_request_room - grow tty buffer if needed
 * @tty: tty structure
 * @size: size desired
 *
 * Make at least size bytes of linear space available for the tty
 * buffer. If we fail return the size we managed to find.
 *
 * Locking: Takes tty->buf.lock
 */
int tty_buffer_request_room(struct tty_struct *tty, size_t size)
{
        struct tty_buffer *b, *n;
        int left;
        unsigned long flags;

        spin_lock_irqsave(&tty->buf.lock, flags);

        /* OPTIMISATION: We could keep a per tty "zero" sized buffer to
           remove this conditional if its worth it. This would be invisible
           to the callers */
        if ((b = tty->buf.tail) != NULL)
                left = b->size - b->used;
        else
                left = 0;

        if (left < size) {
                /* This is the slow path - looking for new buffers to use */
                if ((n = tty_buffer_find(tty, size)) != NULL) {
                        if (b != NULL) {
                                b->next = n;
                                b->commit = b->used;
                        } else
                                tty->buf.head = n;
                        tty->buf.tail = n;
                } else
                        size = left;
        }

        spin_unlock_irqrestore(&tty->buf.lock, flags);
        return size;
}
EXPORT_SYMBOL_GPL(tty_buffer_request_room);
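
/*
 * Usage sketch (illustrative only; my_hw_read_char() and 'port' are
 * hypothetical driver names, not part of this file): a driver that
 * reads its receive FIFO one byte at a time can reserve space once and
 * then insert characters before pushing the buffer:
 *
 *      len = tty_buffer_request_room(tty, count);
 *      for (i = 0; i < len; i++)
 *              tty_insert_flip_char(tty, my_hw_read_char(port), TTY_NORMAL);
 *      tty_flip_buffer_push(tty);
 */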

/**
 * tty_insert_flip_string - Add characters to the tty buffer
 * @tty: tty structure
 * @chars: characters
 * @size: size
 *
 * Queue a series of bytes to the tty buffering. All the characters
 * passed are marked as without error. Returns the number added.
 *
 * Locking: Called functions may take tty->buf.lock
 */
int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars,
                                size_t size)
{
        int copied = 0;
        do {
                int space = tty_buffer_request_room(tty, size - copied);
                struct tty_buffer *tb = tty->buf.tail;
                /* If there is no space then tb may be NULL */
                if (unlikely(space == 0))
                        break;
                memcpy(tb->char_buf_ptr + tb->used, chars, space);
                memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space);
                tb->used += space;
                copied += space;
                chars += space;
                /* There is a small chance that we need to split the data over
                   several buffers. If this is the case we must loop */
        } while (unlikely(size > copied));
        return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string);
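
/*
 * Usage sketch (illustrative only; my_hw_read_block(), 'port' and
 * rx_buf[] are hypothetical driver names): a bulk receive path copies
 * whatever it has into the flip buffer and then pushes it to the line
 * discipline. A short return value means the 64K per-tty buffer limit
 * was hit and the remainder was not queued:
 *
 *      n = my_hw_read_block(port, rx_buf, sizeof(rx_buf));
 *      copied = tty_insert_flip_string(tty, rx_buf, n);
 *      tty_flip_buffer_push(tty);
 */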

/**
 * tty_insert_flip_string_flags - Add characters to the tty buffer
 * @tty: tty structure
 * @chars: characters
 * @flags: flag bytes
 * @size: size
 *
 * Queue a series of bytes to the tty buffering. For each character
 * the flags array indicates the status of the character. Returns the
 * number added.
 *
 * Locking: Called functions may take tty->buf.lock
 */
int tty_insert_flip_string_flags(struct tty_struct *tty,
                const unsigned char *chars, const char *flags, size_t size)
{
        int copied = 0;
        do {
                int space = tty_buffer_request_room(tty, size - copied);
                struct tty_buffer *tb = tty->buf.tail;
                /* If there is no space then tb may be NULL */
                if (unlikely(space == 0))
                        break;
                memcpy(tb->char_buf_ptr + tb->used, chars, space);
                memcpy(tb->flag_buf_ptr + tb->used, flags, space);
                tb->used += space;
                copied += space;
                chars += space;
                flags += space;
                /* There is a small chance that we need to split the data over
                   several buffers. If this is the case we must loop */
        } while (unlikely(size > copied));
        return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_flags);
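
/*
 * Usage sketch (illustrative only; the arrays are local examples, not
 * driver API): the _flags variant gives each byte its own status, for
 * instance flagging one character as a parity error while the rest are
 * clean:
 *
 *      unsigned char ch[4] = { 'a', 'b', 'c', 'd' };
 *      char fl[4] = { TTY_NORMAL, TTY_NORMAL, TTY_PARITY, TTY_NORMAL };
 *
 *      tty_insert_flip_string_flags(tty, ch, fl, sizeof(ch));
 *      tty_flip_buffer_push(tty);
 */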

/**
 * tty_schedule_flip - push characters to ldisc
 * @tty: tty to push from
 *
 * Takes any pending buffers and transfers their ownership to the
 * ldisc side of the queue. It then schedules those characters for
 * processing by the line discipline.
 *
 * Locking: Takes tty->buf.lock
 */
void tty_schedule_flip(struct tty_struct *tty)
{
        unsigned long flags;

        spin_lock_irqsave(&tty->buf.lock, flags);
        if (tty->buf.tail != NULL)
                tty->buf.tail->commit = tty->buf.tail->used;
        spin_unlock_irqrestore(&tty->buf.lock, flags);
        schedule_delayed_work(&tty->buf.work, 1);
}
EXPORT_SYMBOL(tty_schedule_flip);
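
/*
 * Note: unlike tty_flip_buffer_push() below, this helper always defers
 * the flush to the workqueue; only tty_flip_buffer_push() has the
 * low_latency path that calls flush_to_ldisc() directly.
 */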

/**
 * tty_prepare_flip_string - make room for characters
 * @tty: tty
 * @chars: return pointer for character write area
 * @size: desired size
 *
 * Prepare a block of space in the buffer for data. Returns the length
 * available and buffer pointer to the space which is now allocated and
 * accounted for as ready for normal characters. This is used for drivers
 * that need their own block copy routines into the buffer. There is no
 * guarantee the buffer is a DMA target!
 *
 * Locking: May call functions taking tty->buf.lock
 */
int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars,
                                size_t size)
{
        int space = tty_buffer_request_room(tty, size);
        if (likely(space)) {
                struct tty_buffer *tb = tty->buf.tail;
                *chars = tb->char_buf_ptr + tb->used;
                memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space);
                tb->used += space;
        }
        return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
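
/*
 * Usage sketch (illustrative only; my_hw_copy_rx() and 'port' are
 * hypothetical driver names): a driver with its own block copy routine
 * asks for a contiguous area and must then fill exactly the number of
 * bytes it was granted, since the space is already accounted as used:
 *
 *      unsigned char *dst;
 *      int len = tty_prepare_flip_string(tty, &dst, want);
 *
 *      if (len > 0) {
 *              my_hw_copy_rx(port, dst, len);
 *              tty_flip_buffer_push(tty);
 *      }
 */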

/**
 * tty_prepare_flip_string_flags - make room for characters
 * @tty: tty
 * @chars: return pointer for character write area
 * @flags: return pointer for status flag write area
 * @size: desired size
 *
 * Prepare a block of space in the buffer for data. Returns the length
 * available and buffer pointer to the space which is now allocated and
 * accounted for as ready for characters. This is used for drivers
 * that need their own block copy routines into the buffer. There is no
 * guarantee the buffer is a DMA target!
 *
 * Locking: May call functions taking tty->buf.lock
 */
int tty_prepare_flip_string_flags(struct tty_struct *tty,
                unsigned char **chars, char **flags, size_t size)
{
        int space = tty_buffer_request_room(tty, size);
        if (likely(space)) {
                struct tty_buffer *tb = tty->buf.tail;
                *chars = tb->char_buf_ptr + tb->used;
                *flags = tb->flag_buf_ptr + tb->used;
                tb->used += space;
        }
        return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);

/**
 * flush_to_ldisc
 * @work: tty structure passed from work queue.
 *
 * This routine is called out of the software interrupt to flush data
 * from the buffer chain to the line discipline.
 *
 * Locking: holds tty->buf.lock to guard buffer list. Drops the lock
 * while invoking the line discipline receive_buf method. The
 * receive_buf method is single threaded for each tty instance.
 */
static void flush_to_ldisc(struct work_struct *work)
{
        struct tty_struct *tty =
                container_of(work, struct tty_struct, buf.work.work);
        unsigned long flags;
        struct tty_ldisc *disc;

        disc = tty_ldisc_ref(tty);
        if (disc == NULL)       /*  !TTY_LDISC */
                return;

        spin_lock_irqsave(&tty->buf.lock, flags);

        if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
                struct tty_buffer *head;
                while ((head = tty->buf.head) != NULL) {
                        int count;
                        char *char_buf;
                        unsigned char *flag_buf;

                        count = head->commit - head->read;
                        if (!count) {
                                if (head->next == NULL)
                                        break;
                                tty->buf.head = head->next;
                                tty_buffer_free(tty, head);
                                continue;
                        }
                        /* Ldisc or user is trying to flush the buffers
                           we are feeding to the ldisc, stop feeding the
                           line discipline as we want to empty the queue */
                        if (test_bit(TTY_FLUSHPENDING, &tty->flags))
                                break;
                        if (!tty->receive_room) {
                                schedule_delayed_work(&tty->buf.work, 1);
                                break;
                        }
                        if (count > tty->receive_room)
                                count = tty->receive_room;
                        char_buf = head->char_buf_ptr + head->read;
                        flag_buf = head->flag_buf_ptr + head->read;
                        head->read += count;
                        spin_unlock_irqrestore(&tty->buf.lock, flags);
                        disc->ops->receive_buf(tty, char_buf,
                                                        flag_buf, count);
                        spin_lock_irqsave(&tty->buf.lock, flags);
                }
                clear_bit(TTY_FLUSHING, &tty->flags);
        }

        /* We may have a deferred request to flush the input buffer,
           if so pull the chain under the lock and empty the queue */
        if (test_bit(TTY_FLUSHPENDING, &tty->flags)) {
                __tty_buffer_flush(tty);
                clear_bit(TTY_FLUSHPENDING, &tty->flags);
                wake_up(&tty->read_wait);
        }
        spin_unlock_irqrestore(&tty->buf.lock, flags);

        tty_ldisc_deref(disc);
}

/**
 * tty_flush_to_ldisc
 * @tty: tty to push
 *
 * Push the terminal flip buffers to the line discipline.
 *
 * Must not be called from IRQ context.
 */
void tty_flush_to_ldisc(struct tty_struct *tty)
{
        flush_delayed_work(&tty->buf.work);
}

/**
 * tty_flip_buffer_push - terminal
 * @tty: tty to push
 *
 * Queue a push of the terminal flip buffers to the line discipline. This
 * function must not be called from IRQ context if tty->low_latency is set.
 *
 * In the event of the queue being busy for flipping the work will be
 * held off and retried later.
 *
 * Locking: tty buffer lock. Driver locks in low latency mode.
 */
void tty_flip_buffer_push(struct tty_struct *tty)
{
        unsigned long flags;

        spin_lock_irqsave(&tty->buf.lock, flags);
        if (tty->buf.tail != NULL)
                tty->buf.tail->commit = tty->buf.tail->used;
        spin_unlock_irqrestore(&tty->buf.lock, flags);

        if (tty->low_latency)
                flush_to_ldisc(&tty->buf.work.work);
        else
                schedule_delayed_work(&tty->buf.work, 1);
}
EXPORT_SYMBOL(tty_flip_buffer_push);
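
/*
 * Usage sketch (illustrative only; my_hw_rx_ready() and my_hw_read_char()
 * are hypothetical driver names): drivers normally push once per burst
 * of received data rather than once per character. With tty->low_latency
 * set the push runs flush_to_ldisc() synchronously in the caller's
 * context, so it must not then be used from IRQ context:
 *
 *      while (my_hw_rx_ready(port))
 *              tty_insert_flip_char(tty, my_hw_read_char(port), TTY_NORMAL);
 *      tty_flip_buffer_push(tty);
 */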

/**
 * tty_buffer_init - prepare a tty buffer structure
 * @tty: tty to initialise
 *
 * Set up the initial state of the buffer management for a tty device.
 * Must be called before the other tty buffer functions are used.
 *
 * Locking: none
 */
void tty_buffer_init(struct tty_struct *tty)
{
        spin_lock_init(&tty->buf.lock);
        tty->buf.head = NULL;
        tty->buf.tail = NULL;
        tty->buf.free = NULL;
        tty->buf.memory_used = 0;
        INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc);
}