tty_buffer.c

/*
 * Tty buffer allocation management
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/module.h>

/**
 * tty_buffer_free_all - free buffers used by a tty
 * @tty: tty to free from
 *
 * Remove all the buffers pending on a tty whether queued with data
 * or in the free ring. Must be called when the tty is no longer in use
 *
 * Locking: none
 */
void tty_buffer_free_all(struct tty_struct *tty)
{
	struct tty_buffer *thead;

	while ((thead = tty->buf.head) != NULL) {
		tty->buf.head = thead->next;
		kfree(thead);
	}
	while ((thead = tty->buf.free) != NULL) {
		tty->buf.free = thead->next;
		kfree(thead);
	}
	tty->buf.tail = NULL;
	tty->buf.memory_used = 0;
}

/**
 * tty_buffer_alloc - allocate a tty buffer
 * @tty: tty device
 * @size: desired size (characters)
 *
 * Allocate a new tty buffer to hold the desired number of characters.
 * Return NULL if out of memory or the allocation would exceed the
 * per device queue
 *
 * Locking: Caller must hold tty->buf.lock
 */
static struct tty_buffer *tty_buffer_alloc(struct tty_struct *tty, size_t size)
{
	struct tty_buffer *p;

	if (tty->buf.memory_used + size > 65536)
		return NULL;
	p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
	if (p == NULL)
		return NULL;
	p->used = 0;
	p->size = size;
	p->next = NULL;
	p->commit = 0;
	p->read = 0;
	p->char_buf_ptr = (char *)(p->data);
	p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size;
	tty->buf.memory_used += size;
	return p;
}

/**
 * tty_buffer_free - free a tty buffer
 * @tty: tty owning the buffer
 * @b: the buffer to free
 *
 * Free a tty buffer, or add it to the free list according to our
 * internal strategy
 *
 * Locking: Caller must hold tty->buf.lock
 */
static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b)
{
	/* Dumb strategy for now - should keep some stats */
	tty->buf.memory_used -= b->size;
	WARN_ON(tty->buf.memory_used < 0);

	if (b->size >= 512)
		kfree(b);
	else {
		b->next = tty->buf.free;
		tty->buf.free = b;
	}
}

/**
 * __tty_buffer_flush - flush full tty buffers
 * @tty: tty to flush
 *
 * Flush all the buffers containing receive data. Caller must
 * hold the buffer lock and must have ensured no parallel flush to
 * ldisc is running.
 *
 * Locking: Caller must hold tty->buf.lock
 */
static void __tty_buffer_flush(struct tty_struct *tty)
{
	struct tty_buffer *thead;

	while ((thead = tty->buf.head) != NULL) {
		tty->buf.head = thead->next;
		tty_buffer_free(tty, thead);
	}
	tty->buf.tail = NULL;
}

/**
 * tty_buffer_flush - flush full tty buffers
 * @tty: tty to flush
 *
 * Flush all the buffers containing receive data. If the buffer is
 * being processed by flush_to_ldisc then we defer the processing
 * to that function
 *
 * Locking: none
 */
void tty_buffer_flush(struct tty_struct *tty)
{
	unsigned long flags;

	spin_lock_irqsave(&tty->buf.lock, flags);

	/* If the data is being pushed to the tty layer then we can't
	   process it here. Instead set a flag and the flush_to_ldisc
	   path will process the flush request before it exits */
	if (test_bit(TTY_FLUSHING, &tty->flags)) {
		set_bit(TTY_FLUSHPENDING, &tty->flags);
		spin_unlock_irqrestore(&tty->buf.lock, flags);
		wait_event(tty->read_wait,
				test_bit(TTY_FLUSHPENDING, &tty->flags) == 0);
		return;
	} else
		__tty_buffer_flush(tty);
	spin_unlock_irqrestore(&tty->buf.lock, flags);
}

/**
 * tty_buffer_find - find a free tty buffer
 * @tty: tty owning the buffer
 * @size: characters wanted
 *
 * Locate an existing suitable tty buffer or if we are lacking one then
 * allocate a new one. We round our buffers off in 256 character chunks
 * to get better allocation behaviour.
 *
 * Locking: Caller must hold tty->buf.lock
 */
static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size)
{
	struct tty_buffer **tbh = &tty->buf.free;

	while ((*tbh) != NULL) {
		struct tty_buffer *t = *tbh;
		if (t->size >= size) {
			*tbh = t->next;
			t->next = NULL;
			t->used = 0;
			t->commit = 0;
			t->read = 0;
			tty->buf.memory_used += t->size;
			return t;
		}
		tbh = &((*tbh)->next);
	}
	/* Round the buffer size out */
	size = (size + 0xFF) & ~0xFF;
	return tty_buffer_alloc(tty, size);
	/* Should possibly check if this fails for the largest buffer we
	   have queued and recycle that ? */
}

/**
 * tty_buffer_request_room - grow tty buffer if needed
 * @tty: tty structure
 * @size: size desired
 *
 * Make at least size bytes of linear space available for the tty
 * buffer. If we fail return the size we managed to find.
 *
 * Locking: Takes tty->buf.lock
 */
int tty_buffer_request_room(struct tty_struct *tty, size_t size)
{
	struct tty_buffer *b, *n;
	int left;
	unsigned long flags;

	spin_lock_irqsave(&tty->buf.lock, flags);

	/* OPTIMISATION: We could keep a per tty "zero" sized buffer to
	   remove this conditional if it's worth it. This would be invisible
	   to the callers */
	if ((b = tty->buf.tail) != NULL)
		left = b->size - b->used;
	else
		left = 0;

	if (left < size) {
		/* This is the slow path - looking for new buffers to use */
		if ((n = tty_buffer_find(tty, size)) != NULL) {
			if (b != NULL) {
				b->next = n;
				b->commit = b->used;
			} else
				tty->buf.head = n;
			tty->buf.tail = n;
		} else
			size = left;
	}

	spin_unlock_irqrestore(&tty->buf.lock, flags);
	return size;
}
EXPORT_SYMBOL_GPL(tty_buffer_request_room);

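/*
 * Example (illustrative sketch, not part of the original file): a FIFO-drain
 * loop that reserves space up front and then inserts characters one at a
 * time via tty_insert_flip_char() from <linux/tty_flip.h>.  The demo_*
 * names, including the demo_read_fifo() register accessor, are hypothetical.
 *
 *	static void demo_drain_fifo(struct tty_struct *tty, int pending)
 *	{
 *		int i, space;
 *
 *		space = tty_buffer_request_room(tty, pending);
 *		for (i = 0; i < space; i++)
 *			tty_insert_flip_char(tty, demo_read_fifo(), TTY_NORMAL);
 *		tty_flip_buffer_push(tty);
 *	}
 */
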
/**
 * tty_insert_flip_string_fixed_flag - Add characters to the tty buffer
 * @tty: tty structure
 * @chars: characters
 * @flag: flag value for each character
 * @size: size
 *
 * Queue a series of bytes to the tty buffering. All the characters
 * passed are marked with the supplied flag. Returns the number added.
 *
 * Locking: Called functions may take tty->buf.lock
 */
int tty_insert_flip_string_fixed_flag(struct tty_struct *tty,
		const unsigned char *chars, char flag, size_t size)
{
	int copied = 0;
	do {
		int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
		int space = tty_buffer_request_room(tty, goal);
		struct tty_buffer *tb = tty->buf.tail;
		/* If there is no space then tb may be NULL */
		if (unlikely(space == 0))
			break;
		memcpy(tb->char_buf_ptr + tb->used, chars, space);
		memset(tb->flag_buf_ptr + tb->used, flag, space);
		tb->used += space;
		copied += space;
		chars += space;
		/* There is a small chance that we need to split the data over
		   several buffers. If this is the case we must loop */
	} while (unlikely(size > copied));
	return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);

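/*
 * Example (illustrative sketch, not part of the original file): a driver
 * receive path that queues a block of bytes, all marked TTY_NORMAL, and then
 * schedules delivery to the line discipline.  The demo_rx_block() name is
 * hypothetical; the return value may be less than len if the per-tty 64K
 * memory limit is reached, in which case the remaining bytes are dropped.
 *
 *	static void demo_rx_block(struct tty_struct *tty,
 *				  const unsigned char *buf, size_t len)
 *	{
 *		tty_insert_flip_string_fixed_flag(tty, buf, TTY_NORMAL, len);
 *		tty_flip_buffer_push(tty);
 *	}
 */
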
/**
 * tty_insert_flip_string_flags - Add characters to the tty buffer
 * @tty: tty structure
 * @chars: characters
 * @flags: flag bytes
 * @size: size
 *
 * Queue a series of bytes to the tty buffering. For each character
 * the flags array indicates the status of the character. Returns the
 * number added.
 *
 * Locking: Called functions may take tty->buf.lock
 */
int tty_insert_flip_string_flags(struct tty_struct *tty,
		const unsigned char *chars, const char *flags, size_t size)
{
	int copied = 0;
	do {
		int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
		int space = tty_buffer_request_room(tty, goal);
		struct tty_buffer *tb = tty->buf.tail;
		/* If there is no space then tb may be NULL */
		if (unlikely(space == 0))
			break;
		memcpy(tb->char_buf_ptr + tb->used, chars, space);
		memcpy(tb->flag_buf_ptr + tb->used, flags, space);
		tb->used += space;
		copied += space;
		chars += space;
		flags += space;
		/* There is a small chance that we need to split the data over
		   several buffers. If this is the case we must loop */
	} while (unlikely(size > copied));
	return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_flags);

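/*
 * Example (illustrative sketch, not part of the original file): using the
 * per-character flag array to flag one bad byte inside an otherwise clean
 * block, e.g. a parity error reported by the hardware.  The demo_* name is
 * hypothetical and the arrays are kept tiny purely for illustration.
 *
 *	static void demo_rx_with_error(struct tty_struct *tty)
 *	{
 *		unsigned char chars[3] = { 'a', 'b', 'c' };
 *		char flags[3] = { TTY_NORMAL, TTY_PARITY, TTY_NORMAL };
 *
 *		tty_insert_flip_string_flags(tty, chars, flags, 3);
 *		tty_flip_buffer_push(tty);
 *	}
 */
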
/**
 * tty_schedule_flip - push characters to ldisc
 * @tty: tty to push from
 *
 * Takes any pending buffers and transfers their ownership to the
 * ldisc side of the queue. It then schedules those characters for
 * processing by the line discipline.
 *
 * Locking: Takes tty->buf.lock
 */
void tty_schedule_flip(struct tty_struct *tty)
{
	unsigned long flags;

	spin_lock_irqsave(&tty->buf.lock, flags);
	if (tty->buf.tail != NULL)
		tty->buf.tail->commit = tty->buf.tail->used;
	spin_unlock_irqrestore(&tty->buf.lock, flags);
	schedule_work(&tty->buf.work);
}
EXPORT_SYMBOL(tty_schedule_flip);

/**
 * tty_prepare_flip_string - make room for characters
 * @tty: tty
 * @chars: return pointer for character write area
 * @size: desired size
 *
 * Prepare a block of space in the buffer for data. Returns the length
 * available and buffer pointer to the space which is now allocated and
 * accounted for as ready for normal characters. This is used for drivers
 * that need their own block copy routines into the buffer. There is no
 * guarantee the buffer is a DMA target!
 *
 * Locking: May call functions taking tty->buf.lock
 */
int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars,
				size_t size)
{
	int space = tty_buffer_request_room(tty, size);
	if (likely(space)) {
		struct tty_buffer *tb = tty->buf.tail;
		*chars = tb->char_buf_ptr + tb->used;
		memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space);
		tb->used += space;
	}
	return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string);

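/*
 * Example (illustrative sketch, not part of the original file): a driver that
 * copies received data straight into the flip buffer instead of bouncing it
 * through an intermediate buffer.  demo_hw_copy() stands in for whatever
 * hardware-specific block copy the driver would perform and is hypothetical.
 *
 *	static void demo_rx_direct(struct tty_struct *tty, size_t len)
 *	{
 *		unsigned char *dst;
 *		int space = tty_prepare_flip_string(tty, &dst, len);
 *
 *		if (space > 0) {
 *			demo_hw_copy(dst, space);
 *			tty_flip_buffer_push(tty);
 *		}
 *	}
 */
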
/**
 * tty_prepare_flip_string_flags - make room for characters
 * @tty: tty
 * @chars: return pointer for character write area
 * @flags: return pointer for status flag write area
 * @size: desired size
 *
 * Prepare a block of space in the buffer for data. Returns the length
 * available and buffer pointer to the space which is now allocated and
 * accounted for as ready for characters. This is used for drivers
 * that need their own block copy routines into the buffer. There is no
 * guarantee the buffer is a DMA target!
 *
 * Locking: May call functions taking tty->buf.lock
 */
int tty_prepare_flip_string_flags(struct tty_struct *tty,
			unsigned char **chars, char **flags, size_t size)
{
	int space = tty_buffer_request_room(tty, size);
	if (likely(space)) {
		struct tty_buffer *tb = tty->buf.tail;
		*chars = tb->char_buf_ptr + tb->used;
		*flags = tb->flag_buf_ptr + tb->used;
		tb->used += space;
	}
	return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);

/**
 * flush_to_ldisc
 * @work: tty structure passed from work queue.
 *
 * This routine is called out of the software interrupt to flush data
 * from the buffer chain to the line discipline.
 *
 * Locking: holds tty->buf.lock to guard buffer list. Drops the lock
 * while invoking the line discipline receive_buf method. The
 * receive_buf method is single threaded for each tty instance.
 */
static void flush_to_ldisc(struct work_struct *work)
{
	struct tty_struct *tty =
		container_of(work, struct tty_struct, buf.work);
	unsigned long flags;
	struct tty_ldisc *disc;

	disc = tty_ldisc_ref(tty);
	if (disc == NULL)	/*  !TTY_LDISC */
		return;

	spin_lock_irqsave(&tty->buf.lock, flags);

	if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
		struct tty_buffer *head, *tail = tty->buf.tail;
		int seen_tail = 0;
		while ((head = tty->buf.head) != NULL) {
			int copied;
			int count;
			char *char_buf;
			unsigned char *flag_buf;

			count = head->commit - head->read;
			if (!count) {
				if (head->next == NULL)
					break;
				/*
				   There's a possibility the tty might get a new
				   buffer added during the unlock window below.
				   We could end up spinning in here forever,
				   hogging the CPU completely. To avoid this,
				   have a rest each time we have processed the
				   tail buffer.
				*/
				if (tail == head)
					seen_tail = 1;
				tty->buf.head = head->next;
				tty_buffer_free(tty, head);
				continue;
			}
			/* Ldisc or user is trying to flush the buffers
			   we are feeding to the ldisc, stop feeding the
			   line discipline as we want to empty the queue */
			if (test_bit(TTY_FLUSHPENDING, &tty->flags))
				break;
			char_buf = head->char_buf_ptr + head->read;
			flag_buf = head->flag_buf_ptr + head->read;
			spin_unlock_irqrestore(&tty->buf.lock, flags);
			copied = disc->ops->receive_buf(tty, char_buf,
							flag_buf, count);
			spin_lock_irqsave(&tty->buf.lock, flags);
			head->read += copied;
			if (copied == 0 || seen_tail) {
				schedule_work(&tty->buf.work);
				break;
			}
		}
		clear_bit(TTY_FLUSHING, &tty->flags);
	}

	/* We may have a deferred request to flush the input buffer,
	   if so pull the chain under the lock and empty the queue */
	if (test_bit(TTY_FLUSHPENDING, &tty->flags)) {
		__tty_buffer_flush(tty);
		clear_bit(TTY_FLUSHPENDING, &tty->flags);
		wake_up(&tty->read_wait);
	}
	spin_unlock_irqrestore(&tty->buf.lock, flags);

	tty_ldisc_deref(disc);
}

/**
 * tty_flush_to_ldisc
 * @tty: tty to push
 *
 * Push the terminal flip buffers to the line discipline.
 *
 * Must not be called from IRQ context.
 */
void tty_flush_to_ldisc(struct tty_struct *tty)
{
	flush_work(&tty->buf.work);
}

/**
 * tty_flip_buffer_push - push terminal flip buffers
 * @tty: tty to push
 *
 * Queue a push of the terminal flip buffers to the line discipline. This
 * function must not be called from IRQ context if tty->low_latency is set.
 *
 * In the event of the queue being busy for flipping the work will be
 * held off and retried later.
 *
 * Locking: tty buffer lock. Driver locks in low latency mode.
 */
void tty_flip_buffer_push(struct tty_struct *tty)
{
	unsigned long flags;

	spin_lock_irqsave(&tty->buf.lock, flags);
	if (tty->buf.tail != NULL)
		tty->buf.tail->commit = tty->buf.tail->used;
	spin_unlock_irqrestore(&tty->buf.lock, flags);

	if (tty->low_latency)
		flush_to_ldisc(&tty->buf.work);
	else
		schedule_work(&tty->buf.work);
}
EXPORT_SYMBOL(tty_flip_buffer_push);

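/*
 * Example (illustrative sketch, not part of the original file): the common
 * interrupt-handler pattern of inserting data and then pushing it.  The
 * demo_* names are hypothetical; because this runs in IRQ context, the demo
 * device must not have tty->low_latency set, or the push would call
 * flush_to_ldisc() synchronously from the handler.
 *
 *	static irqreturn_t demo_irq(int irq, void *dev_id)
 *	{
 *		struct tty_struct *tty = dev_id;
 *
 *		tty_insert_flip_char(tty, demo_read_data_reg(), TTY_NORMAL);
 *		tty_flip_buffer_push(tty);
 *		return IRQ_HANDLED;
 *	}
 */
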
/**
 * tty_buffer_init - prepare a tty buffer structure
 * @tty: tty to initialise
 *
 * Set up the initial state of the buffer management for a tty device.
 * Must be called before the other tty buffer functions are used.
 *
 * Locking: none
 */
void tty_buffer_init(struct tty_struct *tty)
{
	spin_lock_init(&tty->buf.lock);
	tty->buf.head = NULL;
	tty->buf.tail = NULL;
	tty->buf.free = NULL;
	tty->buf.memory_used = 0;
	INIT_WORK(&tty->buf.work, flush_to_ldisc);
}