line.c

/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include "linux/irqreturn.h"
#include "linux/kd.h"
#include "linux/sched.h"
#include "linux/slab.h"
#include "chan.h"
#include "irq_kern.h"
#include "irq_user.h"
#include "kern_util.h"
#include "os.h"

#define LINE_BUFSIZE 4096
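
/*
 * Read-side interrupt handler: the host descriptor backing this channel has
 * data pending, so let the channel layer feed it into the tty.
 */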
static irqreturn_t line_interrupt(int irq, void *data)
{
        struct chan *chan = data;
        struct line *line = chan->line;
        struct tty_struct *tty = tty_port_tty_get(&line->port);

        if (line)
                chan_interrupt(line, tty, irq);

        tty_kref_put(tty);
        return IRQ_HANDLED;
}
/*
 * Returns the free space inside the ring buffer of this line.
 *
 * Should be called while holding line->lock (this does not modify data).
 */
static int write_room(struct line *line)
{
        int n;

        if (line->buffer == NULL)
                return LINE_BUFSIZE - 1;

        /* This is for the case where the buffer is wrapped! */
        n = line->head - line->tail;

        if (n <= 0)
                n += LINE_BUFSIZE; /* The other case */
        return n - 1;
}
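
/*
 * Note on the ring buffer convention used above: data is consumed from
 * line->head and produced at line->tail, and one byte is always left unused
 * so that head == tail unambiguously means "empty" rather than "full".
 * That is why write_room() returns n - 1 and line_chars_in_buffer() adds
 * the byte back.
 */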
int line_write_room(struct tty_struct *tty)
{
        struct line *line = tty->driver_data;
        unsigned long flags;
        int room;

        spin_lock_irqsave(&line->lock, flags);
        room = write_room(line);
        spin_unlock_irqrestore(&line->lock, flags);

        return room;
}

int line_chars_in_buffer(struct tty_struct *tty)
{
        struct line *line = tty->driver_data;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&line->lock, flags);
        /* write_room subtracts 1 for the reserved byte, so we re-add it. */
        ret = LINE_BUFSIZE - (write_room(line) + 1);
        spin_unlock_irqrestore(&line->lock, flags);

        return ret;
}
/*
 * This copies the content of buf into the circular buffer associated with
 * this line.
 * The return value is the number of characters actually copied, i.e. the ones
 * for which there was space: this function is not supposed to ever flush out
 * the circular buffer.
 *
 * Must be called while holding line->lock!
 */
static int buffer_data(struct line *line, const char *buf, int len)
{
        int end, room;

        if (line->buffer == NULL) {
                line->buffer = kmalloc(LINE_BUFSIZE, GFP_ATOMIC);
                if (line->buffer == NULL) {
                        printk(KERN_ERR "buffer_data - atomic allocation "
                               "failed\n");
                        return 0;
                }
                line->head = line->buffer;
                line->tail = line->buffer;
        }

        room = write_room(line);
        len = (len > room) ? room : len;

        end = line->buffer + LINE_BUFSIZE - line->tail;

        if (len < end) {
                memcpy(line->tail, buf, len);
                line->tail += len;
        }
        else {
                /* The circular buffer is wrapping */
                memcpy(line->tail, buf, end);
                buf += end;
                memcpy(line->buffer, buf, len - end);
                line->tail = line->buffer + len - end;
        }

        return len;
}
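
/*
 * Worked example of the wrapping branch above (hypothetical numbers): with
 * line->tail sitting 2 bytes before the physical end of the buffer and
 * len = 5, end is 2, so the first memcpy() fills those last 2 bytes and the
 * second memcpy() places the remaining 3 bytes at the start of the buffer,
 * leaving line->tail = line->buffer + 3.
 */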
/*
 * Flushes the ring buffer to the output channels. That is, write_chan is
 * called, passing it line->head as buffer, and an appropriate count.
 *
 * On exit, returns 1 when the buffer is empty,
 * 0 when the buffer is not empty on exit,
 * and -errno when an error occurred.
 *
 * Must be called while holding line->lock!
 */
static int flush_buffer(struct line *line)
{
        int n, count;

        if ((line->buffer == NULL) || (line->head == line->tail))
                return 1;

        if (line->tail < line->head) {
                /* line->buffer + LINE_BUFSIZE is the end of the buffer! */
                count = line->buffer + LINE_BUFSIZE - line->head;

                n = write_chan(line->chan_out, line->head, count,
                               line->driver->write_irq);
                if (n < 0)
                        return n;
                if (n == count) {
                        /*
                         * We have flushed from ->head to buffer end, now we
                         * must flush only from the beginning to ->tail.
                         */
                        line->head = line->buffer;
                } else {
                        line->head += n;
                        return 0;
                }
        }

        count = line->tail - line->head;
        n = write_chan(line->chan_out, line->head, count,
                       line->driver->write_irq);

        if (n < 0)
                return n;

        line->head += n;
        return line->head == line->tail;
}
void line_flush_buffer(struct tty_struct *tty)
{
        struct line *line = tty->driver_data;
        unsigned long flags;

        spin_lock_irqsave(&line->lock, flags);
        flush_buffer(line);
        spin_unlock_irqrestore(&line->lock, flags);
}
/*
 * We map both ->flush_chars and ->put_char (which go in pairs) onto
 * ->flush_buffer and ->write. Hope it's not that bad.
 */
void line_flush_chars(struct tty_struct *tty)
{
        line_flush_buffer(tty);
}

int line_put_char(struct tty_struct *tty, unsigned char ch)
{
        return line_write(tty, &ch, sizeof(ch));
}
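
/*
 * tty ->write(): if output is already queued in the ring buffer, append to it
 * so ordering is preserved; otherwise try to write straight to the output
 * channel and buffer whatever the channel could not take immediately.
 */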
int line_write(struct tty_struct *tty, const unsigned char *buf, int len)
{
        struct line *line = tty->driver_data;
        unsigned long flags;
        int n, ret = 0;

        spin_lock_irqsave(&line->lock, flags);
        if (line->head != line->tail)
                ret = buffer_data(line, buf, len);
        else {
                n = write_chan(line->chan_out, buf, len,
                               line->driver->write_irq);
                if (n < 0) {
                        ret = n;
                        goto out_up;
                }

                len -= n;
                ret += n;
                if (len > 0)
                        ret += buffer_data(line, buf + n, len);
        }
out_up:
        spin_unlock_irqrestore(&line->lock, flags);
        return ret;
}
void line_set_termios(struct tty_struct *tty, struct ktermios * old)
{
        /* nothing */
}

void line_throttle(struct tty_struct *tty)
{
        struct line *line = tty->driver_data;

        deactivate_chan(line->chan_in, line->driver->read_irq);
        line->throttled = 1;
}

void line_unthrottle(struct tty_struct *tty)
{
        struct line *line = tty->driver_data;

        line->throttled = 0;
        chan_interrupt(line, tty, line->driver->read_irq);

        /*
         * Maybe there is enough stuff pending that calling the interrupt
         * throttles us again.  In this case, line->throttled will be 1
         * again and we shouldn't turn the interrupt back on.
         */
        if (!line->throttled)
                reactivate_chan(line->chan_in, line->driver->read_irq);
}
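
/*
 * Write-side interrupt handler: the host descriptor is writable again, so try
 * to drain the buffered output and wake up the tty layer once the buffer has
 * been emptied (or dropped on error).
 */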
static irqreturn_t line_write_interrupt(int irq, void *data)
{
        struct chan *chan = data;
        struct line *line = chan->line;
        struct tty_struct *tty;
        int err;

        /*
         * Interrupts are disabled here because genirq keeps irqs disabled when
         * calling the action handler.
         */

        spin_lock(&line->lock);
        err = flush_buffer(line);
        if (err == 0) {
                spin_unlock(&line->lock);
                return IRQ_NONE;
        } else if (err < 0) {
                line->head = line->buffer;
                line->tail = line->buffer;
        }
        spin_unlock(&line->lock);

        tty = tty_port_tty_get(&line->port);
        if (tty == NULL)
                return IRQ_NONE;

        tty_wakeup(tty);
        tty_kref_put(tty);

        return IRQ_HANDLED;
}
int line_setup_irq(int fd, int input, int output, struct line *line, void *data)
{
        const struct line_driver *driver = line->driver;
        int err = 0, flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;

        if (input)
                err = um_request_irq(driver->read_irq, fd, IRQ_READ,
                                     line_interrupt, flags,
                                     driver->read_irq_name, data);
        if (err)
                return err;
        if (output)
                err = um_request_irq(driver->write_irq, fd, IRQ_WRITE,
                                     line_write_interrupt, flags,
                                     driver->write_irq_name, data);
        return err;
}
/*
 * Normally, a driver like this can rely mostly on the tty layer
 * locking, particularly when it comes to the driver structure.
 * However, in this case, mconsole requests can come in "from the
 * side", and race with opens and closes.
 *
 * mconsole config requests will want to be sure the device isn't in
 * use, and get_config, open, and close will want a stable
 * configuration.  The checking and modification of the configuration
 * is done under the per-line count_lock mutex.  Checking whether the
 * device is in use is done by looking at line->port.count, also under
 * count_lock.
 *
 * line->port.count serves to decide whether the device should be
 * enabled or disabled on the host.  If it's equal to 0, then we are
 * doing the first open or last close.  Otherwise, open and close just
 * return.
 */

int line_open(struct line *lines, struct tty_struct *tty)
{
        struct line *line = &lines[tty->index];
        int err = -ENODEV;

        mutex_lock(&line->count_lock);
        if (!line->valid)
                goto out_unlock;

        err = 0;
        if (line->port.count++)
                goto out_unlock;

        BUG_ON(tty->driver_data);
        tty->driver_data = line;
        tty_port_tty_set(&line->port, tty);

        err = enable_chan(line);
        if (err) /* line_close() will be called by our caller */
                goto out_unlock;

        if (!line->sigio) {
                chan_enable_winch(line->chan_out, tty);
                line->sigio = 1;
        }

        chan_window_size(line, &tty->winsize.ws_row,
                         &tty->winsize.ws_col);
out_unlock:
        mutex_unlock(&line->count_lock);
        return err;
}
static void unregister_winch(struct tty_struct *tty);

void line_close(struct tty_struct *tty, struct file * filp)
{
        struct line *line = tty->driver_data;

        /*
         * If line_open fails (and tty->driver_data is never set),
         * tty_open will call line_close.  So just return in this case.
         */
        if (line == NULL)
                return;

        /* We ignore the error anyway! */
        flush_buffer(line);

        mutex_lock(&line->count_lock);
        BUG_ON(!line->valid);

        if (--line->port.count)
                goto out_unlock;

        tty_port_tty_set(&line->port, NULL);
        tty->driver_data = NULL;

        if (line->sigio) {
                unregister_winch(tty);
                line->sigio = 0;
        }

out_unlock:
        mutex_unlock(&line->count_lock);
}
void close_lines(struct line *lines, int nlines)
{
        int i;

        for (i = 0; i < nlines; i++)
                close_chan(&lines[i]);
}
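
/*
 * (Re)configure line n from the string init, under count_lock.  "none" tears
 * an existing channel pair down and unregisters the tty device; any other
 * string is parsed as a new channel pair and the tty device is (re)registered.
 * line_config() and line_remove() below funnel mconsole requests through here.
 */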
int setup_one_line(struct line *lines, int n, char *init,
                   const struct chan_opts *opts, char **error_out)
{
        struct line *line = &lines[n];
        struct tty_driver *driver = line->driver->driver;
        int err = -EINVAL;

        mutex_lock(&line->count_lock);

        if (line->port.count) {
                *error_out = "Device is already open";
                goto out;
        }

        if (!strcmp(init, "none")) {
                if (line->valid) {
                        line->valid = 0;
                        kfree(line->init_str);
                        tty_unregister_device(driver, n);
                        parse_chan_pair(NULL, line, n, opts, error_out);
                        err = 0;
                }
        } else {
                char *new = kstrdup(init, GFP_KERNEL);
                if (!new) {
                        *error_out = "Failed to allocate memory";
                        err = -ENOMEM;
                        goto out; /* don't return with count_lock held */
                }
                if (line->valid) {
                        tty_unregister_device(driver, n);
                        kfree(line->init_str);
                }
                line->init_str = new;
                line->valid = 1;
                err = parse_chan_pair(new, line, n, opts, error_out);
                if (!err) {
                        struct device *d = tty_register_device(driver, n, NULL);
                        if (IS_ERR(d)) {
                                *error_out = "Failed to register device";
                                err = PTR_ERR(d);
                                parse_chan_pair(NULL, line, n, opts, error_out);
                        }
                }
                if (err) {
                        line->init_str = NULL;
                        line->valid = 0;
                        kfree(new);
                }
        }
out:
        mutex_unlock(&line->count_lock);
        return err;
}
/*
 * Common setup code for both startup command line and mconsole initialization.
 * @conf contains the per-device configuration strings (of size @num) to modify;
 * @def receives the default configuration when every device is set at once;
 * @init is the setup string;
 * @name is the driver name, used in error messages.
 */

int line_setup(char **conf, unsigned int num, char **def,
               char *init, char *name)
{
        char *error;

        if (*init == '=') {
                /*
                 * We said con=/ssl= instead of con#=, so we are configuring all
                 * consoles at once.
                 */
                *def = init + 1;
        } else {
                char *end;
                unsigned n = simple_strtoul(init, &end, 0);

                if (*end != '=') {
                        error = "Couldn't parse device number";
                        goto out;
                }
                if (n >= num) {
                        error = "Device number out of range";
                        goto out;
                }
                conf[n] = end + 1;
        }
        return 0;

out:
        printk(KERN_ERR "Failed to set up %s with "
               "configuration string \"%s\" : %s\n", name, init, error);
        return -EINVAL;
}
int line_config(struct line *lines, unsigned int num, char *str,
                const struct chan_opts *opts, char **error_out)
{
        char *end;
        int n;

        if (*str == '=') {
                *error_out = "Can't configure all devices from mconsole";
                return -EINVAL;
        }

        n = simple_strtoul(str, &end, 0);
        if (*end++ != '=') {
                *error_out = "Couldn't parse device number";
                return -EINVAL;
        }
        if (n >= num) {
                *error_out = "Device number out of range";
                return -EINVAL;
        }
        return setup_one_line(lines, n, end, opts, error_out);
}
int line_get_config(char *name, struct line *lines, unsigned int num, char *str,
                    int size, char **error_out)
{
        struct line *line;
        char *end;
        int dev, n = 0;

        dev = simple_strtoul(name, &end, 0);
        if ((*end != '\0') || (end == name)) {
                *error_out = "line_get_config failed to parse device number";
                return 0;
        }

        if ((dev < 0) || (dev >= num)) {
                *error_out = "device number out of range";
                return 0;
        }

        line = &lines[dev];

        mutex_lock(&line->count_lock);
        if (!line->valid)
                CONFIG_CHUNK(str, size, n, "none", 1);
        else {
                struct tty_struct *tty = tty_port_tty_get(&line->port);
                if (tty == NULL) {
                        CONFIG_CHUNK(str, size, n, line->init_str, 1);
                } else {
                        n = chan_config_string(line, str, size, error_out);
                        tty_kref_put(tty);
                }
        }
        mutex_unlock(&line->count_lock);

        return n;
}
int line_id(char **str, int *start_out, int *end_out)
{
        char *end;
        int n;

        n = simple_strtoul(*str, &end, 0);
        if ((*end != '\0') || (end == *str))
                return -1;

        *str = end;
        *start_out = n;
        *end_out = n;
        return n;
}

int line_remove(struct line *lines, unsigned int num, int n, char **error_out)
{
        if (n >= num) {
                *error_out = "Device number out of range";
                return -EINVAL;
        }
        return setup_one_line(lines, n, "none", NULL, error_out);
}
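
/*
 * Allocate and register the tty driver for a family of lines and initialize
 * the per-line state: tty_port, locks and the channel list.  On success the
 * driver is also registered with mconsole so it can be reconfigured at
 * runtime.
 */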
int register_lines(struct line_driver *line_driver,
                   const struct tty_operations *ops,
                   struct line *lines, int nlines)
{
        struct tty_driver *driver = alloc_tty_driver(nlines);
        int err;
        int i;

        if (!driver)
                return -ENOMEM;

        driver->driver_name = line_driver->name;
        driver->name = line_driver->device_name;
        driver->major = line_driver->major;
        driver->minor_start = line_driver->minor_start;
        driver->type = line_driver->type;
        driver->subtype = line_driver->subtype;
        driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
        driver->init_termios = tty_std_termios;

        for (i = 0; i < nlines; i++) {
                tty_port_init(&lines[i].port);
                spin_lock_init(&lines[i].lock);
                mutex_init(&lines[i].count_lock);
                lines[i].driver = line_driver;
                INIT_LIST_HEAD(&lines[i].chan_list);
        }
        tty_set_operations(driver, ops);

        err = tty_register_driver(driver);
        if (err) {
                printk(KERN_ERR "register_lines : can't register %s driver\n",
                       line_driver->name);
                put_tty_driver(driver);
                return err;
        }

        line_driver->driver = driver;
        mconsole_register_dev(&line_driver->mc);
        return 0;
}
static DEFINE_SPINLOCK(winch_handler_lock);
static LIST_HEAD(winch_handlers);
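
/*
 * One struct winch per host descriptor that reports window-size changes for a
 * console: fd is read in winch_interrupt() when the host side signals a
 * resize, tty_fd/pid/stack describe the helper on the host side, and work is
 * used to tear the whole thing down from process context.
 */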
struct winch {
        struct list_head list;
        int fd;
        int tty_fd;
        int pid;
        struct tty_struct *tty;
        unsigned long stack;
        struct work_struct work;
};
static void __free_winch(struct work_struct *work)
{
        struct winch *winch = container_of(work, struct winch, work);
        um_free_irq(WINCH_IRQ, winch);

        if (winch->pid != -1)
                os_kill_process(winch->pid, 1);
        if (winch->stack != 0)
                free_stack(winch->stack, 0);
        kfree(winch);
}

static void free_winch(struct winch *winch)
{
        int fd = winch->fd;
        winch->fd = -1;
        if (fd != -1)
                os_close_file(fd);
        list_del(&winch->list);
        __free_winch(&winch->work);
}
static irqreturn_t winch_interrupt(int irq, void *data)
{
        struct winch *winch = data;
        struct tty_struct *tty;
        struct line *line;
        int fd = winch->fd;
        int err;
        char c;

        if (fd != -1) {
                err = generic_read(fd, &c, NULL);
                if (err < 0) {
                        if (err != -EAGAIN) {
                                winch->fd = -1;
                                list_del(&winch->list);
                                os_close_file(fd);
                                printk(KERN_ERR "winch_interrupt : "
                                       "read failed, errno = %d\n", -err);
                                printk(KERN_ERR "fd %d is losing SIGWINCH "
                                       "support\n", winch->tty_fd);
                                INIT_WORK(&winch->work, __free_winch);
                                schedule_work(&winch->work);
                                return IRQ_HANDLED;
                        }
                        goto out;
                }
        }
        tty = winch->tty;
        if (tty != NULL) {
                line = tty->driver_data;
                if (line != NULL) {
                        chan_window_size(line, &tty->winsize.ws_row,
                                         &tty->winsize.ws_col);
                        kill_pgrp(tty->pgrp, SIGWINCH, 1);
                }
        }
out:
        if (winch->fd != -1)
                reactivate_fd(winch->fd, WINCH_IRQ);
        return IRQ_HANDLED;
}
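
/*
 * Register fd as a source of window-size change notifications for tty: hook
 * it up to WINCH_IRQ and track it on winch_handlers so it can be torn down
 * later.  On failure the helper process, its stack and the descriptor are
 * released here, since nobody else will.
 */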
void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty,
                        unsigned long stack)
{
        struct winch *winch;

        winch = kmalloc(sizeof(*winch), GFP_KERNEL);
        if (winch == NULL) {
                printk(KERN_ERR "register_winch_irq - kmalloc failed\n");
                goto cleanup;
        }

        *winch = ((struct winch) { .list   = LIST_HEAD_INIT(winch->list),
                                   .fd     = fd,
                                   .tty_fd = tty_fd,
                                   .pid    = pid,
                                   .tty    = tty,
                                   .stack  = stack });

        if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt,
                           IRQF_SHARED | IRQF_SAMPLE_RANDOM,
                           "winch", winch) < 0) {
                printk(KERN_ERR "register_winch_irq - failed to register "
                       "IRQ\n");
                goto out_free;
        }

        spin_lock(&winch_handler_lock);
        list_add(&winch->list, &winch_handlers);
        spin_unlock(&winch_handler_lock);

        return;

 out_free:
        kfree(winch);
 cleanup:
        os_kill_process(pid, 1);
        os_close_file(fd);
        if (stack != 0)
                free_stack(stack, 0);
}
static void unregister_winch(struct tty_struct *tty)
{
        struct list_head *ele, *next;
        struct winch *winch;

        spin_lock(&winch_handler_lock);

        list_for_each_safe(ele, next, &winch_handlers) {
                winch = list_entry(ele, struct winch, list);
                if (winch->tty == tty) {
                        free_winch(winch);
                        break;
                }
        }
        spin_unlock(&winch_handler_lock);
}

static void winch_cleanup(void)
{
        struct list_head *ele, *next;
        struct winch *winch;

        spin_lock(&winch_handler_lock);

        list_for_each_safe(ele, next, &winch_handlers) {
                winch = list_entry(ele, struct winch, list);
                free_winch(winch);
        }

        spin_unlock(&winch_handler_lock);
}
__uml_exitcall(winch_cleanup);
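
/*
 * Append this UML instance's umid to an xterm title string.  The caller
 * always gets a usable string back: if there is no umid or the allocation
 * fails, base itself is returned unchanged.
 */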
char *add_xterm_umid(char *base)
{
        char *umid, *title;
        int len;

        umid = get_umid();
        if (*umid == '\0')
                return base;

        len = strlen(base) + strlen(" ()") + strlen(umid) + 1;
        title = kmalloc(len, GFP_KERNEL);
        if (title == NULL) {
                printk(KERN_ERR "Failed to allocate buffer for xterm title\n");
                return base;
        }

        snprintf(title, len, "%s (%s)", base, umid);
        return title;
}