hvc_console.c

/*
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2004 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
 * Copyright (C) 2004 IBM Corporation
 *
 * Additional Author(s):
 *  Ryan S. Arnold <rsa@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/config.h>
#include <linux/console.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kbd_kern.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/major.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/hvconsole.h>
#include <asm/vio.h>

#define HVC_MAJOR	229
#define HVC_MINOR	0

#define TIMEOUT		(10)
/*
 * Wait this long per iteration while trying to push buffered data to the
 * hypervisor before allowing the tty to complete a close operation.
 */
#define HVC_CLOSE_WAIT (HZ/100) /* 1/100th of a second */

/*
 * The Linux TTY code does not support dynamic addition of tty derived devices,
 * so we need to know how many tty devices we might need when space is
 * allocated for the tty device.  Since this driver supports hotplug of vty
 * adapters, we need to make sure we have enough allocated.
 */
#define HVC_ALLOC_TTY_ADAPTERS	8

#define N_OUTBUF	16
#define N_INBUF		16

#define __ALIGNED__	__attribute__((__aligned__(8)))
static struct tty_driver *hvc_driver;
static struct task_struct *hvc_task;

/* Picks up late kicks after list walk but before schedule() */
static int hvc_kicked;

#ifdef CONFIG_MAGIC_SYSRQ
static int sysrq_pressed;
#endif

struct hvc_struct {
	spinlock_t lock;
	int index;
	struct tty_struct *tty;
	unsigned int count;
	int do_wakeup;
	char outbuf[N_OUTBUF] __ALIGNED__;
	int n_outbuf;
	uint32_t vtermno;
	int irq_requested;
	int irq;
	struct list_head next;
	struct kobject kobj; /* ref count & hvc_struct lifetime */
	struct vio_dev *vdev;
};

/* dynamic list of hvc_struct instances */
static struct list_head hvc_structs = LIST_HEAD_INIT(hvc_structs);
/*
 * Protect the list of hvc_struct instances from inserts and removals during
 * list traversal.
 */
static DEFINE_SPINLOCK(hvc_structs_lock);

/*
 * This value is used to assign a tty->index value to a hvc_struct based
 * upon order of exposure via hvc_probe(), when we cannot match it to a
 * console candidate registered with hvc_instantiate().
 */
static int last_hvc = -1;
/*
 * Do not call this function with either the hvc_structs_lock or the hvc_struct
 * lock held.  If successful, this function increments the kobject reference
 * count against the target hvc_struct so it should be released when finished.
 */
struct hvc_struct *hvc_get_by_index(int index)
{
	struct hvc_struct *hp;
	unsigned long flags;

	spin_lock(&hvc_structs_lock);

	list_for_each_entry(hp, &hvc_structs, next) {
		spin_lock_irqsave(&hp->lock, flags);
		if (hp->index == index) {
			kobject_get(&hp->kobj);
			spin_unlock_irqrestore(&hp->lock, flags);
			spin_unlock(&hvc_structs_lock);
			return hp;
		}
		spin_unlock_irqrestore(&hp->lock, flags);
	}
	hp = NULL;

	spin_unlock(&hvc_structs_lock);
	return hp;
}
/*
 * Initial console vtermnos for console API usage prior to full console
 * initialization.  Any vty adapter outside this range will not have usable
 * console interfaces but can still be used as a tty device.  This has to be
 * static because kmalloc will not work during early console init.
 */
static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
	{[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};

/*
 * Console APIs, NOT TTY.  These APIs are available immediately when
 * hvc_console_setup() finds adapters.
 */
void hvc_console_print(struct console *co, const char *b, unsigned count)
{
	char c[16] __ALIGNED__;
	unsigned i = 0, n = 0;
	int r, donecr = 0;

	/* Console access attempt outside of acceptable console range. */
	if (co->index >= MAX_NR_HVC_CONSOLES)
		return;

	/* This console adapter was removed so it is not usable. */
	if (vtermnos[co->index] < 0)
		return;

	while (count > 0 || i > 0) {
		if (count > 0 && i < sizeof(c)) {
			if (b[n] == '\n' && !donecr) {
				c[i++] = '\r';
				donecr = 1;
			} else {
				c[i++] = b[n++];
				donecr = 0;
				--count;
			}
		} else {
			r = hvc_put_chars(vtermnos[co->index], c, i);
			if (r < 0) {
				/* throw away chars on error */
				i = 0;
			} else if (r > 0) {
				i -= r;
				if (i > 0)
					memmove(c, c+r, i);
			}
		}
	}
}
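/*
 * console->device hook: return the hvc tty driver and index backing this
 * console so the tty layer can route /dev/console here, or NULL if the
 * vterm was never instantiated.
 */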
static struct tty_driver *hvc_console_device(struct console *c, int *index)
{
	if (vtermnos[c->index] == -1)
		return NULL;

	*index = c->index;
	return hvc_driver;
}
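/* Reject console= indices that are out of range or have no vterm behind them. */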
static int __init hvc_console_setup(struct console *co, char *options)
{
	if (co->index < 0 || co->index >= MAX_NR_HVC_CONSOLES)
		return -ENODEV;

	if (vtermnos[co->index] == -1)
		return -ENODEV;

	return 0;
}

struct console hvc_con_driver = {
	.name		= "hvc",
	.write		= hvc_console_print,
	.device		= hvc_console_device,
	.setup		= hvc_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
};
/* Early console initialization.  Precedes driver initialization. */
static int __init hvc_console_init(void)
{
	hvc_find_vtys();
	register_console(&hvc_con_driver);
	return 0;
}
console_initcall(hvc_console_init);
/*
 * hvc_instantiate() is an early console discovery method which locates
 * consoles prior to the vio subsystem discovering them.  Hotplugged vty
 * adapters do NOT get an hvc_instantiate() callback since they appear
 * after early console init.
 */
int hvc_instantiate(uint32_t vtermno, int index)
{
	struct hvc_struct *hp;

	if (index < 0 || index >= MAX_NR_HVC_CONSOLES)
		return -1;

	if (vtermnos[index] != -1)
		return -1;

	/* make sure no tty has been registered in this index */
	hp = hvc_get_by_index(index);
	if (hp) {
		kobject_put(&hp->kobj);
		return -1;
	}

	vtermnos[index] = vtermno;

	/* reserve all indices up to and including this index */
	if (last_hvc < index)
		last_hvc = index;

	return 0;
}
/* Wake the sleeping khvcd */
static void hvc_kick(void)
{
	hvc_kicked = 1;
	wake_up_process(hvc_task);
}

static int hvc_poll(struct hvc_struct *hp);

/*
 * NOTE: This API isn't used if the console adapter doesn't support interrupts.
 * In this case the console is poll driven.
 */
static irqreturn_t hvc_handle_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	/* if hvc_poll requests a repoll, then kick the khvcd thread */
	if (hvc_poll(dev_instance))
		hvc_kick();
	return IRQ_HANDLED;
}
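/* The tty layer is ready to accept more input again; poke khvcd so it repolls. */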
static void hvc_unthrottle(struct tty_struct *tty)
{
	hvc_kick();
}

/*
 * The TTY interface won't be used until after the vio layer has exposed the vty
 * adapter to the kernel.
 */
static int hvc_open(struct tty_struct *tty, struct file * filp)
{
	struct hvc_struct *hp;
	unsigned long flags;
	int irq = NO_IRQ;
	int rc = 0;
	struct kobject *kobjp;

	/* Auto increments kobject reference if found. */
	if (!(hp = hvc_get_by_index(tty->index))) {
		printk(KERN_WARNING "hvc_console: tty open failed, no vty associated with tty.\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&hp->lock, flags);
	/* Check and then increment for fast path open. */
	if (hp->count++ > 0) {
		spin_unlock_irqrestore(&hp->lock, flags);
		hvc_kick();
		return 0;
	} /* else count == 0 */

	tty->driver_data = hp;
	hp->tty = tty;
	/* Save for request_irq outside of spin_lock. */
	irq = hp->irq;
	if (irq != NO_IRQ)
		hp->irq_requested = 1;

	kobjp = &hp->kobj;

	spin_unlock_irqrestore(&hp->lock, flags);
	/* check error, fallback to non-irq */
	if (irq != NO_IRQ)
		rc = request_irq(irq, hvc_handle_interrupt, SA_INTERRUPT, "hvc_console", hp);

	/*
	 * If request_irq() fails we return an error; the tty layer will call
	 * hvc_close() after the failed open, but we don't want to clean up
	 * there, so clean up here instead: clear out the previously set tty
	 * fields and return the kobject reference.
	 */
	if (rc) {
		spin_lock_irqsave(&hp->lock, flags);
		hp->tty = NULL;
		hp->irq_requested = 0;
		spin_unlock_irqrestore(&hp->lock, flags);
		tty->driver_data = NULL;
		kobject_put(kobjp);
		printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc);
	}
	/* Force wakeup of the polling thread */
	hvc_kick();

	return rc;
}
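/*
 * tty->close(): on the final close, detach the tty, wait for buffered output
 * to drain, free the irq, and drop the kobject reference taken in hvc_open().
 */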
static void hvc_close(struct tty_struct *tty, struct file * filp)
{
	struct hvc_struct *hp;
	struct kobject *kobjp;
	int irq = NO_IRQ;
	unsigned long flags;

	if (tty_hung_up_p(filp))
		return;

	/*
	 * No driver_data means that this close was issued after a failed
	 * hvc_open by the tty layer's release_dev() function and we can just
	 * exit cleanly because the kobject reference wasn't made.
	 */
	if (!tty->driver_data)
		return;

	hp = tty->driver_data;
	spin_lock_irqsave(&hp->lock, flags);

	kobjp = &hp->kobj;
	if (--hp->count == 0) {
		if (hp->irq_requested)
			irq = hp->irq;
		hp->irq_requested = 0;

		/* We are done with the tty pointer now. */
		hp->tty = NULL;
		spin_unlock_irqrestore(&hp->lock, flags);

		/*
		 * Chain calls chars_in_buffer() and returns immediately if
		 * there is no buffered data otherwise sleeps on a wait queue
		 * waking periodically to check chars_in_buffer().
		 */
		tty_wait_until_sent(tty, HVC_CLOSE_WAIT);

		if (irq != NO_IRQ)
			free_irq(irq, hp);

	} else {
		if (hp->count < 0)
			printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
				hp->vtermno, hp->count);
		spin_unlock_irqrestore(&hp->lock, flags);
	}

	kobject_put(kobjp);
}
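/*
 * tty->hangup(): release every outstanding open at once; each open holds one
 * kobject reference, so drop them all after freeing the irq and the tty.
 */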
static void hvc_hangup(struct tty_struct *tty)
{
	struct hvc_struct *hp = tty->driver_data;
	unsigned long flags;
	int irq = NO_IRQ;
	int temp_open_count;
	struct kobject *kobjp;

	if (!hp)
		return;

	spin_lock_irqsave(&hp->lock, flags);

	/*
	 * The N_TTY line discipline has problems such that in a close vs
	 * open->hangup case this can be called after the final close, so
	 * prevent that from happening for now.
	 */
	if (hp->count <= 0) {
		spin_unlock_irqrestore(&hp->lock, flags);
		return;
	}

	kobjp = &hp->kobj;
	temp_open_count = hp->count;
	hp->count = 0;
	hp->n_outbuf = 0;
	hp->tty = NULL;
	if (hp->irq_requested)
		/* Saved for use outside of spin_lock. */
		irq = hp->irq;
	hp->irq_requested = 0;
	spin_unlock_irqrestore(&hp->lock, flags);
	if (irq != NO_IRQ)
		free_irq(irq, hp);
	while (temp_open_count) {
		--temp_open_count;
		kobject_put(kobjp);
	}
}
/*
 * Push buffered characters whether they were just recently buffered or waiting
 * on a blocked hypervisor.  Call this function with hp->lock held.
 */
static void hvc_push(struct hvc_struct *hp)
{
	int n;

	n = hvc_put_chars(hp->vtermno, hp->outbuf, hp->n_outbuf);
	if (n <= 0) {
		if (n == 0)
			return;
		/* throw away output on error; this happens when
		   there is no session connected to the vterm. */
		hp->n_outbuf = 0;
	} else
		hp->n_outbuf -= n;
	if (hp->n_outbuf > 0)
		memmove(hp->outbuf, hp->outbuf + n, hp->n_outbuf);
	else
		hp->do_wakeup = 1;
}
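/*
 * Copy as much of the caller's buffer as fits into hp->outbuf, pushing to the
 * hypervisor as we go.  Returns the number of bytes accepted (sent or
 * buffered).  Takes hp->lock itself, so call without it held.
 */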
static inline int __hvc_write_kernel(struct hvc_struct *hp,
				     const unsigned char *buf, int count)
{
	unsigned long flags;
	int rsize, written = 0;

	spin_lock_irqsave(&hp->lock, flags);

	/* Push pending writes */
	if (hp->n_outbuf > 0)
		hvc_push(hp);

	while (count > 0 && (rsize = N_OUTBUF - hp->n_outbuf) > 0) {
		if (rsize > count)
			rsize = count;
		memcpy(hp->outbuf + hp->n_outbuf, buf, rsize);
		count -= rsize;
		buf += rsize;
		hp->n_outbuf += rsize;
		written += rsize;
		hvc_push(hp);
	}
	spin_unlock_irqrestore(&hp->lock, flags);

	return written;
}
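/* tty->write(): hand the data to __hvc_write_kernel() and kick khvcd if any of it is still buffered. */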
static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
	struct hvc_struct *hp = tty->driver_data;
	int written;

	/* This write was probably executed during a tty close. */
	if (!hp)
		return -EPIPE;

	if (hp->count <= 0)
		return -EIO;

	written = __hvc_write_kernel(hp, buf, count);

	/*
	 * Racy, but harmless: kick the thread if there is still pending data.
	 * There really is nothing wrong with kicking the thread, even if there
	 * is no buffered data.
	 */
	if (hp->n_outbuf)
		hvc_kick();

	return written;
}
/*
 * This is actually a contract between the driver and the tty layer outlining
 * how much write room the driver can guarantee will be sent OR BUFFERED.  This
 * driver MUST honor the return value.
 */
static int hvc_write_room(struct tty_struct *tty)
{
	struct hvc_struct *hp = tty->driver_data;

	if (!hp)
		return -1;

	return N_OUTBUF - hp->n_outbuf;
}

static int hvc_chars_in_buffer(struct tty_struct *tty)
{
	struct hvc_struct *hp = tty->driver_data;

	if (!hp)
		return -1;
	return hp->n_outbuf;
}
#define HVC_POLL_READ	0x00000001
#define HVC_POLL_WRITE	0x00000002
#define HVC_POLL_QUICK	0x00000004
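/*
 * hvc_poll() returns a mask of the flags above so khvcd knows whether to
 * repoll: HVC_POLL_WRITE when output is still buffered, HVC_POLL_READ when
 * more input should be read later (non-irq adapter or full flip buffer), and
 * HVC_POLL_QUICK when a large read warrants a quick reschedule.
 */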
static int hvc_poll(struct hvc_struct *hp)
{
	struct tty_struct *tty;
	int i, n, poll_mask = 0;
	char buf[N_INBUF] __ALIGNED__;
	unsigned long flags;
	int read_total = 0;

	spin_lock_irqsave(&hp->lock, flags);

	/* Push pending writes */
	if (hp->n_outbuf > 0)
		hvc_push(hp);
	/* Reschedule us if still some write pending */
	if (hp->n_outbuf > 0)
		poll_mask |= HVC_POLL_WRITE;

	/* No tty attached, just skip */
	tty = hp->tty;
	if (tty == NULL)
		goto bail;

	/* Now check if we can get data (are we throttled ?) */
	if (test_bit(TTY_THROTTLED, &tty->flags))
		goto throttled;

	/* If we aren't interrupt driven and aren't throttled, we always
	 * request a reschedule
	 */
	if (hp->irq == NO_IRQ)
		poll_mask |= HVC_POLL_READ;

	/* Read data if any */
	for (;;) {
		int count = N_INBUF;
		if (count > (TTY_FLIPBUF_SIZE - tty->flip.count))
			count = TTY_FLIPBUF_SIZE - tty->flip.count;

		/* If flip is full, just reschedule a later read */
		if (count == 0) {
			poll_mask |= HVC_POLL_READ;
			break;
		}

		n = hvc_get_chars(hp->vtermno, buf, count);
		if (n <= 0) {
			/* Hangup the tty when disconnected from host */
			if (n == -EPIPE) {
				spin_unlock_irqrestore(&hp->lock, flags);
				tty_hangup(tty);
				spin_lock_irqsave(&hp->lock, flags);
			}
			break;
		}
		for (i = 0; i < n; ++i) {
#ifdef CONFIG_MAGIC_SYSRQ
			if (hp->index == hvc_con_driver.index) {
				/* Handle the SysRq Hack */
				/* XXX should support a sequence */
				if (buf[i] == '\x0f') {	/* ^O */
					sysrq_pressed = 1;
					continue;
				} else if (sysrq_pressed) {
					handle_sysrq(buf[i], NULL, tty);
					sysrq_pressed = 0;
					continue;
				}
			}
#endif /* CONFIG_MAGIC_SYSRQ */
			tty_insert_flip_char(tty, buf[i], 0);
		}

		if (tty->flip.count)
			tty_schedule_flip(tty);

		/*
		 * Account for the total amount read in one loop, and if above
		 * 64 bytes, we do a quick schedule loop to let the tty grok
		 * the data and eventually throttle us.
		 */
		read_total += n;
		if (read_total >= 64) {
			poll_mask |= HVC_POLL_QUICK;
			break;
		}
	}
 throttled:
	/* Wakeup write queue if necessary */
	if (hp->do_wakeup) {
		hp->do_wakeup = 0;
		tty_wakeup(tty);
	}
 bail:
	spin_unlock_irqrestore(&hp->lock, flags);

	return poll_mask;
}
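/* khvcd skips hypervisor console polling while any CPU is held in xmon. */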
#if defined(CONFIG_XMON) && defined(CONFIG_SMP)
extern cpumask_t cpus_in_xmon;
#else
static const cpumask_t cpus_in_xmon = CPU_MASK_NONE;
#endif
/*
 * This kthread is either polling or interrupt driven.  This is determined by
 * calling hvc_poll(), which determines whether a console adapter supports
 * interrupts.
 */
int khvcd(void *unused)
{
	int poll_mask;
	struct hvc_struct *hp;

	__set_current_state(TASK_RUNNING);
	do {
		poll_mask = 0;
		hvc_kicked = 0;
		wmb();
		if (cpus_empty(cpus_in_xmon)) {
			spin_lock(&hvc_structs_lock);
			list_for_each_entry(hp, &hvc_structs, next) {
				poll_mask |= hvc_poll(hp);
			}
			spin_unlock(&hvc_structs_lock);
		} else
			poll_mask |= HVC_POLL_READ;
		if (hvc_kicked)
			continue;
		if (poll_mask & HVC_POLL_QUICK) {
			yield();
			continue;
		}
		set_current_state(TASK_INTERRUPTIBLE);
		if (!hvc_kicked) {
			if (poll_mask == 0)
				schedule();
			else
				msleep_interruptible(TIMEOUT);
		}
		__set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());

	return 0;
}
static struct tty_operations hvc_ops = {
	.open = hvc_open,
	.close = hvc_close,
	.write = hvc_write,
	.hangup = hvc_hangup,
	.unthrottle = hvc_unthrottle,
	.write_room = hvc_write_room,
	.chars_in_buffer = hvc_chars_in_buffer,
};
/* callback when the kobject ref count reaches zero. */
static void destroy_hvc_struct(struct kobject *kobj)
{
	struct hvc_struct *hp = container_of(kobj, struct hvc_struct, kobj);
	unsigned long flags;

	spin_lock(&hvc_structs_lock);

	spin_lock_irqsave(&hp->lock, flags);
	list_del(&(hp->next));
	spin_unlock_irqrestore(&hp->lock, flags);

	spin_unlock(&hvc_structs_lock);

	kfree(hp);
}

static struct kobj_type hvc_kobj_type = {
	.release = destroy_hvc_struct,
};
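/*
 * vio bus probe: allocate an hvc_struct for a newly discovered vty adapter,
 * wire it to the kobject and list infrastructure, and assign its tty index
 * (the slot reserved by hvc_instantiate() if the vterm numbers match,
 * otherwise the next unused index).
 */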
static int __devinit hvc_probe(
		struct vio_dev *dev,
		const struct vio_device_id *id)
{
	struct hvc_struct *hp;
	int i;

	/* probed with invalid parameters. */
	if (!dev || !id)
		return -EPERM;

	hp = kmalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return -ENOMEM;

	memset(hp, 0x00, sizeof(*hp));
	hp->vtermno = dev->unit_address;
	hp->vdev = dev;
	hp->vdev->dev.driver_data = hp;
	hp->irq = dev->irq;

	kobject_init(&hp->kobj);
	hp->kobj.ktype = &hvc_kobj_type;

	spin_lock_init(&hp->lock);
	spin_lock(&hvc_structs_lock);

	/*
	 * find index to use:
	 * see if this vterm id matches one registered for console.
	 */
	for (i = 0; i < MAX_NR_HVC_CONSOLES; i++)
		if (vtermnos[i] == hp->vtermno)
			break;

	/* no matching slot, just use a counter */
	if (i >= MAX_NR_HVC_CONSOLES)
		i = ++last_hvc;

	hp->index = i;

	list_add_tail(&(hp->next), &hvc_structs);
	spin_unlock(&hvc_structs_lock);

	return 0;
}
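/*
 * vio bus remove: invalidate this adapter's console slot, drop the kobject
 * reference taken at probe time, and hang up the tty so remaining openers
 * release theirs; the last reference frees the hvc_struct.
 */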
static int __devexit hvc_remove(struct vio_dev *dev)
{
	struct hvc_struct *hp = dev->dev.driver_data;
	unsigned long flags;
	struct kobject *kobjp;
	struct tty_struct *tty;

	spin_lock_irqsave(&hp->lock, flags);

	tty = hp->tty;
	kobjp = &hp->kobj;

	if (hp->index < MAX_NR_HVC_CONSOLES)
		vtermnos[hp->index] = -1;

	/* Don't whack hp->irq because tty_hangup() will need to free the irq. */

	spin_unlock_irqrestore(&hp->lock, flags);

	/*
	 * We 'put' the instance that was grabbed when the kobject instance
	 * was initialized using kobject_init().  Let the last holder of this
	 * kobject cause it to be removed, which will probably be the tty_hangup
	 * below.
	 */
	kobject_put(kobjp);

	/*
	 * This function call will auto chain call hvc_hangup.  The tty should
	 * always be valid at this time unless a simultaneous tty close already
	 * cleaned up the hvc_struct.
	 */
	if (tty)
		tty_hangup(tty);
	return 0;
}
char hvc_driver_name[] = "hvc_console";

static struct vio_device_id hvc_driver_table[] __devinitdata = {
	{"serial", "hvterm1"},
	{ NULL, }
};
MODULE_DEVICE_TABLE(vio, hvc_driver_table);

static struct vio_driver hvc_vio_driver = {
	.name		= hvc_driver_name,
	.id_table	= hvc_driver_table,
	.probe		= hvc_probe,
	.remove		= hvc_remove,
};
/*
 * Driver initialization.  Follows console initialization.  This is where the
 * TTY interfaces start to become available.
 */
int __init hvc_init(void)
{
	int rc;

	/* We need more than hvc_count adapters due to hotplug additions. */
	hvc_driver = alloc_tty_driver(HVC_ALLOC_TTY_ADAPTERS);
	if (!hvc_driver)
		return -ENOMEM;

	hvc_driver->owner = THIS_MODULE;
	hvc_driver->devfs_name = "hvc/";
	hvc_driver->driver_name = "hvc";
	hvc_driver->name = "hvc";
	hvc_driver->major = HVC_MAJOR;
	hvc_driver->minor_start = HVC_MINOR;
	hvc_driver->type = TTY_DRIVER_TYPE_SYSTEM;
	hvc_driver->init_termios = tty_std_termios;
	hvc_driver->flags = TTY_DRIVER_REAL_RAW;
	tty_set_operations(hvc_driver, &hvc_ops);

	if (tty_register_driver(hvc_driver))
		panic("Couldn't register hvc console driver\n");

	/* Always start the kthread because there can be hotplug vty adapters
	 * added later. */
	hvc_task = kthread_run(khvcd, NULL, "khvcd");
	if (IS_ERR(hvc_task)) {
		panic("Couldn't create kthread for console.\n");
		put_tty_driver(hvc_driver);
		return -EIO;
	}

	/* Register as a vio device to receive callbacks */
	rc = vio_register_driver(&hvc_vio_driver);

	return rc;
}
module_init(hvc_init);
/*
 * This isn't particularly necessary due to this being a console driver but it
 * is nice to be thorough.
 */
static void __exit hvc_exit(void)
{
	kthread_stop(hvc_task);

	vio_unregister_driver(&hvc_vio_driver);
	tty_unregister_driver(hvc_driver);
	/* return tty_struct instances allocated in hvc_init(). */
	put_tty_driver(hvc_driver);
	unregister_console(&hvc_con_driver);
}
module_exit(hvc_exit);