hvsi.c
/*
 * Copyright (C) 2004 Hollis Blanchard <hollisb@us.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Host Virtual Serial Interface (HVSI) is a protocol between the hosted OS
 * and the service processor on IBM pSeries servers. On these servers, there
 * are no serial ports under the OS's control, and sometimes there is no other
 * console available either. However, the service processor has two standard
 * serial ports, so this over-complicated protocol allows the OS to control
 * those ports by proxy.
 *
 * Besides data, the protocol supports the reading/writing of the serial
 * port's DTR line, and the reading of the CD line. This is to allow the OS to
 * control a modem attached to the service processor's serial port. Note that
 * the OS cannot change the speed of the port through this protocol.
 */

#undef DEBUG

#include <linux/console.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/major.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <asm/hvcall.h>
#include <asm/hvconsole.h>
#include <asm/prom.h>
#include <asm/uaccess.h>
#include <asm/vio.h>
#include <asm/param.h>

#define HVSI_MAJOR 229
#define HVSI_MINOR 128
#define MAX_NR_HVSI_CONSOLES 4

#define HVSI_TIMEOUT (5*HZ)
#define HVSI_VERSION 1
#define HVSI_MAX_PACKET 256
#define HVSI_MAX_READ 16
#define HVSI_MAX_OUTGOING_DATA 12
#define N_OUTBUF 12

/*
 * we pass data via two 8-byte registers, so we would like our char arrays
 * properly aligned for those loads.
 */
#define __ALIGNED__ __attribute__((__aligned__(sizeof(long))))

struct hvsi_struct {
	struct delayed_work writer;
	struct work_struct handshaker;
	wait_queue_head_t emptyq; /* woken when outbuf is emptied */
	wait_queue_head_t stateq; /* woken when HVSI state changes */
	spinlock_t lock;
	int index;
	struct tty_struct *tty;
	unsigned int count;
	uint8_t throttle_buf[128];
	uint8_t outbuf[N_OUTBUF]; /* to implement write_room and chars_in_buffer */
	/* inbuf is for packet reassembly. leave a little room for leftovers. */
	uint8_t inbuf[HVSI_MAX_PACKET + HVSI_MAX_READ];
	uint8_t *inbuf_end;
	int n_throttle;
	int n_outbuf;
	uint32_t vtermno;
	uint32_t virq;
	atomic_t seqno; /* HVSI packet sequence number */
	uint16_t mctrl;
	uint8_t state; /* HVSI protocol state */
	uint8_t flags;
#ifdef CONFIG_MAGIC_SYSRQ
	uint8_t sysrq;
#endif /* CONFIG_MAGIC_SYSRQ */
};
static struct hvsi_struct hvsi_ports[MAX_NR_HVSI_CONSOLES];

static struct tty_driver *hvsi_driver;
static int hvsi_count;
static int (*hvsi_wait)(struct hvsi_struct *hp, int state);

enum HVSI_PROTOCOL_STATE {
	HVSI_CLOSED,
	HVSI_WAIT_FOR_VER_RESPONSE,
	HVSI_WAIT_FOR_VER_QUERY,
	HVSI_OPEN,
	HVSI_WAIT_FOR_MCTRL_RESPONSE,
	HVSI_FSP_DIED,
};
#define HVSI_CONSOLE 0x1

#define VS_DATA_PACKET_HEADER 0xff
#define VS_CONTROL_PACKET_HEADER 0xfe
#define VS_QUERY_PACKET_HEADER 0xfd
#define VS_QUERY_RESPONSE_PACKET_HEADER 0xfc

/* control verbs */
#define VSV_SET_MODEM_CTL 1 /* to service processor only */
#define VSV_MODEM_CTL_UPDATE 2 /* from service processor only */
#define VSV_CLOSE_PROTOCOL 3

/* query verbs */
#define VSV_SEND_VERSION_NUMBER 1
#define VSV_SEND_MODEM_CTL_STATUS 2

/* yes, these masks are not consecutive. */
#define HVSI_TSDTR 0x01
#define HVSI_TSCD 0x20
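
/*
 * Every HVSI packet starts with the same 4-byte header: a packet type, the
 * total packet length in bytes (header included), and a sequence number.
 * The structures below describe the data, control, query, and query-response
 * packets built on top of that header.
 */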
struct hvsi_header {
	uint8_t type;
	uint8_t len;
	uint16_t seqno;
} __attribute__((packed));

struct hvsi_data {
	uint8_t type;
	uint8_t len;
	uint16_t seqno;
	uint8_t data[HVSI_MAX_OUTGOING_DATA];
} __attribute__((packed));

struct hvsi_control {
	uint8_t type;
	uint8_t len;
	uint16_t seqno;
	uint16_t verb;
	/* optional depending on verb: */
	uint32_t word;
	uint32_t mask;
} __attribute__((packed));

struct hvsi_query {
	uint8_t type;
	uint8_t len;
	uint16_t seqno;
	uint16_t verb;
} __attribute__((packed));

struct hvsi_query_response {
	uint8_t type;
	uint8_t len;
	uint16_t seqno;
	uint16_t verb;
	uint16_t query_seqno;
	union {
		uint8_t version;
		uint32_t mctrl_word;
	} u;
} __attribute__((packed));

static inline int is_console(struct hvsi_struct *hp)
{
	return hp->flags & HVSI_CONSOLE;
}

static inline int is_open(struct hvsi_struct *hp)
{
	/* if we're waiting for an mctrl then we're already open */
	return (hp->state == HVSI_OPEN)
			|| (hp->state == HVSI_WAIT_FOR_MCTRL_RESPONSE);
}

static inline void print_state(struct hvsi_struct *hp)
{
#ifdef DEBUG
	static const char *state_names[] = {
		"HVSI_CLOSED",
		"HVSI_WAIT_FOR_VER_RESPONSE",
		"HVSI_WAIT_FOR_VER_QUERY",
		"HVSI_OPEN",
		"HVSI_WAIT_FOR_MCTRL_RESPONSE",
		"HVSI_FSP_DIED",
	};
	const char *name = "UNKNOWN";
	if (hp->state < ARRAY_SIZE(state_names))
		name = state_names[hp->state];
	pr_debug("hvsi%i: state = %s\n", hp->index, name);
#endif /* DEBUG */
}

static inline void __set_state(struct hvsi_struct *hp, int state)
{
	hp->state = state;
	print_state(hp);
	wake_up_all(&hp->stateq);
}

static inline void set_state(struct hvsi_struct *hp, int state)
{
	unsigned long flags;
	spin_lock_irqsave(&hp->lock, flags);
	__set_state(hp, state);
	spin_unlock_irqrestore(&hp->lock, flags);
}

static inline int len_packet(const uint8_t *packet)
{
	return (int)((struct hvsi_header *)packet)->len;
}

static inline int is_header(const uint8_t *packet)
{
	struct hvsi_header *header = (struct hvsi_header *)packet;
	return header->type >= VS_QUERY_RESPONSE_PACKET_HEADER;
}

static inline int got_packet(const struct hvsi_struct *hp, uint8_t *packet)
{
	if (hp->inbuf_end < packet + sizeof(struct hvsi_header))
		return 0; /* don't even have the packet header */
	if (hp->inbuf_end < (packet + len_packet(packet)))
		return 0; /* don't have the rest of the packet */
	return 1;
}

/* shift remaining bytes in packetbuf down */
static void compact_inbuf(struct hvsi_struct *hp, uint8_t *read_to)
{
	int remaining = (int)(hp->inbuf_end - read_to);
	pr_debug("%s: %i chars remain\n", __FUNCTION__, remaining);
	if (read_to != hp->inbuf)
		memmove(hp->inbuf, read_to, remaining);
	hp->inbuf_end = hp->inbuf + remaining;
}

#ifdef DEBUG
#define dbg_dump_packet(packet) dump_packet(packet)
#define dbg_dump_hex(data, len) dump_hex(data, len)
#else
#define dbg_dump_packet(packet) do { } while (0)
#define dbg_dump_hex(data, len) do { } while (0)
#endif

static void dump_hex(const uint8_t *data, int len)
{
	int i;
	printk(" ");
	for (i=0; i < len; i++)
		printk("%.2x", data[i]);
	printk("\n ");
	for (i=0; i < len; i++) {
		if (isprint(data[i]))
			printk("%c", data[i]);
		else
			printk(".");
	}
	printk("\n");
}

static void dump_packet(uint8_t *packet)
{
	struct hvsi_header *header = (struct hvsi_header *)packet;
	printk("type 0x%x, len %i, seqno %i:\n", header->type, header->len,
			header->seqno);
	dump_hex(packet, header->len);
}

static int hvsi_read(struct hvsi_struct *hp, char *buf, int count)
{
	unsigned long got;
	got = hvc_get_chars(hp->vtermno, buf, count);
	return got;
}
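
/*
 * Handle an incoming control packet: a modem-control update (possibly a CD
 * drop, which hangs up the tty) or a CLOSE from the service processor, which
 * means the FSP was reset and we need to re-handshake. The hangup and
 * handshake themselves are deferred to the caller via *to_hangup and
 * *to_handshake, since they must be done outside hp->lock.
 */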
static void hvsi_recv_control(struct hvsi_struct *hp, uint8_t *packet,
	struct tty_struct **to_hangup, struct hvsi_struct **to_handshake)
{
	struct hvsi_control *header = (struct hvsi_control *)packet;
	switch (header->verb) {
		case VSV_MODEM_CTL_UPDATE:
			if ((header->word & HVSI_TSCD) == 0) {
				/* CD went away; no more connection */
				pr_debug("hvsi%i: CD dropped\n", hp->index);
				hp->mctrl &= ~TIOCM_CD;
				/* If userland hasn't done an open(2) yet, hp->tty is NULL. */
				if (hp->tty && !(hp->tty->flags & CLOCAL))
					*to_hangup = hp->tty;
			}
			break;
		case VSV_CLOSE_PROTOCOL:
			pr_debug("hvsi%i: service processor came back\n", hp->index);
			if (hp->state != HVSI_CLOSED) {
				*to_handshake = hp;
			}
			break;
		default:
			printk(KERN_WARNING "hvsi%i: unknown HVSI control packet: ",
				hp->index);
			dump_packet(packet);
			break;
	}
}
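
/*
 * Handle a query-response packet from the service processor. During the
 * handshake this acknowledges our version query; once open, it carries the
 * modem-control word we asked for in hvsi_get_mctrl().
 */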
static void hvsi_recv_response(struct hvsi_struct *hp, uint8_t *packet)
{
	struct hvsi_query_response *resp = (struct hvsi_query_response *)packet;
	switch (hp->state) {
		case HVSI_WAIT_FOR_VER_RESPONSE:
			__set_state(hp, HVSI_WAIT_FOR_VER_QUERY);
			break;
		case HVSI_WAIT_FOR_MCTRL_RESPONSE:
			hp->mctrl = 0;
			if (resp->u.mctrl_word & HVSI_TSDTR)
				hp->mctrl |= TIOCM_DTR;
			if (resp->u.mctrl_word & HVSI_TSCD)
				hp->mctrl |= TIOCM_CD;
			__set_state(hp, HVSI_OPEN);
			break;
		default:
			printk(KERN_ERR "hvsi%i: unexpected query response: ", hp->index);
			dump_packet(packet);
			break;
	}
}

/* respond to service processor's version query */
static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
{
	struct hvsi_query_response packet __ALIGNED__;
	int wrote;
	packet.type = VS_QUERY_RESPONSE_PACKET_HEADER;
	packet.len = sizeof(struct hvsi_query_response);
	packet.seqno = atomic_inc_return(&hp->seqno);
	packet.verb = VSV_SEND_VERSION_NUMBER;
	packet.u.version = HVSI_VERSION;
	packet.query_seqno = query_seqno+1;
	pr_debug("%s: sending %i bytes\n", __FUNCTION__, packet.len);
	dbg_dump_hex((uint8_t*)&packet, packet.len);
	wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
	if (wrote != packet.len) {
		printk(KERN_ERR "hvsi%i: couldn't send query response!\n",
			hp->index);
		return -EIO;
	}
	return 0;
}
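
/*
 * Handle a query from the service processor. The only query we expect is the
 * version query that completes the handshake; anything else is logged and
 * dropped.
 */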
static void hvsi_recv_query(struct hvsi_struct *hp, uint8_t *packet)
{
	struct hvsi_query *query = (struct hvsi_query *)packet;
	switch (hp->state) {
		case HVSI_WAIT_FOR_VER_QUERY:
			hvsi_version_respond(hp, query->seqno);
			__set_state(hp, HVSI_OPEN);
			break;
		default:
			printk(KERN_ERR "hvsi%i: unexpected query: ", hp->index);
			dump_packet(packet);
			break;
	}
}

static void hvsi_insert_chars(struct hvsi_struct *hp, const char *buf, int len)
{
	int i;
	for (i=0; i < len; i++) {
		char c = buf[i];
#ifdef CONFIG_MAGIC_SYSRQ
		if (c == '\0') {
			hp->sysrq = 1;
			continue;
		} else if (hp->sysrq) {
			handle_sysrq(c, hp->tty);
			hp->sysrq = 0;
			continue;
		}
#endif /* CONFIG_MAGIC_SYSRQ */
		tty_insert_flip_char(hp->tty, c, 0);
	}
}

/*
 * We could get 252 bytes of data at once here. But the tty layer only
 * throttles us at TTY_THRESHOLD_THROTTLE (128) bytes, so we could overflow
 * it. Accordingly we won't send more than 128 bytes at a time to the flip
 * buffer, which will give the tty buffer a chance to throttle us. Should the
 * value of TTY_THRESHOLD_THROTTLE change in n_tty.c, this code should be
 * revisited.
 */
#define TTY_THRESHOLD_THROTTLE 128
static struct tty_struct *hvsi_recv_data(struct hvsi_struct *hp,
		const uint8_t *packet)
{
	const struct hvsi_header *header = (const struct hvsi_header *)packet;
	const uint8_t *data = packet + sizeof(struct hvsi_header);
	int datalen = header->len - sizeof(struct hvsi_header);
	int overflow = datalen - TTY_THRESHOLD_THROTTLE;
	pr_debug("queueing %i chars '%.*s'\n", datalen, datalen, data);
	if (datalen == 0)
		return NULL;
	if (overflow > 0) {
		pr_debug("%s: got >TTY_THRESHOLD_THROTTLE bytes\n", __FUNCTION__);
		datalen = TTY_THRESHOLD_THROTTLE;
	}
	hvsi_insert_chars(hp, data, datalen);
	if (overflow > 0) {
		/*
		 * we still have more data to deliver, so we need to save off the
		 * overflow and send it later
		 */
		pr_debug("%s: deferring overflow\n", __FUNCTION__);
		memcpy(hp->throttle_buf, data + TTY_THRESHOLD_THROTTLE, overflow);
		hp->n_throttle = overflow;
	}
	return hp->tty;
}

/*
 * Returns true/false indicating data successfully read from hypervisor.
 * Used both to get packets for tty connections and to advance the state
 * machine during console handshaking (in which case tty = NULL and we ignore
 * incoming data).
 */
static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct **flip,
		struct tty_struct **hangup, struct hvsi_struct **handshake)
{
	uint8_t *packet = hp->inbuf;
	int chunklen;
	*flip = NULL;
	*hangup = NULL;
	*handshake = NULL;
	chunklen = hvsi_read(hp, hp->inbuf_end, HVSI_MAX_READ);
	if (chunklen == 0) {
		pr_debug("%s: 0-length read\n", __FUNCTION__);
		return 0;
	}
	pr_debug("%s: got %i bytes\n", __FUNCTION__, chunklen);
	dbg_dump_hex(hp->inbuf_end, chunklen);
	hp->inbuf_end += chunklen;
	/* handle all completed packets */
	while ((packet < hp->inbuf_end) && got_packet(hp, packet)) {
		struct hvsi_header *header = (struct hvsi_header *)packet;
		if (!is_header(packet)) {
			printk(KERN_ERR "hvsi%i: got malformed packet\n", hp->index);
			/* skip bytes until we find a header or run out of data */
			while ((packet < hp->inbuf_end) && (!is_header(packet)))
				packet++;
			continue;
		}
		pr_debug("%s: handling %i-byte packet\n", __FUNCTION__,
				len_packet(packet));
		dbg_dump_packet(packet);
		switch (header->type) {
			case VS_DATA_PACKET_HEADER:
				if (!is_open(hp))
					break;
				if (hp->tty == NULL)
					break; /* no tty buffer to put data in */
				*flip = hvsi_recv_data(hp, packet);
				break;
			case VS_CONTROL_PACKET_HEADER:
				hvsi_recv_control(hp, packet, hangup, handshake);
				break;
			case VS_QUERY_RESPONSE_PACKET_HEADER:
				hvsi_recv_response(hp, packet);
				break;
			case VS_QUERY_PACKET_HEADER:
				hvsi_recv_query(hp, packet);
				break;
			default:
				printk(KERN_ERR "hvsi%i: unknown HVSI packet type 0x%x\n",
						hp->index, header->type);
				dump_packet(packet);
				break;
		}
		packet += len_packet(packet);
		if (*hangup || *handshake) {
			pr_debug("%s: hangup or handshake\n", __FUNCTION__);
			/*
			 * we need to send the hangup now before receiving any more data.
			 * If we get "data, hangup, data", we can't deliver the second
			 * data before the hangup.
			 */
			break;
		}
	}
	compact_inbuf(hp, packet);
	return 1;
}

static void hvsi_send_overflow(struct hvsi_struct *hp)
{
	pr_debug("%s: delivering %i bytes overflow\n", __FUNCTION__,
			hp->n_throttle);
	hvsi_insert_chars(hp, hp->throttle_buf, hp->n_throttle);
	hp->n_throttle = 0;
}

/*
 * must get all pending data because we only get an irq on empty->non-empty
 * transition
 */
static irqreturn_t hvsi_interrupt(int irq, void *arg)
{
	struct hvsi_struct *hp = (struct hvsi_struct *)arg;
	struct tty_struct *flip;
	struct tty_struct *hangup;
	struct hvsi_struct *handshake;
	unsigned long flags;
	int again = 1;
	pr_debug("%s\n", __FUNCTION__);
	while (again) {
		spin_lock_irqsave(&hp->lock, flags);
		again = hvsi_load_chunk(hp, &flip, &hangup, &handshake);
		spin_unlock_irqrestore(&hp->lock, flags);
		/*
		 * we have to call tty_flip_buffer_push() and tty_hangup() outside our
		 * spinlock. But we also have to keep going until we've read all the
		 * available data.
		 */
		if (flip) {
			/* there was data put in the tty flip buffer */
			tty_flip_buffer_push(flip);
			flip = NULL;
		}
		if (hangup) {
			tty_hangup(hangup);
		}
		if (handshake) {
			pr_debug("hvsi%i: attempting re-handshake\n", handshake->index);
			schedule_work(&handshake->handshaker);
		}
	}
	spin_lock_irqsave(&hp->lock, flags);
	if (hp->tty && hp->n_throttle
			&& (!test_bit(TTY_THROTTLED, &hp->tty->flags))) {
		/* we weren't hung up and we weren't throttled, so we can deliver the
		 * rest now */
		flip = hp->tty;
		hvsi_send_overflow(hp);
	}
	spin_unlock_irqrestore(&hp->lock, flags);
	if (flip) {
		tty_flip_buffer_push(flip);
	}
	return IRQ_HANDLED;
}

/* for boot console, before the irq handler is running */
static int __init poll_for_state(struct hvsi_struct *hp, int state)
{
	unsigned long end_jiffies = jiffies + HVSI_TIMEOUT;
	for (;;) {
		hvsi_interrupt(hp->virq, (void *)hp); /* get pending data */
		if (hp->state == state)
			return 0;
		mdelay(5);
		if (time_after(jiffies, end_jiffies))
			return -EIO;
	}
}

/* wait for irq handler to change our state */
static int wait_for_state(struct hvsi_struct *hp, int state)
{
	int ret = 0;
	if (!wait_event_timeout(hp->stateq, (hp->state == state), HVSI_TIMEOUT))
		ret = -EIO;
	return ret;
}

static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
{
	struct hvsi_query packet __ALIGNED__;
	int wrote;
	packet.type = VS_QUERY_PACKET_HEADER;
	packet.len = sizeof(struct hvsi_query);
	packet.seqno = atomic_inc_return(&hp->seqno);
	packet.verb = verb;
	pr_debug("%s: sending %i bytes\n", __FUNCTION__, packet.len);
	dbg_dump_hex((uint8_t*)&packet, packet.len);
	wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
	if (wrote != packet.len) {
		printk(KERN_ERR "hvsi%i: couldn't send query (%i)!\n", hp->index,
			wrote);
		return -EIO;
	}
	return 0;
}

static int hvsi_get_mctrl(struct hvsi_struct *hp)
{
	int ret;
	set_state(hp, HVSI_WAIT_FOR_MCTRL_RESPONSE);
	hvsi_query(hp, VSV_SEND_MODEM_CTL_STATUS);
	ret = hvsi_wait(hp, HVSI_OPEN);
	if (ret < 0) {
		printk(KERN_ERR "hvsi%i: didn't get modem flags\n", hp->index);
		set_state(hp, HVSI_OPEN);
		return ret;
	}
	pr_debug("%s: mctrl 0x%x\n", __FUNCTION__, hp->mctrl);
	return 0;
}

/* note that we can only set DTR */
static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
{
	struct hvsi_control packet __ALIGNED__;
	int wrote;
	packet.type = VS_CONTROL_PACKET_HEADER;
	packet.seqno = atomic_inc_return(&hp->seqno);
	packet.len = sizeof(struct hvsi_control);
	packet.verb = VSV_SET_MODEM_CTL;
	packet.mask = HVSI_TSDTR;
	if (mctrl & TIOCM_DTR)
		packet.word = HVSI_TSDTR;
	pr_debug("%s: sending %i bytes\n", __FUNCTION__, packet.len);
	dbg_dump_hex((uint8_t*)&packet, packet.len);
	wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
	if (wrote != packet.len) {
		printk(KERN_ERR "hvsi%i: couldn't set DTR!\n", hp->index);
		return -EIO;
	}
	return 0;
}

static void hvsi_drain_input(struct hvsi_struct *hp)
{
	uint8_t buf[HVSI_MAX_READ] __ALIGNED__;
	unsigned long end_jiffies = jiffies + HVSI_TIMEOUT;
	while (time_before(jiffies, end_jiffies))
		if (0 == hvsi_read(hp, buf, HVSI_MAX_READ))
			break;
}
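
/*
 * Negotiate the HVSI protocol version with the service processor. This must
 * complete (reaching HVSI_OPEN) before any data can flow; hvsi_wait points at
 * either poll_for_state() or wait_for_state() depending on whether irqs are
 * running yet.
 */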
static int hvsi_handshake(struct hvsi_struct *hp)
{
	int ret;
	/*
	 * We could have a CLOSE or other data waiting for us before we even try
	 * to open; try to throw it all away so we don't get confused. (CLOSE
	 * is the first message sent up the pipe when the FSP comes online. We
	 * need to distinguish between "it came up a while ago and we're the first
	 * user" and "it was just reset before it saw our handshake packet".)
	 */
	hvsi_drain_input(hp);
	set_state(hp, HVSI_WAIT_FOR_VER_RESPONSE);
	ret = hvsi_query(hp, VSV_SEND_VERSION_NUMBER);
	if (ret < 0) {
		printk(KERN_ERR "hvsi%i: couldn't send version query\n", hp->index);
		return ret;
	}
	ret = hvsi_wait(hp, HVSI_OPEN);
	if (ret < 0)
		return ret;
	return 0;
}

static void hvsi_handshaker(struct work_struct *work)
{
	struct hvsi_struct *hp =
		container_of(work, struct hvsi_struct, handshaker);
	if (hvsi_handshake(hp) >= 0)
		return;
	printk(KERN_ERR "hvsi%i: re-handshaking failed\n", hp->index);
	if (is_console(hp)) {
		/*
		 * ttys will re-attempt the handshake via hvsi_open, but
		 * the console will not.
		 */
		printk(KERN_ERR "hvsi%i: lost console!\n", hp->index);
	}
}
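
/*
 * Send up to HVSI_MAX_OUTGOING_DATA (12) bytes to the hypervisor as a single
 * HVSI data packet. Returns the number of data bytes written (not the packet
 * length) on success, or a negative error from hvc_put_chars().
 */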
static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
{
	struct hvsi_data packet __ALIGNED__;
	int ret;
	BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
	packet.type = VS_DATA_PACKET_HEADER;
	packet.seqno = atomic_inc_return(&hp->seqno);
	packet.len = count + sizeof(struct hvsi_header);
	memcpy(&packet.data, buf, count);
	ret = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
	if (ret == packet.len) {
		/* return the number of chars written, not the packet length */
		return count;
	}
	return ret; /* return any errors */
}

static void hvsi_close_protocol(struct hvsi_struct *hp)
{
	struct hvsi_control packet __ALIGNED__;
	packet.type = VS_CONTROL_PACKET_HEADER;
	packet.seqno = atomic_inc_return(&hp->seqno);
	packet.len = 6;
	packet.verb = VSV_CLOSE_PROTOCOL;
	pr_debug("%s: sending %i bytes\n", __FUNCTION__, packet.len);
	dbg_dump_hex((uint8_t*)&packet, packet.len);
	hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
}
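
/*
 * tty open(): enable the virtual IRQ and, for non-console ports, run the
 * handshake, fetch the modem flags, and raise DTR. The console port was
 * already handshaked during console setup, so it returns early.
 */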
static int hvsi_open(struct tty_struct *tty, struct file *filp)
{
	struct hvsi_struct *hp;
	unsigned long flags;
	int line = tty->index;
	int ret;
	pr_debug("%s\n", __FUNCTION__);
	if (line < 0 || line >= hvsi_count)
		return -ENODEV;
	hp = &hvsi_ports[line];
	tty->driver_data = hp;
	tty->low_latency = 1; /* avoid throttle/tty_flip_buffer_push race */
	mb();
	if (hp->state == HVSI_FSP_DIED)
		return -EIO;
	spin_lock_irqsave(&hp->lock, flags);
	hp->tty = tty;
	hp->count++;
	atomic_set(&hp->seqno, 0);
	h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
	spin_unlock_irqrestore(&hp->lock, flags);
	if (is_console(hp))
		return 0; /* this has already been handshaked as the console */
	ret = hvsi_handshake(hp);
	if (ret < 0) {
		printk(KERN_ERR "%s: HVSI handshaking failed\n", tty->name);
		return ret;
	}
	ret = hvsi_get_mctrl(hp);
	if (ret < 0) {
		printk(KERN_ERR "%s: couldn't get initial modem flags\n", tty->name);
		return ret;
	}
	ret = hvsi_set_mctrl(hp, hp->mctrl | TIOCM_DTR);
	if (ret < 0) {
		printk(KERN_ERR "%s: couldn't set DTR\n", tty->name);
		return ret;
	}
	return 0;
}

/* wait for hvsi_write_worker to empty hp->outbuf */
static void hvsi_flush_output(struct hvsi_struct *hp)
{
	wait_event_timeout(hp->emptyq, (hp->n_outbuf <= 0), HVSI_TIMEOUT);
	/* 'writer' could still be pending if it didn't see n_outbuf = 0 yet */
	cancel_delayed_work(&hp->writer);
	flush_scheduled_work();
	/*
	 * it's also possible that our timeout expired and hvsi_write_worker
	 * didn't manage to push outbuf. poof.
	 */
	hp->n_outbuf = 0;
}
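
/*
 * tty close(): on the last close of a non-console port, disable the virtual
 * IRQ, flush pending output, tell the FSP to close the protocol, and drain
 * whatever it was still sending. The console connection is left open.
 */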
static void hvsi_close(struct tty_struct *tty, struct file *filp)
{
	struct hvsi_struct *hp = tty->driver_data;
	unsigned long flags;
	pr_debug("%s\n", __FUNCTION__);
	if (tty_hung_up_p(filp))
		return;
	spin_lock_irqsave(&hp->lock, flags);
	if (--hp->count == 0) {
		hp->tty = NULL;
		hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
		/* only close down connection if it is not the console */
		if (!is_console(hp)) {
			h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE); /* no more irqs */
			__set_state(hp, HVSI_CLOSED);
			/*
			 * any data delivered to the tty layer after this will be
			 * discarded (except for XON/XOFF)
			 */
			tty->closing = 1;
			spin_unlock_irqrestore(&hp->lock, flags);
			/* let any existing irq handlers finish. no more will start. */
			synchronize_irq(hp->virq);
			/* hvsi_write_worker will re-schedule until outbuf is empty. */
			hvsi_flush_output(hp);
			/* tell FSP to stop sending data */
			hvsi_close_protocol(hp);
			/*
			 * drain anything FSP is still in the middle of sending, and let
			 * hvsi_handshake drain the rest on the next open.
			 */
			hvsi_drain_input(hp);
			spin_lock_irqsave(&hp->lock, flags);
		}
	} else if (hp->count < 0)
		printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
			hp - hvsi_ports, hp->count);
	spin_unlock_irqrestore(&hp->lock, flags);
}

static void hvsi_hangup(struct tty_struct *tty)
{
	struct hvsi_struct *hp = tty->driver_data;
	unsigned long flags;
	pr_debug("%s\n", __FUNCTION__);
	spin_lock_irqsave(&hp->lock, flags);
	hp->count = 0;
	hp->n_outbuf = 0;
	hp->tty = NULL;
	spin_unlock_irqrestore(&hp->lock, flags);
}

/* called with hp->lock held */
static void hvsi_push(struct hvsi_struct *hp)
{
	int n;
	if (hp->n_outbuf <= 0)
		return;
	n = hvsi_put_chars(hp, hp->outbuf, hp->n_outbuf);
	if (n > 0) {
		/* success */
		pr_debug("%s: wrote %i chars\n", __FUNCTION__, n);
		hp->n_outbuf = 0;
	} else if (n == -EIO) {
		__set_state(hp, HVSI_FSP_DIED);
		printk(KERN_ERR "hvsi%i: service processor died\n", hp->index);
	}
}

/* hvsi_write_worker will keep rescheduling itself until outbuf is empty */
static void hvsi_write_worker(struct work_struct *work)
{
	struct hvsi_struct *hp =
		container_of(work, struct hvsi_struct, writer.work);
	unsigned long flags;
#ifdef DEBUG
	static long start_j = 0;
	if (start_j == 0)
		start_j = jiffies;
#endif /* DEBUG */
	spin_lock_irqsave(&hp->lock, flags);
	pr_debug("%s: %i chars in buffer\n", __FUNCTION__, hp->n_outbuf);
	if (!is_open(hp)) {
		/*
		 * We could have a non-open connection if the service processor died
		 * while we were busily scheduling ourselves. In that case, it could
		 * be minutes before the service processor comes back, so only try
		 * again once a second.
		 */
		schedule_delayed_work(&hp->writer, HZ);
		goto out;
	}
	hvsi_push(hp);
	if (hp->n_outbuf > 0)
		schedule_delayed_work(&hp->writer, 10);
	else {
#ifdef DEBUG
		pr_debug("%s: outbuf emptied after %li jiffies\n", __FUNCTION__,
				jiffies - start_j);
		start_j = 0;
#endif /* DEBUG */
		wake_up_all(&hp->emptyq);
		tty_wakeup(hp->tty);
	}
out:
	spin_unlock_irqrestore(&hp->lock, flags);
}

static int hvsi_write_room(struct tty_struct *tty)
{
	struct hvsi_struct *hp = (struct hvsi_struct *)tty->driver_data;
	return N_OUTBUF - hp->n_outbuf;
}

static int hvsi_chars_in_buffer(struct tty_struct *tty)
{
	struct hvsi_struct *hp = (struct hvsi_struct *)tty->driver_data;
	return hp->n_outbuf;
}
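
/*
 * tty write(): copy as much as fits into hp->outbuf (N_OUTBUF bytes) and try
 * to push it to the hypervisor immediately. Whatever can't be pushed right
 * away stays in outbuf for hvsi_write_worker to retry.
 */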
static int hvsi_write(struct tty_struct *tty,
			const unsigned char *buf, int count)
{
	struct hvsi_struct *hp = tty->driver_data;
	const char *source = buf;
	unsigned long flags;
	int total = 0;
	int origcount = count;
	spin_lock_irqsave(&hp->lock, flags);
	pr_debug("%s: %i chars in buffer\n", __FUNCTION__, hp->n_outbuf);
	if (!is_open(hp)) {
		/* we're either closing or not yet open; don't accept data */
		pr_debug("%s: not open\n", __FUNCTION__);
		goto out;
	}
	/*
	 * when the hypervisor buffer (16K) fills, data will stay in hp->outbuf
	 * and hvsi_write_worker will be scheduled. subsequent hvsi_write() calls
	 * will see there is no room in outbuf and return.
	 */
	while ((count > 0) && (hvsi_write_room(hp->tty) > 0)) {
		int chunksize = min(count, hvsi_write_room(hp->tty));
		BUG_ON(hp->n_outbuf < 0);
		memcpy(hp->outbuf + hp->n_outbuf, source, chunksize);
		hp->n_outbuf += chunksize;
		total += chunksize;
		source += chunksize;
		count -= chunksize;
		hvsi_push(hp);
	}
	if (hp->n_outbuf > 0) {
		/*
		 * we weren't able to write it all to the hypervisor.
		 * schedule another push attempt.
		 */
		schedule_delayed_work(&hp->writer, 10);
	}
out:
	spin_unlock_irqrestore(&hp->lock, flags);
	if (total != origcount)
		pr_debug("%s: wanted %i, only wrote %i\n", __FUNCTION__, origcount,
			total);
	return total;
}

/*
 * I have never seen throttle or unthrottle called, so this little throttle
 * buffering scheme may or may not work.
 */
static void hvsi_throttle(struct tty_struct *tty)
{
	struct hvsi_struct *hp = (struct hvsi_struct *)tty->driver_data;
	pr_debug("%s\n", __FUNCTION__);
	h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE);
}

static void hvsi_unthrottle(struct tty_struct *tty)
{
	struct hvsi_struct *hp = (struct hvsi_struct *)tty->driver_data;
	unsigned long flags;
	int shouldflip = 0;
	pr_debug("%s\n", __FUNCTION__);
	spin_lock_irqsave(&hp->lock, flags);
	if (hp->n_throttle) {
		hvsi_send_overflow(hp);
		shouldflip = 1;
	}
	spin_unlock_irqrestore(&hp->lock, flags);
	if (shouldflip)
		tty_flip_buffer_push(hp->tty);
	h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
}

static int hvsi_tiocmget(struct tty_struct *tty, struct file *file)
{
	struct hvsi_struct *hp = (struct hvsi_struct *)tty->driver_data;
	hvsi_get_mctrl(hp);
	return hp->mctrl;
}

static int hvsi_tiocmset(struct tty_struct *tty, struct file *file,
		unsigned int set, unsigned int clear)
{
	struct hvsi_struct *hp = (struct hvsi_struct *)tty->driver_data;
	unsigned long flags;
	uint16_t new_mctrl;
	/* we can only alter DTR */
	clear &= TIOCM_DTR;
	set &= TIOCM_DTR;
	spin_lock_irqsave(&hp->lock, flags);
	new_mctrl = (hp->mctrl & ~clear) | set;
	if (hp->mctrl != new_mctrl) {
		hvsi_set_mctrl(hp, new_mctrl);
		hp->mctrl = new_mctrl;
	}
	spin_unlock_irqrestore(&hp->lock, flags);
	return 0;
}

static const struct tty_operations hvsi_ops = {
	.open = hvsi_open,
	.close = hvsi_close,
	.write = hvsi_write,
	.hangup = hvsi_hangup,
	.write_room = hvsi_write_room,
	.chars_in_buffer = hvsi_chars_in_buffer,
	.throttle = hvsi_throttle,
	.unthrottle = hvsi_unthrottle,
	.tiocmget = hvsi_tiocmget,
	.tiocmset = hvsi_tiocmset,
};
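
/*
 * Register the tty driver and request an interrupt for each port discovered
 * earlier by hvsi_console_init(). Once irqs are live, hvsi_wait switches from
 * polling to sleeping on the state waitqueue.
 */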
static int __init hvsi_init(void)
{
	int i;
	hvsi_driver = alloc_tty_driver(hvsi_count);
	if (!hvsi_driver)
		return -ENOMEM;
	hvsi_driver->owner = THIS_MODULE;
	hvsi_driver->driver_name = "hvsi";
	hvsi_driver->name = "hvsi";
	hvsi_driver->major = HVSI_MAJOR;
	hvsi_driver->minor_start = HVSI_MINOR;
	hvsi_driver->type = TTY_DRIVER_TYPE_SYSTEM;
	hvsi_driver->init_termios = tty_std_termios;
	hvsi_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL;
	hvsi_driver->init_termios.c_ispeed = 9600;
	hvsi_driver->init_termios.c_ospeed = 9600;
	hvsi_driver->flags = TTY_DRIVER_REAL_RAW;
	tty_set_operations(hvsi_driver, &hvsi_ops);
	for (i=0; i < hvsi_count; i++) {
		struct hvsi_struct *hp = &hvsi_ports[i];
		int ret = 1;
		ret = request_irq(hp->virq, hvsi_interrupt, IRQF_DISABLED, "hvsi", hp);
		if (ret)
			printk(KERN_ERR "HVSI: couldn't reserve irq 0x%x (error %i)\n",
				hp->virq, ret);
	}
	hvsi_wait = wait_for_state; /* irqs active now */
	if (tty_register_driver(hvsi_driver))
		panic("Couldn't register hvsi console driver\n");
	printk(KERN_DEBUG "HVSI: registered %i devices\n", hvsi_count);
	return 0;
}
device_initcall(hvsi_init);

/***** console (not tty) code: *****/
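
/*
 * Console write path: translate LF to CRLF into a small aligned buffer and
 * push it to the hypervisor HVSI_MAX_OUTGOING_DATA bytes at a time.
 */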
static void hvsi_console_print(struct console *console, const char *buf,
		unsigned int count)
{
	struct hvsi_struct *hp = &hvsi_ports[console->index];
	char c[HVSI_MAX_OUTGOING_DATA] __ALIGNED__;
	unsigned int i = 0, n = 0;
	int ret, donecr = 0;
	mb();
	if (!is_open(hp))
		return;
	/*
	 * ugh, we have to translate LF -> CRLF ourselves, in place.
	 * copied from hvc_console.c:
	 */
	while (count > 0 || i > 0) {
		if (count > 0 && i < sizeof(c)) {
			if (buf[n] == '\n' && !donecr) {
				c[i++] = '\r';
				donecr = 1;
			} else {
				c[i++] = buf[n++];
				donecr = 0;
				--count;
			}
		} else {
			ret = hvsi_put_chars(hp, c, i);
			if (ret < 0)
				i = 0; /* drop the chunk on error */
			else
				i -= ret;
		}
	}
}

static struct tty_driver *hvsi_console_device(struct console *console,
	int *index)
{
	*index = console->index;
	return hvsi_driver;
}

static int __init hvsi_console_setup(struct console *console, char *options)
{
	struct hvsi_struct *hp = &hvsi_ports[console->index];
	int ret;
	if (console->index < 0 || console->index >= hvsi_count)
		return -1;
	/* give the FSP a chance to change the baud rate when we re-open */
	hvsi_close_protocol(hp);
	ret = hvsi_handshake(hp);
	if (ret < 0)
		return ret;
	ret = hvsi_get_mctrl(hp);
	if (ret < 0)
		return ret;
	ret = hvsi_set_mctrl(hp, hp->mctrl | TIOCM_DTR);
	if (ret < 0)
		return ret;
	hp->flags |= HVSI_CONSOLE;
	return 0;
}

static struct console hvsi_con_driver = {
	.name = "hvsi",
	.write = hvsi_console_print,
	.device = hvsi_console_device,
	.setup = hvsi_console_setup,
	.flags = CON_PRINTBUFFER,
	.index = -1,
};
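
/*
 * Early console discovery: walk the device tree for "hvterm-protocol" vty
 * nodes, set up an hvsi_struct for each (up to MAX_NR_HVSI_CONSOLES), and
 * register the console. Interrupts aren't available yet, so hvsi_wait is
 * pointed at the polling implementation.
 */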
static int __init hvsi_console_init(void)
{
	struct device_node *vty;
	hvsi_wait = poll_for_state; /* no irqs yet; must poll */
	/* search device tree for vty nodes */
	for (vty = of_find_compatible_node(NULL, "serial", "hvterm-protocol");
			vty != NULL;
			vty = of_find_compatible_node(vty, "serial", "hvterm-protocol")) {
		struct hvsi_struct *hp;
		const uint32_t *vtermno, *irq;
		vtermno = get_property(vty, "reg", NULL);
		irq = get_property(vty, "interrupts", NULL);
		if (!vtermno || !irq)
			continue;
		if (hvsi_count >= MAX_NR_HVSI_CONSOLES) {
			of_node_put(vty);
			break;
		}
		hp = &hvsi_ports[hvsi_count];
		INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker);
		INIT_WORK(&hp->handshaker, hvsi_handshaker);
		init_waitqueue_head(&hp->emptyq);
		init_waitqueue_head(&hp->stateq);
		spin_lock_init(&hp->lock);
		hp->index = hvsi_count;
		hp->inbuf_end = hp->inbuf;
		hp->state = HVSI_CLOSED;
		hp->vtermno = *vtermno;
		hp->virq = irq_create_mapping(NULL, irq[0]);
		if (hp->virq == NO_IRQ) {
			printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n",
				__FUNCTION__, irq[0]);
			continue;
		}
		hvsi_count++;
	}
	if (hvsi_count)
		register_console(&hvsi_con_driver);
	return 0;
}
console_initcall(hvsi_console_init);