/*
 * hvc_iucv.c - z/VM IUCV hypervisor console (HVC) device driver
 *
 * This HVC device driver provides terminal access using
 * z/VM IUCV communication paths.
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */
#define KMSG_COMPONENT          "hvc_iucv"
#define pr_fmt(fmt)             KMSG_COMPONENT ": " fmt

#include <linux/types.h>
#include <asm/ebcdic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/moduleparam.h>
#include <linux/tty.h>
#include <linux/wait.h>
#include <net/iucv/iucv.h>

#include "hvc_console.h"

/* General device driver settings */
#define HVC_IUCV_MAGIC          0xc9e4c3e5
#define MAX_HVC_IUCV_LINES      HVC_ALLOC_TTY_ADAPTERS
#define MEMPOOL_MIN_NR          (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)

/* IUCV TTY message */
#define MSG_VERSION             0x02    /* Message version */
#define MSG_TYPE_ERROR          0x01    /* Error message */
#define MSG_TYPE_TERMENV        0x02    /* Terminal environment variable */
#define MSG_TYPE_TERMIOS        0x04    /* Terminal IO struct update */
#define MSG_TYPE_WINSIZE        0x08    /* Terminal window size update */
#define MSG_TYPE_DATA           0x10    /* Terminal data */

struct iucv_tty_msg {
        u8      version;                /* Message version */
        u8      type;                   /* Message type */
#define MSG_MAX_DATALEN         ((u16)(~0))
        u16     datalen;                /* Payload length */
        u8      data[];                 /* Payload buffer */
} __attribute__((packed));
#define MSG_SIZE(s)     ((s) + offsetof(struct iucv_tty_msg, data))
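
/*
 * Wire format note: the packed iucv_tty_msg header (version, type, datalen)
 * is 4 bytes, so MSG_SIZE(s) = s + 4. For example, a data message carrying
 * a 5-byte payload occupies MSG_SIZE(5) = 9 bytes on the IUCV path.
 */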

enum iucv_state_t {
        IUCV_DISCONN    = 0,
        IUCV_CONNECTED  = 1,
        IUCV_SEVERED    = 2,
};

enum tty_state_t {
        TTY_CLOSED      = 0,
        TTY_OPENED      = 1,
};

struct hvc_iucv_private {
        struct hvc_struct       *hvc;           /* HVC struct reference */
        u8                      srv_name[8];    /* IUCV service name (ebcdic) */
        unsigned char           is_console;     /* Linux console usage flag */
        enum iucv_state_t       iucv_state;     /* IUCV connection status */
        enum tty_state_t        tty_state;      /* TTY status */
        struct iucv_path        *path;          /* IUCV path pointer */
        spinlock_t              lock;           /* hvc_iucv_private lock */
#define SNDBUF_SIZE             (PAGE_SIZE)     /* must be < MSG_MAX_DATALEN */
        void                    *sndbuf;        /* send buffer */
        size_t                  sndbuf_len;     /* length of send buffer */
#define QUEUE_SNDBUF_DELAY      (HZ / 25)
        struct delayed_work     sndbuf_work;    /* work: send iucv msg(s) */
        wait_queue_head_t       sndbuf_waitq;   /* wait for send completion */
        struct list_head        tty_outqueue;   /* outgoing IUCV messages */
        struct list_head        tty_inqueue;    /* incoming IUCV messages */
        struct device           *dev;           /* device structure */
};

struct iucv_tty_buffer {
        struct list_head        list;   /* list pointer */
        struct iucv_message     msg;    /* store an IUCV message */
        size_t                  offset; /* data buffer offset */
        struct iucv_tty_msg     *mbuf;  /* buffer to store input/output data */
};

/* IUCV callback handler */
static int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]);
static void hvc_iucv_path_severed(struct iucv_path *, u8[16]);
static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);

/* Kernel module parameter: use one terminal device as default */
static unsigned long hvc_iucv_devices = 1;

/* Array of allocated hvc iucv tty lines... */
static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
#define IUCV_HVC_CON_IDX        (0)
/* List of z/VM user ID filter entries (struct iucv_vmid_filter) */
#define MAX_VMID_FILTER         (500)
static size_t hvc_iucv_filter_size;
static void *hvc_iucv_filter;
static const char *hvc_iucv_filter_string;
static DEFINE_RWLOCK(hvc_iucv_filter_lock);

/* Kmem cache and mempool for iucv_tty_buffer elements */
static struct kmem_cache *hvc_iucv_buffer_cache;
static mempool_t *hvc_iucv_mempool;

/* IUCV handler callback functions */
static struct iucv_handler hvc_iucv_handler = {
        .path_pending     = hvc_iucv_path_pending,
        .path_severed     = hvc_iucv_path_severed,
        .message_complete = hvc_iucv_msg_complete,
        .message_pending  = hvc_iucv_msg_pending,
};

/**
 * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
 * @num:        The HVC virtual terminal number (vtermno)
 *
 * This function returns the struct hvc_iucv_private instance that corresponds
 * to the HVC virtual terminal number specified as parameter @num.
 */
struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
{
        if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
                return NULL;
        return hvc_iucv_table[num - HVC_IUCV_MAGIC];
}

/**
 * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
 * @size:       Size of the internal buffer used to store data.
 * @flags:      Memory allocation flags passed to mempool.
 *
 * This function allocates a new struct iucv_tty_buffer element and, optionally,
 * allocates an internal data buffer with the specified size @size.
 * Note: The total message size arises from the internal buffer size and the
 * members of the iucv_tty_msg structure.
 * The function returns NULL if memory allocation has failed.
 */
static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
{
        struct iucv_tty_buffer *bufp;

        bufp = mempool_alloc(hvc_iucv_mempool, flags);
        if (!bufp)
                return NULL;
        memset(bufp, 0, sizeof(*bufp));

        if (size > 0) {
                bufp->msg.length = MSG_SIZE(size);
                bufp->mbuf = kmalloc(bufp->msg.length, flags);
                if (!bufp->mbuf) {
                        mempool_free(bufp, hvc_iucv_mempool);
                        return NULL;
                }
                bufp->mbuf->version = MSG_VERSION;
                bufp->mbuf->type    = MSG_TYPE_DATA;
                bufp->mbuf->datalen = (u16) size;
        }
        return bufp;
}

/**
 * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
 * @bufp:       Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
 */
static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
{
        kfree(bufp->mbuf);
        mempool_free(bufp, hvc_iucv_mempool);
}

/**
 * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
 * @list:       List containing struct iucv_tty_buffer elements.
 */
static void destroy_tty_buffer_list(struct list_head *list)
{
        struct iucv_tty_buffer *ent, *next;

        list_for_each_entry_safe(ent, next, list, list) {
                list_del(&ent->list);
                destroy_tty_buffer(ent);
        }
}

/**
 * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
 * @priv:               Pointer to struct hvc_iucv_private
 * @buf:                HVC buffer for writing received terminal data.
 * @count:              HVC buffer size.
 * @has_more_data:      Pointer to an int variable.
 *
 * The function picks up pending messages from the input queue and receives
 * the message data that is then written to the specified buffer @buf.
 * If the buffer size @count is less than the data message size, the
 * message is kept on the input queue and @has_more_data is set to 1.
 * If all message data has been written, the message is removed from
 * the input queue.
 *
 * The function returns the number of bytes written to the terminal, zero if
 * there are no pending data messages available or if there is no established
 * IUCV path.
 * If the IUCV path has been severed, then -EPIPE is returned to cause a
 * hang up (that is issued by the HVC layer).
 */
static int hvc_iucv_write(struct hvc_iucv_private *priv,
                          char *buf, int count, int *has_more_data)
{
        struct iucv_tty_buffer *rb;
        int written;
        int rc;

        /* immediately return if there is no IUCV connection */
        if (priv->iucv_state == IUCV_DISCONN)
                return 0;

        /* if the IUCV path has been severed, return -EPIPE to inform the
         * HVC layer to hang up the tty device. */
        if (priv->iucv_state == IUCV_SEVERED)
                return -EPIPE;

        /* check if there are pending messages */
        if (list_empty(&priv->tty_inqueue))
                return 0;

        /* receive an iucv message and flip data to the tty (ldisc) */
        rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);

        written = 0;
        if (!rb->mbuf) { /* message not yet received ... */
                /* allocate mem to store msg data; if no memory is available
                 * then leave the buffer on the list and re-try later */
                rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC);
                if (!rb->mbuf)
                        return -ENOMEM;

                rc = __iucv_message_receive(priv->path, &rb->msg, 0,
                                            rb->mbuf, rb->msg.length, NULL);
                switch (rc) {
                case 0: /* Successful       */
                        break;
                case 2: /* No message found */
                case 9: /* Message purged   */
                        break;
                default:
                        written = -EIO;
                }
                /* remove buffer if an error has occurred or received data
                 * is not correct */
                if (rc || (rb->mbuf->version != MSG_VERSION) ||
                          (rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
                        goto out_remove_buffer;
        }

        switch (rb->mbuf->type) {
        case MSG_TYPE_DATA:
                written = min_t(int, rb->mbuf->datalen - rb->offset, count);
                memcpy(buf, rb->mbuf->data + rb->offset, written);
                if (written < (rb->mbuf->datalen - rb->offset)) {
                        rb->offset += written;
                        *has_more_data = 1;
                        goto out_written;
                }
                break;

        case MSG_TYPE_WINSIZE:
                if (rb->mbuf->datalen != sizeof(struct winsize))
                        break;
                hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
                break;

        case MSG_TYPE_ERROR:    /* ignored ... */
        case MSG_TYPE_TERMENV:  /* ignored ... */
        case MSG_TYPE_TERMIOS:  /* ignored ... */
                break;
        }

out_remove_buffer:
        list_del(&rb->list);
        destroy_tty_buffer(rb);
        *has_more_data = !list_empty(&priv->tty_inqueue);

out_written:
        return written;
}

/**
 * hvc_iucv_get_chars() - HVC get_chars operation.
 * @vtermno:    HVC virtual terminal number.
 * @buf:        Pointer to a buffer to store data
 * @count:      Size of buffer available for writing
 *
 * The HVC thread calls this method to read characters from the back-end.
 * If an IUCV communication path has been established, pending IUCV messages
 * are received and data is copied into buffer @buf up to @count bytes.
 *
 * Locking:     The routine gets called under an irqsave() spinlock; and
 *              the routine locks the struct hvc_iucv_private->lock to call
 *              helper functions.
 */
static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
{
        struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
        int written;
        int has_more_data;

        if (count <= 0)
                return 0;

        if (!priv)
                return -ENODEV;

        spin_lock(&priv->lock);
        has_more_data = 0;
        written = hvc_iucv_write(priv, buf, count, &has_more_data);
        spin_unlock(&priv->lock);

        /* if there are still messages on the queue... schedule another run */
        if (has_more_data)
                hvc_kick();

        return written;
}

/**
 * hvc_iucv_queue() - Buffer terminal data for sending.
 * @priv:       Pointer to struct hvc_iucv_private instance.
 * @buf:        Buffer containing data to send.
 * @count:      Size of buffer and amount of data to send.
 *
 * The function queues data for sending. To actually send the buffered data,
 * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
 * The function returns the number of data bytes that have been buffered.
 *
 * If the device is not connected, data is ignored and the function returns
 * @count.
 * If the buffer is full, the function returns 0.
 * If an existing IUCV communication path has been severed, -EPIPE is returned
 * (that can be passed to HVC layer to cause a tty hangup).
 */
static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
                          int count)
{
        size_t len;

        if (priv->iucv_state == IUCV_DISCONN)
                return count;                   /* ignore data */

        if (priv->iucv_state == IUCV_SEVERED)
                return -EPIPE;

        len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
        if (!len)
                return 0;

        memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
        priv->sndbuf_len += len;

        if (priv->iucv_state == IUCV_CONNECTED)
                schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);

        return len;
}

/**
 * hvc_iucv_send() - Send an IUCV message containing terminal data.
 * @priv:       Pointer to struct hvc_iucv_private instance.
 *
 * If an IUCV communication path has been established, the buffered output data
 * is sent via an IUCV message and the number of bytes sent is returned.
 * Returns 0 if there is no established IUCV communication path or
 * -EPIPE if an existing IUCV communication path has been severed.
 */
static int hvc_iucv_send(struct hvc_iucv_private *priv)
{
        struct iucv_tty_buffer *sb;
        int rc, len;

        if (priv->iucv_state == IUCV_SEVERED)
                return -EPIPE;

        if (priv->iucv_state == IUCV_DISCONN)
                return -EIO;

        if (!priv->sndbuf_len)
                return 0;

        /* allocate internal buffer to store msg data and also compute total
         * message length */
        sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
        if (!sb)
                return -ENOMEM;

        memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
        sb->mbuf->datalen = (u16) priv->sndbuf_len;
        sb->msg.length = MSG_SIZE(sb->mbuf->datalen);

        list_add_tail(&sb->list, &priv->tty_outqueue);

        rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
                                 (void *) sb->mbuf, sb->msg.length);
        if (rc) {
                /* drop the message here; however we might want to handle
                 * 0x03 (msg limit reached) by trying again... */
                list_del(&sb->list);
                destroy_tty_buffer(sb);
        }
        len = priv->sndbuf_len;
        priv->sndbuf_len = 0;

        return len;
}

/**
 * hvc_iucv_sndbuf_work() - Send buffered data over IUCV
 * @work:       Work structure.
 *
 * This work queue function sends buffered output data over IUCV and,
 * if not all buffered data could be sent, reschedules itself.
 */
static void hvc_iucv_sndbuf_work(struct work_struct *work)
{
        struct hvc_iucv_private *priv;

        priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
        if (!priv)
                return;

        spin_lock_bh(&priv->lock);
        hvc_iucv_send(priv);
        spin_unlock_bh(&priv->lock);
}

/**
 * hvc_iucv_put_chars() - HVC put_chars operation.
 * @vtermno:    HVC virtual terminal number.
 * @buf:        Pointer to a buffer to read data from
 * @count:      Size of buffer available for reading
 *
 * The HVC thread calls this method to write characters to the back-end.
 * The function calls hvc_iucv_queue() to queue terminal data for sending.
 *
 * Locking:     The method gets called under an irqsave() spinlock; and
 *              locks struct hvc_iucv_private->lock.
 */
static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
{
        struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
        int queued;

        if (count <= 0)
                return 0;

        if (!priv)
                return -ENODEV;

        spin_lock(&priv->lock);
        queued = hvc_iucv_queue(priv, buf, count);
        spin_unlock(&priv->lock);

        return queued;
}

/**
 * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
 * @hp: Pointer to the HVC device (struct hvc_struct)
 * @id: Additional data (originally passed to hvc_alloc): the index of a
 *      struct hvc_iucv_private instance.
 *
 * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private
 * instance that is derived from @id. Always returns 0.
 *
 * Locking:     struct hvc_iucv_private->lock, spin_lock_bh
 */
static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
{
        struct hvc_iucv_private *priv;

        priv = hvc_iucv_get_private(id);
        if (!priv)
                return 0;

        spin_lock_bh(&priv->lock);
        priv->tty_state = TTY_OPENED;
        spin_unlock_bh(&priv->lock);

        return 0;
}

/**
 * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance.
 * @priv:       Pointer to the struct hvc_iucv_private instance.
 */
static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
{
        destroy_tty_buffer_list(&priv->tty_outqueue);
        destroy_tty_buffer_list(&priv->tty_inqueue);

        priv->tty_state = TTY_CLOSED;
        priv->iucv_state = IUCV_DISCONN;

        priv->sndbuf_len = 0;
}

/**
 * tty_outqueue_empty() - Test if the tty outq is empty
 * @priv:       Pointer to struct hvc_iucv_private instance.
 */
static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
{
        int rc;

        spin_lock_bh(&priv->lock);
        rc = list_empty(&priv->tty_outqueue);
        spin_unlock_bh(&priv->lock);

        return rc;
}

/**
 * flush_sndbuf_sync() - Flush send buffer and wait for completion
 * @priv:       Pointer to struct hvc_iucv_private instance.
 *
 * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
 * to flush any buffered terminal output data and waits for completion.
 */
static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
{
        int sync_wait;

        cancel_delayed_work_sync(&priv->sndbuf_work);

        spin_lock_bh(&priv->lock);
        hvc_iucv_send(priv);            /* force sending buffered data */
        sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */
        spin_unlock_bh(&priv->lock);

        if (sync_wait)
                wait_event_timeout(priv->sndbuf_waitq,
                                   tty_outqueue_empty(priv), HZ/10);
}

/**
 * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up
 * @priv:       Pointer to hvc_iucv_private structure
 *
 * This routine severs an existing IUCV communication path and hangs
 * up the underlying HVC terminal device.
 * The hang-up occurs only if an IUCV communication path is established;
 * otherwise there is no need to hang up the terminal device.
 *
 * The IUCV HVC hang-up is separated into two steps:
 * 1. After the IUCV path has been severed, the iucv_state is set to
 *    IUCV_SEVERED.
 * 2. Later, when the HVC thread calls hvc_iucv_get_chars(), the
 *    IUCV_SEVERED state causes the tty hang-up in the HVC layer.
 *
 * If the tty has not yet been opened, clean up the hvc_iucv_private
 * structure to allow re-connects.
 * If the tty has been opened, let get_chars() return -EPIPE to signal
 * the HVC layer to hang up the tty and, if so, wake up the HVC thread
 * to call get_chars()...
 *
 * Special notes on hanging up a HVC terminal instantiated as console:
 * Hang-up:     1. do_tty_hangup() replaces file ops (= hung_up_tty_fops)
 *              2. do_tty_hangup() calls tty->ops->close() for console_filp
 *                 => no hangup notifier is called by HVC (default)
 *              3. hvc_close() returns because of tty_hung_up_p(filp)
 *                 => no delete notifier is called!
 * Finally, the back-end is not being notified, thus, the tty session is
 * kept active (TTY_OPEN) to be ready for re-connects.
 *
 * Locking:     spin_lock(&priv->lock) w/o disabling bh
 */
static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
{
        struct iucv_path *path;

        path = NULL;
        spin_lock(&priv->lock);
        if (priv->iucv_state == IUCV_CONNECTED) {
                path = priv->path;
                priv->path = NULL;
                priv->iucv_state = IUCV_SEVERED;
                if (priv->tty_state == TTY_CLOSED)
                        hvc_iucv_cleanup(priv);
                else
                        /* console is special (see above) */
                        if (priv->is_console) {
                                hvc_iucv_cleanup(priv);
                                priv->tty_state = TTY_OPENED;
                        } else
                                hvc_kick();
        }
        spin_unlock(&priv->lock);

        /* finally sever path (outside of priv->lock due to lock ordering) */
        if (path) {
                iucv_path_sever(path, NULL);
                iucv_path_free(path);
        }
}

/**
 * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
 * @hp: Pointer to the HVC device (struct hvc_struct)
 * @id: Additional data (originally passed to hvc_alloc):
 *      the index of a struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC back-end that a tty hangup (carrier loss,
 * virtual or otherwise) has occurred.
 * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
 * to keep an existing IUCV communication path established.
 * (Background: vhangup() is called from user space (by getty or login) to
 *  disable writing to the tty by other applications).
 * If the tty has been opened and an established IUCV path has been severed
 * (we caused the tty hangup), the function calls hvc_iucv_cleanup().
 *
 * Locking:     struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
{
        struct hvc_iucv_private *priv;

        priv = hvc_iucv_get_private(id);
        if (!priv)
                return;

        flush_sndbuf_sync(priv);

        spin_lock_bh(&priv->lock);
        /* NOTE: If the hangup was scheduled by ourselves (from the iucv
         *       path_severed callback [IUCV_SEVERED]), we have to clean up
         *       our structure and to set state to TTY_CLOSED.
         *       If the tty was hung up otherwise (e.g. vhangup()), then we
         *       ignore this hangup and keep an established IUCV path open...
         *       (...the reason is that we are not able to connect back to the
         *       client if we disconnect on hang up) */
        priv->tty_state = TTY_CLOSED;

        if (priv->iucv_state == IUCV_SEVERED)
                hvc_iucv_cleanup(priv);
        spin_unlock_bh(&priv->lock);
}

/**
 * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
 * @hp: Pointer to the HVC device (struct hvc_struct)
 * @id: Additional data (originally passed to hvc_alloc):
 *      the index of a struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC back-end that the last tty device fd has been
 * closed. The function calls hvc_iucv_cleanup() to clean up the struct
 * hvc_iucv_private instance.
 *
 * Locking:     struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
{
        struct hvc_iucv_private *priv;
        struct iucv_path *path;

        priv = hvc_iucv_get_private(id);
        if (!priv)
                return;

        flush_sndbuf_sync(priv);

        spin_lock_bh(&priv->lock);
        path = priv->path;              /* save reference to IUCV path */
        priv->path = NULL;
        hvc_iucv_cleanup(priv);
        spin_unlock_bh(&priv->lock);

        /* sever IUCV path outside of priv->lock due to lock ordering of:
         * priv->lock <--> iucv_table_lock */
        if (path) {
                iucv_path_sever(path, NULL);
                iucv_path_free(path);
        }
}

/**
 * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
 * @ipvmid:     Originating z/VM user ID (right padded with blanks)
 *
 * Returns 0 if the z/VM user ID @ipvmid is allowed to connect, otherwise
 * non-zero.
 */
static int hvc_iucv_filter_connreq(u8 ipvmid[8])
{
        size_t i;

        /* Note: default policy is ACCEPT if no filter is set */
        if (!hvc_iucv_filter_size)
                return 0;

        for (i = 0; i < hvc_iucv_filter_size; i++)
                if (0 == memcmp(ipvmid, hvc_iucv_filter + (8 * i), 8))
                        return 0;
        return 1;
}

/**
 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
 * @path:       Pending path (struct iucv_path)
 * @ipvmid:     z/VM system identifier of originator
 * @ipuser:     User specified data for this path
 *              (AF_IUCV: port/service name and originator port)
 *
 * The function uses the @ipuser data to determine if the pending path belongs
 * to a terminal managed by this device driver.
 * If the path belongs to this driver, ensure that the terminal is not accessed
 * multiple times (only one connection to a terminal is allowed).
 * If the terminal is not yet connected, the pending path is accepted and is
 * associated to the appropriate struct hvc_iucv_private instance.
 *
 * Returns 0 if @path belongs to a terminal managed by this device driver;
 * otherwise returns -ENODEV in order to dispatch this path to other handlers.
 *
 * Locking:     struct hvc_iucv_private->lock
 */
static int hvc_iucv_path_pending(struct iucv_path *path,
                                 u8 ipvmid[8], u8 ipuser[16])
{
        struct hvc_iucv_private *priv;
        u8 nuser_data[16];
        u8 vm_user_id[9];
        int i, rc;

        priv = NULL;
        for (i = 0; i < hvc_iucv_devices; i++)
                if (hvc_iucv_table[i] &&
                    (0 == memcmp(hvc_iucv_table[i]->srv_name, ipuser, 8))) {
                        priv = hvc_iucv_table[i];
                        break;
                }
        if (!priv)
                return -ENODEV;

        /* Enforce that ipvmid is allowed to connect to us */
        read_lock(&hvc_iucv_filter_lock);
        rc = hvc_iucv_filter_connreq(ipvmid);
        read_unlock(&hvc_iucv_filter_lock);
        if (rc) {
                iucv_path_sever(path, ipuser);
                iucv_path_free(path);
                memcpy(vm_user_id, ipvmid, 8);
                vm_user_id[8] = 0;
                pr_info("A connection request from z/VM user ID %s "
                        "was refused\n", vm_user_id);
                return 0;
        }

        spin_lock(&priv->lock);

        /* If the terminal is already connected or being severed, then sever
         * this path to enforce that there is only ONE established communication
         * path per terminal. */
        if (priv->iucv_state != IUCV_DISCONN) {
                iucv_path_sever(path, ipuser);
                iucv_path_free(path);
                goto out_path_handled;
        }

        /* accept path */
        memcpy(nuser_data, ipuser + 8, 8);  /* remote service (for af_iucv) */
        memcpy(nuser_data + 8, ipuser, 8);  /* local service  (for af_iucv) */
        path->msglim = 0xffff;              /* IUCV MSGLIMIT */
        path->flags &= ~IUCV_IPRMDATA;      /* TODO: use IUCV_IPRMDATA */
        rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
        if (rc) {
                iucv_path_sever(path, ipuser);
                iucv_path_free(path);
                goto out_path_handled;
        }
        priv->path = path;
        priv->iucv_state = IUCV_CONNECTED;

        /* flush buffered output data... */
        schedule_delayed_work(&priv->sndbuf_work, 5);

out_path_handled:
        spin_unlock(&priv->lock);
        return 0;
}

/**
 * hvc_iucv_path_severed() - IUCV handler to process a path sever.
 * @path:       Pending path (struct iucv_path)
 * @ipuser:     User specified data for this path
 *              (AF_IUCV: port/service name and originator port)
 *
 * This function calls the hvc_iucv_hangup() function for the
 * respective IUCV HVC terminal.
 *
 * Locking:     struct hvc_iucv_private->lock
 */
static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
        struct hvc_iucv_private *priv = path->private;

        hvc_iucv_hangup(priv);
}

/**
 * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
 * @path:       Pending path (struct iucv_path)
 * @msg:        Pointer to the IUCV message
 *
 * The function puts an incoming message on the input queue for later
 * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
 * If the tty has not yet been opened, the message is rejected.
 *
 * Locking:     struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_pending(struct iucv_path *path,
                                 struct iucv_message *msg)
{
        struct hvc_iucv_private *priv = path->private;
        struct iucv_tty_buffer *rb;

        /* reject messages that exceed max size of iucv_tty_msg->datalen */
        if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
                iucv_message_reject(path, msg);
                return;
        }

        spin_lock(&priv->lock);

        /* reject messages if tty has not yet been opened */
        if (priv->tty_state == TTY_CLOSED) {
                iucv_message_reject(path, msg);
                goto unlock_return;
        }

        /* allocate tty buffer to save iucv msg only */
        rb = alloc_tty_buffer(0, GFP_ATOMIC);
        if (!rb) {
                iucv_message_reject(path, msg);
                goto unlock_return;     /* -ENOMEM */
        }
        rb->msg = *msg;

        list_add_tail(&rb->list, &priv->tty_inqueue);

        hvc_kick();     /* wake up hvc thread */

unlock_return:
        spin_unlock(&priv->lock);
}

/**
 * hvc_iucv_msg_complete() - IUCV handler to process message completion
 * @path:       Pending path (struct iucv_path)
 * @msg:        Pointer to the IUCV message
 *
 * The function is called upon completion of message delivery to remove the
 * message from the outqueue. Additional delivery information can be found in
 * msg->audit: rejected messages (0x040000 (IPADRJCT)), and
 * purged messages (0x010000 (IPADPGNR)).
 *
 * Locking:     struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_complete(struct iucv_path *path,
                                  struct iucv_message *msg)
{
        struct hvc_iucv_private *priv = path->private;
        struct iucv_tty_buffer *ent, *next;
        LIST_HEAD(list_remove);

        spin_lock(&priv->lock);
        list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
                if (ent->msg.id == msg->id) {
                        list_move(&ent->list, &list_remove);
                        break;
                }
        wake_up(&priv->sndbuf_waitq);
        spin_unlock(&priv->lock);
        destroy_tty_buffer_list(&list_remove);
}

/**
 * hvc_iucv_pm_freeze() - Freeze PM callback
 * @dev:        IUCV HVC terminal device
 *
 * Sever an established IUCV communication path and
 * trigger a hang-up of the underlying HVC terminal.
 */
static int hvc_iucv_pm_freeze(struct device *dev)
{
        struct hvc_iucv_private *priv = dev_get_drvdata(dev);

        local_bh_disable();
        hvc_iucv_hangup(priv);
        local_bh_enable();

        return 0;
}

/**
 * hvc_iucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:        IUCV HVC terminal device
 *
 * Wake up the HVC thread to trigger hang-up and respective
 * HVC back-end notifier invocations.
 */
static int hvc_iucv_pm_restore_thaw(struct device *dev)
{
        hvc_kick();
        return 0;
}

/* HVC operations */
static struct hv_ops hvc_iucv_ops = {
        .get_chars = hvc_iucv_get_chars,
        .put_chars = hvc_iucv_put_chars,
        .notifier_add = hvc_iucv_notifier_add,
        .notifier_del = hvc_iucv_notifier_del,
        .notifier_hangup = hvc_iucv_notifier_hangup,
};

/* Suspend / resume device operations */
static struct dev_pm_ops hvc_iucv_pm_ops = {
        .freeze  = hvc_iucv_pm_freeze,
        .thaw    = hvc_iucv_pm_restore_thaw,
        .restore = hvc_iucv_pm_restore_thaw,
};

/* IUCV HVC device driver */
static struct device_driver hvc_iucv_driver = {
        .name = KMSG_COMPONENT,
        .bus  = &iucv_bus,
        .pm   = &hvc_iucv_pm_ops,
};

/**
 * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
 * @id:                 hvc_iucv_table index
 * @is_console:         Flag if the instance is used as Linux console
 *
 * This function allocates a new hvc_iucv_private structure and stores
 * the instance in hvc_iucv_table at index @id.
 * Returns 0 on success; otherwise non-zero.
 */
static int __init hvc_iucv_alloc(int id, unsigned int is_console)
{
        struct hvc_iucv_private *priv;
        char name[9];
        int rc;

        priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        spin_lock_init(&priv->lock);
        INIT_LIST_HEAD(&priv->tty_outqueue);
        INIT_LIST_HEAD(&priv->tty_inqueue);
        INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
        init_waitqueue_head(&priv->sndbuf_waitq);

        priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
        if (!priv->sndbuf) {
                kfree(priv);
                return -ENOMEM;
        }

        /* set console flag */
        priv->is_console = is_console;

        /* allocate hvc device */
        priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /* PAGE_SIZE */
                              HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
        if (IS_ERR(priv->hvc)) {
                rc = PTR_ERR(priv->hvc);
                goto out_error_hvc;
        }

        /* notify HVC thread instead of using polling */
        priv->hvc->irq_requested = 1;

        /* setup iucv related information */
        snprintf(name, 9, "lnxhvc%-2d", id);
        memcpy(priv->srv_name, name, 8);
        ASCEBC(priv->srv_name, 8);

        /* create and setup device */
        priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
        if (!priv->dev) {
                rc = -ENOMEM;
                goto out_error_dev;
        }
        dev_set_name(priv->dev, "hvc_iucv%d", id);
        dev_set_drvdata(priv->dev, priv);
        priv->dev->bus = &iucv_bus;
        priv->dev->parent = iucv_root;
        priv->dev->driver = &hvc_iucv_driver;
        priv->dev->release = (void (*)(struct device *)) kfree;
        rc = device_register(priv->dev);
        if (rc) {
                put_device(priv->dev);
                goto out_error_dev;
        }

        hvc_iucv_table[id] = priv;
        return 0;

out_error_dev:
        hvc_remove(priv->hvc);
out_error_hvc:
        free_page((unsigned long) priv->sndbuf);
        kfree(priv);

        return rc;
}

/**
 * hvc_iucv_destroy() - Destroy and free hvc_iucv_private instances
 * @priv:       Pointer to the struct hvc_iucv_private instance to release
 */
static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
{
        hvc_remove(priv->hvc);
        device_unregister(priv->dev);
        free_page((unsigned long) priv->sndbuf);
        kfree(priv);
}

/**
 * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID
 * @filter:     String containing a comma-separated list of z/VM user IDs
 * @dest:       Location where to store the parsed z/VM user ID
 */
static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
{
        const char *nextdelim, *residual;
        size_t len;

        nextdelim = strchr(filter, ',');
        if (nextdelim) {
                len = nextdelim - filter;
                residual = nextdelim + 1;
        } else {
                len = strlen(filter);
                residual = filter + len;
        }

        if (len == 0)
                return ERR_PTR(-EINVAL);

        /* check for '\n' (if called from sysfs) */
        if (filter[len - 1] == '\n')
                len--;

        if (len > 8)
                return ERR_PTR(-EINVAL);

        /* pad with blanks and save upper case version of user ID */
        memset(dest, ' ', 8);
        while (len--)
                dest[len] = toupper(filter[len]);
        return residual;
}
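
/*
 * Example: parsing the filter string "lnxuser1,lnxsys02" stores the
 * blank-padded, upper-cased entry "LNXUSER1" in the destination buffer on
 * the first call and returns a pointer to the remaining string "lnxsys02"
 * to be passed to the next call.
 */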

/**
 * hvc_iucv_setup_filter() - Set up z/VM user ID filter
 * @val:        String consisting of a comma-separated list of z/VM user IDs
 *
 * The function parses the @val string and creates an array containing
 * the list of z/VM user ID filter entries.
 * Return code 0 means success, -EINVAL if the filter is syntactically
 * incorrect, -ENOMEM if there was not enough memory to allocate the
 * filter list array, or -ENOSPC if too many z/VM user IDs have been specified.
 */
static int hvc_iucv_setup_filter(const char *val)
{
        const char *residual;
        int err;
        size_t size, count;
        void *array, *old_filter;

        count = strlen(val);
        if (count == 0 || (count == 1 && val[0] == '\n')) {
                size  = 0;
                array = NULL;
                goto out_replace_filter;        /* clear filter */
        }

        /* count user IDs in order to allocate sufficient memory */
        size = 1;
        residual = val;
        while ((residual = strchr(residual, ',')) != NULL) {
                residual++;
                size++;
        }

        /* check if the specified list exceeds the filter limit */
        if (size > MAX_VMID_FILTER)
                return -ENOSPC;

        array = kzalloc(size * 8, GFP_KERNEL);
        if (!array)
                return -ENOMEM;

        count = size;
        residual = val;
        while (*residual && count) {
                residual = hvc_iucv_parse_filter(residual,
                                                 array + ((size - count) * 8));
                if (IS_ERR(residual)) {
                        err = PTR_ERR(residual);
                        kfree(array);
                        goto out_err;
                }
                count--;
        }

out_replace_filter:
        write_lock_bh(&hvc_iucv_filter_lock);
        old_filter = hvc_iucv_filter;
        hvc_iucv_filter_size = size;
        hvc_iucv_filter = array;
        write_unlock_bh(&hvc_iucv_filter_lock);
        kfree(old_filter);

        err = 0;
out_err:
        return err;
}

/**
 * param_set_vmidfilter() - Set z/VM user ID filter parameter
 * @val:        String consisting of a comma-separated list of z/VM user IDs
 * @kp:         Kernel parameter pointing to hvc_iucv_filter array
 *
 * The function sets up the z/VM user ID filter specified as comma-separated
 * list of user IDs in @val.
 * Note: If it is called early in the boot process, @val is stored and
 * parsed later in hvc_iucv_init().
 */
static int param_set_vmidfilter(const char *val, struct kernel_param *kp)
{
        int rc;

        if (!MACHINE_IS_VM || !hvc_iucv_devices)
                return -ENODEV;

        if (!val)
                return -EINVAL;

        rc = 0;
        if (slab_is_available())
                rc = hvc_iucv_setup_filter(val);
        else
                hvc_iucv_filter_string = val;   /* defer... */
        return rc;
}

/**
 * param_get_vmidfilter() - Get z/VM user ID filter
 * @buffer:     Buffer to store z/VM user ID filter,
 *              (buffer size assumption PAGE_SIZE)
 * @kp:         Kernel parameter pointing to the hvc_iucv_filter array
 *
 * The function stores the filter as a comma-separated list of z/VM user IDs
 * in @buffer. Typically, sysfs routines call this function for attr show.
 */
static int param_get_vmidfilter(char *buffer, struct kernel_param *kp)
{
        int rc;
        size_t index, len;
        void *start, *end;

        if (!MACHINE_IS_VM || !hvc_iucv_devices)
                return -ENODEV;

        rc = 0;
        read_lock_bh(&hvc_iucv_filter_lock);
        for (index = 0; index < hvc_iucv_filter_size; index++) {
                start = hvc_iucv_filter + (8 * index);
                end   = memchr(start, ' ', 8);
                len   = (end) ? end - start : 8;
                memcpy(buffer + rc, start, len);
                rc += len;
                buffer[rc++] = ',';
        }
        read_unlock_bh(&hvc_iucv_filter_lock);
        if (rc)
                buffer[--rc] = '\0';    /* replace last comma and update rc */
        return rc;
}

#define param_check_vmidfilter(name, p) __param_check(name, p, void)

/**
 * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
 */
static int __init hvc_iucv_init(void)
{
        int rc;
        unsigned int i;

        if (!hvc_iucv_devices)
                return -ENODEV;

        if (!MACHINE_IS_VM) {
                pr_notice("The z/VM IUCV HVC device driver cannot "
                          "be used without z/VM\n");
                rc = -ENODEV;
                goto out_error;
        }

        if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
                pr_err("%lu is not a valid value for the hvc_iucv= "
                       "kernel parameter\n", hvc_iucv_devices);
                rc = -EINVAL;
                goto out_error;
        }

        /* register IUCV HVC device driver */
        rc = driver_register(&hvc_iucv_driver);
        if (rc)
                goto out_error;

        /* parse hvc_iucv_allow string and create z/VM user ID filter list */
        if (hvc_iucv_filter_string) {
                rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
                switch (rc) {
                case 0:
                        break;
                case -ENOMEM:
                        pr_err("Allocating memory failed with "
                               "reason code=%d\n", 3);
                        goto out_error;
                case -EINVAL:
                        pr_err("hvc_iucv_allow= does not specify a valid "
                               "z/VM user ID list\n");
                        goto out_error;
                case -ENOSPC:
                        pr_err("hvc_iucv_allow= specifies too many "
                               "z/VM user IDs\n");
                        goto out_error;
                default:
                        goto out_error;
                }
        }

        hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
                                        sizeof(struct iucv_tty_buffer),
                                        0, 0, NULL);
        if (!hvc_iucv_buffer_cache) {
                pr_err("Allocating memory failed with reason code=%d\n", 1);
                rc = -ENOMEM;
                goto out_error;
        }

        hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
                                                    hvc_iucv_buffer_cache);
        if (!hvc_iucv_mempool) {
                pr_err("Allocating memory failed with reason code=%d\n", 2);
                kmem_cache_destroy(hvc_iucv_buffer_cache);
                rc = -ENOMEM;
                goto out_error;
        }

        /* register the first terminal device as console
         * (must be done before allocating hvc terminal devices) */
        rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
        if (rc) {
                pr_err("Registering HVC terminal device as "
                       "Linux console failed\n");
                goto out_error_memory;
        }

        /* allocate hvc_iucv_private structs */
        for (i = 0; i < hvc_iucv_devices; i++) {
                rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
                if (rc) {
                        pr_err("Creating a new HVC terminal device "
                               "failed with error code=%d\n", rc);
                        goto out_error_hvc;
                }
        }

        /* register IUCV callback handler */
        rc = iucv_register(&hvc_iucv_handler, 0);
        if (rc) {
                pr_err("Registering IUCV handlers failed with error code=%d\n",
                       rc);
                goto out_error_iucv;
        }

        return 0;

out_error_iucv:
        iucv_unregister(&hvc_iucv_handler, 0);
out_error_hvc:
        for (i = 0; i < hvc_iucv_devices; i++)
                if (hvc_iucv_table[i])
                        hvc_iucv_destroy(hvc_iucv_table[i]);
out_error_memory:
        mempool_destroy(hvc_iucv_mempool);
        kmem_cache_destroy(hvc_iucv_buffer_cache);
out_error:
        if (hvc_iucv_filter)
                kfree(hvc_iucv_filter);
        hvc_iucv_devices = 0; /* ensure that we do not provide any device */
        return rc;
}

/**
 * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter
 * @val:        Parameter value (numeric)
 */
static int __init hvc_iucv_config(char *val)
{
        return strict_strtoul(val, 10, &hvc_iucv_devices);
}

device_initcall(hvc_iucv_init);
__setup("hvc_iucv=", hvc_iucv_config);
core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);
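
/*
 * Kernel parameter summary:
 *   hvc_iucv=<number>     Number of HVC terminal devices to allocate
 *                         (default 1, at most MAX_HVC_IUCV_LINES; 0 disables
 *                         the driver).
 *   hvc_iucv_allow=<list> Comma-separated list of z/VM user IDs that are
 *                         allowed to connect; an empty filter accepts all
 *                         connection requests (see hvc_iucv_filter_connreq()).
 */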