/*
 * hvc_iucv.c - z/VM IUCV hypervisor console (HVC) device driver
 *
 * This HVC device driver provides terminal access using
 * z/VM IUCV communication paths.
 *
 * Copyright IBM Corp. 2008
 *
 * Author(s):	Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */
#define KMSG_COMPONENT		"hvc_iucv"
#define pr_fmt(fmt)		KMSG_COMPONENT ": " fmt

#include <linux/types.h>
#include <asm/ebcdic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/moduleparam.h>
#include <linux/tty.h>
#include <linux/wait.h>
#include <net/iucv/iucv.h>

#include "hvc_console.h"
/* General device driver settings */
#define HVC_IUCV_MAGIC		0xc9e4c3e5
#define MAX_HVC_IUCV_LINES	HVC_ALLOC_TTY_ADAPTERS
#define MEMPOOL_MIN_NR		(PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)

/* IUCV TTY message */
#define MSG_VERSION		0x02	/* Message version */
#define MSG_TYPE_ERROR		0x01	/* Error message */
#define MSG_TYPE_TERMENV	0x02	/* Terminal environment variable */
#define MSG_TYPE_TERMIOS	0x04	/* Terminal IO struct update */
#define MSG_TYPE_WINSIZE	0x08	/* Terminal window size update */
#define MSG_TYPE_DATA		0x10	/* Terminal data */

struct iucv_tty_msg {
	u8	version;		/* Message version */
	u8	type;			/* Message type */
#define MSG_MAX_DATALEN		((u16)(~0))
	u16	datalen;		/* Payload length */
	u8	data[];			/* Payload buffer */
} __attribute__((packed));
#define MSG_SIZE(s)		((s) + offsetof(struct iucv_tty_msg, data))
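
/*
 * Sizing sketch (derived from the packed struct above): the header fields
 * version, type, and datalen occupy 4 bytes, so MSG_SIZE(80) evaluates to 84.
 * This total is the value carried in iucv_message.length for the data
 * messages that this driver sends and receives.
 */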

enum iucv_state_t {
	IUCV_DISCONN	= 0,
	IUCV_CONNECTED	= 1,
	IUCV_SEVERED	= 2,
};

enum tty_state_t {
	TTY_CLOSED	= 0,
	TTY_OPENED	= 1,
};

struct hvc_iucv_private {
	struct hvc_struct	*hvc;		/* HVC struct reference */
	u8			srv_name[8];	/* IUCV service name (ebcdic) */
	unsigned char		is_console;	/* Linux console usage flag */
	enum iucv_state_t	iucv_state;	/* IUCV connection status */
	enum tty_state_t	tty_state;	/* TTY status */
	struct iucv_path	*path;		/* IUCV path pointer */
	spinlock_t		lock;		/* hvc_iucv_private lock */
#define SNDBUF_SIZE		(PAGE_SIZE)	/* must be < MSG_MAX_DATALEN */
	void			*sndbuf;	/* send buffer */
	size_t			sndbuf_len;	/* length of send buffer */
#define QUEUE_SNDBUF_DELAY	(HZ / 25)
	struct delayed_work	sndbuf_work;	/* work: send iucv msg(s) */
	wait_queue_head_t	sndbuf_waitq;	/* wait for send completion */
	struct list_head	tty_outqueue;	/* outgoing IUCV messages */
	struct list_head	tty_inqueue;	/* incoming IUCV messages */
};

struct iucv_tty_buffer {
	struct list_head	list;	/* list pointer */
	struct iucv_message	msg;	/* store an IUCV message */
	size_t			offset;	/* data buffer offset */
	struct iucv_tty_msg	*mbuf;	/* buffer to store input/output data */
};
/* IUCV callback handler */
static int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]);
static void hvc_iucv_path_severed(struct iucv_path *, u8[16]);
static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);

/* Kernel module parameter: use one terminal device as default */
static unsigned long hvc_iucv_devices = 1;

/* Array of allocated hvc iucv tty lines... */
static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
#define IUCV_HVC_CON_IDX	(0)

/* List of z/VM user ID filter entries (struct iucv_vmid_filter) */
#define MAX_VMID_FILTER		(500)
static size_t hvc_iucv_filter_size;
static void *hvc_iucv_filter;
static const char *hvc_iucv_filter_string;
static DEFINE_RWLOCK(hvc_iucv_filter_lock);

/* Kmem cache and mempool for iucv_tty_buffer elements */
static struct kmem_cache *hvc_iucv_buffer_cache;
static mempool_t *hvc_iucv_mempool;

/* IUCV handler callback functions */
static struct iucv_handler hvc_iucv_handler = {
	.path_pending		= hvc_iucv_path_pending,
	.path_severed		= hvc_iucv_path_severed,
	.message_complete	= hvc_iucv_msg_complete,
	.message_pending	= hvc_iucv_msg_pending,
};

/**
 * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
 * @num:	The HVC virtual terminal number (vtermno)
 *
 * This function returns the struct hvc_iucv_private instance that corresponds
 * to the HVC virtual terminal number specified as parameter @num.
 */
struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
{
	if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
		return NULL;
	return hvc_iucv_table[num - HVC_IUCV_MAGIC];
}

/**
 * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
 * @size:	Size of the internal buffer used to store data.
 * @flags:	Memory allocation flags passed to mempool.
 *
 * This function allocates a new struct iucv_tty_buffer element and, optionally,
 * allocates an internal data buffer with the specified size @size.
 * Note: The total message size arises from the internal buffer size and the
 * members of the iucv_tty_msg structure.
 * The function returns NULL if memory allocation has failed.
 */
static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
{
	struct iucv_tty_buffer *bufp;

	bufp = mempool_alloc(hvc_iucv_mempool, flags);
	if (!bufp)
		return NULL;
	memset(bufp, 0, sizeof(*bufp));

	if (size > 0) {
		bufp->msg.length = MSG_SIZE(size);
		bufp->mbuf = kmalloc(bufp->msg.length, flags);
		if (!bufp->mbuf) {
			mempool_free(bufp, hvc_iucv_mempool);
			return NULL;
		}
		bufp->mbuf->version = MSG_VERSION;
		bufp->mbuf->type = MSG_TYPE_DATA;
		bufp->mbuf->datalen = (u16) size;
	}
	return bufp;
}

/**
 * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
 * @bufp:	Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
 */
static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
{
	kfree(bufp->mbuf);
	mempool_free(bufp, hvc_iucv_mempool);
}

/**
 * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
 * @list:	List containing struct iucv_tty_buffer elements.
 */
static void destroy_tty_buffer_list(struct list_head *list)
{
	struct iucv_tty_buffer *ent, *next;

	list_for_each_entry_safe(ent, next, list, list) {
		list_del(&ent->list);
		destroy_tty_buffer(ent);
	}
}

/**
 * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
 * @priv:		Pointer to struct hvc_iucv_private
 * @buf:		HVC buffer for writing received terminal data.
 * @count:		HVC buffer size.
 * @has_more_data:	Pointer to an int variable.
 *
 * The function picks up pending messages from the input queue and receives
 * the message data that is then written to the specified buffer @buf.
 * If the buffer size @count is less than the data message size, the
 * message is kept on the input queue and @has_more_data is set to 1.
 * If all message data has been written, the message is removed from
 * the input queue.
 *
 * The function returns the number of bytes written to the terminal; zero if
 * there are no pending data messages available or if there is no established
 * IUCV path.
 * If the IUCV path has been severed, then -EPIPE is returned to cause a
 * hang up (that is issued by the HVC layer).
 */
static int hvc_iucv_write(struct hvc_iucv_private *priv,
			  char *buf, int count, int *has_more_data)
{
	struct iucv_tty_buffer *rb;
	int written;
	int rc;

	/* immediately return if there is no IUCV connection */
	if (priv->iucv_state == IUCV_DISCONN)
		return 0;

	/* if the IUCV path has been severed, return -EPIPE to inform the
	 * HVC layer to hang up the tty device. */
	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	/* check if there are pending messages */
	if (list_empty(&priv->tty_inqueue))
		return 0;

	/* receive an iucv message and flip data to the tty (ldisc) */
	rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);

	written = 0;
	if (!rb->mbuf) { /* message not yet received ... */
		/* allocate mem to store msg data; if no memory is available
		 * then leave the buffer on the list and re-try later */
		rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC);
		if (!rb->mbuf)
			return -ENOMEM;

		rc = __iucv_message_receive(priv->path, &rb->msg, 0,
					    rb->mbuf, rb->msg.length, NULL);
		switch (rc) {
		case 0:	/* Successful */
			break;
		case 2:	/* No message found */
		case 9:	/* Message purged */
			break;
		default:
			written = -EIO;
		}
		/* remove buffer if an error has occurred or received data
		 * is not correct */
		if (rc || (rb->mbuf->version != MSG_VERSION) ||
			  (rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
			goto out_remove_buffer;
	}

	switch (rb->mbuf->type) {
	case MSG_TYPE_DATA:
		written = min_t(int, rb->mbuf->datalen - rb->offset, count);
		memcpy(buf, rb->mbuf->data + rb->offset, written);
		if (written < (rb->mbuf->datalen - rb->offset)) {
			rb->offset += written;
			*has_more_data = 1;
			goto out_written;
		}
		break;

	case MSG_TYPE_WINSIZE:
		if (rb->mbuf->datalen != sizeof(struct winsize))
			break;
		hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
		break;

	case MSG_TYPE_ERROR:	/* ignored ... */
	case MSG_TYPE_TERMENV:	/* ignored ... */
	case MSG_TYPE_TERMIOS:	/* ignored ... */
		break;
	}

out_remove_buffer:
	list_del(&rb->list);
	destroy_tty_buffer(rb);
	*has_more_data = !list_empty(&priv->tty_inqueue);

out_written:
	return written;
}

/**
 * hvc_iucv_get_chars() - HVC get_chars operation.
 * @vtermno:	HVC virtual terminal number.
 * @buf:	Pointer to a buffer to store data
 * @count:	Size of buffer available for writing
 *
 * The HVC thread calls this method to read characters from the back-end.
 * If an IUCV communication path has been established, pending IUCV messages
 * are received and data is copied into buffer @buf up to @count bytes.
 *
 * Locking:	The routine gets called under an irqsave() spinlock; and
 *		the routine locks the struct hvc_iucv_private->lock to call
 *		helper functions.
 */
static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
{
	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
	int written;
	int has_more_data;

	if (count <= 0)
		return 0;

	if (!priv)
		return -ENODEV;

	spin_lock(&priv->lock);
	has_more_data = 0;
	written = hvc_iucv_write(priv, buf, count, &has_more_data);
	spin_unlock(&priv->lock);

	/* if there are still messages on the queue... schedule another run */
	if (has_more_data)
		hvc_kick();

	return written;
}

/**
 * hvc_iucv_queue() - Buffer terminal data for sending.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 * @buf:	Buffer containing data to send.
 * @count:	Size of buffer and amount of data to send.
 *
 * The function queues data for sending. To actually send the buffered data,
 * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
 * The function returns the number of data bytes that have been buffered.
 *
 * If the device is not connected, data is ignored and the function returns
 * @count.
 * If the buffer is full, the function returns 0.
 * If an existing IUCV communication path has been severed, -EPIPE is returned
 * (that can be passed to HVC layer to cause a tty hangup).
 */
static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
			  int count)
{
	size_t len;

	if (priv->iucv_state == IUCV_DISCONN)
		return count;			/* ignore data */

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
	if (!len)
		return 0;

	memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
	priv->sndbuf_len += len;

	if (priv->iucv_state == IUCV_CONNECTED)
		schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);

	return len;
}

/**
 * hvc_iucv_send() - Send an IUCV message containing terminal data.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 *
 * If an IUCV communication path has been established, the buffered output data
 * is sent via an IUCV message and the number of bytes sent is returned.
 * Returns 0 if there is no established IUCV communication path or
 * -EPIPE if an existing IUCV communication path has been severed.
 */
static int hvc_iucv_send(struct hvc_iucv_private *priv)
{
	struct iucv_tty_buffer *sb;
	int rc, len;

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	if (priv->iucv_state == IUCV_DISCONN)
		return -EIO;

	if (!priv->sndbuf_len)
		return 0;

	/* allocate internal buffer to store msg data and also compute total
	 * message length */
	sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
	if (!sb)
		return -ENOMEM;

	memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
	sb->mbuf->datalen = (u16) priv->sndbuf_len;
	sb->msg.length = MSG_SIZE(sb->mbuf->datalen);

	list_add_tail(&sb->list, &priv->tty_outqueue);

	rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
				 (void *) sb->mbuf, sb->msg.length);
	if (rc) {
		/* drop the message here; however we might want to handle
		 * 0x03 (msg limit reached) by trying again... */
		list_del(&sb->list);
		destroy_tty_buffer(sb);
	}
	len = priv->sndbuf_len;
	priv->sndbuf_len = 0;

	return len;
}

/**
 * hvc_iucv_sndbuf_work() - Send buffered data over IUCV
 * @work:	Work structure.
 *
 * This work queue function sends buffered output data over IUCV and,
 * if not all buffered data could be sent, reschedules itself.
 */
static void hvc_iucv_sndbuf_work(struct work_struct *work)
{
	struct hvc_iucv_private *priv;

	priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
	if (!priv)
		return;

	spin_lock_bh(&priv->lock);
	hvc_iucv_send(priv);
	spin_unlock_bh(&priv->lock);
}

/**
 * hvc_iucv_put_chars() - HVC put_chars operation.
 * @vtermno:	HVC virtual terminal number.
 * @buf:	Pointer to a buffer to read data from
 * @count:	Size of buffer available for reading
 *
 * The HVC thread calls this method to write characters to the back-end.
 * The function calls hvc_iucv_queue() to queue terminal data for sending.
 *
 * Locking:	The method gets called under an irqsave() spinlock; and
 *		locks struct hvc_iucv_private->lock.
 */
static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
{
	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
	int queued;

	if (count <= 0)
		return 0;

	if (!priv)
		return -ENODEV;

	spin_lock(&priv->lock);
	queued = hvc_iucv_queue(priv, buf, count);
	spin_unlock(&priv->lock);

	return queued;
}

/**
 * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc): the index of a struct
 *	hvc_iucv_private instance.
 *
 * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private
 * instance that is derived from @id. Always returns 0.
 *
 * Locking:	struct hvc_iucv_private->lock, spin_lock_bh
 */
static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return 0;

	spin_lock_bh(&priv->lock);
	priv->tty_state = TTY_OPENED;
	spin_unlock_bh(&priv->lock);

	return 0;
}

/**
 * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance.
 * @priv:	Pointer to the struct hvc_iucv_private instance.
 */
static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
{
	destroy_tty_buffer_list(&priv->tty_outqueue);
	destroy_tty_buffer_list(&priv->tty_inqueue);

	priv->tty_state = TTY_CLOSED;
	priv->iucv_state = IUCV_DISCONN;

	priv->sndbuf_len = 0;
}

/**
 * tty_outqueue_empty() - Test if the tty outq is empty
 * @priv:	Pointer to struct hvc_iucv_private instance.
 */
static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
{
	int rc;

	spin_lock_bh(&priv->lock);
	rc = list_empty(&priv->tty_outqueue);
	spin_unlock_bh(&priv->lock);

	return rc;
}

/**
 * flush_sndbuf_sync() - Flush send buffer and wait for completion
 * @priv:	Pointer to struct hvc_iucv_private instance.
 *
 * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
 * to flush any buffered terminal output data and waits for completion.
 */
static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
{
	int sync_wait;

	cancel_delayed_work_sync(&priv->sndbuf_work);

	spin_lock_bh(&priv->lock);
	hvc_iucv_send(priv);		/* force sending buffered data */
	sync_wait = !list_empty(&priv->tty_outqueue);	/* anything queued ? */
	spin_unlock_bh(&priv->lock);

	if (sync_wait)
		wait_event_timeout(priv->sndbuf_waitq,
				   tty_outqueue_empty(priv), HZ);
}

/**
 * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc):
 *	the index of a struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC back-end that a tty hangup (carrier loss,
 * virtual or otherwise) has occurred.
 * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
 * to keep an existing IUCV communication path established.
 * (Background: vhangup() is called from user space (by getty or login) to
 *  disable writing to the tty by other applications).
 * If the tty has been opened and an established IUCV path has been severed
 * (we caused the tty hangup), the function calls hvc_iucv_cleanup().
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	/* NOTE: If the hangup was scheduled by ourself (from the iucv
	 *	 path_severed callback [IUCV_SEVERED]), we have to clean up
	 *	 our structure and to set state to TTY_CLOSED.
	 *	 If the tty was hung up otherwise (e.g. vhangup()), then we
	 *	 ignore this hangup and keep an established IUCV path open...
	 *	 (...the reason is that we are not able to connect back to the
	 *	 client if we disconnect on hang up) */
	priv->tty_state = TTY_CLOSED;

	if (priv->iucv_state == IUCV_SEVERED)
		hvc_iucv_cleanup(priv);
	spin_unlock_bh(&priv->lock);
}

/**
 * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc):
 *	the index of a struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC back-end that the last tty device fd has been
 * closed. The function calls hvc_iucv_cleanup() to clean up the struct
 * hvc_iucv_private instance.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;
	struct iucv_path *path;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	path = priv->path;		/* save reference to IUCV path */
	priv->path = NULL;
	hvc_iucv_cleanup(priv);
	spin_unlock_bh(&priv->lock);

	/* sever IUCV path outside of priv->lock due to lock ordering of:
	 * priv->lock <--> iucv_table_lock */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/**
 * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
 * @ipvmid:	Originating z/VM user ID (right padded with blanks)
 *
 * Returns 0 if the z/VM user ID @ipvmid is allowed to connect, otherwise
 * non-zero.
 */
static int hvc_iucv_filter_connreq(u8 ipvmid[8])
{
	size_t i;

	/* Note: default policy is ACCEPT if no filter is set */
	if (!hvc_iucv_filter_size)
		return 0;

	for (i = 0; i < hvc_iucv_filter_size; i++)
		if (0 == memcmp(ipvmid, hvc_iucv_filter + (8 * i), 8))
			return 0;
	return 1;
}
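
/*
 * Illustration (derived from the code above): the incoming @ipvmid and each
 * filter entry are both 8 bytes, upper case, and right padded with blanks,
 * so a single memcmp() per entry suffices; e.g. a connecting guest LXGUEST1
 * matches only a filter slot that holds the identically padded "LXGUEST1".
 */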

/**
 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
 * @path:	Pending path (struct iucv_path)
 * @ipvmid:	z/VM system identifier of originator
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * The function uses the @ipuser data to determine if the pending path belongs
 * to a terminal managed by this device driver.
 * If the path belongs to this driver, ensure that the terminal is not accessed
 * multiple times (only one connection to a terminal is allowed).
 * If the terminal is not yet connected, the pending path is accepted and is
 * associated to the appropriate struct hvc_iucv_private instance.
 *
 * Returns 0 if @path belongs to a terminal managed by this device driver;
 * otherwise returns -ENODEV in order to dispatch this path to other handlers.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static int hvc_iucv_path_pending(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	struct hvc_iucv_private *priv;
	u8 nuser_data[16];
	u8 vm_user_id[9];
	int i, rc;

	priv = NULL;
	for (i = 0; i < hvc_iucv_devices; i++)
		if (hvc_iucv_table[i] &&
		    (0 == memcmp(hvc_iucv_table[i]->srv_name, ipuser, 8))) {
			priv = hvc_iucv_table[i];
			break;
		}
	if (!priv)
		return -ENODEV;

	/* Enforce that ipvmid is allowed to connect to us */
	read_lock(&hvc_iucv_filter_lock);
	rc = hvc_iucv_filter_connreq(ipvmid);
	read_unlock(&hvc_iucv_filter_lock);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		memcpy(vm_user_id, ipvmid, 8);
		vm_user_id[8] = 0;
		pr_info("A connection request from z/VM user ID %s "
			"was refused\n", vm_user_id);
		return 0;
	}

	spin_lock(&priv->lock);

	/* If the terminal is already connected or being severed, then sever
	 * this path to enforce that there is only ONE established communication
	 * path per terminal. */
	if (priv->iucv_state != IUCV_DISCONN) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}

	/* accept path */
	memcpy(nuser_data, ipuser + 8, 8);	/* remote service (for af_iucv) */
	memcpy(nuser_data + 8, ipuser, 8);	/* local service  (for af_iucv) */
	path->msglim = 0xffff;			/* IUCV MSGLIMIT */
	path->flags &= ~IUCV_IPRMDATA;		/* TODO: use IUCV_IPRMDATA */
	rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}
	priv->path = path;
	priv->iucv_state = IUCV_CONNECTED;

	/* flush buffered output data... */
	schedule_delayed_work(&priv->sndbuf_work, 5);

out_path_handled:
	spin_unlock(&priv->lock);
	return 0;
}

/**
 * hvc_iucv_path_severed() - IUCV handler to process a path sever.
 * @path:	Pending path (struct iucv_path)
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * The function also severs the path (as required by the IUCV protocol) and
 * sets the iucv state to IUCV_SEVERED for the associated struct
 * hvc_iucv_private instance. Later, the IUCV_SEVERED state triggers a tty
 * hangup (hvc_iucv_get_chars() / hvc_iucv_write()).
 * If the tty portion of the HVC is closed, clean up the outqueue.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
	struct hvc_iucv_private *priv = path->private;

	spin_lock(&priv->lock);
	priv->iucv_state = IUCV_SEVERED;

	/* If the tty has not yet been opened, clean up the hvc_iucv_private
	 * structure to allow re-connects.
	 * This is also done for our console device because console hangups
	 * are handled specially and no notifier is called by HVC.
	 * The tty session is active (TTY_OPENED) and ready for re-connects...
	 *
	 * If it has been opened, let get_chars() return -EPIPE to signal the
	 * HVC layer to hang up the tty.
	 * If so, we need to wake up the HVC thread to call get_chars()...
	 */
	priv->path = NULL;
	if (priv->tty_state == TTY_CLOSED)
		hvc_iucv_cleanup(priv);
	else
		if (priv->is_console) {
			hvc_iucv_cleanup(priv);
			priv->tty_state = TTY_OPENED;
		} else
			hvc_kick();
	spin_unlock(&priv->lock);

	/* finally sever path (outside of priv->lock due to lock ordering) */
	iucv_path_sever(path, ipuser);
	iucv_path_free(path);
}

/**
 * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function puts an incoming message on the input queue for later
 * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
 * If the tty has not yet been opened, the message is rejected.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_pending(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer *rb;

	/* reject messages that exceed max size of iucv_tty_msg->datalen */
	if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
		iucv_message_reject(path, msg);
		return;
	}

	spin_lock(&priv->lock);

	/* reject messages if tty has not yet been opened */
	if (priv->tty_state == TTY_CLOSED) {
		iucv_message_reject(path, msg);
		goto unlock_return;
	}

	/* allocate tty buffer to save iucv msg only */
	rb = alloc_tty_buffer(0, GFP_ATOMIC);
	if (!rb) {
		iucv_message_reject(path, msg);
		goto unlock_return;	/* -ENOMEM */
	}
	rb->msg = *msg;

	list_add_tail(&rb->list, &priv->tty_inqueue);

	hvc_kick();	/* wake up hvc thread */

unlock_return:
	spin_unlock(&priv->lock);
}

/**
 * hvc_iucv_msg_complete() - IUCV handler to process message completion
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function is called upon completion of message delivery to remove the
 * message from the outqueue. Additional delivery information can be found in
 * msg->audit: rejected messages (0x040000 (IPADRJCT)), and
 * purged messages (0x010000 (IPADPGNR)).
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_complete(struct iucv_path *path,
				  struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer *ent, *next;
	LIST_HEAD(list_remove);

	spin_lock(&priv->lock);
	list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
		if (ent->msg.id == msg->id) {
			list_move(&ent->list, &list_remove);
			break;
		}
	wake_up(&priv->sndbuf_waitq);
	spin_unlock(&priv->lock);
	destroy_tty_buffer_list(&list_remove);
}

/* HVC operations */
static struct hv_ops hvc_iucv_ops = {
	.get_chars = hvc_iucv_get_chars,
	.put_chars = hvc_iucv_put_chars,
	.notifier_add = hvc_iucv_notifier_add,
	.notifier_del = hvc_iucv_notifier_del,
	.notifier_hangup = hvc_iucv_notifier_hangup,
};

/**
 * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
 * @id:			hvc_iucv_table index
 * @is_console:		Flag if the instance is used as Linux console
 *
 * This function allocates a new hvc_iucv_private structure and stores
 * the instance in hvc_iucv_table at index @id.
 * Returns 0 on success; otherwise non-zero.
 */
static int __init hvc_iucv_alloc(int id, unsigned int is_console)
{
	struct hvc_iucv_private *priv;
	char name[9];
	int rc;

	priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->lock);
	INIT_LIST_HEAD(&priv->tty_outqueue);
	INIT_LIST_HEAD(&priv->tty_inqueue);
	INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
	init_waitqueue_head(&priv->sndbuf_waitq);

	priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
	if (!priv->sndbuf) {
		kfree(priv);
		return -ENOMEM;
	}

	/* set console flag */
	priv->is_console = is_console;

	/* finally allocate hvc */
	priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id,	/* PAGE_SIZE */
			      HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
	if (IS_ERR(priv->hvc)) {
		rc = PTR_ERR(priv->hvc);
		free_page((unsigned long) priv->sndbuf);
		kfree(priv);
		return rc;
	}

	/* notify HVC thread instead of using polling */
	priv->hvc->irq_requested = 1;

	/* setup iucv related information */
	snprintf(name, 9, "lnxhvc%-2d", id);
	memcpy(priv->srv_name, name, 8);
	ASCEBC(priv->srv_name, 8);

	hvc_iucv_table[id] = priv;
	return 0;
}

/**
 * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID
 * @filter:	String containing a comma-separated list of z/VM user IDs
 * @dest:	Buffer (8 bytes) to store the parsed, blank-padded z/VM user ID
 */
static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
{
	const char *nextdelim, *residual;
	size_t len;

	nextdelim = strchr(filter, ',');
	if (nextdelim) {
		len = nextdelim - filter;
		residual = nextdelim + 1;
	} else {
		len = strlen(filter);
		residual = filter + len;
	}

	if (len == 0)
		return ERR_PTR(-EINVAL);

	/* check for '\n' (if called from sysfs) */
	if (filter[len - 1] == '\n')
		len--;

	if (len > 8)
		return ERR_PTR(-EINVAL);

	/* pad with blanks and save upper case version of user ID */
	memset(dest, ' ', 8);
	while (len--)
		dest[len] = toupper(filter[len]);
	return residual;
}
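
/*
 * Example (illustrative only): for the hypothetical input "lxguest1,tcpip",
 * hvc_iucv_parse_filter() stores "LXGUEST1" in @dest (upper case, blank
 * padded to 8 characters) and returns a pointer to "tcpip", i.e. the residual
 * string after the comma, so the caller can continue with the next entry.
 */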

/**
 * hvc_iucv_setup_filter() - Set up z/VM user ID filter
 * @val:	String consisting of a comma-separated list of z/VM user IDs
 *
 * The function parses the @val string and creates an array containing
 * the list of z/VM user ID filter entries.
 * Return code 0 means success, -EINVAL if the filter is syntactically
 * incorrect, -ENOMEM if there was not enough memory to allocate the
 * filter list array, or -ENOSPC if too many z/VM user IDs have been specified.
 */
static int hvc_iucv_setup_filter(const char *val)
{
	const char *residual;
	int err;
	size_t size, count;
	void *array, *old_filter;

	count = strlen(val);
	if (count == 0 || (count == 1 && val[0] == '\n')) {
		size  = 0;
		array = NULL;
		goto out_replace_filter;	/* clear filter */
	}

	/* count user IDs in order to allocate sufficient memory */
	size = 1;
	residual = val;
	while ((residual = strchr(residual, ',')) != NULL) {
		residual++;
		size++;
	}

	/* check if the specified list exceeds the filter limit */
	if (size > MAX_VMID_FILTER)
		return -ENOSPC;

	array = kzalloc(size * 8, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	count = size;
	residual = val;
	while (*residual && count) {
		residual = hvc_iucv_parse_filter(residual,
						 array + ((size - count) * 8));
		if (IS_ERR(residual)) {
			err = PTR_ERR(residual);
			kfree(array);
			goto out_err;
		}
		count--;
	}

out_replace_filter:
	write_lock_bh(&hvc_iucv_filter_lock);
	old_filter = hvc_iucv_filter;
	hvc_iucv_filter_size = size;
	hvc_iucv_filter = array;
	write_unlock_bh(&hvc_iucv_filter_lock);
	kfree(old_filter);

	err = 0;
out_err:
	return err;
}

/**
 * param_set_vmidfilter() - Set z/VM user ID filter parameter
 * @val:	String consisting of a comma-separated list of z/VM user IDs
 * @kp:		Kernel parameter pointing to hvc_iucv_filter array
 *
 * The function sets up the z/VM user ID filter specified as comma-separated
 * list of user IDs in @val.
 * Note: If it is called early in the boot process, @val is stored and
 * parsed later in hvc_iucv_init().
 */
static int param_set_vmidfilter(const char *val, struct kernel_param *kp)
{
	int rc;

	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;

	if (!val)
		return -EINVAL;

	rc = 0;
	if (slab_is_available())
		rc = hvc_iucv_setup_filter(val);
	else
		hvc_iucv_filter_string = val;	/* defer... */
	return rc;
}

/**
 * param_get_vmidfilter() - Get z/VM user ID filter
 * @buffer:	Buffer to store z/VM user ID filter,
 *		(buffer size assumption PAGE_SIZE)
 * @kp:		Kernel parameter pointing to the hvc_iucv_filter array
 *
 * The function stores the filter as a comma-separated list of z/VM user IDs
 * in @buffer. Typically, sysfs routines call this function for attr show.
 */
static int param_get_vmidfilter(char *buffer, struct kernel_param *kp)
{
	int rc;
	size_t index, len;
	void *start, *end;

	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;

	rc = 0;
	read_lock_bh(&hvc_iucv_filter_lock);
	for (index = 0; index < hvc_iucv_filter_size; index++) {
		start = hvc_iucv_filter + (8 * index);
		end   = memchr(start, ' ', 8);
		len   = (end) ? end - start : 8;
		memcpy(buffer + rc, start, len);
		rc += len;
		buffer[rc++] = ',';
	}
	read_unlock_bh(&hvc_iucv_filter_lock);

	if (rc)
		buffer[--rc] = '\0';	/* replace last comma and update rc */
	return rc;
}

#define param_check_vmidfilter(name, p) __param_check(name, p, void)

/**
 * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
 */
static int __init hvc_iucv_init(void)
{
	int rc;
	unsigned int i;

	if (!hvc_iucv_devices)
		return -ENODEV;

	if (!MACHINE_IS_VM) {
		pr_notice("The z/VM IUCV HVC device driver cannot "
			   "be used without z/VM\n");
		rc = -ENODEV;
		goto out_error;
	}

	if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
		pr_err("%lu is not a valid value for the hvc_iucv= "
			"kernel parameter\n", hvc_iucv_devices);
		rc = -EINVAL;
		goto out_error;
	}

	/* parse hvc_iucv_allow string and create z/VM user ID filter list */
	if (hvc_iucv_filter_string) {
		rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
		switch (rc) {
		case 0:
			break;
		case -ENOMEM:
			pr_err("Allocating memory failed with "
				"reason code=%d\n", 3);
			goto out_error;
		case -EINVAL:
			pr_err("hvc_iucv_allow= does not specify a valid "
				"z/VM user ID list\n");
			goto out_error;
		case -ENOSPC:
			pr_err("hvc_iucv_allow= specifies too many "
				"z/VM user IDs\n");
			goto out_error;
		default:
			goto out_error;
		}
	}

	hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
					   sizeof(struct iucv_tty_buffer),
					   0, 0, NULL);
	if (!hvc_iucv_buffer_cache) {
		pr_err("Allocating memory failed with reason code=%d\n", 1);
		rc = -ENOMEM;
		goto out_error;
	}

	hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
						    hvc_iucv_buffer_cache);
	if (!hvc_iucv_mempool) {
		pr_err("Allocating memory failed with reason code=%d\n", 2);
		kmem_cache_destroy(hvc_iucv_buffer_cache);
		rc = -ENOMEM;
		goto out_error;
	}

	/* register the first terminal device as console
	 * (must be done before allocating hvc terminal devices) */
	rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
	if (rc) {
		pr_err("Registering HVC terminal device as "
		       "Linux console failed\n");
		goto out_error_memory;
	}

	/* allocate hvc_iucv_private structs */
	for (i = 0; i < hvc_iucv_devices; i++) {
		rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
		if (rc) {
			pr_err("Creating a new HVC terminal device "
				"failed with error code=%d\n", rc);
			goto out_error_hvc;
		}
	}

	/* register IUCV callback handler */
	rc = iucv_register(&hvc_iucv_handler, 0);
	if (rc) {
		pr_err("Registering IUCV handlers failed with error code=%d\n",
			rc);
		goto out_error_iucv;
	}

	return 0;

out_error_iucv:
	iucv_unregister(&hvc_iucv_handler, 0);
out_error_hvc:
	for (i = 0; i < hvc_iucv_devices; i++)
		if (hvc_iucv_table[i]) {
			if (hvc_iucv_table[i]->hvc)
				hvc_remove(hvc_iucv_table[i]->hvc);
			kfree(hvc_iucv_table[i]);
		}
out_error_memory:
	mempool_destroy(hvc_iucv_mempool);
	kmem_cache_destroy(hvc_iucv_buffer_cache);
out_error:
	hvc_iucv_devices = 0;	/* ensure that we do not provide any device */
	return rc;
}

/**
 * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter
 * @val:	Parameter value (numeric)
 */
static int __init hvc_iucv_config(char *val)
{
	return strict_strtoul(val, 10, &hvc_iucv_devices);
}

device_initcall(hvc_iucv_init);
__setup("hvc_iucv=", hvc_iucv_config);
core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);
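
/*
 * Usage sketch (illustrative values, not taken from this file): booting the
 * guest with
 *
 *	hvc_iucv=2 hvc_iucv_allow=LXGUEST1,LXGUEST2
 *
 * would create two IUCV HVC terminal devices and refuse IUCV connection
 * requests from any z/VM user ID other than LXGUEST1 and LXGUEST2. The allow
 * list can presumably also be updated later through the hvc_iucv_allow
 * parameter declared by core_param() above (see param_set_vmidfilter()).
 */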