/*
 *  linux/net/iucv/af_iucv.c
 *
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright 2006 IBM Corporation
 *
 *  Author(s):  Jennifer Hunt <jenhunt@us.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>

#define CONFIG_IUCV_SOCK_DEBUG 1

#define IPRMDATA 0x80
#define VERSION "1.1"

static char iucv_userid[80];

static struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
        .name           = "AF_IUCV",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct iucv_sock),
};

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
                                 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
        .autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
        .path_pending     = iucv_callback_connreq,
        .path_complete    = iucv_callback_connack,
        .path_severed     = iucv_callback_connrej,
        .message_pending  = iucv_callback_rx,
        .message_complete = iucv_callback_txdone,
        .path_quiesced    = iucv_callback_shutdown,
};
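
/*
 * high_nmcpy()/low_nmcpy() copy an 8-byte application name into the first
 * resp. second half of the 16-byte IUCV user data that identifies a path
 * (destination name in bytes 0-7, source name in bytes 8-15).
 */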
static inline void high_nmcpy(unsigned char *dst, char *src)
{
        memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
        memcpy(&dst[8], src, 8);
}

/* Timers */
static void iucv_sock_timeout(unsigned long arg)
{
        struct sock *sk = (struct sock *)arg;

        bh_lock_sock(sk);
        sk->sk_err = ETIMEDOUT;
        sk->sk_state_change(sk);
        bh_unlock_sock(sk);

        iucv_sock_kill(sk);
        sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
        sk_stop_timer(sk, &sk->sk_timer);
}
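
/*
 * Look up a bound socket by its 8-byte application name.  Callers serialize
 * against concurrent list changes with iucv_sk_list.lock.
 */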
static struct sock *__iucv_get_sock_by_name(char *nm)
{
        struct sock *sk;
        struct hlist_node *node;

        sk_for_each(sk, node, &iucv_sk_list.head)
                if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
                        return sk;

        return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_write_queue);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
        struct sock *sk;

        /* Close non-accepted connections */
        while ((sk = iucv_accept_dequeue(parent, NULL))) {
                iucv_sock_close(sk);
                iucv_sock_kill(sk);
        }

        parent->sk_state = IUCV_CLOSED;
        sock_set_flag(parent, SOCK_ZAPPED);
}

/* Kill socket */
static void iucv_sock_kill(struct sock *sk)
{
        if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
                return;

        iucv_sock_unlink(&iucv_sk_list, sk);
        sock_set_flag(sk, SOCK_DEAD);
        sock_put(sk);
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
        unsigned char user_data[16];
        struct iucv_sock *iucv = iucv_sk(sk);
        int err;
        unsigned long timeo;

        iucv_sock_clear_timer(sk);
        lock_sock(sk);

        switch (sk->sk_state) {
        case IUCV_LISTEN:
                iucv_sock_cleanup_listen(sk);
                break;

        case IUCV_CONNECTED:
        case IUCV_DISCONN:
                err = 0;

                sk->sk_state = IUCV_CLOSING;
                sk->sk_state_change(sk);

                if (!skb_queue_empty(&iucv->send_skb_q)) {
                        if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
                                timeo = sk->sk_lingertime;
                        else
                                timeo = IUCV_DISCONN_TIMEOUT;
                        err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
                }

                sk->sk_state = IUCV_CLOSED;
                sk->sk_state_change(sk);

                if (iucv->path) {
                        low_nmcpy(user_data, iucv->src_name);
                        high_nmcpy(user_data, iucv->dst_name);
                        ASCEBC(user_data, sizeof(user_data));

                        err = iucv_path_sever(iucv->path, user_data);

                        iucv_path_free(iucv->path);
                        iucv->path = NULL;
                }

                sk->sk_err = ECONNRESET;
                sk->sk_state_change(sk);

                skb_queue_purge(&iucv->send_skb_q);
                skb_queue_purge(&iucv->backlog_skb_q);

                sock_set_flag(sk, SOCK_ZAPPED);
                break;

        default:
                sock_set_flag(sk, SOCK_ZAPPED);
                break;
        }

        release_sock(sk);
        iucv_sock_kill(sk);
}
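
/*
 * Allocate and initialize a new IUCV socket: iucv_sock_alloc() creates the
 * sock in IUCV_OPEN state, sets up its queues and timer and links it into
 * iucv_sk_list; iucv_sock_init() inherits the socket type from a parent
 * (listening) socket.  sk_allocation is set to GFP_DMA so that skbs handed
 * to IUCV are allocated below 2 GB.
 */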
static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
        if (parent)
                sk->sk_type = parent->sk_type;
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
        struct sock *sk;

        sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
        if (!sk)
                return NULL;

        sock_init_data(sock, sk);
        INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
        spin_lock_init(&iucv_sk(sk)->accept_q_lock);
        skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
        INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
        spin_lock_init(&iucv_sk(sk)->message_q.lock);
        skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
        iucv_sk(sk)->send_tag = 0;
        iucv_sk(sk)->flags = 0;

        sk->sk_destruct = iucv_sock_destruct;
        sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
        sk->sk_allocation = GFP_DMA;

        sock_reset_flag(sk, SOCK_ZAPPED);

        sk->sk_protocol = proto;
        sk->sk_state = IUCV_OPEN;

        setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);

        iucv_sock_link(&iucv_sk_list, sk);
        return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
{
        struct sock *sk;

        if (sock->type != SOCK_STREAM)
                return -ESOCKTNOSUPPORT;

        sock->state = SS_UNCONNECTED;
        sock->ops = &iucv_sock_ops;

        sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
        if (!sk)
                return -ENOMEM;

        iucv_sock_init(sk, NULL);

        return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_add_node(sk, &l->head);
        write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_del_node_init(sk);
        write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
        unsigned long flags;
        struct iucv_sock *par = iucv_sk(parent);

        sock_hold(sk);
        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        iucv_sk(sk)->parent = parent;
        parent->sk_ack_backlog++;
}

void iucv_accept_unlink(struct sock *sk)
{
        unsigned long flags;
        struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_del_init(&iucv_sk(sk)->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        iucv_sk(sk)->parent->sk_ack_backlog--;
        iucv_sk(sk)->parent = NULL;
        sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
        struct iucv_sock *isk, *n;
        struct sock *sk;

        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *) isk;
                lock_sock(sk);

                if (sk->sk_state == IUCV_CLOSED) {
                        iucv_accept_unlink(sk);
                        release_sock(sk);
                        continue;
                }

                if (sk->sk_state == IUCV_CONNECTED ||
                    sk->sk_state == IUCV_SEVERED ||
                    !newsock) {
                        iucv_accept_unlink(sk);
                        if (newsock)
                                sock_graft(sk, newsock);

                        if (sk->sk_state == IUCV_SEVERED)
                                sk->sk_state = IUCV_DISCONN;

                        release_sock(sk);
                        return sk;
                }

                release_sock(sk);
        }
        return NULL;
}
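
/*
 * Wait until the socket enters one of the two given states.  The wait is
 * interruptible and bounded by @timeo; a pending signal, an expired timeout
 * or a socket error terminates it early.
 */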
int iucv_sock_wait_state(struct sock *sk, int state, int state2,
                         unsigned long timeo)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        add_wait_queue(sk->sk_sleep, &wait);
        while (sk->sk_state != state && sk->sk_state != state2) {
                set_current_state(TASK_INTERRUPTIBLE);

                if (!timeo) {
                        err = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        err = sock_intr_errno(timeo);
                        break;
                }

                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock(sk);

                err = sock_error(sk);
                if (err)
                        break;
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk->sk_sleep, &wait);
        return err;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
                          int addr_len)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv;
        int err;

        /* Verify the input sockaddr */
        if (!addr || addr->sa_family != AF_IUCV)
                return -EINVAL;

        lock_sock(sk);
        if (sk->sk_state != IUCV_OPEN) {
                err = -EBADFD;
                goto done;
        }

        write_lock_bh(&iucv_sk_list.lock);

        iucv = iucv_sk(sk);
        if (__iucv_get_sock_by_name(sa->siucv_name)) {
                err = -EADDRINUSE;
                goto done_unlock;
        }
        if (iucv->path) {
                err = 0;
                goto done_unlock;
        }

        /* Bind the socket */
        memcpy(iucv->src_name, sa->siucv_name, 8);

        /* Copy the user id */
        memcpy(iucv->src_user_id, iucv_userid, 8);

        sk->sk_state = IUCV_BOUND;
        err = 0;

done_unlock:
        /* Release the socket list lock */
        write_unlock_bh(&iucv_sk_list.lock);
done:
        release_sock(sk);
        return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        char query_buffer[80];
        char name[12];
        int err = 0;

        /* Set the userid and name */
        cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
        if (unlikely(err))
                return -EPROTO;

        memcpy(iucv->src_user_id, query_buffer, 8);

        write_lock_bh(&iucv_sk_list.lock);

        sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
        while (__iucv_get_sock_by_name(name)) {
                sprintf(name, "%08x",
                        atomic_inc_return(&iucv_sk_list.autobind_name));
        }

        write_unlock_bh(&iucv_sk_list.lock);

        memcpy(&iucv->src_name, name, 8);

        return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
                             int alen, int flags)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv;
        unsigned char user_data[16];
        int err;

        if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
                return -EINVAL;

        if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
                return -EBADFD;

        if (sk->sk_type != SOCK_STREAM)
                return -EINVAL;

        iucv = iucv_sk(sk);

        if (sk->sk_state == IUCV_OPEN) {
                err = iucv_sock_autobind(sk);
                if (unlikely(err))
                        return err;
        }

        lock_sock(sk);

        /* Set the destination information */
        memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
        memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);

        high_nmcpy(user_data, sa->siucv_name);
        low_nmcpy(user_data, iucv_sk(sk)->src_name);
        ASCEBC(user_data, sizeof(user_data));

        iucv = iucv_sk(sk);
        /* Create path. */
        iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
                                     IPRMDATA, GFP_KERNEL);
        if (!iucv->path) {
                err = -ENOMEM;
                goto done;
        }
        err = iucv_path_connect(iucv->path, &af_iucv_handler,
                                sa->siucv_user_id, NULL, user_data, sk);
        if (err) {
                iucv_path_free(iucv->path);
                iucv->path = NULL;
                switch (err) {
                case 0x0b:      /* Target communicator is not logged on */
                        err = -ENETUNREACH;
                        break;
                case 0x0d:      /* Max connections for this guest exceeded */
                case 0x0e:      /* Max connections for target guest exceeded */
                        err = -EAGAIN;
                        break;
                case 0x0f:      /* Missing IUCV authorization */
                        err = -EACCES;
                        break;
                default:
                        err = -ECONNREFUSED;
                        break;
                }
                goto done;
        }

        if (sk->sk_state != IUCV_CONNECTED) {
                err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
                                           sock_sndtimeo(sk, flags & O_NONBLOCK));
        }

        if (sk->sk_state == IUCV_DISCONN) {
                release_sock(sk);
                return -ECONNREFUSED;
        }

        if (err) {
                iucv_path_sever(iucv->path, NULL);
                iucv_path_free(iucv->path);
                iucv->path = NULL;
        }

done:
        release_sock(sk);
        return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        int err;

        lock_sock(sk);

        err = -EINVAL;
        if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
                goto done;

        sk->sk_max_ack_backlog = backlog;
        sk->sk_ack_backlog = 0;
        sk->sk_state = IUCV_LISTEN;
        err = 0;

done:
        release_sock(sk);
        return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
                            int flags)
{
        DECLARE_WAITQUEUE(wait, current);
        struct sock *sk = sock->sk, *nsk;
        long timeo;
        int err = 0;

        lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

        if (sk->sk_state != IUCV_LISTEN) {
                err = -EBADFD;
                goto done;
        }

        timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

        /* Wait for an incoming connection */
        add_wait_queue_exclusive(sk->sk_sleep, &wait);
        while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (!timeo) {
                        err = -EAGAIN;
                        break;
                }

                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

                if (sk->sk_state != IUCV_LISTEN) {
                        err = -EBADFD;
                        break;
                }

                if (signal_pending(current)) {
                        err = sock_intr_errno(timeo);
                        break;
                }
        }

        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk->sk_sleep, &wait);

        if (err)
                goto done;

        newsock->state = SS_CONNECTED;

done:
        release_sock(sk);
        return err;
}
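
/*
 * Return the local (peer == 0) or remote (peer != 0) IUCV address of the
 * socket; the sockaddr_iucv fields that af_iucv does not use are zeroed.
 */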
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
                             int *len, int peer)
{
        struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;

        addr->sa_family = AF_IUCV;
        *len = sizeof(struct sockaddr_iucv);

        if (peer) {
                memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
                memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
        } else {
                memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
                memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
        }
        memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
        memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
        memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

        return 0;
}
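
/*
 * Send one message on a connected socket: the user data is copied into a
 * DMA-capable skb, the message class is seeded from the first (up to four)
 * data bytes, and the skb is tagged and kept on send_skb_q until the
 * transmit completion callback (iucv_callback_txdone) frees it.
 */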
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                             struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        struct iucv_message txmsg;
        char user_id[9];
        char appl_id[9];
        int err;

        err = sock_error(sk);
        if (err)
                return err;

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        lock_sock(sk);

        if (sk->sk_shutdown & SEND_SHUTDOWN) {
                err = -EPIPE;
                goto out;
        }

        if (sk->sk_state == IUCV_CONNECTED) {
                if (!(skb = sock_alloc_send_skb(sk, len,
                                                msg->msg_flags & MSG_DONTWAIT,
                                                &err)))
                        goto out;

                if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                        err = -EFAULT;
                        goto fail;
                }

                txmsg.class = 0;
                memcpy(&txmsg.class, skb->data, skb->len >= 4 ? 4 : skb->len);
                txmsg.tag = iucv->send_tag++;
                memcpy(skb->cb, &txmsg.tag, 4);
                skb_queue_tail(&iucv->send_skb_q, skb);
                err = iucv_message_send(iucv->path, &txmsg, 0, 0,
                                        (void *) skb->data, skb->len);
                if (err) {
                        if (err == 3) {
                                user_id[8] = 0;
                                memcpy(user_id, iucv->dst_user_id, 8);
                                appl_id[8] = 0;
                                memcpy(appl_id, iucv->dst_name, 8);
                                pr_err("Application %s on z/VM guest %s"
                                       " exceeds message limit\n",
                                       appl_id, user_id);
                        }
                        skb_unlink(skb, &iucv->send_skb_q);
                        err = -EPIPE;
                        goto fail;
                }

        } else {
                err = -ENOTCONN;
                goto out;
        }

        release_sock(sk);
        return len;

fail:
        kfree_skb(skb);
out:
        release_sock(sk);
        return err;
}
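
/*
 * Split a received message that exceeds a quarter of the socket receive
 * buffer into smaller skbs and queue them on the backlog queue.
 */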
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
        int dataleft, size, copied = 0;
        struct sk_buff *nskb;

        dataleft = len;
        while (dataleft) {
                if (dataleft >= sk->sk_rcvbuf / 4)
                        size = sk->sk_rcvbuf / 4;
                else
                        size = dataleft;

                nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
                if (!nskb)
                        return -ENOMEM;

                memcpy(nskb->data, skb->data + copied, size);
                copied += size;
                dataleft -= size;

                skb_reset_transport_header(nskb);
                skb_reset_network_header(nskb);
                nskb->len = size;

                skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
        }

        return 0;
}
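
/*
 * Receive a single IUCV message into the given skb and queue it on the
 * socket.  Messages larger than sk_rcvbuf / 4 are fragmented first; if the
 * receive queue is full, the skb is parked on the backlog queue instead.
 */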
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
                                 struct iucv_path *path,
                                 struct iucv_message *msg)
{
        int rc;

        if (msg->flags & IPRMDATA) {
                skb->data = NULL;
                skb->len = 0;
        } else {
                rc = iucv_message_receive(path, msg, 0, skb->data,
                                          msg->length, NULL);
                if (rc) {
                        kfree_skb(skb);
                        return;
                }
                if (skb->truesize >= sk->sk_rcvbuf / 4) {
                        rc = iucv_fragment_skb(sk, skb, msg->length);
                        kfree_skb(skb);
                        skb = NULL;
                        if (rc) {
                                iucv_path_sever(path, NULL);
                                return;
                        }
                        skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
                } else {
                        skb_reset_transport_header(skb);
                        skb_reset_network_header(skb);
                        skb->len = msg->length;
                }
        }

        if (sock_queue_rcv_skb(sk, skb))
                skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}
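
/*
 * Work through messages that were saved on message_q while the receive path
 * was congested; stop as soon as skbs pile up on the backlog queue again.
 */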
static void iucv_process_message_q(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        struct sock_msg_q *p, *n;

        list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
                skb = alloc_skb(p->msg.length, GFP_ATOMIC | GFP_DMA);
                if (!skb)
                        break;
                iucv_process_message(sk, skb, p->path, &p->msg);
                list_del(&p->list);
                kfree(p);
                if (!skb_queue_empty(&iucv->backlog_skb_q))
                        break;
        }
}
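
/*
 * Receive data from the socket.  After the user copy, pending skbs from the
 * backlog queue and saved messages from message_q are moved back onto the
 * regular receive queue as space becomes available.
 */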
static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                             struct msghdr *msg, size_t len, int flags)
{
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        int target, copied = 0;
        struct sk_buff *skb, *rskb, *cskb;
        int err = 0;

        if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
            skb_queue_empty(&iucv->backlog_skb_q) &&
            skb_queue_empty(&sk->sk_receive_queue) &&
            list_empty(&iucv->message_q.list))
                return 0;

        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;

        target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb) {
                if (sk->sk_shutdown & RCV_SHUTDOWN)
                        return 0;
                return err;
        }

        copied = min_t(unsigned int, skb->len, len);

        cskb = skb;
        if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
                skb_queue_head(&sk->sk_receive_queue, skb);
                if (copied == 0)
                        return -EFAULT;
                goto done;
        }

        len -= copied;

        /* Mark read part of skb as used */
        if (!(flags & MSG_PEEK)) {
                skb_pull(skb, copied);

                if (skb->len) {
                        skb_queue_head(&sk->sk_receive_queue, skb);
                        goto done;
                }

                kfree_skb(skb);

                /* Queue backlog skbs */
                rskb = skb_dequeue(&iucv->backlog_skb_q);
                while (rskb) {
                        if (sock_queue_rcv_skb(sk, rskb)) {
                                skb_queue_head(&iucv->backlog_skb_q,
                                               rskb);
                                break;
                        } else {
                                rskb = skb_dequeue(&iucv->backlog_skb_q);
                        }
                }
                if (skb_queue_empty(&iucv->backlog_skb_q)) {
                        spin_lock_bh(&iucv->message_q.lock);
                        if (!list_empty(&iucv->message_q.list))
                                iucv_process_message_q(sk);
                        spin_unlock_bh(&iucv->message_q.lock);
                }

        } else
                skb_queue_head(&sk->sk_receive_queue, skb);

done:
        return err ? : copied;
}

static inline unsigned int iucv_accept_poll(struct sock *parent)
{
        struct iucv_sock *isk, *n;
        struct sock *sk;

        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *) isk;

                if (sk->sk_state == IUCV_CONNECTED)
                        return POLLIN | POLLRDNORM;
        }

        return 0;
}

unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
                            poll_table *wait)
{
        struct sock *sk = sock->sk;
        unsigned int mask = 0;

        poll_wait(file, sk->sk_sleep, wait);

        if (sk->sk_state == IUCV_LISTEN)
                return iucv_accept_poll(sk);

        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= POLLERR;

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLRDHUP;

        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;

        if (!skb_queue_empty(&sk->sk_receive_queue) ||
            (sk->sk_shutdown & RCV_SHUTDOWN))
                mask |= POLLIN | POLLRDNORM;

        if (sk->sk_state == IUCV_CLOSED)
                mask |= POLLHUP;

        if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
                mask |= POLLIN;

        if (sock_writeable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
                set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

        return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct iucv_message txmsg;
        int err = 0;
        u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

        how++;

        if ((how & ~SHUTDOWN_MASK) || !how)
                return -EINVAL;

        lock_sock(sk);
        switch (sk->sk_state) {
        case IUCV_CLOSED:
                err = -ENOTCONN;
                goto fail;

        default:
                sk->sk_shutdown |= how;
                break;
        }

        if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
                txmsg.class = 0;
                txmsg.tag = 0;
                err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
                                        (void *) prmmsg, 8);
                if (err) {
                        switch (err) {
                        case 1:
                                err = -ENOTCONN;
                                break;
                        case 2:
                                err = -ECONNRESET;
                                break;
                        default:
                                err = -ENOTCONN;
                                break;
                        }
                }
        }

        if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
                err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
                if (err)
                        err = -ENOTCONN;

                skb_queue_purge(&sk->sk_receive_queue);
        }

        /* Wake up anyone sleeping in poll */
        sk->sk_state_change(sk);

fail:
        release_sock(sk);
        return err;
}

static int iucv_sock_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        int err = 0;

        if (!sk)
                return 0;

        iucv_sock_close(sk);

        /* Unregister with IUCV base support */
        if (iucv_sk(sk)->path) {
                iucv_path_sever(iucv_sk(sk)->path, NULL);
                iucv_path_free(iucv_sk(sk)->path);
                iucv_sk(sk)->path = NULL;
        }

        sock_orphan(sk);
        iucv_sock_kill(sk);
        return err;
}

/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
                                char __user *optval, int optlen)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        int val;
        int rc;

        if (level != SOL_IUCV)
                return -ENOPROTOOPT;

        if (optlen < sizeof(int))
                return -EINVAL;

        if (get_user(val, (int __user *) optval))
                return -EFAULT;

        rc = 0;

        lock_sock(sk);
        switch (optname) {
        case SO_IPRMDATA_MSG:
                if (val)
                        iucv->flags |= IUCV_IPRMDATA;
                else
                        iucv->flags &= ~IUCV_IPRMDATA;
                break;
        default:
                rc = -ENOPROTOOPT;
                break;
        }
        release_sock(sk);

        return rc;
}

static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
                                char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        int val, len;

        if (level != SOL_IUCV)
                return -ENOPROTOOPT;

        if (get_user(len, optlen))
                return -EFAULT;

        if (len < 0)
                return -EINVAL;

        len = min_t(unsigned int, len, sizeof(int));

        switch (optname) {
        case SO_IPRMDATA_MSG:
                val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
                break;
        default:
                return -ENOPROTOOPT;
        }

        if (put_user(len, optlen))
                return -EFAULT;
        if (copy_to_user(optval, &val, len))
                return -EFAULT;

        return 0;
}

/* Callback wrappers - called from iucv base support */
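/*
 * path_pending callback: an incoming connection request.  If no af_iucv
 * socket is listening on the requested application name, -EINVAL tells the
 * base layer that the path is not ours; otherwise a child socket is created
 * and the path is accepted, or severed on error.
 */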
static int iucv_callback_connreq(struct iucv_path *path,
                                 u8 ipvmid[8], u8 ipuser[16])
{
        unsigned char user_data[16];
        unsigned char nuser_data[16];
        unsigned char src_name[8];
        struct hlist_node *node;
        struct sock *sk, *nsk;
        struct iucv_sock *iucv, *niucv;
        int err;

        memcpy(src_name, ipuser, 8);
        EBCASC(src_name, 8);
        /* Find out if this path belongs to af_iucv. */
        read_lock(&iucv_sk_list.lock);
        iucv = NULL;
        sk = NULL;
        sk_for_each(sk, node, &iucv_sk_list.head)
                if (sk->sk_state == IUCV_LISTEN &&
                    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
                        /*
                         * Found a listening socket with
                         * src_name == ipuser[0-7].
                         */
                        iucv = iucv_sk(sk);
                        break;
                }
        read_unlock(&iucv_sk_list.lock);
        if (!iucv)
                /* No socket found, not one of our paths. */
                return -EINVAL;

        bh_lock_sock(sk);

        /* Check if parent socket is listening */
        low_nmcpy(user_data, iucv->src_name);
        high_nmcpy(user_data, iucv->dst_name);
        ASCEBC(user_data, sizeof(user_data));
        if (sk->sk_state != IUCV_LISTEN) {
                err = iucv_path_sever(path, user_data);
                iucv_path_free(path);
                goto fail;
        }

        /* Check for backlog size */
        if (sk_acceptq_is_full(sk)) {
                err = iucv_path_sever(path, user_data);
                iucv_path_free(path);
                goto fail;
        }

        /* Create the new socket */
        nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
        if (!nsk) {
                err = iucv_path_sever(path, user_data);
                iucv_path_free(path);
                goto fail;
        }

        niucv = iucv_sk(nsk);
        iucv_sock_init(nsk, sk);

        /* Set the new iucv_sock */
        memcpy(niucv->dst_name, ipuser + 8, 8);
        EBCASC(niucv->dst_name, 8);
        memcpy(niucv->dst_user_id, ipvmid, 8);
        memcpy(niucv->src_name, iucv->src_name, 8);
        memcpy(niucv->src_user_id, iucv->src_user_id, 8);
        niucv->path = path;

        /* Call iucv_accept */
        high_nmcpy(nuser_data, ipuser + 8);
        memcpy(nuser_data + 8, niucv->src_name, 8);
        ASCEBC(nuser_data + 8, 8);

        path->msglim = IUCV_QUEUELEN_DEFAULT;
        err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
        if (err) {
                err = iucv_path_sever(path, user_data);
                iucv_path_free(path);
                iucv_sock_kill(nsk);
                goto fail;
        }

        iucv_accept_enqueue(sk, nsk);

        /* Wake up accept */
        nsk->sk_state = IUCV_CONNECTED;
        sk->sk_data_ready(sk, 1);
        err = 0;
fail:
        bh_unlock_sock(sk);
        return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
        struct sock *sk = path->private;

        sk->sk_state = IUCV_CONNECTED;
        sk->sk_state_change(sk);
}
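
/*
 * message_pending callback: receive the message right away if it fits into
 * the socket receive buffer; otherwise remember path and message descriptor
 * on message_q so that recvmsg() can pick it up later.
 */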
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
        struct sock *sk = path->private;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        struct sock_msg_q *save_msg;
        int len;

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                return;

        if (!list_empty(&iucv->message_q.list) ||
            !skb_queue_empty(&iucv->backlog_skb_q))
                goto save_message;

        len = atomic_read(&sk->sk_rmem_alloc);
        len += msg->length + sizeof(struct sk_buff);
        if (len > sk->sk_rcvbuf)
                goto save_message;

        skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
        if (!skb)
                goto save_message;

        spin_lock(&iucv->message_q.lock);
        iucv_process_message(sk, skb, path, msg);
        spin_unlock(&iucv->message_q.lock);

        return;

save_message:
        save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
        if (!save_msg)
                return;

        save_msg->path = path;
        save_msg->msg = *msg;

        spin_lock(&iucv->message_q.lock);
        list_add_tail(&save_msg->list, &iucv->message_q.list);
        spin_unlock(&iucv->message_q.lock);
}
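
/*
 * message_complete callback: the peer has received the message whose tag is
 * given in msg->tag.  Find the matching skb on send_skb_q, unlink and free
 * it; once the send queue drains while the socket is closing, move the
 * socket to IUCV_CLOSED.
 */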
static void iucv_callback_txdone(struct iucv_path *path,
                                 struct iucv_message *msg)
{
        struct sock *sk = path->private;
        struct sk_buff *this = NULL;
        struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
        struct sk_buff *list_skb = list->next;
        unsigned long flags;

        if (!skb_queue_empty(list)) {
                spin_lock_irqsave(&list->lock, flags);

                while (list_skb != (struct sk_buff *)list) {
                        if (!memcmp(&msg->tag, list_skb->cb, 4)) {
                                this = list_skb;
                                break;
                        }
                        list_skb = list_skb->next;
                }
                if (this)
                        __skb_unlink(this, list);

                spin_unlock_irqrestore(&list->lock, flags);

                kfree_skb(this);
        }
        BUG_ON(!this);

        if (sk->sk_state == IUCV_CLOSING) {
                if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
                        sk->sk_state = IUCV_CLOSED;
                        sk->sk_state_change(sk);
                }
        }
}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
        struct sock *sk = path->private;

        if (!list_empty(&iucv_sk(sk)->accept_q))
                sk->sk_state = IUCV_SEVERED;
        else
                sk->sk_state = IUCV_DISCONN;

        sk->sk_state_change(sk);
}

/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
        struct sock *sk = path->private;

        bh_lock_sock(sk);
        if (sk->sk_state != IUCV_CLOSED) {
                sk->sk_shutdown |= SEND_SHUTDOWN;
                sk->sk_state_change(sk);
        }
        bh_unlock_sock(sk);
}

static struct proto_ops iucv_sock_ops = {
        .family         = PF_IUCV,
        .owner          = THIS_MODULE,
        .release        = iucv_sock_release,
        .bind           = iucv_sock_bind,
        .connect        = iucv_sock_connect,
        .listen         = iucv_sock_listen,
        .accept         = iucv_sock_accept,
        .getname        = iucv_sock_getname,
        .sendmsg        = iucv_sock_sendmsg,
        .recvmsg        = iucv_sock_recvmsg,
        .poll           = iucv_sock_poll,
        .ioctl          = sock_no_ioctl,
        .mmap           = sock_no_mmap,
        .socketpair     = sock_no_socketpair,
        .shutdown       = iucv_sock_shutdown,
        .setsockopt     = iucv_sock_setsockopt,
        .getsockopt     = iucv_sock_getsockopt,
};

static struct net_proto_family iucv_sock_family_ops = {
        .family = AF_IUCV,
        .owner  = THIS_MODULE,
        .create = iucv_sock_create,
};

static int __init afiucv_init(void)
{
        int err;

        if (!MACHINE_IS_VM) {
                pr_err("The af_iucv module cannot be loaded"
                       " without z/VM\n");
                err = -EPROTONOSUPPORT;
                goto out;
        }
        cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
        if (unlikely(err)) {
                WARN_ON(err);
                err = -EPROTONOSUPPORT;
                goto out;
        }

        err = iucv_register(&af_iucv_handler, 0);
        if (err)
                goto out;
        err = proto_register(&iucv_proto, 0);
        if (err)
                goto out_iucv;
        err = sock_register(&iucv_sock_family_ops);
        if (err)
                goto out_proto;
        return 0;

out_proto:
        proto_unregister(&iucv_proto);
out_iucv:
        iucv_unregister(&af_iucv_handler, 0);
out:
        return err;
}

static void __exit afiucv_exit(void)
{
        sock_unregister(PF_IUCV);
        proto_unregister(&iucv_proto);
        iucv_unregister(&af_iucv_handler, 0);
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);
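
/*
 * Illustrative only, not part of the module: a minimal userspace sketch of
 * how a server would use the AF_IUCV socket family implemented above.  The
 * application name below is a placeholder; both sockaddr_iucv name fields
 * are 8 bytes and blank-padded, and bind() takes the local z/VM user ID
 * from the guest itself (siucv_name selects the application name).
 *
 *      struct sockaddr_iucv addr;
 *      int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *
 *      memset(&addr, 0, sizeof(addr));
 *      addr.siucv_family = AF_IUCV;
 *      memcpy(addr.siucv_name, "MYAPPL  ", 8);
 *      bind(fd, (struct sockaddr *) &addr, sizeof(addr));
 *      listen(fd, 10);
 *      ... accept(), recv(), send() as with any stream socket ...
 */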