af_iucv.c

  1. /*
  2. * linux/net/iucv/af_iucv.c
  3. *
  4. * IUCV protocol stack for Linux on zSeries
  5. *
  6. * Copyright 2006 IBM Corporation
  7. *
  8. * Author(s): Jennifer Hunt <jenhunt@us.ibm.com>
  9. */
  10. #define KMSG_COMPONENT "af_iucv"
  11. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  12. #include <linux/module.h>
  13. #include <linux/types.h>
  14. #include <linux/list.h>
  15. #include <linux/errno.h>
  16. #include <linux/kernel.h>
  17. #include <linux/sched.h>
  18. #include <linux/slab.h>
  19. #include <linux/skbuff.h>
  20. #include <linux/init.h>
  21. #include <linux/poll.h>
  22. #include <net/sock.h>
  23. #include <asm/ebcdic.h>
  24. #include <asm/cpcmd.h>
  25. #include <linux/kmod.h>
  26. #include <net/iucv/iucv.h>
  27. #include <net/iucv/af_iucv.h>
  28. #define VERSION "1.1"
  29. static char iucv_userid[80];
  30. static struct proto_ops iucv_sock_ops;
  31. static struct proto iucv_proto = {
  32. .name = "AF_IUCV",
  33. .owner = THIS_MODULE,
  34. .obj_size = sizeof(struct iucv_sock),
  35. };
  36. /* special AF_IUCV IPRM messages */
  37. static const u8 iprm_shutdown[8] =
  38. {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
  39. #define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class))
  40. /* macros to set/get socket control buffer at correct offset */
  41. #define CB_TAG(skb) ((skb)->cb) /* iucv message tag */
  42. #define CB_TAG_LEN (sizeof(((struct iucv_message *) 0)->tag))
  43. #define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
  44. #define CB_TRGCLS_LEN (TRGCLS_SIZE)
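/*
 * Editor's note (illustrative, not part of the original source): the macros
 * above treat the first 8 bytes of skb->cb as two adjacent fields:
 *
 *     offset 0..3  iucv message tag          (CB_TAG,    CB_TAG_LEN)
 *     offset 4..7  iucv message target class (CB_TRGCLS, CB_TRGCLS_LEN)
 *
 * With the u32 tag and class fields of struct iucv_message, both lengths are
 * 4 bytes, so the tag stored at send time and the target class stored at
 * receive time never overlap within the socket buffer's control area.
 */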
  45. #define __iucv_sock_wait(sk, condition, timeo, ret) \
  46. do { \
  47. DEFINE_WAIT(__wait); \
  48. long __timeo = timeo; \
  49. ret = 0; \
  50. while (!(condition)) { \
  51. prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE); \
  52. if (!__timeo) { \
  53. ret = -EAGAIN; \
  54. break; \
  55. } \
  56. if (signal_pending(current)) { \
  57. ret = sock_intr_errno(__timeo); \
  58. break; \
  59. } \
  60. release_sock(sk); \
  61. __timeo = schedule_timeout(__timeo); \
  62. lock_sock(sk); \
  63. ret = sock_error(sk); \
  64. if (ret) \
  65. break; \
  66. } \
  67. finish_wait(sk->sk_sleep, &__wait); \
  68. } while (0)
  69. #define iucv_sock_wait(sk, condition, timeo) \
  70. ({ \
  71. int __ret = 0; \
  72. if (!(condition)) \
  73. __iucv_sock_wait(sk, condition, timeo, __ret); \
  74. __ret; \
  75. })
  76. static void iucv_sock_kill(struct sock *sk);
  77. static void iucv_sock_close(struct sock *sk);
  78. /* Call Back functions */
  79. static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
  80. static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
  81. static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
  82. static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
  83. u8 ipuser[16]);
  84. static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
  85. static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);
  86. static struct iucv_sock_list iucv_sk_list = {
  87. .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
  88. .autobind_name = ATOMIC_INIT(0)
  89. };
  90. static struct iucv_handler af_iucv_handler = {
  91. .path_pending = iucv_callback_connreq,
  92. .path_complete = iucv_callback_connack,
  93. .path_severed = iucv_callback_connrej,
  94. .message_pending = iucv_callback_rx,
  95. .message_complete = iucv_callback_txdone,
  96. .path_quiesced = iucv_callback_shutdown,
  97. };
  98. static inline void high_nmcpy(unsigned char *dst, char *src)
  99. {
  100. memcpy(dst, src, 8);
  101. }
  102. static inline void low_nmcpy(unsigned char *dst, char *src)
  103. {
  104. memcpy(&dst[8], src, 8);
  105. }
  106. /**
  107. * iucv_msg_length() - Returns the length of an iucv message.
  108. * @msg: Pointer to struct iucv_message, MUST NOT be NULL
  109. *
  110. * The function returns the length of the specified iucv message @msg, whether
  111. * the data is stored in a buffer or in the parameter list (PRMDATA).
  112. *
  113. * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
  114. * data:
  115. * PRMDATA[0..6] socket data (max 7 bytes);
  116. * PRMDATA[7] socket data length value (len is 0xff - PRMDATA[7])
  117. *
  118. * The socket data length is computed by subtracting the socket data length
  119. * value from 0xFF.
  120. * If the socket data length is greater than 7, then PRMDATA can be used for
  121. * special notifications (see iucv_sock_shutdown); and further,
  122. * if the socket data length is greater than 7, the function returns 8.
  123. *
  124. * Use this function to allocate socket buffers to store iucv message data.
  125. */
  126. static inline size_t iucv_msg_length(struct iucv_message *msg)
  127. {
  128. size_t datalen;
  129. if (msg->flags & IUCV_IPRMDATA) {
  130. datalen = 0xff - msg->rmmsg[7];
  131. return (datalen < 8) ? datalen : 8;
  132. }
  133. return msg->length;
  134. }
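/*
 * Editor's note (worked example, not part of the original source): a sender
 * transmitting 5 bytes of socket data in the parameter list stores them in
 * PRMDATA[0..4] and encodes the length as
 *
 *     PRMDATA[7] = 0xff - 5 = 0xfa
 *
 * so iucv_msg_length() recovers datalen = 0xff - 0xfa = 5. For the special
 * iprm_shutdown notification (PRMDATA[7] = 0x01) the computed value is 0xfe,
 * which is clamped to 8 and handled separately in iucv_process_message().
 */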
  135. /**
  136. * iucv_sock_in_state() - check for specific states
  137. * @sk: sock structure
  138. * @state: first iucv sk state
  139. * @state2: second iucv sk state
  140. *
  141. * Returns true if the socket is in either the first or the second state.
  142. */
  143. static int iucv_sock_in_state(struct sock *sk, int state, int state2)
  144. {
  145. return (sk->sk_state == state || sk->sk_state == state2);
  146. }
  147. /**
  148. * iucv_below_msglim() - function to check if messages can be sent
  149. * @sk: sock structure
  150. *
  151. * Returns true if the send queue length is lower than the message limit.
  152. * Always returns true if the socket is not connected (no iucv path for
  153. * checking the message limit).
  154. */
  155. static inline int iucv_below_msglim(struct sock *sk)
  156. {
  157. struct iucv_sock *iucv = iucv_sk(sk);
  158. if (sk->sk_state != IUCV_CONNECTED)
  159. return 1;
  160. return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
  161. }
  162. /**
  163. * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
  164. */
  165. static void iucv_sock_wake_msglim(struct sock *sk)
  166. {
  167. read_lock(&sk->sk_callback_lock);
  168. if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
  169. wake_up_interruptible_all(sk->sk_sleep);
  170. sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
  171. read_unlock(&sk->sk_callback_lock);
  172. }
  173. /* Timers */
  174. static void iucv_sock_timeout(unsigned long arg)
  175. {
  176. struct sock *sk = (struct sock *)arg;
  177. bh_lock_sock(sk);
  178. sk->sk_err = ETIMEDOUT;
  179. sk->sk_state_change(sk);
  180. bh_unlock_sock(sk);
  181. iucv_sock_kill(sk);
  182. sock_put(sk);
  183. }
  184. static void iucv_sock_clear_timer(struct sock *sk)
  185. {
  186. sk_stop_timer(sk, &sk->sk_timer);
  187. }
  188. static struct sock *__iucv_get_sock_by_name(char *nm)
  189. {
  190. struct sock *sk;
  191. struct hlist_node *node;
  192. sk_for_each(sk, node, &iucv_sk_list.head)
  193. if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
  194. return sk;
  195. return NULL;
  196. }
  197. static void iucv_sock_destruct(struct sock *sk)
  198. {
  199. skb_queue_purge(&sk->sk_receive_queue);
  200. skb_queue_purge(&sk->sk_write_queue);
  201. }
  202. /* Cleanup Listen */
  203. static void iucv_sock_cleanup_listen(struct sock *parent)
  204. {
  205. struct sock *sk;
  206. /* Close non-accepted connections */
  207. while ((sk = iucv_accept_dequeue(parent, NULL))) {
  208. iucv_sock_close(sk);
  209. iucv_sock_kill(sk);
  210. }
  211. parent->sk_state = IUCV_CLOSED;
  212. sock_set_flag(parent, SOCK_ZAPPED);
  213. }
  214. /* Kill socket */
  215. static void iucv_sock_kill(struct sock *sk)
  216. {
  217. if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
  218. return;
  219. iucv_sock_unlink(&iucv_sk_list, sk);
  220. sock_set_flag(sk, SOCK_DEAD);
  221. sock_put(sk);
  222. }
  223. /* Close an IUCV socket */
  224. static void iucv_sock_close(struct sock *sk)
  225. {
  226. unsigned char user_data[16];
  227. struct iucv_sock *iucv = iucv_sk(sk);
  228. int err;
  229. unsigned long timeo;
  230. iucv_sock_clear_timer(sk);
  231. lock_sock(sk);
  232. switch (sk->sk_state) {
  233. case IUCV_LISTEN:
  234. iucv_sock_cleanup_listen(sk);
  235. break;
  236. case IUCV_CONNECTED:
  237. case IUCV_DISCONN:
  238. err = 0;
  239. sk->sk_state = IUCV_CLOSING;
  240. sk->sk_state_change(sk);
  241. if (!skb_queue_empty(&iucv->send_skb_q)) {
  242. if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
  243. timeo = sk->sk_lingertime;
  244. else
  245. timeo = IUCV_DISCONN_TIMEOUT;
  246. err = iucv_sock_wait(sk,
  247. iucv_sock_in_state(sk, IUCV_CLOSED, 0),
  248. timeo);
  249. }
  250. case IUCV_CLOSING: /* fall through */
  251. sk->sk_state = IUCV_CLOSED;
  252. sk->sk_state_change(sk);
  253. if (iucv->path) {
  254. low_nmcpy(user_data, iucv->src_name);
  255. high_nmcpy(user_data, iucv->dst_name);
  256. ASCEBC(user_data, sizeof(user_data));
  257. err = iucv_path_sever(iucv->path, user_data);
  258. iucv_path_free(iucv->path);
  259. iucv->path = NULL;
  260. }
  261. sk->sk_err = ECONNRESET;
  262. sk->sk_state_change(sk);
  263. skb_queue_purge(&iucv->send_skb_q);
  264. skb_queue_purge(&iucv->backlog_skb_q);
  265. sock_set_flag(sk, SOCK_ZAPPED);
  266. break;
  267. default:
  268. sock_set_flag(sk, SOCK_ZAPPED);
  269. break;
  270. }
  271. release_sock(sk);
  272. iucv_sock_kill(sk);
  273. }
  274. static void iucv_sock_init(struct sock *sk, struct sock *parent)
  275. {
  276. if (parent)
  277. sk->sk_type = parent->sk_type;
  278. }
  279. static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
  280. {
  281. struct sock *sk;
  282. sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
  283. if (!sk)
  284. return NULL;
  285. sock_init_data(sock, sk);
  286. INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
  287. spin_lock_init(&iucv_sk(sk)->accept_q_lock);
  288. skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
  289. INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
  290. spin_lock_init(&iucv_sk(sk)->message_q.lock);
  291. skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
  292. iucv_sk(sk)->send_tag = 0;
  293. iucv_sk(sk)->flags = 0;
  294. iucv_sk(sk)->msglimit = IUCV_QUEUELEN_DEFAULT;
  295. iucv_sk(sk)->path = NULL;
  296. memset(&iucv_sk(sk)->src_user_id, 0, 32);
  297. sk->sk_destruct = iucv_sock_destruct;
  298. sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
  299. sk->sk_allocation = GFP_DMA;
  300. sock_reset_flag(sk, SOCK_ZAPPED);
  301. sk->sk_protocol = proto;
  302. sk->sk_state = IUCV_OPEN;
  303. setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);
  304. iucv_sock_link(&iucv_sk_list, sk);
  305. return sk;
  306. }
  307. /* Create an IUCV socket */
  308. static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
  309. {
  310. struct sock *sk;
  311. if (protocol && protocol != PF_IUCV)
  312. return -EPROTONOSUPPORT;
  313. sock->state = SS_UNCONNECTED;
  314. switch (sock->type) {
  315. case SOCK_STREAM:
  316. sock->ops = &iucv_sock_ops;
  317. break;
  318. case SOCK_SEQPACKET:
  319. /* currently, proto ops can handle both sk types */
  320. sock->ops = &iucv_sock_ops;
  321. break;
  322. default:
  323. return -ESOCKTNOSUPPORT;
  324. }
  325. sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
  326. if (!sk)
  327. return -ENOMEM;
  328. iucv_sock_init(sk, NULL);
  329. return 0;
  330. }
  331. void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
  332. {
  333. write_lock_bh(&l->lock);
  334. sk_add_node(sk, &l->head);
  335. write_unlock_bh(&l->lock);
  336. }
  337. void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
  338. {
  339. write_lock_bh(&l->lock);
  340. sk_del_node_init(sk);
  341. write_unlock_bh(&l->lock);
  342. }
  343. void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
  344. {
  345. unsigned long flags;
  346. struct iucv_sock *par = iucv_sk(parent);
  347. sock_hold(sk);
  348. spin_lock_irqsave(&par->accept_q_lock, flags);
  349. list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
  350. spin_unlock_irqrestore(&par->accept_q_lock, flags);
  351. iucv_sk(sk)->parent = parent;
  352. parent->sk_ack_backlog++;
  353. }
  354. void iucv_accept_unlink(struct sock *sk)
  355. {
  356. unsigned long flags;
  357. struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);
  358. spin_lock_irqsave(&par->accept_q_lock, flags);
  359. list_del_init(&iucv_sk(sk)->accept_q);
  360. spin_unlock_irqrestore(&par->accept_q_lock, flags);
  361. iucv_sk(sk)->parent->sk_ack_backlog--;
  362. iucv_sk(sk)->parent = NULL;
  363. sock_put(sk);
  364. }
  365. struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
  366. {
  367. struct iucv_sock *isk, *n;
  368. struct sock *sk;
  369. list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
  370. sk = (struct sock *) isk;
  371. lock_sock(sk);
  372. if (sk->sk_state == IUCV_CLOSED) {
  373. iucv_accept_unlink(sk);
  374. release_sock(sk);
  375. continue;
  376. }
  377. if (sk->sk_state == IUCV_CONNECTED ||
  378. sk->sk_state == IUCV_SEVERED ||
  379. !newsock) {
  380. iucv_accept_unlink(sk);
  381. if (newsock)
  382. sock_graft(sk, newsock);
  383. if (sk->sk_state == IUCV_SEVERED)
  384. sk->sk_state = IUCV_DISCONN;
  385. release_sock(sk);
  386. return sk;
  387. }
  388. release_sock(sk);
  389. }
  390. return NULL;
  391. }
  392. /* Bind an unbound socket */
  393. static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
  394. int addr_len)
  395. {
  396. struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
  397. struct sock *sk = sock->sk;
  398. struct iucv_sock *iucv;
  399. int err;
  400. /* Verify the input sockaddr */
  401. if (!addr || addr->sa_family != AF_IUCV)
  402. return -EINVAL;
  403. lock_sock(sk);
  404. if (sk->sk_state != IUCV_OPEN) {
  405. err = -EBADFD;
  406. goto done;
  407. }
  408. write_lock_bh(&iucv_sk_list.lock);
  409. iucv = iucv_sk(sk);
  410. if (__iucv_get_sock_by_name(sa->siucv_name)) {
  411. err = -EADDRINUSE;
  412. goto done_unlock;
  413. }
  414. if (iucv->path) {
  415. err = 0;
  416. goto done_unlock;
  417. }
  418. /* Bind the socket */
  419. memcpy(iucv->src_name, sa->siucv_name, 8);
  420. /* Copy the user id */
  421. memcpy(iucv->src_user_id, iucv_userid, 8);
  422. sk->sk_state = IUCV_BOUND;
  423. err = 0;
  424. done_unlock:
  425. /* Release the socket list lock */
  426. write_unlock_bh(&iucv_sk_list.lock);
  427. done:
  428. release_sock(sk);
  429. return err;
  430. }
  431. /* Automatically bind an unbound socket */
  432. static int iucv_sock_autobind(struct sock *sk)
  433. {
  434. struct iucv_sock *iucv = iucv_sk(sk);
  435. char query_buffer[80];
  436. char name[12];
  437. int err = 0;
  438. /* Set the userid and name */
  439. cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
  440. if (unlikely(err))
  441. return -EPROTO;
  442. memcpy(iucv->src_user_id, query_buffer, 8);
  443. write_lock_bh(&iucv_sk_list.lock);
  444. sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
  445. while (__iucv_get_sock_by_name(name)) {
  446. sprintf(name, "%08x",
  447. atomic_inc_return(&iucv_sk_list.autobind_name));
  448. }
  449. write_unlock_bh(&iucv_sk_list.lock);
  450. memcpy(&iucv->src_name, name, 8);
  451. return err;
  452. }
  453. /* Connect an unconnected socket */
  454. static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
  455. int alen, int flags)
  456. {
  457. struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
  458. struct sock *sk = sock->sk;
  459. struct iucv_sock *iucv;
  460. unsigned char user_data[16];
  461. int err;
  462. if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
  463. return -EINVAL;
  464. if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
  465. return -EBADFD;
  466. if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
  467. return -EINVAL;
  468. if (sk->sk_state == IUCV_OPEN) {
  469. err = iucv_sock_autobind(sk);
  470. if (unlikely(err))
  471. return err;
  472. }
  473. lock_sock(sk);
  474. /* Set the destination information */
  475. memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
  476. memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);
  477. high_nmcpy(user_data, sa->siucv_name);
  478. low_nmcpy(user_data, iucv_sk(sk)->src_name);
  479. ASCEBC(user_data, sizeof(user_data));
  480. iucv = iucv_sk(sk);
  481. /* Create path. */
  482. iucv->path = iucv_path_alloc(iucv->msglimit,
  483. IUCV_IPRMDATA, GFP_KERNEL);
  484. if (!iucv->path) {
  485. err = -ENOMEM;
  486. goto done;
  487. }
  488. err = iucv_path_connect(iucv->path, &af_iucv_handler,
  489. sa->siucv_user_id, NULL, user_data, sk);
  490. if (err) {
  491. iucv_path_free(iucv->path);
  492. iucv->path = NULL;
  493. switch (err) {
  494. case 0x0b: /* Target communicator is not logged on */
  495. err = -ENETUNREACH;
  496. break;
  497. case 0x0d: /* Max connections for this guest exceeded */
  498. case 0x0e: /* Max connections for target guest exceeded */
  499. err = -EAGAIN;
  500. break;
  501. case 0x0f: /* Missing IUCV authorization */
  502. err = -EACCES;
  503. break;
  504. default:
  505. err = -ECONNREFUSED;
  506. break;
  507. }
  508. goto done;
  509. }
  510. if (sk->sk_state != IUCV_CONNECTED) {
  511. err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
  512. IUCV_DISCONN),
  513. sock_sndtimeo(sk, flags & O_NONBLOCK));
  514. }
  515. if (sk->sk_state == IUCV_DISCONN) {
  516. err = -ECONNREFUSED;
  517. }
  518. if (err) {
  519. iucv_path_sever(iucv->path, NULL);
  520. iucv_path_free(iucv->path);
  521. iucv->path = NULL;
  522. }
  523. done:
  524. release_sock(sk);
  525. return err;
  526. }
  527. /* Move a socket into listening state. */
  528. static int iucv_sock_listen(struct socket *sock, int backlog)
  529. {
  530. struct sock *sk = sock->sk;
  531. int err;
  532. lock_sock(sk);
  533. err = -EINVAL;
  534. if (sk->sk_state != IUCV_BOUND)
  535. goto done;
  536. if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
  537. goto done;
  538. sk->sk_max_ack_backlog = backlog;
  539. sk->sk_ack_backlog = 0;
  540. sk->sk_state = IUCV_LISTEN;
  541. err = 0;
  542. done:
  543. release_sock(sk);
  544. return err;
  545. }
  546. /* Accept a pending connection */
  547. static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
  548. int flags)
  549. {
  550. DECLARE_WAITQUEUE(wait, current);
  551. struct sock *sk = sock->sk, *nsk;
  552. long timeo;
  553. int err = 0;
  554. lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
  555. if (sk->sk_state != IUCV_LISTEN) {
  556. err = -EBADFD;
  557. goto done;
  558. }
  559. timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
  560. /* Wait for an incoming connection */
  561. add_wait_queue_exclusive(sk->sk_sleep, &wait);
  562. while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
  563. set_current_state(TASK_INTERRUPTIBLE);
  564. if (!timeo) {
  565. err = -EAGAIN;
  566. break;
  567. }
  568. release_sock(sk);
  569. timeo = schedule_timeout(timeo);
  570. lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
  571. if (sk->sk_state != IUCV_LISTEN) {
  572. err = -EBADFD;
  573. break;
  574. }
  575. if (signal_pending(current)) {
  576. err = sock_intr_errno(timeo);
  577. break;
  578. }
  579. }
  580. set_current_state(TASK_RUNNING);
  581. remove_wait_queue(sk->sk_sleep, &wait);
  582. if (err)
  583. goto done;
  584. newsock->state = SS_CONNECTED;
  585. done:
  586. release_sock(sk);
  587. return err;
  588. }
  589. static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
  590. int *len, int peer)
  591. {
  592. struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
  593. struct sock *sk = sock->sk;
  594. addr->sa_family = AF_IUCV;
  595. *len = sizeof(struct sockaddr_iucv);
  596. if (peer) {
  597. memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
  598. memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
  599. } else {
  600. memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
  601. memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
  602. }
  603. memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
  604. memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
  605. memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));
  606. return 0;
  607. }
  608. /**
  609. * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
  610. * @path: IUCV path
  611. * @msg: Pointer to a struct iucv_message
  612. * @skb: The socket data to send, skb->len MUST BE <= 7
  613. *
  614. * Send the socket data in the parameter list in the iucv message
  615. * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
  616. * list and the socket data len at index 7 (last byte).
  617. * See also iucv_msg_length().
  618. *
  619. * Returns the error code from the iucv_message_send() call.
  620. */
  621. static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
  622. struct sk_buff *skb)
  623. {
  624. u8 prmdata[8];
  625. memcpy(prmdata, (void *) skb->data, skb->len);
  626. prmdata[7] = 0xff - (u8) skb->len;
  627. return iucv_message_send(path, msg, IUCV_IPRMDATA, 0,
  628. (void *) prmdata, 8);
  629. }
  630. static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
  631. struct msghdr *msg, size_t len)
  632. {
  633. struct sock *sk = sock->sk;
  634. struct iucv_sock *iucv = iucv_sk(sk);
  635. struct sk_buff *skb;
  636. struct iucv_message txmsg;
  637. struct cmsghdr *cmsg;
  638. int cmsg_done;
  639. long timeo;
  640. char user_id[9];
  641. char appl_id[9];
  642. int err;
  643. int noblock = msg->msg_flags & MSG_DONTWAIT;
  644. err = sock_error(sk);
  645. if (err)
  646. return err;
  647. if (msg->msg_flags & MSG_OOB)
  648. return -EOPNOTSUPP;
  649. /* SOCK_SEQPACKET: we do not support segmented records */
  650. if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
  651. return -EOPNOTSUPP;
  652. lock_sock(sk);
  653. if (sk->sk_shutdown & SEND_SHUTDOWN) {
  654. err = -EPIPE;
  655. goto out;
  656. }
  657. /* Return if the socket is not in connected state */
  658. if (sk->sk_state != IUCV_CONNECTED) {
  659. err = -ENOTCONN;
  660. goto out;
  661. }
  662. /* initialize defaults */
  663. cmsg_done = 0; /* check for duplicate headers */
  664. txmsg.class = 0;
  665. /* iterate over control messages */
  666. for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
  667. cmsg = CMSG_NXTHDR(msg, cmsg)) {
  668. if (!CMSG_OK(msg, cmsg)) {
  669. err = -EINVAL;
  670. goto out;
  671. }
  672. if (cmsg->cmsg_level != SOL_IUCV)
  673. continue;
  674. if (cmsg->cmsg_type & cmsg_done) {
  675. err = -EINVAL;
  676. goto out;
  677. }
  678. cmsg_done |= cmsg->cmsg_type;
  679. switch (cmsg->cmsg_type) {
  680. case SCM_IUCV_TRGCLS:
  681. if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
  682. err = -EINVAL;
  683. goto out;
  684. }
  685. /* set iucv message target class */
  686. memcpy(&txmsg.class,
  687. (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
  688. break;
  689. default:
  690. err = -EINVAL;
  691. goto out;
  692. break;
  693. }
  694. }
  695. /* allocate one skb for each iucv message:
  696. * this is fine for SOCK_SEQPACKET (unless we want to support
  697. * segmented records using the MSG_EOR flag), but
  698. * for SOCK_STREAM we might want to improve it in the future */
  699. skb = sock_alloc_send_skb(sk, len, noblock, &err);
  700. if (!skb)
  701. goto out;
  702. if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
  703. err = -EFAULT;
  704. goto fail;
  705. }
  706. /* wait if the message limit of the iucv path has been reached */
  707. timeo = sock_sndtimeo(sk, noblock);
  708. err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
  709. if (err)
  710. goto fail;
  711. /* return -ECONNRESET if the socket is no longer connected */
  712. if (sk->sk_state != IUCV_CONNECTED) {
  713. err = -ECONNRESET;
  714. goto fail;
  715. }
  716. /* increment and save iucv message tag for msg_completion cbk */
  717. txmsg.tag = iucv->send_tag++;
  718. memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
  719. skb_queue_tail(&iucv->send_skb_q, skb);
  720. if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
  721. && skb->len <= 7) {
  722. err = iucv_send_iprm(iucv->path, &txmsg, skb);
  723. /* on success: there is no message_complete callback
  724. * for an IPRMDATA msg; remove skb from send queue */
  725. if (err == 0) {
  726. skb_unlink(skb, &iucv->send_skb_q);
  727. kfree_skb(skb);
  728. }
  729. /* this error should never happen since the
  730. * IUCV_IPRMDATA path flag is set... sever path */
  731. if (err == 0x15) {
  732. iucv_path_sever(iucv->path, NULL);
  733. skb_unlink(skb, &iucv->send_skb_q);
  734. err = -EPIPE;
  735. goto fail;
  736. }
  737. } else
  738. err = iucv_message_send(iucv->path, &txmsg, 0, 0,
  739. (void *) skb->data, skb->len);
  740. if (err) {
  741. if (err == 3) {
  742. user_id[8] = 0;
  743. memcpy(user_id, iucv->dst_user_id, 8);
  744. appl_id[8] = 0;
  745. memcpy(appl_id, iucv->dst_name, 8);
  746. pr_err("Application %s on z/VM guest %s"
  747. " exceeds message limit\n",
  748. appl_id, user_id);
  749. err = -EAGAIN;
  750. } else
  751. err = -EPIPE;
  752. skb_unlink(skb, &iucv->send_skb_q);
  753. goto fail;
  754. }
  755. release_sock(sk);
  756. return len;
  757. fail:
  758. kfree_skb(skb);
  759. out:
  760. release_sock(sk);
  761. return err;
  762. }
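/*
 * Editor's note (hedged sketch, not part of the original source): a user-space
 * sender can pass the iucv target class that the cmsg loop above looks for by
 * attaching a 4-byte SCM_IUCV_TRGCLS ancillary message at level SOL_IUCV.
 * Here fd is assumed to be a connected AF_IUCV SOCK_STREAM socket and
 * buf/buflen the payload; SOL_IUCV and SCM_IUCV_TRGCLS come from the af_iucv
 * headers.
 *
 *     uint32_t trgcls = 0x00010002;
 *     char ctrl[CMSG_SPACE(sizeof(trgcls))];
 *     struct iovec iov = { .iov_base = buf, .iov_len = buflen };
 *     struct msghdr mh = { 0 };
 *     struct cmsghdr *cm;
 *
 *     mh.msg_iov = &iov;
 *     mh.msg_iovlen = 1;
 *     mh.msg_control = ctrl;
 *     mh.msg_controllen = sizeof(ctrl);
 *     cm = CMSG_FIRSTHDR(&mh);
 *     cm->cmsg_level = SOL_IUCV;
 *     cm->cmsg_type = SCM_IUCV_TRGCLS;
 *     cm->cmsg_len = CMSG_LEN(sizeof(trgcls));
 *     memcpy(CMSG_DATA(cm), &trgcls, sizeof(trgcls));
 *     sendmsg(fd, &mh, 0);
 *
 * The kernel copies the 4 bytes into txmsg.class; any other cmsg length or a
 * duplicate header makes sendmsg fail with -EINVAL, as coded above.
 */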
  763. static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
  764. {
  765. int dataleft, size, copied = 0;
  766. struct sk_buff *nskb;
  767. dataleft = len;
  768. while (dataleft) {
  769. if (dataleft >= sk->sk_rcvbuf / 4)
  770. size = sk->sk_rcvbuf / 4;
  771. else
  772. size = dataleft;
  773. nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
  774. if (!nskb)
  775. return -ENOMEM;
  776. /* copy target class to control buffer of new skb */
  777. memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);
  778. /* copy data fragment */
  779. memcpy(nskb->data, skb->data + copied, size);
  780. copied += size;
  781. dataleft -= size;
  782. skb_reset_transport_header(nskb);
  783. skb_reset_network_header(nskb);
  784. nskb->len = size;
  785. skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
  786. }
  787. return 0;
  788. }
  789. static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
  790. struct iucv_path *path,
  791. struct iucv_message *msg)
  792. {
  793. int rc;
  794. unsigned int len;
  795. len = iucv_msg_length(msg);
  796. /* store msg target class in the second 4 bytes of skb ctrl buffer */
  797. /* Note: the first 4 bytes are reserved for msg tag */
  798. memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);
  799. /* check for special IPRM messages (e.g. iucv_sock_shutdown) */
  800. if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
  801. if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
  802. skb->data = NULL;
  803. skb->len = 0;
  804. }
  805. } else {
  806. rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA,
  807. skb->data, len, NULL);
  808. if (rc) {
  809. kfree_skb(skb);
  810. return;
  811. }
  812. /* we need to fragment iucv messages for SOCK_STREAM only;
  813. * for SOCK_SEQPACKET, it is only relevant if we support
  814. * record segmentation using MSG_EOR (see also recvmsg()) */
  815. if (sk->sk_type == SOCK_STREAM &&
  816. skb->truesize >= sk->sk_rcvbuf / 4) {
  817. rc = iucv_fragment_skb(sk, skb, len);
  818. kfree_skb(skb);
  819. skb = NULL;
  820. if (rc) {
  821. iucv_path_sever(path, NULL);
  822. return;
  823. }
  824. skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
  825. } else {
  826. skb_reset_transport_header(skb);
  827. skb_reset_network_header(skb);
  828. skb->len = len;
  829. }
  830. }
  831. if (sock_queue_rcv_skb(sk, skb))
  832. skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
  833. }
  834. static void iucv_process_message_q(struct sock *sk)
  835. {
  836. struct iucv_sock *iucv = iucv_sk(sk);
  837. struct sk_buff *skb;
  838. struct sock_msg_q *p, *n;
  839. list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
  840. skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
  841. if (!skb)
  842. break;
  843. iucv_process_message(sk, skb, p->path, &p->msg);
  844. list_del(&p->list);
  845. kfree(p);
  846. if (!skb_queue_empty(&iucv->backlog_skb_q))
  847. break;
  848. }
  849. }
  850. static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
  851. struct msghdr *msg, size_t len, int flags)
  852. {
  853. int noblock = flags & MSG_DONTWAIT;
  854. struct sock *sk = sock->sk;
  855. struct iucv_sock *iucv = iucv_sk(sk);
  856. unsigned int copied, rlen;
  857. struct sk_buff *skb, *rskb, *cskb;
  858. int err = 0;
  859. if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
  860. skb_queue_empty(&iucv->backlog_skb_q) &&
  861. skb_queue_empty(&sk->sk_receive_queue) &&
  862. list_empty(&iucv->message_q.list))
  863. return 0;
  864. if (flags & (MSG_OOB))
  865. return -EOPNOTSUPP;
  866. /* receive/dequeue next skb:
  867. * the function understands MSG_PEEK and, thus, does not dequeue skb */
  868. skb = skb_recv_datagram(sk, flags, noblock, &err);
  869. if (!skb) {
  870. if (sk->sk_shutdown & RCV_SHUTDOWN)
  871. return 0;
  872. return err;
  873. }
  874. rlen = skb->len; /* real length of skb */
  875. copied = min_t(unsigned int, rlen, len);
  876. cskb = skb;
  877. if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
  878. if (!(flags & MSG_PEEK))
  879. skb_queue_head(&sk->sk_receive_queue, skb);
  880. return -EFAULT;
  881. }
  882. /* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
  883. if (sk->sk_type == SOCK_SEQPACKET) {
  884. if (copied < rlen)
  885. msg->msg_flags |= MSG_TRUNC;
  886. /* each iucv message contains a complete record */
  887. msg->msg_flags |= MSG_EOR;
  888. }
  889. /* create control message to store iucv msg target class:
  890. * get the trgcls from the control buffer of the skb, since the
  891. * original iucv message may have been fragmented. */
  892. err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
  893. CB_TRGCLS_LEN, CB_TRGCLS(skb));
  894. if (err) {
  895. if (!(flags & MSG_PEEK))
  896. skb_queue_head(&sk->sk_receive_queue, skb);
  897. return err;
  898. }
  899. /* Mark read part of skb as used */
  900. if (!(flags & MSG_PEEK)) {
  901. /* SOCK_STREAM: re-queue skb if it contains unreceived data */
  902. if (sk->sk_type == SOCK_STREAM) {
  903. skb_pull(skb, copied);
  904. if (skb->len) {
  905. skb_queue_head(&sk->sk_receive_queue, skb);
  906. goto done;
  907. }
  908. }
  909. kfree_skb(skb);
  910. /* Queue backlog skbs */
  911. rskb = skb_dequeue(&iucv->backlog_skb_q);
  912. while (rskb) {
  913. if (sock_queue_rcv_skb(sk, rskb)) {
  914. skb_queue_head(&iucv->backlog_skb_q,
  915. rskb);
  916. break;
  917. } else {
  918. rskb = skb_dequeue(&iucv->backlog_skb_q);
  919. }
  920. }
  921. if (skb_queue_empty(&iucv->backlog_skb_q)) {
  922. spin_lock_bh(&iucv->message_q.lock);
  923. if (!list_empty(&iucv->message_q.list))
  924. iucv_process_message_q(sk);
  925. spin_unlock_bh(&iucv->message_q.lock);
  926. }
  927. }
  928. done:
  929. /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
  930. if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
  931. copied = rlen;
  932. return copied;
  933. }
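/*
 * Editor's note (hedged sketch, not part of the original source): the target
 * class delivered by put_cmsg() above can be read back in user space after
 * recvmsg() by walking the ancillary data; fd is assumed to be a connected
 * AF_IUCV socket and buf/buflen the receive buffer.
 *
 *     uint32_t trgcls = 0;
 *     char ctrl[CMSG_SPACE(sizeof(trgcls))];
 *     struct iovec iov = { .iov_base = buf, .iov_len = buflen };
 *     struct msghdr mh = { 0 };
 *     struct cmsghdr *cm;
 *
 *     mh.msg_iov = &iov;
 *     mh.msg_iovlen = 1;
 *     mh.msg_control = ctrl;
 *     mh.msg_controllen = sizeof(ctrl);
 *     if (recvmsg(fd, &mh, 0) > 0)
 *             for (cm = CMSG_FIRSTHDR(&mh); cm; cm = CMSG_NXTHDR(&mh, cm))
 *                     if (cm->cmsg_level == SOL_IUCV &&
 *                         cm->cmsg_type == SCM_IUCV_TRGCLS)
 *                             memcpy(&trgcls, CMSG_DATA(cm), sizeof(trgcls));
 */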
  934. static inline unsigned int iucv_accept_poll(struct sock *parent)
  935. {
  936. struct iucv_sock *isk, *n;
  937. struct sock *sk;
  938. list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
  939. sk = (struct sock *) isk;
  940. if (sk->sk_state == IUCV_CONNECTED)
  941. return POLLIN | POLLRDNORM;
  942. }
  943. return 0;
  944. }
  945. unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
  946. poll_table *wait)
  947. {
  948. struct sock *sk = sock->sk;
  949. unsigned int mask = 0;
  950. poll_wait(file, sk->sk_sleep, wait);
  951. if (sk->sk_state == IUCV_LISTEN)
  952. return iucv_accept_poll(sk);
  953. if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
  954. mask |= POLLERR;
  955. if (sk->sk_shutdown & RCV_SHUTDOWN)
  956. mask |= POLLRDHUP;
  957. if (sk->sk_shutdown == SHUTDOWN_MASK)
  958. mask |= POLLHUP;
  959. if (!skb_queue_empty(&sk->sk_receive_queue) ||
  960. (sk->sk_shutdown & RCV_SHUTDOWN))
  961. mask |= POLLIN | POLLRDNORM;
  962. if (sk->sk_state == IUCV_CLOSED)
  963. mask |= POLLHUP;
  964. if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
  965. mask |= POLLIN;
  966. if (sock_writeable(sk))
  967. mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
  968. else
  969. set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
  970. return mask;
  971. }
  972. static int iucv_sock_shutdown(struct socket *sock, int how)
  973. {
  974. struct sock *sk = sock->sk;
  975. struct iucv_sock *iucv = iucv_sk(sk);
  976. struct iucv_message txmsg;
  977. int err = 0;
  978. how++;
  979. if ((how & ~SHUTDOWN_MASK) || !how)
  980. return -EINVAL;
  981. lock_sock(sk);
  982. switch (sk->sk_state) {
  983. case IUCV_DISCONN:
  984. case IUCV_CLOSING:
  985. case IUCV_SEVERED:
  986. case IUCV_CLOSED:
  987. err = -ENOTCONN;
  988. goto fail;
  989. default:
  990. sk->sk_shutdown |= how;
  991. break;
  992. }
  993. if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
  994. txmsg.class = 0;
  995. txmsg.tag = 0;
  996. err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
  997. (void *) iprm_shutdown, 8);
  998. if (err) {
  999. switch (err) {
  1000. case 1:
  1001. err = -ENOTCONN;
  1002. break;
  1003. case 2:
  1004. err = -ECONNRESET;
  1005. break;
  1006. default:
  1007. err = -ENOTCONN;
  1008. break;
  1009. }
  1010. }
  1011. }
  1012. if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
  1013. err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
  1014. if (err)
  1015. err = -ENOTCONN;
  1016. skb_queue_purge(&sk->sk_receive_queue);
  1017. }
  1018. /* Wake up anyone sleeping in poll */
  1019. sk->sk_state_change(sk);
  1020. fail:
  1021. release_sock(sk);
  1022. return err;
  1023. }
  1024. static int iucv_sock_release(struct socket *sock)
  1025. {
  1026. struct sock *sk = sock->sk;
  1027. int err = 0;
  1028. if (!sk)
  1029. return 0;
  1030. iucv_sock_close(sk);
  1031. /* Unregister with IUCV base support */
  1032. if (iucv_sk(sk)->path) {
  1033. iucv_path_sever(iucv_sk(sk)->path, NULL);
  1034. iucv_path_free(iucv_sk(sk)->path);
  1035. iucv_sk(sk)->path = NULL;
  1036. }
  1037. sock_orphan(sk);
  1038. iucv_sock_kill(sk);
  1039. return err;
  1040. }
  1041. /* getsockopt and setsockopt */
  1042. static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
  1043. char __user *optval, int optlen)
  1044. {
  1045. struct sock *sk = sock->sk;
  1046. struct iucv_sock *iucv = iucv_sk(sk);
  1047. int val;
  1048. int rc;
  1049. if (level != SOL_IUCV)
  1050. return -ENOPROTOOPT;
  1051. if (optlen < sizeof(int))
  1052. return -EINVAL;
  1053. if (get_user(val, (int __user *) optval))
  1054. return -EFAULT;
  1055. rc = 0;
  1056. lock_sock(sk);
  1057. switch (optname) {
  1058. case SO_IPRMDATA_MSG:
  1059. if (val)
  1060. iucv->flags |= IUCV_IPRMDATA;
  1061. else
  1062. iucv->flags &= ~IUCV_IPRMDATA;
  1063. break;
  1064. case SO_MSGLIMIT:
  1065. switch (sk->sk_state) {
  1066. case IUCV_OPEN:
  1067. case IUCV_BOUND:
  1068. if (val < 1 || val > (u16)(~0))
  1069. rc = -EINVAL;
  1070. else
  1071. iucv->msglimit = val;
  1072. break;
  1073. default:
  1074. rc = -EINVAL;
  1075. break;
  1076. }
  1077. break;
  1078. default:
  1079. rc = -ENOPROTOOPT;
  1080. break;
  1081. }
  1082. release_sock(sk);
  1083. return rc;
  1084. }
  1085. static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
  1086. char __user *optval, int __user *optlen)
  1087. {
  1088. struct sock *sk = sock->sk;
  1089. struct iucv_sock *iucv = iucv_sk(sk);
  1090. int val, len;
  1091. if (level != SOL_IUCV)
  1092. return -ENOPROTOOPT;
  1093. if (get_user(len, optlen))
  1094. return -EFAULT;
  1095. if (len < 0)
  1096. return -EINVAL;
  1097. len = min_t(unsigned int, len, sizeof(int));
  1098. switch (optname) {
  1099. case SO_IPRMDATA_MSG:
  1100. val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
  1101. break;
  1102. case SO_MSGLIMIT:
  1103. lock_sock(sk);
  1104. val = (iucv->path != NULL) ? iucv->path->msglim /* connected */
  1105. : iucv->msglimit; /* default */
  1106. release_sock(sk);
  1107. break;
  1108. default:
  1109. return -ENOPROTOOPT;
  1110. }
  1111. if (put_user(len, optlen))
  1112. return -EFAULT;
  1113. if (copy_to_user(optval, &val, len))
  1114. return -EFAULT;
  1115. return 0;
  1116. }
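/*
 * Editor's note (hedged sketch, not part of the original source): both socket
 * options handled above are set with setsockopt() at level SOL_IUCV, normally
 * before the iucv path is established; fd is assumed to be a newly created
 * AF_IUCV socket.
 *
 *     int limit = 32;   request a message limit of 32 for the iucv path
 *     int on = 1;       allow short messages in the parameter list (IPRM)
 *
 *     setsockopt(fd, SOL_IUCV, SO_MSGLIMIT, &limit, sizeof(limit));
 *     setsockopt(fd, SOL_IUCV, SO_IPRMDATA_MSG, &on, sizeof(on));
 *
 * SO_MSGLIMIT is accepted only in the IUCV_OPEN or IUCV_BOUND states and must
 * be between 1 and 65535; SO_IPRMDATA_MSG takes effect in sendmsg() only if
 * the established path also carries the IUCV_IPRMDATA flag.
 */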
  1117. /* Callback wrappers - called from iucv base support */
  1118. static int iucv_callback_connreq(struct iucv_path *path,
  1119. u8 ipvmid[8], u8 ipuser[16])
  1120. {
  1121. unsigned char user_data[16];
  1122. unsigned char nuser_data[16];
  1123. unsigned char src_name[8];
  1124. struct hlist_node *node;
  1125. struct sock *sk, *nsk;
  1126. struct iucv_sock *iucv, *niucv;
  1127. int err;
  1128. memcpy(src_name, ipuser, 8);
  1129. EBCASC(src_name, 8);
  1130. /* Find out if this path belongs to af_iucv. */
  1131. read_lock(&iucv_sk_list.lock);
  1132. iucv = NULL;
  1133. sk = NULL;
  1134. sk_for_each(sk, node, &iucv_sk_list.head)
  1135. if (sk->sk_state == IUCV_LISTEN &&
  1136. !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
  1137. /*
  1138. * Found a listening socket with
  1139. * src_name == ipuser[0-7].
  1140. */
  1141. iucv = iucv_sk(sk);
  1142. break;
  1143. }
  1144. read_unlock(&iucv_sk_list.lock);
  1145. if (!iucv)
  1146. /* No socket found, not one of our paths. */
  1147. return -EINVAL;
  1148. bh_lock_sock(sk);
  1149. /* Check if parent socket is listening */
  1150. low_nmcpy(user_data, iucv->src_name);
  1151. high_nmcpy(user_data, iucv->dst_name);
  1152. ASCEBC(user_data, sizeof(user_data));
  1153. if (sk->sk_state != IUCV_LISTEN) {
  1154. err = iucv_path_sever(path, user_data);
  1155. iucv_path_free(path);
  1156. goto fail;
  1157. }
  1158. /* Check for backlog size */
  1159. if (sk_acceptq_is_full(sk)) {
  1160. err = iucv_path_sever(path, user_data);
  1161. iucv_path_free(path);
  1162. goto fail;
  1163. }
  1164. /* Create the new socket */
  1165. nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
  1166. if (!nsk) {
  1167. err = iucv_path_sever(path, user_data);
  1168. iucv_path_free(path);
  1169. goto fail;
  1170. }
  1171. niucv = iucv_sk(nsk);
  1172. iucv_sock_init(nsk, sk);
  1173. /* Set the new iucv_sock */
  1174. memcpy(niucv->dst_name, ipuser + 8, 8);
  1175. EBCASC(niucv->dst_name, 8);
  1176. memcpy(niucv->dst_user_id, ipvmid, 8);
  1177. memcpy(niucv->src_name, iucv->src_name, 8);
  1178. memcpy(niucv->src_user_id, iucv->src_user_id, 8);
  1179. niucv->path = path;
  1180. /* Call iucv_accept */
  1181. high_nmcpy(nuser_data, ipuser + 8);
  1182. memcpy(nuser_data + 8, niucv->src_name, 8);
  1183. ASCEBC(nuser_data + 8, 8);
  1184. /* set message limit for path based on msglimit of accepting socket */
  1185. niucv->msglimit = iucv->msglimit;
  1186. path->msglim = iucv->msglimit;
  1187. err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
  1188. if (err) {
  1189. err = iucv_path_sever(path, user_data);
  1190. iucv_path_free(path);
  1191. iucv_sock_kill(nsk);
  1192. goto fail;
  1193. }
  1194. iucv_accept_enqueue(sk, nsk);
  1195. /* Wake up accept */
  1196. nsk->sk_state = IUCV_CONNECTED;
  1197. sk->sk_data_ready(sk, 1);
  1198. err = 0;
  1199. fail:
  1200. bh_unlock_sock(sk);
  1201. return 0;
  1202. }
  1203. static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
  1204. {
  1205. struct sock *sk = path->private;
  1206. sk->sk_state = IUCV_CONNECTED;
  1207. sk->sk_state_change(sk);
  1208. }
  1209. static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
  1210. {
  1211. struct sock *sk = path->private;
  1212. struct iucv_sock *iucv = iucv_sk(sk);
  1213. struct sk_buff *skb;
  1214. struct sock_msg_q *save_msg;
  1215. int len;
  1216. if (sk->sk_shutdown & RCV_SHUTDOWN) {
  1217. iucv_message_reject(path, msg);
  1218. return;
  1219. }
  1220. spin_lock(&iucv->message_q.lock);
  1221. if (!list_empty(&iucv->message_q.list) ||
  1222. !skb_queue_empty(&iucv->backlog_skb_q))
  1223. goto save_message;
  1224. len = atomic_read(&sk->sk_rmem_alloc);
  1225. len += iucv_msg_length(msg) + sizeof(struct sk_buff);
  1226. if (len > sk->sk_rcvbuf)
  1227. goto save_message;
  1228. skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
  1229. if (!skb)
  1230. goto save_message;
  1231. iucv_process_message(sk, skb, path, msg);
  1232. goto out_unlock;
  1233. save_message:
  1234. save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
  1235. if (!save_msg)
  1236. return;
  1237. save_msg->path = path;
  1238. save_msg->msg = *msg;
  1239. list_add_tail(&save_msg->list, &iucv->message_q.list);
  1240. out_unlock:
  1241. spin_unlock(&iucv->message_q.lock);
  1242. }
  1243. static void iucv_callback_txdone(struct iucv_path *path,
  1244. struct iucv_message *msg)
  1245. {
  1246. struct sock *sk = path->private;
  1247. struct sk_buff *this = NULL;
  1248. struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
  1249. struct sk_buff *list_skb = list->next;
  1250. unsigned long flags;
  1251. if (!skb_queue_empty(list)) {
  1252. spin_lock_irqsave(&list->lock, flags);
  1253. while (list_skb != (struct sk_buff *)list) {
  1254. if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
  1255. this = list_skb;
  1256. break;
  1257. }
  1258. list_skb = list_skb->next;
  1259. }
  1260. if (this)
  1261. __skb_unlink(this, list);
  1262. spin_unlock_irqrestore(&list->lock, flags);
  1263. if (this) {
  1264. kfree_skb(this);
  1265. /* wake up any process waiting for sending */
  1266. iucv_sock_wake_msglim(sk);
  1267. }
  1268. }
  1269. BUG_ON(!this);
  1270. if (sk->sk_state == IUCV_CLOSING) {
  1271. if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
  1272. sk->sk_state = IUCV_CLOSED;
  1273. sk->sk_state_change(sk);
  1274. }
  1275. }
  1276. }
  1277. static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
  1278. {
  1279. struct sock *sk = path->private;
  1280. if (!list_empty(&iucv_sk(sk)->accept_q))
  1281. sk->sk_state = IUCV_SEVERED;
  1282. else
  1283. sk->sk_state = IUCV_DISCONN;
  1284. sk->sk_state_change(sk);
  1285. }
  1286. /* called if the other communication side shuts down its RECV direction;
  1287. * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
  1288. */
  1289. static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
  1290. {
  1291. struct sock *sk = path->private;
  1292. bh_lock_sock(sk);
  1293. if (sk->sk_state != IUCV_CLOSED) {
  1294. sk->sk_shutdown |= SEND_SHUTDOWN;
  1295. sk->sk_state_change(sk);
  1296. }
  1297. bh_unlock_sock(sk);
  1298. }
  1299. static struct proto_ops iucv_sock_ops = {
  1300. .family = PF_IUCV,
  1301. .owner = THIS_MODULE,
  1302. .release = iucv_sock_release,
  1303. .bind = iucv_sock_bind,
  1304. .connect = iucv_sock_connect,
  1305. .listen = iucv_sock_listen,
  1306. .accept = iucv_sock_accept,
  1307. .getname = iucv_sock_getname,
  1308. .sendmsg = iucv_sock_sendmsg,
  1309. .recvmsg = iucv_sock_recvmsg,
  1310. .poll = iucv_sock_poll,
  1311. .ioctl = sock_no_ioctl,
  1312. .mmap = sock_no_mmap,
  1313. .socketpair = sock_no_socketpair,
  1314. .shutdown = iucv_sock_shutdown,
  1315. .setsockopt = iucv_sock_setsockopt,
  1316. .getsockopt = iucv_sock_getsockopt,
  1317. };
  1318. static struct net_proto_family iucv_sock_family_ops = {
  1319. .family = AF_IUCV,
  1320. .owner = THIS_MODULE,
  1321. .create = iucv_sock_create,
  1322. };
  1323. static int __init afiucv_init(void)
  1324. {
  1325. int err;
  1326. if (!MACHINE_IS_VM) {
  1327. pr_err("The af_iucv module cannot be loaded"
  1328. " without z/VM\n");
  1329. err = -EPROTONOSUPPORT;
  1330. goto out;
  1331. }
  1332. cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
  1333. if (unlikely(err)) {
  1334. WARN_ON(err);
  1335. err = -EPROTONOSUPPORT;
  1336. goto out;
  1337. }
  1338. err = iucv_register(&af_iucv_handler, 0);
  1339. if (err)
  1340. goto out;
  1341. err = proto_register(&iucv_proto, 0);
  1342. if (err)
  1343. goto out_iucv;
  1344. err = sock_register(&iucv_sock_family_ops);
  1345. if (err)
  1346. goto out_proto;
  1347. return 0;
  1348. out_proto:
  1349. proto_unregister(&iucv_proto);
  1350. out_iucv:
  1351. iucv_unregister(&af_iucv_handler, 0);
  1352. out:
  1353. return err;
  1354. }
  1355. static void __exit afiucv_exit(void)
  1356. {
  1357. sock_unregister(PF_IUCV);
  1358. proto_unregister(&iucv_proto);
  1359. iucv_unregister(&af_iucv_handler, 0);
  1360. }
  1361. module_init(afiucv_init);
  1362. module_exit(afiucv_exit);
  1363. MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
  1364. MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
  1365. MODULE_VERSION(VERSION);
  1366. MODULE_LICENSE("GPL");
  1367. MODULE_ALIAS_NETPROTO(PF_IUCV);