/* net/x25/af_x25.c — X.25 Packet Layer socket interface (see header comment below) */
  1. /*
  2. * X.25 Packet Layer release 002
  3. *
  4. * This is ALPHA test software. This code may break your machine,
  5. * randomly fail to work with new releases, misbehave and/or generally
  6. * screw up. It might even work.
  7. *
  8. * This code REQUIRES 2.1.15 or higher
  9. *
  10. * This module:
  11. * This module is free software; you can redistribute it and/or
  12. * modify it under the terms of the GNU General Public License
  13. * as published by the Free Software Foundation; either version
  14. * 2 of the License, or (at your option) any later version.
  15. *
  16. * History
  17. * X.25 001 Jonathan Naylor Started coding.
  18. * X.25 002 Jonathan Naylor Centralised disconnect handling.
  19. * New timer architecture.
  20. * 2000-03-11 Henner Eisen MSG_EOR handling more POSIX compliant.
  21. * 2000-03-22 Daniela Squassoni Allowed disabling/enabling of
  22. * facilities negotiation and increased
  23. * the throughput upper limit.
  24. * 2000-08-27 Arnaldo C. Melo s/suser/capable/ + micro cleanups
  25. * 2000-09-04 Henner Eisen Set sock->state in x25_accept().
  26. * Fixed x25_output() related skb leakage.
  27. * 2000-10-02 Henner Eisen Made x25_kick() single threaded per socket.
  28. * 2000-10-27 Henner Eisen MSG_DONTWAIT for fragment allocation.
  29. * 2000-11-14 Henner Eisen Closing datalink from NETDEV_GOING_DOWN
  30. * 2002-10-06 Arnaldo C. Melo Get rid of cli/sti, move proc stuff to
  31. * x25_proc.c, using seq_file
  32. * 2005-04-02 Shaun Pereira Selective sub address matching
  33. * with call user data
  34. * 2005-04-15 Shaun Pereira Fast select with no restriction on
  35. * response
  36. */
  37. #include <linux/config.h>
  38. #include <linux/module.h>
  39. #include <linux/errno.h>
  40. #include <linux/kernel.h>
  41. #include <linux/sched.h>
  42. #include <linux/timer.h>
  43. #include <linux/string.h>
  44. #include <linux/net.h>
  45. #include <linux/netdevice.h>
  46. #include <linux/if_arp.h>
  47. #include <linux/skbuff.h>
  48. #include <net/sock.h>
  49. #include <net/tcp_states.h>
  50. #include <asm/uaccess.h>
  51. #include <linux/fcntl.h>
  52. #include <linux/termios.h> /* For TIOCINQ/OUTQ */
  53. #include <linux/notifier.h>
  54. #include <linux/init.h>
  55. #include <net/x25.h>
/* Protocol timer defaults (X.25 T2, T20-T23); tunable at runtime via sysctl. */
int sysctl_x25_restart_request_timeout = X25_DEFAULT_T20;
int sysctl_x25_call_request_timeout    = X25_DEFAULT_T21;
int sysctl_x25_reset_request_timeout   = X25_DEFAULT_T22;
int sysctl_x25_clear_request_timeout   = X25_DEFAULT_T23;
int sysctl_x25_ack_holdback_timeout    = X25_DEFAULT_T2;

/* List of all X.25 sockets, protected by x25_list_lock (BH-safe rwlock). */
HLIST_HEAD(x25_list);
DEFINE_RWLOCK(x25_list_lock);

static const struct proto_ops x25_proto_ops;	/* defined later in this file */

/* The "null" address is a single space; it denotes an unset/wildcard
 * address (compared against in x25_find_listener() and x25_connect()). */
static struct x25_address null_x25_address = {" "};
  65. int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr,
  66. struct x25_address *calling_addr)
  67. {
  68. int called_len, calling_len;
  69. char *called, *calling;
  70. int i;
  71. called_len = (*p >> 0) & 0x0F;
  72. calling_len = (*p >> 4) & 0x0F;
  73. called = called_addr->x25_addr;
  74. calling = calling_addr->x25_addr;
  75. p++;
  76. for (i = 0; i < (called_len + calling_len); i++) {
  77. if (i < called_len) {
  78. if (i % 2 != 0) {
  79. *called++ = ((*p >> 0) & 0x0F) + '0';
  80. p++;
  81. } else {
  82. *called++ = ((*p >> 4) & 0x0F) + '0';
  83. }
  84. } else {
  85. if (i % 2 != 0) {
  86. *calling++ = ((*p >> 0) & 0x0F) + '0';
  87. p++;
  88. } else {
  89. *calling++ = ((*p >> 4) & 0x0F) + '0';
  90. }
  91. }
  92. }
  93. *called = *calling = '\0';
  94. return 1 + (called_len + calling_len + 1) / 2;
  95. }
  96. int x25_addr_aton(unsigned char *p, struct x25_address *called_addr,
  97. struct x25_address *calling_addr)
  98. {
  99. unsigned int called_len, calling_len;
  100. char *called, *calling;
  101. int i;
  102. called = called_addr->x25_addr;
  103. calling = calling_addr->x25_addr;
  104. called_len = strlen(called);
  105. calling_len = strlen(calling);
  106. *p++ = (calling_len << 4) | (called_len << 0);
  107. for (i = 0; i < (called_len + calling_len); i++) {
  108. if (i < called_len) {
  109. if (i % 2 != 0) {
  110. *p |= (*called++ - '0') << 0;
  111. p++;
  112. } else {
  113. *p = 0x00;
  114. *p |= (*called++ - '0') << 4;
  115. }
  116. } else {
  117. if (i % 2 != 0) {
  118. *p |= (*calling++ - '0') << 0;
  119. p++;
  120. } else {
  121. *p = 0x00;
  122. *p |= (*calling++ - '0') << 4;
  123. }
  124. }
  125. }
  126. return 1 + (called_len + calling_len + 1) / 2;
  127. }
/*
 *	Remove a socket from the global bound-socket list.
 *	Socket removal during an interrupt is now safe: the list is guarded
 *	by x25_list_lock taken with BHs disabled.
 */
static void x25_remove_socket(struct sock *sk)
{
	write_lock_bh(&x25_list_lock);
	sk_del_node_init(sk);
	write_unlock_bh(&x25_list_lock);
}
/*
 *	Kill all bound sockets on a dropped device: every socket whose
 *	neighbour rides on @dev is disconnected with ENETUNREACH.
 *	Runs under the list write lock so the walk is stable while
 *	x25_disconnect() is invoked on matching sockets.
 */
static void x25_kill_by_device(struct net_device *dev)
{
	struct sock *s;
	struct hlist_node *node;

	write_lock_bh(&x25_list_lock);

	sk_for_each(s, node, &x25_list)
		if (x25_sk(s)->neighbour && x25_sk(s)->neighbour->dev == dev)
			x25_disconnect(s, ENETUNREACH, 0, 0);

	write_unlock_bh(&x25_list_lock);
}
/*
 *	Handle device status changes (netdevice notifier callback).
 *	Brings the X.25 link layer up and down in step with the device.
 */
static int x25_device_event(struct notifier_block *this, unsigned long event,
			    void *ptr)
{
	struct net_device *dev = ptr;
	struct x25_neigh *nb;

	/* Native X.25 devices always qualify; Ethernet only when an LLC
	 * stack is configured to carry X.25 over 802.2. */
	if (dev->type == ARPHRD_X25
#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
	 || dev->type == ARPHRD_ETHER
#endif
	 ) {
		switch (event) {
		case NETDEV_UP:
			x25_link_device_up(dev);
			break;
		case NETDEV_GOING_DOWN:
			/* Close the datalink while the device is still usable. */
			nb = x25_get_neigh(dev);
			if (nb) {
				x25_terminate_link(nb);
				x25_neigh_put(nb);	/* drop ref from x25_get_neigh() */
			}
			break;
		case NETDEV_DOWN:
			/* Disconnect sockets, then tear down routes and link. */
			x25_kill_by_device(dev);
			x25_route_device_down(dev);
			x25_link_device_down(dev);
			break;
		}
	}

	return NOTIFY_DONE;
}
/*
 *	Add a socket to the global bound-socket list, under the BH-safe
 *	list write lock (counterpart of x25_remove_socket()).
 */
static void x25_insert_socket(struct sock *sk)
{
	write_lock_bh(&x25_list_lock);
	sk_add_node(sk, &x25_list);
	write_unlock_bh(&x25_list_lock);
}
/*
 *	Find a socket that wants to accept the Call Request we just
 *	received. Check the full list for an address/cud match.
 *	If no cuds match return the next_best thing, an address match.
 *	Note: if a listening socket has cud set it must only get calls
 *	with matching cud.
 *
 *	On success the returned socket carries a reference (sock_hold());
 *	the caller must sock_put() it.  Returns NULL when nothing matches.
 */
static struct sock *x25_find_listener(struct x25_address *addr,
				      struct sk_buff *skb)
{
	struct sock *s;
	struct sock *next_best;
	struct hlist_node *node;

	read_lock_bh(&x25_list_lock);
	next_best = NULL;

	/* A listener matches when it is bound to the calling address or to
	 * the null (wildcard) address, and is in TCP_LISTEN state. */
	sk_for_each(s, node, &x25_list)
		if ((!strcmp(addr->x25_addr,
			     x25_sk(s)->source_addr.x25_addr) ||
		     !strcmp(addr->x25_addr,
			     null_x25_address.x25_addr)) &&
		     s->sk_state == TCP_LISTEN) {
			/*
			 * Found a listening socket, now check the incoming
			 * call user data vs this sockets call user data
			 */
			if(skb->len > 0 && x25_sk(s)->cudmatchlength > 0) {
				if((memcmp(x25_sk(s)->calluserdata.cuddata,
					skb->data,
					x25_sk(s)->cudmatchlength)) == 0) {
					/* Exact cud match wins immediately. */
					sock_hold(s);
					goto found;
				}
			} else
				/*
				 * NOTE(review): a listener with a cud set
				 * still becomes next_best when the incoming
				 * call carries no cud (skb->len == 0), which
				 * appears to contradict the header comment —
				 * confirm intended semantics.
				 */
				next_best = s;
		}
	if (next_best) {
		s = next_best;
		sock_hold(s);
		goto found;
	}
	s = NULL;
found:
	read_unlock_bh(&x25_list_lock);
	return s;
}
  237. /*
  238. * Find a connected X.25 socket given my LCI and neighbour.
  239. */
  240. static struct sock *__x25_find_socket(unsigned int lci, struct x25_neigh *nb)
  241. {
  242. struct sock *s;
  243. struct hlist_node *node;
  244. sk_for_each(s, node, &x25_list)
  245. if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb) {
  246. sock_hold(s);
  247. goto found;
  248. }
  249. s = NULL;
  250. found:
  251. return s;
  252. }
  253. struct sock *x25_find_socket(unsigned int lci, struct x25_neigh *nb)
  254. {
  255. struct sock *s;
  256. read_lock_bh(&x25_list_lock);
  257. s = __x25_find_socket(lci, nb);
  258. read_unlock_bh(&x25_list_lock);
  259. return s;
  260. }
  261. /*
  262. * Find a unique LCI for a given device.
  263. */
  264. static unsigned int x25_new_lci(struct x25_neigh *nb)
  265. {
  266. unsigned int lci = 1;
  267. struct sock *sk;
  268. read_lock_bh(&x25_list_lock);
  269. while ((sk = __x25_find_socket(lci, nb)) != NULL) {
  270. sock_put(sk);
  271. if (++lci == 4096) {
  272. lci = 0;
  273. break;
  274. }
  275. }
  276. read_unlock_bh(&x25_list_lock);
  277. return lci;
  278. }
/*
 *	Deferred destroy.
 */
void x25_destroy_socket(struct sock *);

/*
 *	handler for deferred kills: retries x25_destroy_socket() once the
 *	10-second grace timer armed in x25_destroy_socket() expires.
 */
static void x25_destroy_timer(unsigned long data)
{
	x25_destroy_socket((struct sock *)data);
}
/*
 *	This is called from user mode and the timers. Thus it protects itself
 *	against interrupt users but doesn't worry about being called during
 *	work. Once it is removed from the queue no interrupt or bottom half
 *	will touch it and we are (fairly 8-) ) safe.
 *	Not static as it's used by the timer
 *
 *	Stops timers, unlinks the socket, flushes its queues, and either
 *	frees it now or — if send/receive buffer memory is still charged —
 *	re-arms sk_timer to retry destruction in 10 seconds.
 */
void x25_destroy_socket(struct sock *sk)
{
	struct sk_buff *skb;

	sock_hold(sk);		/* keep sk alive across the teardown below */
	lock_sock(sk);
	x25_stop_heartbeat(sk);
	x25_stop_timer(sk);

	x25_remove_socket(sk);
	x25_clear_queues(sk);		/* Flush the queues */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (skb->sk != sk) {		/* A pending connection */
			/*
			 * Queue the unaccepted socket for death
			 */
			sock_set_flag(skb->sk, SOCK_DEAD);
			x25_start_heartbeat(skb->sk);
			x25_sk(skb->sk)->state = X25_STATE_0;
		}
		kfree_skb(skb);
	}

	if (atomic_read(&sk->sk_wmem_alloc) ||
	    atomic_read(&sk->sk_rmem_alloc)) {
		/* Defer: outstanding buffers */
		sk->sk_timer.expires  = jiffies + 10 * HZ;
		sk->sk_timer.function = x25_destroy_timer;
		sk->sk_timer.data     = (unsigned long)sk;
		add_timer(&sk->sk_timer);
	} else {
		/* drop last reference so sock_put will free */
		__sock_put(sk);
	}

	release_sock(sk);
	sock_put(sk);		/* drop the reference taken above */
}
  331. /*
  332. * Handling for system calls applied via the various interfaces to a
  333. * X.25 socket object.
  334. */
  335. static int x25_setsockopt(struct socket *sock, int level, int optname,
  336. char __user *optval, int optlen)
  337. {
  338. int opt;
  339. struct sock *sk = sock->sk;
  340. int rc = -ENOPROTOOPT;
  341. if (level != SOL_X25 || optname != X25_QBITINCL)
  342. goto out;
  343. rc = -EINVAL;
  344. if (optlen < sizeof(int))
  345. goto out;
  346. rc = -EFAULT;
  347. if (get_user(opt, (int __user *)optval))
  348. goto out;
  349. x25_sk(sk)->qbitincl = !!opt;
  350. rc = 0;
  351. out:
  352. return rc;
  353. }
  354. static int x25_getsockopt(struct socket *sock, int level, int optname,
  355. char __user *optval, int __user *optlen)
  356. {
  357. struct sock *sk = sock->sk;
  358. int val, len, rc = -ENOPROTOOPT;
  359. if (level != SOL_X25 || optname != X25_QBITINCL)
  360. goto out;
  361. rc = -EFAULT;
  362. if (get_user(len, optlen))
  363. goto out;
  364. len = min_t(unsigned int, len, sizeof(int));
  365. rc = -EINVAL;
  366. if (len < 0)
  367. goto out;
  368. rc = -EFAULT;
  369. if (put_user(len, optlen))
  370. goto out;
  371. val = x25_sk(sk)->qbitincl;
  372. rc = copy_to_user(optval, &val, len) ? -EFAULT : 0;
  373. out:
  374. return rc;
  375. }
  376. static int x25_listen(struct socket *sock, int backlog)
  377. {
  378. struct sock *sk = sock->sk;
  379. int rc = -EOPNOTSUPP;
  380. if (sk->sk_state != TCP_LISTEN) {
  381. memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN);
  382. sk->sk_max_ack_backlog = backlog;
  383. sk->sk_state = TCP_LISTEN;
  384. rc = 0;
  385. }
  386. return rc;
  387. }
/* Protocol descriptor: sizes sk_alloc()'s allocation for struct x25_sock. */
static struct proto x25_proto = {
	.name	  = "X25",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct x25_sock),
};
  393. static struct sock *x25_alloc_socket(void)
  394. {
  395. struct x25_sock *x25;
  396. struct sock *sk = sk_alloc(AF_X25, GFP_ATOMIC, &x25_proto, 1);
  397. if (!sk)
  398. goto out;
  399. sock_init_data(NULL, sk);
  400. x25 = x25_sk(sk);
  401. skb_queue_head_init(&x25->ack_queue);
  402. skb_queue_head_init(&x25->fragment_queue);
  403. skb_queue_head_init(&x25->interrupt_in_queue);
  404. skb_queue_head_init(&x25->interrupt_out_queue);
  405. out:
  406. return sk;
  407. }
  408. void x25_init_timers(struct sock *sk);
  409. static int x25_create(struct socket *sock, int protocol)
  410. {
  411. struct sock *sk;
  412. struct x25_sock *x25;
  413. int rc = -ESOCKTNOSUPPORT;
  414. if (sock->type != SOCK_SEQPACKET || protocol)
  415. goto out;
  416. rc = -ENOMEM;
  417. if ((sk = x25_alloc_socket()) == NULL)
  418. goto out;
  419. x25 = x25_sk(sk);
  420. sock_init_data(sock, sk);
  421. x25_init_timers(sk);
  422. sock->ops = &x25_proto_ops;
  423. sk->sk_protocol = protocol;
  424. sk->sk_backlog_rcv = x25_backlog_rcv;
  425. x25->t21 = sysctl_x25_call_request_timeout;
  426. x25->t22 = sysctl_x25_reset_request_timeout;
  427. x25->t23 = sysctl_x25_clear_request_timeout;
  428. x25->t2 = sysctl_x25_ack_holdback_timeout;
  429. x25->state = X25_STATE_0;
  430. x25->cudmatchlength = 0;
  431. x25->accptapprv = X25_DENY_ACCPT_APPRV; /* normally no cud */
  432. /* on call accept */
  433. x25->facilities.winsize_in = X25_DEFAULT_WINDOW_SIZE;
  434. x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE;
  435. x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE;
  436. x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE;
  437. x25->facilities.throughput = X25_DEFAULT_THROUGHPUT;
  438. x25->facilities.reverse = X25_DEFAULT_REVERSE;
  439. rc = 0;
  440. out:
  441. return rc;
  442. }
  443. static struct sock *x25_make_new(struct sock *osk)
  444. {
  445. struct sock *sk = NULL;
  446. struct x25_sock *x25, *ox25;
  447. if (osk->sk_type != SOCK_SEQPACKET)
  448. goto out;
  449. if ((sk = x25_alloc_socket()) == NULL)
  450. goto out;
  451. x25 = x25_sk(sk);
  452. sk->sk_type = osk->sk_type;
  453. sk->sk_socket = osk->sk_socket;
  454. sk->sk_priority = osk->sk_priority;
  455. sk->sk_protocol = osk->sk_protocol;
  456. sk->sk_rcvbuf = osk->sk_rcvbuf;
  457. sk->sk_sndbuf = osk->sk_sndbuf;
  458. sk->sk_state = TCP_ESTABLISHED;
  459. sk->sk_sleep = osk->sk_sleep;
  460. sk->sk_backlog_rcv = osk->sk_backlog_rcv;
  461. sock_copy_flags(sk, osk);
  462. ox25 = x25_sk(osk);
  463. x25->t21 = ox25->t21;
  464. x25->t22 = ox25->t22;
  465. x25->t23 = ox25->t23;
  466. x25->t2 = ox25->t2;
  467. x25->facilities = ox25->facilities;
  468. x25->qbitincl = ox25->qbitincl;
  469. x25->cudmatchlength = ox25->cudmatchlength;
  470. x25->accptapprv = ox25->accptapprv;
  471. x25_init_timers(sk);
  472. out:
  473. return sk;
  474. }
/*
 *	close(2) backend.  Idle or already-clearing sockets are destroyed
 *	immediately; active calls first send a CLEAR REQUEST and linger in
 *	X25_STATE_2 (with SOCK_DESTROY set) until T23 or the clear
 *	confirmation completes the teardown.
 */
static int x25_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct x25_sock *x25;

	if (!sk)
		goto out;

	x25 = x25_sk(sk);

	switch (x25->state) {
		case X25_STATE_0:
		case X25_STATE_2:
			/* No active call: tear down right away. */
			x25_disconnect(sk, 0, 0, 0);
			x25_destroy_socket(sk);
			goto out;

		case X25_STATE_1:
		case X25_STATE_3:
		case X25_STATE_4:
			/* Active call: clear it and defer destruction. */
			x25_clear_queues(sk);
			x25_write_internal(sk, X25_CLEAR_REQUEST);
			x25_start_t23timer(sk);
			x25->state = X25_STATE_2;
			sk->sk_state	= TCP_CLOSE;
			sk->sk_shutdown	|= SEND_SHUTDOWN;
			sk->sk_state_change(sk);
			sock_set_flag(sk, SOCK_DEAD);
			sock_set_flag(sk, SOCK_DESTROY);
			break;
	}

	sock->sk	= NULL;
	sk->sk_socket	= NULL;	/* Not used, but we should do this */
out:
	return 0;
}
  507. static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
  508. {
  509. struct sock *sk = sock->sk;
  510. struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
  511. if (!sock_flag(sk, SOCK_ZAPPED) ||
  512. addr_len != sizeof(struct sockaddr_x25) ||
  513. addr->sx25_family != AF_X25)
  514. return -EINVAL;
  515. x25_sk(sk)->source_addr = addr->sx25_addr;
  516. x25_insert_socket(sk);
  517. sock_reset_flag(sk, SOCK_ZAPPED);
  518. SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
  519. return 0;
  520. }
/*
 *	Sleep (interruptibly) until the socket reaches TCP_ESTABLISHED,
 *	a signal arrives (-ERESTARTSYS), or a socket error is reported.
 *	The socket lock is dropped around schedule() and retaken after.
 *	Called with the socket locked; returns 0 on establishment.
 */
static int x25_wait_for_connection_establishment(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int rc;

	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		rc = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		rc = sock_error(sk);
		if (rc) {
			/* Connection failed: reflect that in socket state. */
			sk->sk_socket->state = SS_UNCONNECTED;
			break;
		}
		rc = 0;
		if (sk->sk_state != TCP_ESTABLISHED) {
			release_sock(sk);
			schedule();
			lock_sock(sk);
		} else
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	return rc;
}
  548. static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
  549. int addr_len, int flags)
  550. {
  551. struct sock *sk = sock->sk;
  552. struct x25_sock *x25 = x25_sk(sk);
  553. struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
  554. struct x25_route *rt;
  555. int rc = 0;
  556. lock_sock(sk);
  557. if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
  558. sock->state = SS_CONNECTED;
  559. goto out; /* Connect completed during a ERESTARTSYS event */
  560. }
  561. rc = -ECONNREFUSED;
  562. if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
  563. sock->state = SS_UNCONNECTED;
  564. goto out;
  565. }
  566. rc = -EISCONN; /* No reconnect on a seqpacket socket */
  567. if (sk->sk_state == TCP_ESTABLISHED)
  568. goto out;
  569. sk->sk_state = TCP_CLOSE;
  570. sock->state = SS_UNCONNECTED;
  571. rc = -EINVAL;
  572. if (addr_len != sizeof(struct sockaddr_x25) ||
  573. addr->sx25_family != AF_X25)
  574. goto out;
  575. rc = -ENETUNREACH;
  576. rt = x25_get_route(&addr->sx25_addr);
  577. if (!rt)
  578. goto out;
  579. x25->neighbour = x25_get_neigh(rt->dev);
  580. if (!x25->neighbour)
  581. goto out_put_route;
  582. x25_limit_facilities(&x25->facilities, x25->neighbour);
  583. x25->lci = x25_new_lci(x25->neighbour);
  584. if (!x25->lci)
  585. goto out_put_neigh;
  586. rc = -EINVAL;
  587. if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
  588. goto out_put_neigh;
  589. if (!strcmp(x25->source_addr.x25_addr, null_x25_address.x25_addr))
  590. memset(&x25->source_addr, '\0', X25_ADDR_LEN);
  591. x25->dest_addr = addr->sx25_addr;
  592. /* Move to connecting socket, start sending Connect Requests */
  593. sock->state = SS_CONNECTING;
  594. sk->sk_state = TCP_SYN_SENT;
  595. x25->state = X25_STATE_1;
  596. x25_write_internal(sk, X25_CALL_REQUEST);
  597. x25_start_heartbeat(sk);
  598. x25_start_t21timer(sk);
  599. /* Now the loop */
  600. rc = -EINPROGRESS;
  601. if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
  602. goto out_put_neigh;
  603. rc = x25_wait_for_connection_establishment(sk);
  604. if (rc)
  605. goto out_put_neigh;
  606. sock->state = SS_CONNECTED;
  607. rc = 0;
  608. out_put_neigh:
  609. if (rc)
  610. x25_neigh_put(x25->neighbour);
  611. out_put_route:
  612. x25_route_put(rt);
  613. out:
  614. release_sock(sk);
  615. return rc;
  616. }
/*
 *	Sleep until data arrives on the receive queue, the receive side is
 *	shut down (returns 0 with a possibly empty queue), a signal arrives
 *	(-ERESTARTSYS), or @timeout jiffies elapse (-EAGAIN).
 *	Called with the socket locked; the lock is dropped while sleeping.
 */
static int x25_wait_for_data(struct sock *sk, int timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int rc = 0;

	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		/* NOTE: breaks with rc == 0 even if the queue is empty. */
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			break;
		rc = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		rc = -EAGAIN;
		if (!timeout)
			break;
		rc = 0;
		if (skb_queue_empty(&sk->sk_receive_queue)) {
			release_sock(sk);
			timeout = schedule_timeout(timeout);
			lock_sock(sk);
		} else
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	return rc;
}
  644. static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
  645. {
  646. struct sock *sk = sock->sk;
  647. struct sock *newsk;
  648. struct sk_buff *skb;
  649. int rc = -EINVAL;
  650. if (!sk || sk->sk_state != TCP_LISTEN)
  651. goto out;
  652. rc = -EOPNOTSUPP;
  653. if (sk->sk_type != SOCK_SEQPACKET)
  654. goto out;
  655. lock_sock(sk);
  656. rc = x25_wait_for_data(sk, sk->sk_rcvtimeo);
  657. if (rc)
  658. goto out2;
  659. skb = skb_dequeue(&sk->sk_receive_queue);
  660. rc = -EINVAL;
  661. if (!skb->sk)
  662. goto out2;
  663. newsk = skb->sk;
  664. newsk->sk_socket = newsock;
  665. newsk->sk_sleep = &newsock->wait;
  666. /* Now attach up the new socket */
  667. skb->sk = NULL;
  668. kfree_skb(skb);
  669. sk->sk_ack_backlog--;
  670. newsock->sk = newsk;
  671. newsock->state = SS_CONNECTED;
  672. rc = 0;
  673. out2:
  674. release_sock(sk);
  675. out:
  676. return rc;
  677. }
  678. static int x25_getname(struct socket *sock, struct sockaddr *uaddr,
  679. int *uaddr_len, int peer)
  680. {
  681. struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)uaddr;
  682. struct sock *sk = sock->sk;
  683. struct x25_sock *x25 = x25_sk(sk);
  684. if (peer) {
  685. if (sk->sk_state != TCP_ESTABLISHED)
  686. return -ENOTCONN;
  687. sx25->sx25_addr = x25->dest_addr;
  688. } else
  689. sx25->sx25_addr = x25->source_addr;
  690. sx25->sx25_family = AF_X25;
  691. *uaddr_len = sizeof(*sx25);
  692. return 0;
  693. }
  694. int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
  695. unsigned int lci)
  696. {
  697. struct sock *sk;
  698. struct sock *make;
  699. struct x25_sock *makex25;
  700. struct x25_address source_addr, dest_addr;
  701. struct x25_facilities facilities;
  702. int len, rc;
  703. /*
  704. * Remove the LCI and frame type.
  705. */
  706. skb_pull(skb, X25_STD_MIN_LEN);
  707. /*
  708. * Extract the X.25 addresses and convert them to ASCII strings,
  709. * and remove them.
  710. */
  711. skb_pull(skb, x25_addr_ntoa(skb->data, &source_addr, &dest_addr));
  712. /*
  713. * Get the length of the facilities, skip past them for the moment
  714. * get the call user data because this is needed to determine
  715. * the correct listener
  716. */
  717. len = skb->data[0] + 1;
  718. skb_pull(skb,len);
  719. /*
  720. * Find a listener for the particular address/cud pair.
  721. */
  722. sk = x25_find_listener(&source_addr,skb);
  723. skb_push(skb,len);
  724. /*
  725. * We can't accept the Call Request.
  726. */
  727. if (sk == NULL || sk_acceptq_is_full(sk))
  728. goto out_clear_request;
  729. /*
  730. * Try to reach a compromise on the requested facilities.
  731. */
  732. if ((len = x25_negotiate_facilities(skb, sk, &facilities)) == -1)
  733. goto out_sock_put;
  734. /*
  735. * current neighbour/link might impose additional limits
  736. * on certain facilties
  737. */
  738. x25_limit_facilities(&facilities, nb);
  739. /*
  740. * Try to create a new socket.
  741. */
  742. make = x25_make_new(sk);
  743. if (!make)
  744. goto out_sock_put;
  745. /*
  746. * Remove the facilities
  747. */
  748. skb_pull(skb, len);
  749. skb->sk = make;
  750. make->sk_state = TCP_ESTABLISHED;
  751. makex25 = x25_sk(make);
  752. makex25->lci = lci;
  753. makex25->dest_addr = dest_addr;
  754. makex25->source_addr = source_addr;
  755. makex25->neighbour = nb;
  756. makex25->facilities = facilities;
  757. makex25->vc_facil_mask = x25_sk(sk)->vc_facil_mask;
  758. /* ensure no reverse facil on accept */
  759. makex25->vc_facil_mask &= ~X25_MASK_REVERSE;
  760. makex25->cudmatchlength = x25_sk(sk)->cudmatchlength;
  761. /* Normally all calls are accepted immediatly */
  762. if(makex25->accptapprv & X25_DENY_ACCPT_APPRV) {
  763. x25_write_internal(make, X25_CALL_ACCEPTED);
  764. makex25->state = X25_STATE_3;
  765. }
  766. /*
  767. * Incoming Call User Data.
  768. */
  769. if (skb->len >= 0) {
  770. memcpy(makex25->calluserdata.cuddata, skb->data, skb->len);
  771. makex25->calluserdata.cudlength = skb->len;
  772. }
  773. sk->sk_ack_backlog++;
  774. x25_insert_socket(make);
  775. skb_queue_head(&sk->sk_receive_queue, skb);
  776. x25_start_heartbeat(make);
  777. if (!sock_flag(sk, SOCK_DEAD))
  778. sk->sk_data_ready(sk, skb->len);
  779. rc = 1;
  780. sock_put(sk);
  781. out:
  782. return rc;
  783. out_sock_put:
  784. sock_put(sk);
  785. out_clear_request:
  786. rc = 0;
  787. x25_transmit_clear_request(nb, lci, 0x01);
  788. goto out;
  789. }
  790. static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
  791. struct msghdr *msg, size_t len)
  792. {
  793. struct sock *sk = sock->sk;
  794. struct x25_sock *x25 = x25_sk(sk);
  795. struct sockaddr_x25 *usx25 = (struct sockaddr_x25 *)msg->msg_name;
  796. struct sockaddr_x25 sx25;
  797. struct sk_buff *skb;
  798. unsigned char *asmptr;
  799. int noblock = msg->msg_flags & MSG_DONTWAIT;
  800. size_t size;
  801. int qbit = 0, rc = -EINVAL;
  802. if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_OOB|MSG_EOR|MSG_CMSG_COMPAT))
  803. goto out;
  804. /* we currently don't support segmented records at the user interface */
  805. if (!(msg->msg_flags & (MSG_EOR|MSG_OOB)))
  806. goto out;
  807. rc = -EADDRNOTAVAIL;
  808. if (sock_flag(sk, SOCK_ZAPPED))
  809. goto out;
  810. rc = -EPIPE;
  811. if (sk->sk_shutdown & SEND_SHUTDOWN) {
  812. send_sig(SIGPIPE, current, 0);
  813. goto out;
  814. }
  815. rc = -ENETUNREACH;
  816. if (!x25->neighbour)
  817. goto out;
  818. if (usx25) {
  819. rc = -EINVAL;
  820. if (msg->msg_namelen < sizeof(sx25))
  821. goto out;
  822. memcpy(&sx25, usx25, sizeof(sx25));
  823. rc = -EISCONN;
  824. if (strcmp(x25->dest_addr.x25_addr, sx25.sx25_addr.x25_addr))
  825. goto out;
  826. rc = -EINVAL;
  827. if (sx25.sx25_family != AF_X25)
  828. goto out;
  829. } else {
  830. /*
  831. * FIXME 1003.1g - if the socket is like this because
  832. * it has become closed (not started closed) we ought
  833. * to SIGPIPE, EPIPE;
  834. */
  835. rc = -ENOTCONN;
  836. if (sk->sk_state != TCP_ESTABLISHED)
  837. goto out;
  838. sx25.sx25_family = AF_X25;
  839. sx25.sx25_addr = x25->dest_addr;
  840. }
  841. SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n");
  842. /* Build a packet */
  843. SOCK_DEBUG(sk, "x25_sendmsg: sendto: building packet.\n");
  844. if ((msg->msg_flags & MSG_OOB) && len > 32)
  845. len = 32;
  846. size = len + X25_MAX_L2_LEN + X25_EXT_MIN_LEN;
  847. skb = sock_alloc_send_skb(sk, size, noblock, &rc);
  848. if (!skb)
  849. goto out;
  850. X25_SKB_CB(skb)->flags = msg->msg_flags;
  851. skb_reserve(skb, X25_MAX_L2_LEN + X25_EXT_MIN_LEN);
  852. /*
  853. * Put the data on the end
  854. */
  855. SOCK_DEBUG(sk, "x25_sendmsg: Copying user data\n");
  856. asmptr = skb->h.raw = skb_put(skb, len);
  857. rc = memcpy_fromiovec(asmptr, msg->msg_iov, len);
  858. if (rc)
  859. goto out_kfree_skb;
  860. /*
  861. * If the Q BIT Include socket option is in force, the first
  862. * byte of the user data is the logical value of the Q Bit.
  863. */
  864. if (x25->qbitincl) {
  865. qbit = skb->data[0];
  866. skb_pull(skb, 1);
  867. }
  868. /*
  869. * Push down the X.25 header
  870. */
  871. SOCK_DEBUG(sk, "x25_sendmsg: Building X.25 Header.\n");
  872. if (msg->msg_flags & MSG_OOB) {
  873. if (x25->neighbour->extended) {
  874. asmptr = skb_push(skb, X25_STD_MIN_LEN);
  875. *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ;
  876. *asmptr++ = (x25->lci >> 0) & 0xFF;
  877. *asmptr++ = X25_INTERRUPT;
  878. } else {
  879. asmptr = skb_push(skb, X25_STD_MIN_LEN);
  880. *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ;
  881. *asmptr++ = (x25->lci >> 0) & 0xFF;
  882. *asmptr++ = X25_INTERRUPT;
  883. }
  884. } else {
  885. if (x25->neighbour->extended) {
  886. /* Build an Extended X.25 header */
  887. asmptr = skb_push(skb, X25_EXT_MIN_LEN);
  888. *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ;
  889. *asmptr++ = (x25->lci >> 0) & 0xFF;
  890. *asmptr++ = X25_DATA;
  891. *asmptr++ = X25_DATA;
  892. } else {
  893. /* Build an Standard X.25 header */
  894. asmptr = skb_push(skb, X25_STD_MIN_LEN);
  895. *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ;
  896. *asmptr++ = (x25->lci >> 0) & 0xFF;
  897. *asmptr++ = X25_DATA;
  898. }
  899. if (qbit)
  900. skb->data[0] |= X25_Q_BIT;
  901. }
  902. SOCK_DEBUG(sk, "x25_sendmsg: Built header.\n");
  903. SOCK_DEBUG(sk, "x25_sendmsg: Transmitting buffer\n");
  904. rc = -ENOTCONN;
  905. if (sk->sk_state != TCP_ESTABLISHED)
  906. goto out_kfree_skb;
  907. if (msg->msg_flags & MSG_OOB)
  908. skb_queue_tail(&x25->interrupt_out_queue, skb);
  909. else {
  910. len = x25_output(sk, skb);
  911. if (len < 0)
  912. kfree_skb(skb);
  913. else if (x25->qbitincl)
  914. len++;
  915. }
  916. /*
  917. * lock_sock() is currently only used to serialize this x25_kick()
  918. * against input-driven x25_kick() calls. It currently only blocks
  919. * incoming packets for this socket and does not protect against
  920. * any other socket state changes and is not called from anywhere
  921. * else. As x25_kick() cannot block and as long as all socket
  922. * operations are BKL-wrapped, we don't need take to care about
  923. * purging the backlog queue in x25_release().
  924. *
  925. * Using lock_sock() to protect all socket operations entirely
  926. * (and making the whole x25 stack SMP aware) unfortunately would
  927. * require major changes to {send,recv}msg and skb allocation methods.
  928. * -> 2.5 ;)
  929. */
  930. lock_sock(sk);
  931. x25_kick(sk);
  932. release_sock(sk);
  933. rc = len;
  934. out:
  935. return rc;
  936. out_kfree_skb:
  937. kfree_skb(skb);
  938. goto out;
  939. }
  940. static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
  941. struct msghdr *msg, size_t size,
  942. int flags)
  943. {
  944. struct sock *sk = sock->sk;
  945. struct x25_sock *x25 = x25_sk(sk);
  946. struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name;
  947. size_t copied;
  948. int qbit;
  949. struct sk_buff *skb;
  950. unsigned char *asmptr;
  951. int rc = -ENOTCONN;
  952. /*
  953. * This works for seqpacket too. The receiver has ordered the queue for
  954. * us! We do one quick check first though
  955. */
  956. if (sk->sk_state != TCP_ESTABLISHED)
  957. goto out;
  958. if (flags & MSG_OOB) {
  959. rc = -EINVAL;
  960. if (sock_flag(sk, SOCK_URGINLINE) ||
  961. !skb_peek(&x25->interrupt_in_queue))
  962. goto out;
  963. skb = skb_dequeue(&x25->interrupt_in_queue);
  964. skb_pull(skb, X25_STD_MIN_LEN);
  965. /*
  966. * No Q bit information on Interrupt data.
  967. */
  968. if (x25->qbitincl) {
  969. asmptr = skb_push(skb, 1);
  970. *asmptr = 0x00;
  971. }
  972. msg->msg_flags |= MSG_OOB;
  973. } else {
  974. /* Now we can treat all alike */
  975. skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
  976. flags & MSG_DONTWAIT, &rc);
  977. if (!skb)
  978. goto out;
  979. qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT;
  980. skb_pull(skb, x25->neighbour->extended ?
  981. X25_EXT_MIN_LEN : X25_STD_MIN_LEN);
  982. if (x25->qbitincl) {
  983. asmptr = skb_push(skb, 1);
  984. *asmptr = qbit;
  985. }
  986. }
  987. skb->h.raw = skb->data;
  988. copied = skb->len;
  989. if (copied > size) {
  990. copied = size;
  991. msg->msg_flags |= MSG_TRUNC;
  992. }
  993. /* Currently, each datagram always contains a complete record */
  994. msg->msg_flags |= MSG_EOR;
  995. rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
  996. if (rc)
  997. goto out_free_dgram;
  998. if (sx25) {
  999. sx25->sx25_family = AF_X25;
  1000. sx25->sx25_addr = x25->dest_addr;
  1001. }
  1002. msg->msg_namelen = sizeof(struct sockaddr_x25);
  1003. lock_sock(sk);
  1004. x25_check_rbuf(sk);
  1005. release_sock(sk);
  1006. rc = copied;
  1007. out_free_dgram:
  1008. skb_free_datagram(sk, skb);
  1009. out:
  1010. return rc;
  1011. }
  1012. static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
  1013. {
  1014. struct sock *sk = sock->sk;
  1015. struct x25_sock *x25 = x25_sk(sk);
  1016. void __user *argp = (void __user *)arg;
  1017. int rc;
  1018. switch (cmd) {
  1019. case TIOCOUTQ: {
  1020. int amount = sk->sk_sndbuf -
  1021. atomic_read(&sk->sk_wmem_alloc);
  1022. if (amount < 0)
  1023. amount = 0;
  1024. rc = put_user(amount, (unsigned int __user *)argp);
  1025. break;
  1026. }
  1027. case TIOCINQ: {
  1028. struct sk_buff *skb;
  1029. int amount = 0;
  1030. /*
  1031. * These two are safe on a single CPU system as
  1032. * only user tasks fiddle here
  1033. */
  1034. if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
  1035. amount = skb->len;
  1036. rc = put_user(amount, (unsigned int __user *)argp);
  1037. break;
  1038. }
  1039. case SIOCGSTAMP:
  1040. rc = -EINVAL;
  1041. if (sk)
  1042. rc = sock_get_timestamp(sk,
  1043. (struct timeval __user *)argp);
  1044. break;
  1045. case SIOCGIFADDR:
  1046. case SIOCSIFADDR:
  1047. case SIOCGIFDSTADDR:
  1048. case SIOCSIFDSTADDR:
  1049. case SIOCGIFBRDADDR:
  1050. case SIOCSIFBRDADDR:
  1051. case SIOCGIFNETMASK:
  1052. case SIOCSIFNETMASK:
  1053. case SIOCGIFMETRIC:
  1054. case SIOCSIFMETRIC:
  1055. rc = -EINVAL;
  1056. break;
  1057. case SIOCADDRT:
  1058. case SIOCDELRT:
  1059. rc = -EPERM;
  1060. if (!capable(CAP_NET_ADMIN))
  1061. break;
  1062. rc = x25_route_ioctl(cmd, argp);
  1063. break;
  1064. case SIOCX25GSUBSCRIP:
  1065. rc = x25_subscr_ioctl(cmd, argp);
  1066. break;
  1067. case SIOCX25SSUBSCRIP:
  1068. rc = -EPERM;
  1069. if (!capable(CAP_NET_ADMIN))
  1070. break;
  1071. rc = x25_subscr_ioctl(cmd, argp);
  1072. break;
  1073. case SIOCX25GFACILITIES: {
  1074. struct x25_facilities fac = x25->facilities;
  1075. rc = copy_to_user(argp, &fac,
  1076. sizeof(fac)) ? -EFAULT : 0;
  1077. break;
  1078. }
  1079. case SIOCX25SFACILITIES: {
  1080. struct x25_facilities facilities;
  1081. rc = -EFAULT;
  1082. if (copy_from_user(&facilities, argp,
  1083. sizeof(facilities)))
  1084. break;
  1085. rc = -EINVAL;
  1086. if (sk->sk_state != TCP_LISTEN &&
  1087. sk->sk_state != TCP_CLOSE)
  1088. break;
  1089. if (facilities.pacsize_in < X25_PS16 ||
  1090. facilities.pacsize_in > X25_PS4096)
  1091. break;
  1092. if (facilities.pacsize_out < X25_PS16 ||
  1093. facilities.pacsize_out > X25_PS4096)
  1094. break;
  1095. if (facilities.winsize_in < 1 ||
  1096. facilities.winsize_in > 127)
  1097. break;
  1098. if (facilities.throughput < 0x03 ||
  1099. facilities.throughput > 0xDD)
  1100. break;
  1101. if (facilities.reverse &&
  1102. (facilities.reverse | 0x81)!= 0x81)
  1103. break;
  1104. x25->facilities = facilities;
  1105. rc = 0;
  1106. break;
  1107. }
  1108. case SIOCX25GCALLUSERDATA: {
  1109. struct x25_calluserdata cud = x25->calluserdata;
  1110. rc = copy_to_user(argp, &cud,
  1111. sizeof(cud)) ? -EFAULT : 0;
  1112. break;
  1113. }
  1114. case SIOCX25SCALLUSERDATA: {
  1115. struct x25_calluserdata calluserdata;
  1116. rc = -EFAULT;
  1117. if (copy_from_user(&calluserdata, argp,
  1118. sizeof(calluserdata)))
  1119. break;
  1120. rc = -EINVAL;
  1121. if (calluserdata.cudlength > X25_MAX_CUD_LEN)
  1122. break;
  1123. x25->calluserdata = calluserdata;
  1124. rc = 0;
  1125. break;
  1126. }
  1127. case SIOCX25GCAUSEDIAG: {
  1128. struct x25_causediag causediag;
  1129. causediag = x25->causediag;
  1130. rc = copy_to_user(argp, &causediag,
  1131. sizeof(causediag)) ? -EFAULT : 0;
  1132. break;
  1133. }
  1134. case SIOCX25SCUDMATCHLEN: {
  1135. struct x25_subaddr sub_addr;
  1136. rc = -EINVAL;
  1137. if(sk->sk_state != TCP_CLOSE)
  1138. break;
  1139. rc = -EFAULT;
  1140. if (copy_from_user(&sub_addr, argp,
  1141. sizeof(sub_addr)))
  1142. break;
  1143. rc = -EINVAL;
  1144. if(sub_addr.cudmatchlength > X25_MAX_CUD_LEN)
  1145. break;
  1146. x25->cudmatchlength = sub_addr.cudmatchlength;
  1147. rc = 0;
  1148. break;
  1149. }
  1150. case SIOCX25CALLACCPTAPPRV: {
  1151. rc = -EINVAL;
  1152. if (sk->sk_state != TCP_CLOSE)
  1153. break;
  1154. x25->accptapprv = X25_ALLOW_ACCPT_APPRV;
  1155. rc = 0;
  1156. break;
  1157. }
  1158. case SIOCX25SENDCALLACCPT: {
  1159. rc = -EINVAL;
  1160. if (sk->sk_state != TCP_ESTABLISHED)
  1161. break;
  1162. if (x25->accptapprv) /* must call accptapprv above */
  1163. break;
  1164. x25_write_internal(sk, X25_CALL_ACCEPTED);
  1165. x25->state = X25_STATE_3;
  1166. rc = 0;
  1167. break;
  1168. }
  1169. default:
  1170. rc = -ENOIOCTLCMD;
  1171. break;
  1172. }
  1173. return rc;
  1174. }
/* PF_X25 entry point: lets sys_socket() create AF_X25 sockets. */
static struct net_proto_family x25_family_ops = {
	.family =	AF_X25,
	.create =	x25_create,
	.owner	=	THIS_MODULE,
};
/*
 * Socket operations table for AF_X25.  Unsupported operations map to the
 * sock_no_* stubs; the rest dispatch to the x25_* handlers above.
 * The SOCKOPS_WRAPPED name is expanded by SOCKOPS_WRAP() below, which
 * generates BKL-holding wrappers around each operation.
 */
static const struct proto_ops SOCKOPS_WRAPPED(x25_proto_ops) = {
	.family =	AF_X25,
	.owner =	THIS_MODULE,
	.release =	x25_release,
	.bind =		x25_bind,
	.connect =	x25_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	x25_accept,
	.getname =	x25_getname,
	.poll =		datagram_poll,
	.ioctl =	x25_ioctl,
	.listen =	x25_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	x25_setsockopt,
	.getsockopt =	x25_getsockopt,
	.sendmsg =	x25_sendmsg,
	.recvmsg =	x25_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
/*
 * smp_lock.h supplies the BKL helpers used by the wrappers that
 * SOCKOPS_WRAP() generates around x25_proto_ops.
 * NOTE(review): the late #include is unusual - presumably kept next to
 * its only user; confirm before relocating it to the top of the file.
 */
#include <linux/smp_lock.h>
SOCKOPS_WRAP(x25_proto, AF_X25);
/* Layer-2 hook: feed ETH_P_X25 frames into the LAPB receive path. */
static struct packet_type x25_packet_type = {
	.type =	__constant_htons(ETH_P_X25),
	.func =	x25_lapb_receive_frame,
};
/* Tracks network device up/down events for the X.25 link layer. */
static struct notifier_block x25_dev_notifier = {
	.notifier_call = x25_device_event,
};
  1209. void x25_kill_by_neigh(struct x25_neigh *nb)
  1210. {
  1211. struct sock *s;
  1212. struct hlist_node *node;
  1213. write_lock_bh(&x25_list_lock);
  1214. sk_for_each(s, node, &x25_list)
  1215. if (x25_sk(s)->neighbour == nb)
  1216. x25_disconnect(s, ENETUNREACH, 0, 0);
  1217. write_unlock_bh(&x25_list_lock);
  1218. }
  1219. static int __init x25_init(void)
  1220. {
  1221. int rc = proto_register(&x25_proto, 0);
  1222. if (rc != 0)
  1223. goto out;
  1224. sock_register(&x25_family_ops);
  1225. dev_add_pack(&x25_packet_type);
  1226. register_netdevice_notifier(&x25_dev_notifier);
  1227. printk(KERN_INFO "X.25 for Linux. Version 0.2 for Linux 2.1.15\n");
  1228. #ifdef CONFIG_SYSCTL
  1229. x25_register_sysctl();
  1230. #endif
  1231. x25_proc_init();
  1232. out:
  1233. return rc;
  1234. }
/* Run x25_init() at boot (built-in) or on insmod (module). */
module_init(x25_init);
/*
 * x25_exit - module teardown.
 *
 * Releases resources in reverse order of x25_init(): proc entries and
 * link/route tables first, then sysctl, the notifier, the packet hook,
 * and finally the socket family and protocol registrations.  The order
 * is significant - do not reorder these calls.
 */
static void __exit x25_exit(void)
{
	x25_proc_exit();
	x25_link_free();
	x25_route_free();
#ifdef CONFIG_SYSCTL
	x25_unregister_sysctl();
#endif
	unregister_netdevice_notifier(&x25_dev_notifier);
	dev_remove_pack(&x25_packet_type);
	sock_unregister(AF_X25);
	proto_unregister(&x25_proto);
}
/* Run x25_exit() on rmmod. */
module_exit(x25_exit);

/* Module metadata. */
MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The X.25 Packet Layer network layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_X25);