/* net/x25/af_x25.c */
/*
 *	X.25 Packet Layer release 002
 *
 *	This is ALPHA test software. This code may break your machine,
 *	randomly fail to work with new releases, misbehave and/or generally
 *	screw up. It might even work.
 *
 *	This code REQUIRES 2.1.15 or higher
 *
 *	This module:
 *		This module is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	History
 *	X.25 001	Jonathan Naylor	Started coding.
 *	X.25 002	Jonathan Naylor	Centralised disconnect handling.
 *					New timer architecture.
 *	2000-03-11	Henner Eisen	MSG_EOR handling more POSIX compliant.
 *	2000-03-22	Daniela Squassoni Allowed disabling/enabling of
 *					facilities negotiation and increased
 *					the throughput upper limit.
 *	2000-08-27	Arnaldo C. Melo	s/suser/capable/ + micro cleanups
 *	2000-09-04	Henner Eisen	Set sock->state in x25_accept().
 *					Fixed x25_output() related skb leakage.
 *	2000-10-02	Henner Eisen	Made x25_kick() single threaded per socket.
 *	2000-10-27	Henner Eisen	MSG_DONTWAIT for fragment allocation.
 *	2000-11-14	Henner Eisen	Closing datalink from NETDEV_GOING_DOWN
 *	2002-10-06	Arnaldo C. Melo	Get rid of cli/sti, move proc stuff to
 *					x25_proc.c, using seq_file
 *	2005-04-02	Shaun Pereira	Selective sub address matching
 *					with call user data
 *	2005-04-15	Shaun Pereira	Fast select with no restriction on
 *					response
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
#include <linux/notifier.h>
#include <linux/init.h>
#include <net/x25.h>
/* Per-timer defaults (T20..T2), overridable through the x25 sysctl table. */
int sysctl_x25_restart_request_timeout = X25_DEFAULT_T20;
int sysctl_x25_call_request_timeout = X25_DEFAULT_T21;
int sysctl_x25_reset_request_timeout = X25_DEFAULT_T22;
int sysctl_x25_clear_request_timeout = X25_DEFAULT_T23;
int sysctl_x25_ack_holdback_timeout = X25_DEFAULT_T2;

/* Global list of all X.25 sockets, guarded by x25_list_lock. */
HLIST_HEAD(x25_list);
DEFINE_RWLOCK(x25_list_lock);

static const struct proto_ops x25_proto_ops;

/* The "any" address: a single space, matched by wildcard listeners. */
static struct x25_address null_x25_address = {" "};
/*
 *	Decode the packed BCD called/calling address block of a call
 *	set-up packet into two ASCII digit strings.
 *
 *	p points at the address-length octet: low nibble = called digits,
 *	high nibble = calling digits.  Digits are packed two per octet,
 *	high nibble first, with the parity running across both addresses.
 *
 *	Returns the number of octets consumed (length octet included).
 *
 *	NOTE(review): the lengths come straight off the wire and are not
 *	checked against the size of x25_addr or the remaining packet data
 *	here — callers must guarantee the buffers are large enough.
 *	TODO confirm all callers do.
 */
int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr,
		  struct x25_address *calling_addr)
{
	int called_len, calling_len;
	char *called, *calling;
	int i;

	called_len  = (*p >> 0) & 0x0F;	/* low nibble: called digits */
	calling_len = (*p >> 4) & 0x0F;	/* high nibble: calling digits */

	called  = called_addr->x25_addr;
	calling = calling_addr->x25_addr;
	p++;

	for (i = 0; i < (called_len + calling_len); i++) {
		if (i < called_len) {
			if (i % 2 != 0) {	/* odd digit: low nibble, advance */
				*called++ = ((*p >> 0) & 0x0F) + '0';
				p++;
			} else {		/* even digit: high nibble */
				*called++ = ((*p >> 4) & 0x0F) + '0';
			}
		} else {
			if (i % 2 != 0) {
				*calling++ = ((*p >> 0) & 0x0F) + '0';
				p++;
			} else {
				*calling++ = ((*p >> 4) & 0x0F) + '0';
			}
		}
	}

	*called = *calling = '\0';

	return 1 + (called_len + calling_len + 1) / 2;
}
/*
 *	Encode the NUL-terminated ASCII digit strings of called_addr and
 *	calling_addr into the on-the-wire packed BCD address block: one
 *	length octet (low nibble = called length, high nibble = calling
 *	length) followed by the digits, two per octet, high nibble first.
 *
 *	Returns the number of octets written (length octet included).
 */
int x25_addr_aton(unsigned char *p, struct x25_address *called_addr,
		  struct x25_address *calling_addr)
{
	unsigned int called_len, calling_len;
	char *called, *calling;
	int i;

	called  = called_addr->x25_addr;
	calling = calling_addr->x25_addr;

	called_len  = strlen(called);
	calling_len = strlen(calling);

	*p++ = (calling_len << 4) | (called_len << 0);

	for (i = 0; i < (called_len + calling_len); i++) {
		if (i < called_len) {
			if (i % 2 != 0) {	/* odd digit: fill low nibble, advance */
				*p |= (*called++ - '0') << 0;
				p++;
			} else {		/* even digit: start a fresh octet */
				*p = 0x00;
				*p |= (*called++ - '0') << 4;
			}
		} else {
			if (i % 2 != 0) {
				*p |= (*calling++ - '0') << 0;
				p++;
			} else {
				*p = 0x00;
				*p |= (*calling++ - '0') << 4;
			}
		}
	}

	return 1 + (called_len + calling_len + 1) / 2;
}
/*
 *	Remove a socket from the bound-socket list.  Takes the write side
 *	of x25_list_lock with BHs disabled, so removal is safe against the
 *	(softirq) receive path.
 */
static void x25_remove_socket(struct sock *sk)
{
	write_lock_bh(&x25_list_lock);
	sk_del_node_init(sk);
	write_unlock_bh(&x25_list_lock);
}
/*
 *	Kill all bound sockets on a dropped device: every socket whose
 *	neighbour sits on @dev is disconnected with ENETUNREACH.  The walk
 *	holds the write side of x25_list_lock, so x25_disconnect() runs
 *	with the list locked.
 */
static void x25_kill_by_device(struct net_device *dev)
{
	struct sock *s;
	struct hlist_node *node;

	write_lock_bh(&x25_list_lock);

	sk_for_each(s, node, &x25_list)
		if (x25_sk(s)->neighbour && x25_sk(s)->neighbour->dev == dev)
			x25_disconnect(s, ENETUNREACH, 0, 0);

	write_unlock_bh(&x25_list_lock);
}
/*
 *	Handle device status changes.
 *
 *	Netdevice notifier callback: brings the X.25 link layer up on
 *	NETDEV_UP, terminates the neighbour on NETDEV_GOING_DOWN, and
 *	tears down sockets, routes and the link on NETDEV_DOWN.  Only
 *	X.25 devices (plus Ethernet when LLC is built) are of interest.
 */
static int x25_device_event(struct notifier_block *this, unsigned long event,
			    void *ptr)
{
	struct net_device *dev = ptr;
	struct x25_neigh *nb;

	if (dev->type == ARPHRD_X25
#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
	 || dev->type == ARPHRD_ETHER
#endif
	 ) {
		switch (event) {
		case NETDEV_UP:
			x25_link_device_up(dev);
			break;
		case NETDEV_GOING_DOWN:
			nb = x25_get_neigh(dev);
			if (nb) {
				x25_terminate_link(nb);
				x25_neigh_put(nb);	/* drop ref from x25_get_neigh */
			}
			break;
		case NETDEV_DOWN:
			x25_kill_by_device(dev);
			x25_route_device_down(dev);
			x25_link_device_down(dev);
			break;
		}
	}

	return NOTIFY_DONE;
}
/*
 *	Add a socket to the bound sockets list, under the write side of
 *	x25_list_lock (BH-safe against the receive path).
 */
static void x25_insert_socket(struct sock *sk)
{
	write_lock_bh(&x25_list_lock);
	sk_add_node(sk, &x25_list);
	write_unlock_bh(&x25_list_lock);
}
/*
 *	Find a socket that wants to accept the Call Request we just
 *	received.  Check the full list for an address/cud match.
 *	If no cuds match return the next_best thing, an address match.
 *	Note: if a listening socket has cud set it must only get calls
 *	with matching cud.
 *
 *	Returns the chosen listener with a reference held, or NULL.
 */
static struct sock *x25_find_listener(struct x25_address *addr,
				      struct sk_buff *skb)
{
	struct sock *s;
	struct sock *next_best;
	struct hlist_node *node;

	read_lock_bh(&x25_list_lock);
	next_best = NULL;

	sk_for_each(s, node, &x25_list)
		if ((!strcmp(addr->x25_addr,
			     x25_sk(s)->source_addr.x25_addr) ||
		     !strcmp(addr->x25_addr,
			     null_x25_address.x25_addr)) &&
		     s->sk_state == TCP_LISTEN) {
			/*
			 * Found a listening socket, now check the incoming
			 * call user data vs this socket's call user data
			 */
			if (skb->len > 0 && x25_sk(s)->cudmatchlength > 0) {
				if ((memcmp(x25_sk(s)->calluserdata.cuddata,
					    skb->data,
					    x25_sk(s)->cudmatchlength)) == 0) {
					sock_hold(s);
					goto found;
				}
			} else
				next_best = s;	/* address-only match; keep looking for a cud match */
		}
	if (next_best) {
		s = next_best;
		sock_hold(s);
		goto found;
	}
	s = NULL;
found:
	read_unlock_bh(&x25_list_lock);
	return s;
}
/*
 *	Find a connected X.25 socket given my LCI and neighbour.
 *	Caller must hold x25_list_lock; the returned socket (if any)
 *	carries an extra reference the caller must drop with sock_put().
 */
static struct sock *__x25_find_socket(unsigned int lci, struct x25_neigh *nb)
{
	struct sock *s;
	struct hlist_node *node;

	sk_for_each(s, node, &x25_list)
		if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb) {
			sock_hold(s);
			goto found;
		}
	s = NULL;
found:
	return s;
}
/*
 *	Locked wrapper around __x25_find_socket(): returns the matching
 *	socket with a reference held, or NULL.
 */
struct sock *x25_find_socket(unsigned int lci, struct x25_neigh *nb)
{
	struct sock *s;

	read_lock_bh(&x25_list_lock);
	s = __x25_find_socket(lci, nb);
	read_unlock_bh(&x25_list_lock);
	return s;
}
/*
 *	Find a unique LCI for a given device.  LCIs are probed from 1
 *	upwards; returns 0 when all 4095 channels on the neighbour are
 *	already in use.
 */
static unsigned int x25_new_lci(struct x25_neigh *nb)
{
	unsigned int lci = 1;
	struct sock *sk;

	read_lock_bh(&x25_list_lock);

	while ((sk = __x25_find_socket(lci, nb)) != NULL) {
		sock_put(sk);		/* drop the hold taken by the lookup */
		if (++lci == 4096) {
			lci = 0;	/* exhausted: 0 signals "no free LCI" */
			break;
		}
	}

	read_unlock_bh(&x25_list_lock);
	return lci;
}
/*
 *	Deferred destroy.
 */
void x25_destroy_socket(struct sock *);

/*
 *	Timer handler for deferred kills: @data is the struct sock that
 *	still had outstanding buffers when x25_destroy_socket() first ran.
 */
static void x25_destroy_timer(unsigned long data)
{
	x25_destroy_socket((struct sock *)data);
}
/*
 *	This is called from user mode and the timers. Thus it protects itself
 *	against interrupt users but doesn't worry about being called during
 *	work. Once it is removed from the queue no interrupt or bottom half
 *	will touch it and we are (fairly 8-) ) safe.
 *	Not static as it's used by the timer.
 *
 *	Flushes all queues; any pending (unaccepted) connection sockets on
 *	the receive queue are marked dead and queued for their own destroy.
 *	If write/read buffers are still outstanding, destruction is retried
 *	via sk_timer in 10 seconds; otherwise the last reference is dropped.
 */
void x25_destroy_socket(struct sock *sk)
{
	struct sk_buff *skb;

	sock_hold(sk);
	lock_sock(sk);
	x25_stop_heartbeat(sk);
	x25_stop_timer(sk);

	x25_remove_socket(sk);
	x25_clear_queues(sk);		/* Flush the queues */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (skb->sk != sk) {	/* A pending connection */
			/*
			 * Queue the unaccepted socket for death
			 */
			sock_set_flag(skb->sk, SOCK_DEAD);
			x25_start_heartbeat(skb->sk);
			x25_sk(skb->sk)->state = X25_STATE_0;
		}

		kfree_skb(skb);
	}

	if (atomic_read(&sk->sk_wmem_alloc) ||
	    atomic_read(&sk->sk_rmem_alloc)) {
		/* Defer: outstanding buffers */
		sk->sk_timer.expires = jiffies + 10 * HZ;
		sk->sk_timer.function = x25_destroy_timer;
		sk->sk_timer.data = (unsigned long)sk;
		add_timer(&sk->sk_timer);
	} else {
		/* drop last reference so sock_put will free */
		__sock_put(sk);
	}

	release_sock(sk);
	sock_put(sk);	/* balances the sock_hold() above */
}
  332. /*
  333. * Handling for system calls applied via the various interfaces to a
  334. * X.25 socket object.
  335. */
  336. static int x25_setsockopt(struct socket *sock, int level, int optname,
  337. char __user *optval, int optlen)
  338. {
  339. int opt;
  340. struct sock *sk = sock->sk;
  341. int rc = -ENOPROTOOPT;
  342. if (level != SOL_X25 || optname != X25_QBITINCL)
  343. goto out;
  344. rc = -EINVAL;
  345. if (optlen < sizeof(int))
  346. goto out;
  347. rc = -EFAULT;
  348. if (get_user(opt, (int __user *)optval))
  349. goto out;
  350. x25_sk(sk)->qbitincl = !!opt;
  351. rc = 0;
  352. out:
  353. return rc;
  354. }
  355. static int x25_getsockopt(struct socket *sock, int level, int optname,
  356. char __user *optval, int __user *optlen)
  357. {
  358. struct sock *sk = sock->sk;
  359. int val, len, rc = -ENOPROTOOPT;
  360. if (level != SOL_X25 || optname != X25_QBITINCL)
  361. goto out;
  362. rc = -EFAULT;
  363. if (get_user(len, optlen))
  364. goto out;
  365. len = min_t(unsigned int, len, sizeof(int));
  366. rc = -EINVAL;
  367. if (len < 0)
  368. goto out;
  369. rc = -EFAULT;
  370. if (put_user(len, optlen))
  371. goto out;
  372. val = x25_sk(sk)->qbitincl;
  373. rc = copy_to_user(optval, &val, len) ? -EFAULT : 0;
  374. out:
  375. return rc;
  376. }
  377. static int x25_listen(struct socket *sock, int backlog)
  378. {
  379. struct sock *sk = sock->sk;
  380. int rc = -EOPNOTSUPP;
  381. if (sk->sk_state != TCP_LISTEN) {
  382. memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN);
  383. sk->sk_max_ack_backlog = backlog;
  384. sk->sk_state = TCP_LISTEN;
  385. rc = 0;
  386. }
  387. return rc;
  388. }
/* Protocol definition: obj_size makes sk_alloc() embed a struct x25_sock. */
static struct proto x25_proto = {
	.name	  = "X25",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct x25_sock),
};
/*
 *	Allocate a new X.25 socket and initialise its private queues.
 *	sock_init_data() is called here with a NULL socket; x25_create()
 *	runs it again to attach the real struct socket.
 *	Returns NULL on allocation failure.
 */
static struct sock *x25_alloc_socket(void)
{
	struct x25_sock *x25;
	struct sock *sk = sk_alloc(AF_X25, GFP_ATOMIC, &x25_proto, 1);

	if (!sk)
		goto out;

	sock_init_data(NULL, sk);

	x25 = x25_sk(sk);
	skb_queue_head_init(&x25->ack_queue);
	skb_queue_head_init(&x25->fragment_queue);
	skb_queue_head_init(&x25->interrupt_in_queue);
	skb_queue_head_init(&x25->interrupt_out_queue);
out:
	return sk;
}
void x25_init_timers(struct sock *sk);

/*
 *	Create an X.25 socket.  Only SOCK_SEQPACKET with protocol 0 is
 *	supported (-ESOCKTNOSUPPORT otherwise).  Sets up timers, sysctl
 *	timeout defaults, and default facilities.
 */
static int x25_create(struct socket *sock, int protocol)
{
	struct sock *sk;
	struct x25_sock *x25;
	int rc = -ESOCKTNOSUPPORT;

	if (sock->type != SOCK_SEQPACKET || protocol)
		goto out;

	rc = -ENOMEM;
	if ((sk = x25_alloc_socket()) == NULL)
		goto out;

	x25 = x25_sk(sk);

	sock_init_data(sock, sk);	/* re-run to attach the real socket */

	x25_init_timers(sk);

	sock->ops = &x25_proto_ops;
	sk->sk_protocol = protocol;
	sk->sk_backlog_rcv = x25_backlog_rcv;

	x25->t21 = sysctl_x25_call_request_timeout;
	x25->t22 = sysctl_x25_reset_request_timeout;
	x25->t23 = sysctl_x25_clear_request_timeout;
	x25->t2 = sysctl_x25_ack_holdback_timeout;
	x25->state = X25_STATE_0;
	x25->cudmatchlength = 0;
	x25->accptapprv = X25_DENY_ACCPT_APPRV;	/* normally no cud */
						/* on call accept */

	x25->facilities.winsize_in = X25_DEFAULT_WINDOW_SIZE;
	x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE;
	x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE;
	x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE;
	x25->facilities.throughput = X25_DEFAULT_THROUGHPUT;
	x25->facilities.reverse = X25_DEFAULT_REVERSE;
	rc = 0;
out:
	return rc;
}
/*
 *	Clone a listening socket for an incoming call: copies type, buffer
 *	sizes, timers, negotiated facilities and flags from @osk into a
 *	fresh socket already in TCP_ESTABLISHED state.
 *	Returns the new socket, or NULL on failure.
 */
static struct sock *x25_make_new(struct sock *osk)
{
	struct sock *sk = NULL;
	struct x25_sock *x25, *ox25;

	if (osk->sk_type != SOCK_SEQPACKET)
		goto out;

	if ((sk = x25_alloc_socket()) == NULL)
		goto out;

	x25 = x25_sk(sk);

	sk->sk_type        = osk->sk_type;
	sk->sk_socket      = osk->sk_socket;
	sk->sk_priority    = osk->sk_priority;
	sk->sk_protocol    = osk->sk_protocol;
	sk->sk_rcvbuf      = osk->sk_rcvbuf;
	sk->sk_sndbuf      = osk->sk_sndbuf;
	sk->sk_state       = TCP_ESTABLISHED;
	sk->sk_sleep       = osk->sk_sleep;
	sk->sk_backlog_rcv = osk->sk_backlog_rcv;
	sock_copy_flags(sk, osk);

	ox25 = x25_sk(osk);
	x25->t21            = ox25->t21;
	x25->t22            = ox25->t22;
	x25->t23            = ox25->t23;
	x25->t2             = ox25->t2;
	x25->facilities     = ox25->facilities;
	x25->qbitincl       = ox25->qbitincl;
	x25->cudmatchlength = ox25->cudmatchlength;
	x25->accptapprv     = ox25->accptapprv;

	x25_init_timers(sk);
out:
	return sk;
}
/*
 *	Close an X.25 socket.  Idle or already-clearing sockets are
 *	destroyed immediately; connected ones send a Clear Request and
 *	linger in X25_STATE_2 (marked SOCK_DEAD/SOCK_DESTROY) until T23
 *	expiry completes the teardown.
 */
static int x25_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct x25_sock *x25;

	if (!sk)
		goto out;

	x25 = x25_sk(sk);

	switch (x25->state) {
		case X25_STATE_0:
		case X25_STATE_2:
			x25_disconnect(sk, 0, 0, 0);
			x25_destroy_socket(sk);
			goto out;

		case X25_STATE_1:
		case X25_STATE_3:
		case X25_STATE_4:
			x25_clear_queues(sk);
			x25_write_internal(sk, X25_CLEAR_REQUEST);
			x25_start_t23timer(sk);
			x25->state = X25_STATE_2;
			sk->sk_state = TCP_CLOSE;
			sk->sk_shutdown |= SEND_SHUTDOWN;
			sk->sk_state_change(sk);
			sock_set_flag(sk, SOCK_DEAD);
			sock_set_flag(sk, SOCK_DESTROY);
			break;
	}

	sock->sk = NULL;
	sk->sk_socket = NULL;	/* Not used, but we should do this */
out:
	return 0;
}
/*
 *	Bind the socket to a local X.25 address.  Only unbound sockets
 *	(SOCK_ZAPPED set) with a well-formed sockaddr_x25 are accepted;
 *	the socket is then inserted into the global list.
 *
 *	NOTE(review): the address string itself is not validated here —
 *	no length or digit check on sx25_addr — so an unterminated or
 *	non-numeric address is accepted as-is.  Confirm downstream users
 *	(x25_addr_aton, listener matching) tolerate this.
 */
static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;

	if (!sock_flag(sk, SOCK_ZAPPED) ||
	    addr_len != sizeof(struct sockaddr_x25) ||
	    addr->sx25_family != AF_X25)
		return -EINVAL;

	x25_sk(sk)->source_addr = addr->sx25_addr;
	x25_insert_socket(sk);
	sock_reset_flag(sk, SOCK_ZAPPED);
	SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
	return 0;
}
/*
 *	Sleep until the socket reaches TCP_ESTABLISHED, a signal arrives
 *	(-ERESTARTSYS) or a socket error is reported (error value, and the
 *	socket is flipped back to SS_UNCONNECTED).  Called with the socket
 *	locked; the lock is released while sleeping.
 */
static int x25_wait_for_connection_establishment(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int rc;

	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		rc = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		rc = sock_error(sk);
		if (rc) {
			sk->sk_socket->state = SS_UNCONNECTED;
			break;
		}
		rc = 0;
		if (sk->sk_state != TCP_ESTABLISHED) {
			release_sock(sk);
			schedule();
			lock_sock(sk);
		} else
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	return rc;
}
  549. static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
  550. int addr_len, int flags)
  551. {
  552. struct sock *sk = sock->sk;
  553. struct x25_sock *x25 = x25_sk(sk);
  554. struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
  555. struct x25_route *rt;
  556. int rc = 0;
  557. lock_sock(sk);
  558. if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
  559. sock->state = SS_CONNECTED;
  560. goto out; /* Connect completed during a ERESTARTSYS event */
  561. }
  562. rc = -ECONNREFUSED;
  563. if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
  564. sock->state = SS_UNCONNECTED;
  565. goto out;
  566. }
  567. rc = -EISCONN; /* No reconnect on a seqpacket socket */
  568. if (sk->sk_state == TCP_ESTABLISHED)
  569. goto out;
  570. sk->sk_state = TCP_CLOSE;
  571. sock->state = SS_UNCONNECTED;
  572. rc = -EINVAL;
  573. if (addr_len != sizeof(struct sockaddr_x25) ||
  574. addr->sx25_family != AF_X25)
  575. goto out;
  576. rc = -ENETUNREACH;
  577. rt = x25_get_route(&addr->sx25_addr);
  578. if (!rt)
  579. goto out;
  580. x25->neighbour = x25_get_neigh(rt->dev);
  581. if (!x25->neighbour)
  582. goto out_put_route;
  583. x25_limit_facilities(&x25->facilities, x25->neighbour);
  584. x25->lci = x25_new_lci(x25->neighbour);
  585. if (!x25->lci)
  586. goto out_put_neigh;
  587. rc = -EINVAL;
  588. if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
  589. goto out_put_neigh;
  590. if (!strcmp(x25->source_addr.x25_addr, null_x25_address.x25_addr))
  591. memset(&x25->source_addr, '\0', X25_ADDR_LEN);
  592. x25->dest_addr = addr->sx25_addr;
  593. /* Move to connecting socket, start sending Connect Requests */
  594. sock->state = SS_CONNECTING;
  595. sk->sk_state = TCP_SYN_SENT;
  596. x25->state = X25_STATE_1;
  597. x25_write_internal(sk, X25_CALL_REQUEST);
  598. x25_start_heartbeat(sk);
  599. x25_start_t21timer(sk);
  600. /* Now the loop */
  601. rc = -EINPROGRESS;
  602. if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
  603. goto out_put_neigh;
  604. rc = x25_wait_for_connection_establishment(sk);
  605. if (rc)
  606. goto out_put_neigh;
  607. sock->state = SS_CONNECTED;
  608. rc = 0;
  609. out_put_neigh:
  610. if (rc)
  611. x25_neigh_put(x25->neighbour);
  612. out_put_route:
  613. x25_route_put(rt);
  614. out:
  615. release_sock(sk);
  616. return rc;
  617. }
/*
 *	Wait up to @timeout jiffies for data on the receive queue.
 *	Returns 0 when data arrives OR when RCV_SHUTDOWN is set (note:
 *	in the latter case the queue may still be empty), -EAGAIN on
 *	timeout, -ERESTARTSYS on signal.  Called with the socket locked;
 *	the lock is dropped while sleeping.
 */
static int x25_wait_for_data(struct sock *sk, int timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int rc = 0;

	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			break;
		rc = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		rc = -EAGAIN;
		if (!timeout)
			break;
		rc = 0;
		if (skb_queue_empty(&sk->sk_receive_queue)) {
			release_sock(sk);
			timeout = schedule_timeout(timeout);
			lock_sock(sk);
		} else
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	return rc;
}
  645. static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
  646. {
  647. struct sock *sk = sock->sk;
  648. struct sock *newsk;
  649. struct sk_buff *skb;
  650. int rc = -EINVAL;
  651. if (!sk || sk->sk_state != TCP_LISTEN)
  652. goto out;
  653. rc = -EOPNOTSUPP;
  654. if (sk->sk_type != SOCK_SEQPACKET)
  655. goto out;
  656. lock_sock(sk);
  657. rc = x25_wait_for_data(sk, sk->sk_rcvtimeo);
  658. if (rc)
  659. goto out2;
  660. skb = skb_dequeue(&sk->sk_receive_queue);
  661. rc = -EINVAL;
  662. if (!skb->sk)
  663. goto out2;
  664. newsk = skb->sk;
  665. newsk->sk_socket = newsock;
  666. newsk->sk_sleep = &newsock->wait;
  667. /* Now attach up the new socket */
  668. skb->sk = NULL;
  669. kfree_skb(skb);
  670. sk->sk_ack_backlog--;
  671. newsock->sk = newsk;
  672. newsock->state = SS_CONNECTED;
  673. rc = 0;
  674. out2:
  675. release_sock(sk);
  676. out:
  677. return rc;
  678. }
  679. static int x25_getname(struct socket *sock, struct sockaddr *uaddr,
  680. int *uaddr_len, int peer)
  681. {
  682. struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)uaddr;
  683. struct sock *sk = sock->sk;
  684. struct x25_sock *x25 = x25_sk(sk);
  685. if (peer) {
  686. if (sk->sk_state != TCP_ESTABLISHED)
  687. return -ENOTCONN;
  688. sx25->sx25_addr = x25->dest_addr;
  689. } else
  690. sx25->sx25_addr = x25->source_addr;
  691. sx25->sx25_family = AF_X25;
  692. *uaddr_len = sizeof(*sx25);
  693. return 0;
  694. }
  695. int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
  696. unsigned int lci)
  697. {
  698. struct sock *sk;
  699. struct sock *make;
  700. struct x25_sock *makex25;
  701. struct x25_address source_addr, dest_addr;
  702. struct x25_facilities facilities;
  703. int len, rc;
  704. /*
  705. * Remove the LCI and frame type.
  706. */
  707. skb_pull(skb, X25_STD_MIN_LEN);
  708. /*
  709. * Extract the X.25 addresses and convert them to ASCII strings,
  710. * and remove them.
  711. */
  712. skb_pull(skb, x25_addr_ntoa(skb->data, &source_addr, &dest_addr));
  713. /*
  714. * Get the length of the facilities, skip past them for the moment
  715. * get the call user data because this is needed to determine
  716. * the correct listener
  717. */
  718. len = skb->data[0] + 1;
  719. skb_pull(skb,len);
  720. /*
  721. * Find a listener for the particular address/cud pair.
  722. */
  723. sk = x25_find_listener(&source_addr,skb);
  724. skb_push(skb,len);
  725. /*
  726. * We can't accept the Call Request.
  727. */
  728. if (sk == NULL || sk_acceptq_is_full(sk))
  729. goto out_clear_request;
  730. /*
  731. * Try to reach a compromise on the requested facilities.
  732. */
  733. if ((len = x25_negotiate_facilities(skb, sk, &facilities)) == -1)
  734. goto out_sock_put;
  735. /*
  736. * current neighbour/link might impose additional limits
  737. * on certain facilties
  738. */
  739. x25_limit_facilities(&facilities, nb);
  740. /*
  741. * Try to create a new socket.
  742. */
  743. make = x25_make_new(sk);
  744. if (!make)
  745. goto out_sock_put;
  746. /*
  747. * Remove the facilities
  748. */
  749. skb_pull(skb, len);
  750. skb->sk = make;
  751. make->sk_state = TCP_ESTABLISHED;
  752. makex25 = x25_sk(make);
  753. makex25->lci = lci;
  754. makex25->dest_addr = dest_addr;
  755. makex25->source_addr = source_addr;
  756. makex25->neighbour = nb;
  757. makex25->facilities = facilities;
  758. makex25->vc_facil_mask = x25_sk(sk)->vc_facil_mask;
  759. /* ensure no reverse facil on accept */
  760. makex25->vc_facil_mask &= ~X25_MASK_REVERSE;
  761. makex25->cudmatchlength = x25_sk(sk)->cudmatchlength;
  762. /* Normally all calls are accepted immediatly */
  763. if(makex25->accptapprv & X25_DENY_ACCPT_APPRV) {
  764. x25_write_internal(make, X25_CALL_ACCEPTED);
  765. makex25->state = X25_STATE_3;
  766. }
  767. /*
  768. * Incoming Call User Data.
  769. */
  770. if (skb->len >= 0) {
  771. memcpy(makex25->calluserdata.cuddata, skb->data, skb->len);
  772. makex25->calluserdata.cudlength = skb->len;
  773. }
  774. sk->sk_ack_backlog++;
  775. x25_insert_socket(make);
  776. skb_queue_head(&sk->sk_receive_queue, skb);
  777. x25_start_heartbeat(make);
  778. if (!sock_flag(sk, SOCK_DEAD))
  779. sk->sk_data_ready(sk, skb->len);
  780. rc = 1;
  781. sock_put(sk);
  782. out:
  783. return rc;
  784. out_sock_put:
  785. sock_put(sk);
  786. out_clear_request:
  787. rc = 0;
  788. x25_transmit_clear_request(nb, lci, 0x01);
  789. goto out;
  790. }
  791. static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
  792. struct msghdr *msg, size_t len)
  793. {
  794. struct sock *sk = sock->sk;
  795. struct x25_sock *x25 = x25_sk(sk);
  796. struct sockaddr_x25 *usx25 = (struct sockaddr_x25 *)msg->msg_name;
  797. struct sockaddr_x25 sx25;
  798. struct sk_buff *skb;
  799. unsigned char *asmptr;
  800. int noblock = msg->msg_flags & MSG_DONTWAIT;
  801. size_t size;
  802. int qbit = 0, rc = -EINVAL;
  803. if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_OOB|MSG_EOR|MSG_CMSG_COMPAT))
  804. goto out;
  805. /* we currently don't support segmented records at the user interface */
  806. if (!(msg->msg_flags & (MSG_EOR|MSG_OOB)))
  807. goto out;
  808. rc = -EADDRNOTAVAIL;
  809. if (sock_flag(sk, SOCK_ZAPPED))
  810. goto out;
  811. rc = -EPIPE;
  812. if (sk->sk_shutdown & SEND_SHUTDOWN) {
  813. send_sig(SIGPIPE, current, 0);
  814. goto out;
  815. }
  816. rc = -ENETUNREACH;
  817. if (!x25->neighbour)
  818. goto out;
  819. if (usx25) {
  820. rc = -EINVAL;
  821. if (msg->msg_namelen < sizeof(sx25))
  822. goto out;
  823. memcpy(&sx25, usx25, sizeof(sx25));
  824. rc = -EISCONN;
  825. if (strcmp(x25->dest_addr.x25_addr, sx25.sx25_addr.x25_addr))
  826. goto out;
  827. rc = -EINVAL;
  828. if (sx25.sx25_family != AF_X25)
  829. goto out;
  830. } else {
  831. /*
  832. * FIXME 1003.1g - if the socket is like this because
  833. * it has become closed (not started closed) we ought
  834. * to SIGPIPE, EPIPE;
  835. */
  836. rc = -ENOTCONN;
  837. if (sk->sk_state != TCP_ESTABLISHED)
  838. goto out;
  839. sx25.sx25_family = AF_X25;
  840. sx25.sx25_addr = x25->dest_addr;
  841. }
  842. SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n");
  843. /* Build a packet */
  844. SOCK_DEBUG(sk, "x25_sendmsg: sendto: building packet.\n");
  845. if ((msg->msg_flags & MSG_OOB) && len > 32)
  846. len = 32;
  847. size = len + X25_MAX_L2_LEN + X25_EXT_MIN_LEN;
  848. skb = sock_alloc_send_skb(sk, size, noblock, &rc);
  849. if (!skb)
  850. goto out;
  851. X25_SKB_CB(skb)->flags = msg->msg_flags;
  852. skb_reserve(skb, X25_MAX_L2_LEN + X25_EXT_MIN_LEN);
  853. /*
  854. * Put the data on the end
  855. */
  856. SOCK_DEBUG(sk, "x25_sendmsg: Copying user data\n");
  857. asmptr = skb->h.raw = skb_put(skb, len);
  858. rc = memcpy_fromiovec(asmptr, msg->msg_iov, len);
  859. if (rc)
  860. goto out_kfree_skb;
  861. /*
  862. * If the Q BIT Include socket option is in force, the first
  863. * byte of the user data is the logical value of the Q Bit.
  864. */
  865. if (x25->qbitincl) {
  866. qbit = skb->data[0];
  867. skb_pull(skb, 1);
  868. }
  869. /*
  870. * Push down the X.25 header
  871. */
  872. SOCK_DEBUG(sk, "x25_sendmsg: Building X.25 Header.\n");
  873. if (msg->msg_flags & MSG_OOB) {
  874. if (x25->neighbour->extended) {
  875. asmptr = skb_push(skb, X25_STD_MIN_LEN);
  876. *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ;
  877. *asmptr++ = (x25->lci >> 0) & 0xFF;
  878. *asmptr++ = X25_INTERRUPT;
  879. } else {
  880. asmptr = skb_push(skb, X25_STD_MIN_LEN);
  881. *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ;
  882. *asmptr++ = (x25->lci >> 0) & 0xFF;
  883. *asmptr++ = X25_INTERRUPT;
  884. }
  885. } else {
  886. if (x25->neighbour->extended) {
  887. /* Build an Extended X.25 header */
  888. asmptr = skb_push(skb, X25_EXT_MIN_LEN);
  889. *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ;
  890. *asmptr++ = (x25->lci >> 0) & 0xFF;
  891. *asmptr++ = X25_DATA;
  892. *asmptr++ = X25_DATA;
  893. } else {
  894. /* Build an Standard X.25 header */
  895. asmptr = skb_push(skb, X25_STD_MIN_LEN);
  896. *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ;
  897. *asmptr++ = (x25->lci >> 0) & 0xFF;
  898. *asmptr++ = X25_DATA;
  899. }
  900. if (qbit)
  901. skb->data[0] |= X25_Q_BIT;
  902. }
  903. SOCK_DEBUG(sk, "x25_sendmsg: Built header.\n");
  904. SOCK_DEBUG(sk, "x25_sendmsg: Transmitting buffer\n");
  905. rc = -ENOTCONN;
  906. if (sk->sk_state != TCP_ESTABLISHED)
  907. goto out_kfree_skb;
  908. if (msg->msg_flags & MSG_OOB)
  909. skb_queue_tail(&x25->interrupt_out_queue, skb);
  910. else {
  911. len = x25_output(sk, skb);
  912. if (len < 0)
  913. kfree_skb(skb);
  914. else if (x25->qbitincl)
  915. len++;
  916. }
  917. /*
  918. * lock_sock() is currently only used to serialize this x25_kick()
  919. * against input-driven x25_kick() calls. It currently only blocks
  920. * incoming packets for this socket and does not protect against
  921. * any other socket state changes and is not called from anywhere
  922. * else. As x25_kick() cannot block and as long as all socket
  923. * operations are BKL-wrapped, we don't need take to care about
  924. * purging the backlog queue in x25_release().
  925. *
  926. * Using lock_sock() to protect all socket operations entirely
  927. * (and making the whole x25 stack SMP aware) unfortunately would
  928. * require major changes to {send,recv}msg and skb allocation methods.
  929. * -> 2.5 ;)
  930. */
  931. lock_sock(sk);
  932. x25_kick(sk);
  933. release_sock(sk);
  934. rc = len;
  935. out:
  936. return rc;
  937. out_kfree_skb:
  938. kfree_skb(skb);
  939. goto out;
  940. }
/*
 * x25_recvmsg - deliver one queued record to userspace.
 *
 * MSG_OOB dequeues an interrupt packet from interrupt_in_queue; otherwise
 * a normal data record is taken via skb_recv_datagram().  The X.25 header
 * is stripped, and when the Q-bit-include option is set a leading byte
 * carrying the Q-bit value is pushed back for the application to read.
 *
 * Returns the number of bytes copied or a negative errno.
 */
static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct x25_sock *x25 = x25_sk(sk);
	struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name;
	size_t copied;
	int qbit;
	struct sk_buff *skb;
	unsigned char *asmptr;
	int rc = -ENOTCONN;

	/*
	 * This works for seqpacket too. The receiver has ordered the queue for
	 * us! We do one quick check first though
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	if (flags & MSG_OOB) {
		/* Interrupt data: refuse if urgent data is delivered inline
		 * or no interrupt packet is pending. */
		rc = -EINVAL;
		if (sock_flag(sk, SOCK_URGINLINE) ||
		    !skb_peek(&x25->interrupt_in_queue))
			goto out;

		skb = skb_dequeue(&x25->interrupt_in_queue);
		skb_pull(skb, X25_STD_MIN_LEN);

		/*
		 * No Q bit information on Interrupt data.
		 */
		if (x25->qbitincl) {
			asmptr  = skb_push(skb, 1);
			*asmptr = 0x00;
		}

		msg->msg_flags |= MSG_OOB;
	} else {
		/* Now we can treat all alike */
		skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
					flags & MSG_DONTWAIT, &rc);
		if (!skb)
			goto out;

		/* First header octet carries the Q bit in its top bit. */
		qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT;

		/* NOTE(review): x25->neighbour is dereferenced without a
		 * NULL check here; if the circuit can be torn down while a
		 * datagram is still queued this would oops — confirm against
		 * x25_disconnect()'s locking.  Also no check that skb->len
		 * covers a full header before the pull — verify undersized
		 * frames are rejected at a lower layer. */
		skb_pull(skb, x25->neighbour->extended ?
			      X25_EXT_MIN_LEN : X25_STD_MIN_LEN);

		if (x25->qbitincl) {
			asmptr  = skb_push(skb, 1);
			*asmptr = qbit;
		}
	}

	skb->h.raw = skb->data;

	copied = skb->len;

	/* Truncate silently if the user buffer is too small; flag it. */
	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	/* Currently, each datagram always contains a complete record */
	msg->msg_flags |= MSG_EOR;

	rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (rc)
		goto out_free_dgram;

	/* Report the (single) remote peer as the source address. */
	if (sx25) {
		sx25->sx25_family = AF_X25;
		sx25->sx25_addr   = x25->dest_addr;
	}

	msg->msg_namelen = sizeof(struct sockaddr_x25);

	/* Serialized against input-driven x25_kick(); see x25_sendmsg(). */
	lock_sock(sk);
	x25_check_rbuf(sk);
	release_sock(sk);
	rc = copied;
out_free_dgram:
	skb_free_datagram(sk, skb);
out:
	return rc;
}
  1013. static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
  1014. {
  1015. struct sock *sk = sock->sk;
  1016. struct x25_sock *x25 = x25_sk(sk);
  1017. void __user *argp = (void __user *)arg;
  1018. int rc;
  1019. switch (cmd) {
  1020. case TIOCOUTQ: {
  1021. int amount = sk->sk_sndbuf -
  1022. atomic_read(&sk->sk_wmem_alloc);
  1023. if (amount < 0)
  1024. amount = 0;
  1025. rc = put_user(amount, (unsigned int __user *)argp);
  1026. break;
  1027. }
  1028. case TIOCINQ: {
  1029. struct sk_buff *skb;
  1030. int amount = 0;
  1031. /*
  1032. * These two are safe on a single CPU system as
  1033. * only user tasks fiddle here
  1034. */
  1035. if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
  1036. amount = skb->len;
  1037. rc = put_user(amount, (unsigned int __user *)argp);
  1038. break;
  1039. }
  1040. case SIOCGSTAMP:
  1041. rc = -EINVAL;
  1042. if (sk)
  1043. rc = sock_get_timestamp(sk,
  1044. (struct timeval __user *)argp);
  1045. break;
  1046. case SIOCGIFADDR:
  1047. case SIOCSIFADDR:
  1048. case SIOCGIFDSTADDR:
  1049. case SIOCSIFDSTADDR:
  1050. case SIOCGIFBRDADDR:
  1051. case SIOCSIFBRDADDR:
  1052. case SIOCGIFNETMASK:
  1053. case SIOCSIFNETMASK:
  1054. case SIOCGIFMETRIC:
  1055. case SIOCSIFMETRIC:
  1056. rc = -EINVAL;
  1057. break;
  1058. case SIOCADDRT:
  1059. case SIOCDELRT:
  1060. rc = -EPERM;
  1061. if (!capable(CAP_NET_ADMIN))
  1062. break;
  1063. rc = x25_route_ioctl(cmd, argp);
  1064. break;
  1065. case SIOCX25GSUBSCRIP:
  1066. rc = x25_subscr_ioctl(cmd, argp);
  1067. break;
  1068. case SIOCX25SSUBSCRIP:
  1069. rc = -EPERM;
  1070. if (!capable(CAP_NET_ADMIN))
  1071. break;
  1072. rc = x25_subscr_ioctl(cmd, argp);
  1073. break;
  1074. case SIOCX25GFACILITIES: {
  1075. struct x25_facilities fac = x25->facilities;
  1076. rc = copy_to_user(argp, &fac,
  1077. sizeof(fac)) ? -EFAULT : 0;
  1078. break;
  1079. }
  1080. case SIOCX25SFACILITIES: {
  1081. struct x25_facilities facilities;
  1082. rc = -EFAULT;
  1083. if (copy_from_user(&facilities, argp,
  1084. sizeof(facilities)))
  1085. break;
  1086. rc = -EINVAL;
  1087. if (sk->sk_state != TCP_LISTEN &&
  1088. sk->sk_state != TCP_CLOSE)
  1089. break;
  1090. if (facilities.pacsize_in < X25_PS16 ||
  1091. facilities.pacsize_in > X25_PS4096)
  1092. break;
  1093. if (facilities.pacsize_out < X25_PS16 ||
  1094. facilities.pacsize_out > X25_PS4096)
  1095. break;
  1096. if (facilities.winsize_in < 1 ||
  1097. facilities.winsize_in > 127)
  1098. break;
  1099. if (facilities.throughput < 0x03 ||
  1100. facilities.throughput > 0xDD)
  1101. break;
  1102. if (facilities.reverse &&
  1103. (facilities.reverse | 0x81)!= 0x81)
  1104. break;
  1105. x25->facilities = facilities;
  1106. rc = 0;
  1107. break;
  1108. }
  1109. case SIOCX25GCALLUSERDATA: {
  1110. struct x25_calluserdata cud = x25->calluserdata;
  1111. rc = copy_to_user(argp, &cud,
  1112. sizeof(cud)) ? -EFAULT : 0;
  1113. break;
  1114. }
  1115. case SIOCX25SCALLUSERDATA: {
  1116. struct x25_calluserdata calluserdata;
  1117. rc = -EFAULT;
  1118. if (copy_from_user(&calluserdata, argp,
  1119. sizeof(calluserdata)))
  1120. break;
  1121. rc = -EINVAL;
  1122. if (calluserdata.cudlength > X25_MAX_CUD_LEN)
  1123. break;
  1124. x25->calluserdata = calluserdata;
  1125. rc = 0;
  1126. break;
  1127. }
  1128. case SIOCX25GCAUSEDIAG: {
  1129. struct x25_causediag causediag;
  1130. causediag = x25->causediag;
  1131. rc = copy_to_user(argp, &causediag,
  1132. sizeof(causediag)) ? -EFAULT : 0;
  1133. break;
  1134. }
  1135. case SIOCX25SCUDMATCHLEN: {
  1136. struct x25_subaddr sub_addr;
  1137. rc = -EINVAL;
  1138. if(sk->sk_state != TCP_CLOSE)
  1139. break;
  1140. rc = -EFAULT;
  1141. if (copy_from_user(&sub_addr, argp,
  1142. sizeof(sub_addr)))
  1143. break;
  1144. rc = -EINVAL;
  1145. if(sub_addr.cudmatchlength > X25_MAX_CUD_LEN)
  1146. break;
  1147. x25->cudmatchlength = sub_addr.cudmatchlength;
  1148. rc = 0;
  1149. break;
  1150. }
  1151. case SIOCX25CALLACCPTAPPRV: {
  1152. rc = -EINVAL;
  1153. if (sk->sk_state != TCP_CLOSE)
  1154. break;
  1155. x25->accptapprv = X25_ALLOW_ACCPT_APPRV;
  1156. rc = 0;
  1157. break;
  1158. }
  1159. case SIOCX25SENDCALLACCPT: {
  1160. rc = -EINVAL;
  1161. if (sk->sk_state != TCP_ESTABLISHED)
  1162. break;
  1163. if (x25->accptapprv) /* must call accptapprv above */
  1164. break;
  1165. x25_write_internal(sk, X25_CALL_ACCEPTED);
  1166. x25->state = X25_STATE_3;
  1167. rc = 0;
  1168. break;
  1169. }
  1170. default:
  1171. rc = -ENOIOCTLCMD;
  1172. break;
  1173. }
  1174. return rc;
  1175. }
/* Registration record mapping AF_X25 socket creation to x25_create(). */
static struct net_proto_family x25_family_ops = {
	.family =	AF_X25,
	.create =	x25_create,
	.owner	=	THIS_MODULE,
};
/*
 * Socket-layer operations for AF_X25 sockets.  Declared via the
 * SOCKOPS_WRAPPED name so SOCKOPS_WRAP below can generate BKL-holding
 * wrappers around each entry point.
 */
static const struct proto_ops SOCKOPS_WRAPPED(x25_proto_ops) = {
	.family =	AF_X25,
	.owner =	THIS_MODULE,
	.release =	x25_release,
	.bind =		x25_bind,
	.connect =	x25_connect,
	.socketpair =	sock_no_socketpair,	/* not supported */
	.accept =	x25_accept,
	.getname =	x25_getname,
	.poll =		datagram_poll,
	.ioctl =	x25_ioctl,
	.listen =	x25_listen,
	.shutdown =	sock_no_shutdown,	/* not supported */
	.setsockopt =	x25_setsockopt,
	.getsockopt =	x25_getsockopt,
	.sendmsg =	x25_sendmsg,
	.recvmsg =	x25_recvmsg,
	.mmap =		sock_no_mmap,		/* not supported */
	.sendpage =	sock_no_sendpage,	/* not supported */
};
#include <linux/smp_lock.h>

/* Generate BKL-wrapped versions of every x25_proto_ops entry point; the
 * X.25 stack is not SMP-safe and relies on this serialization (see the
 * long comment in x25_sendmsg()). */
SOCKOPS_WRAP(x25_proto, AF_X25);
/* Hook ETH_P_X25 frames from the device layer into the LAPB receiver. */
static struct packet_type x25_packet_type = {
	.type =	__constant_htons(ETH_P_X25),
	.func =	x25_lapb_receive_frame,
};
/* Track network-device up/down events so links can be torn down. */
static struct notifier_block x25_dev_notifier = {
	.notifier_call = x25_device_event,
};
  1210. void x25_kill_by_neigh(struct x25_neigh *nb)
  1211. {
  1212. struct sock *s;
  1213. struct hlist_node *node;
  1214. write_lock_bh(&x25_list_lock);
  1215. sk_for_each(s, node, &x25_list)
  1216. if (x25_sk(s)->neighbour == nb)
  1217. x25_disconnect(s, ENETUNREACH, 0, 0);
  1218. write_unlock_bh(&x25_list_lock);
  1219. }
  1220. static int __init x25_init(void)
  1221. {
  1222. int rc = proto_register(&x25_proto, 0);
  1223. if (rc != 0)
  1224. goto out;
  1225. sock_register(&x25_family_ops);
  1226. dev_add_pack(&x25_packet_type);
  1227. register_netdevice_notifier(&x25_dev_notifier);
  1228. printk(KERN_INFO "X.25 for Linux. Version 0.2 for Linux 2.1.15\n");
  1229. #ifdef CONFIG_SYSCTL
  1230. x25_register_sysctl();
  1231. #endif
  1232. x25_proc_init();
  1233. out:
  1234. return rc;
  1235. }
  1236. module_init(x25_init);
/*
 * x25_exit - module teardown.
 *
 * Unwinds x25_init() in reverse order: procfs first, then links and
 * routes, sysctl, the netdevice notifier, the packet hook, and finally
 * the socket-family and protocol registrations.  The order matters: the
 * packet hook and notifier are removed before sock/proto unregistration
 * so no new traffic or sockets arrive mid-teardown.
 */
static void __exit x25_exit(void)
{
	x25_proc_exit();
	x25_link_free();
	x25_route_free();

#ifdef CONFIG_SYSCTL
	x25_unregister_sysctl();
#endif

	unregister_netdevice_notifier(&x25_dev_notifier);

	dev_remove_pack(&x25_packet_type);

	sock_unregister(AF_X25);
	proto_unregister(&x25_proto);
}
module_exit(x25_exit);
/* Module metadata and PF_X25 alias for on-demand loading. */
MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The X.25 Packet Layer network layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_X25);