af_x25.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487
  1. /*
  2. * X.25 Packet Layer release 002
  3. *
  4. * This is ALPHA test software. This code may break your machine,
  5. * randomly fail to work with new releases, misbehave and/or generally
  6. * screw up. It might even work.
  7. *
  8. * This code REQUIRES 2.1.15 or higher
  9. *
  10. * This module:
  11. * This module is free software; you can redistribute it and/or
  12. * modify it under the terms of the GNU General Public License
  13. * as published by the Free Software Foundation; either version
  14. * 2 of the License, or (at your option) any later version.
  15. *
  16. * History
  17. * X.25 001 Jonathan Naylor Started coding.
  18. * X.25 002 Jonathan Naylor Centralised disconnect handling.
  19. * New timer architecture.
  20. * 2000-03-11 Henner Eisen MSG_EOR handling more POSIX compliant.
  21. * 2000-03-22 Daniela Squassoni Allowed disabling/enabling of
  22. * facilities negotiation and increased
  23. * the throughput upper limit.
  24. * 2000-08-27 Arnaldo C. Melo s/suser/capable/ + micro cleanups
  25. * 2000-09-04 Henner Eisen Set sock->state in x25_accept().
  26. * Fixed x25_output() related skb leakage.
  27. * 2000-10-02 Henner Eisen Made x25_kick() single threaded per socket.
  28. * 2000-10-27 Henner Eisen MSG_DONTWAIT for fragment allocation.
  29. * 2000-11-14 Henner Eisen Closing datalink from NETDEV_GOING_DOWN
  30. * 2002-10-06 Arnaldo C. Melo Get rid of cli/sti, move proc stuff to
  31. * x25_proc.c, using seq_file
  32. * 2005-04-02 Shaun Pereira Selective sub address matching
  33. * with call user data
  34. * 2005-04-15 Shaun Pereira Fast select with no restriction on
  35. * response
  36. */
  37. #include <linux/config.h>
  38. #include <linux/module.h>
  39. #include <linux/errno.h>
  40. #include <linux/kernel.h>
  41. #include <linux/sched.h>
  42. #include <linux/timer.h>
  43. #include <linux/string.h>
  44. #include <linux/net.h>
  45. #include <linux/netdevice.h>
  46. #include <linux/if_arp.h>
  47. #include <linux/skbuff.h>
  48. #include <net/sock.h>
  49. #include <net/tcp_states.h>
  50. #include <asm/uaccess.h>
  51. #include <linux/fcntl.h>
  52. #include <linux/termios.h> /* For TIOCINQ/OUTQ */
  53. #include <linux/notifier.h>
  54. #include <linux/init.h>
  55. #include <net/x25.h>
/* Protocol timer defaults (T20..T23 restart/call/reset/clear, T2 ack
 * holdback), tunable at runtime via sysctl. */
int sysctl_x25_restart_request_timeout = X25_DEFAULT_T20;
int sysctl_x25_call_request_timeout = X25_DEFAULT_T21;
int sysctl_x25_reset_request_timeout = X25_DEFAULT_T22;
int sysctl_x25_clear_request_timeout = X25_DEFAULT_T23;
int sysctl_x25_ack_holdback_timeout = X25_DEFAULT_T2;

/* Global list of all X.25 sockets, protected by x25_list_lock (taken
 * with _bh variants since the list is walked from softirq context). */
HLIST_HEAD(x25_list);
DEFINE_RWLOCK(x25_list_lock);

/* Tentative definition: referenced by x25_create() before the
 * initialiser, which appears later in this file. */
static struct proto_ops x25_proto_ops;

/* Wildcard address: a listener bound to this matches any called address. */
static struct x25_address null_x25_address = {" "};
/*
 *	Decode an on-the-wire BCD address block into two ASCII digit
 *	strings.  The first octet carries the lengths (called address in
 *	the low nibble, calling address in the high nibble); the digits
 *	follow packed two per octet, called address first, with the
 *	calling address starting in the very next nibble.
 *
 *	Returns the number of octets consumed from p.
 *
 *	NOTE(review): the two lengths are peer supplied (up to 15 each)
 *	and nothing here bounds the read against the buffer behind p;
 *	callers must guarantee the skb really contains that many octets -
 *	TODO confirm at every call site.
 */
int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr,
		  struct x25_address *calling_addr)
{
	int called_len, calling_len;
	char *called, *calling;
	int i;

	called_len  = (*p >> 0) & 0x0F;
	calling_len = (*p >> 4) & 0x0F;

	called  = called_addr->x25_addr;
	calling = calling_addr->x25_addr;
	p++;

	for (i = 0; i < (called_len + calling_len); i++) {
		if (i < called_len) {
			if (i % 2 != 0) {	/* odd digit index: low nibble, then next octet */
				*called++ = ((*p >> 0) & 0x0F) + '0';
				p++;
			} else {		/* even digit index: high nibble */
				*called++ = ((*p >> 4) & 0x0F) + '0';
			}
		} else {
			if (i % 2 != 0) {
				*calling++ = ((*p >> 0) & 0x0F) + '0';
				p++;
			} else {
				*calling++ = ((*p >> 4) & 0x0F) + '0';
			}
		}
	}

	/* Terminate both output strings. */
	*called = *calling = '\0';

	return 1 + (called_len + calling_len + 1) / 2;
}
  96. int x25_addr_aton(unsigned char *p, struct x25_address *called_addr,
  97. struct x25_address *calling_addr)
  98. {
  99. unsigned int called_len, calling_len;
  100. char *called, *calling;
  101. int i;
  102. called = called_addr->x25_addr;
  103. calling = calling_addr->x25_addr;
  104. called_len = strlen(called);
  105. calling_len = strlen(calling);
  106. *p++ = (calling_len << 4) | (called_len << 0);
  107. for (i = 0; i < (called_len + calling_len); i++) {
  108. if (i < called_len) {
  109. if (i % 2 != 0) {
  110. *p |= (*called++ - '0') << 0;
  111. p++;
  112. } else {
  113. *p = 0x00;
  114. *p |= (*called++ - '0') << 4;
  115. }
  116. } else {
  117. if (i % 2 != 0) {
  118. *p |= (*calling++ - '0') << 0;
  119. p++;
  120. } else {
  121. *p = 0x00;
  122. *p |= (*calling++ - '0') << 4;
  123. }
  124. }
  125. }
  126. return 1 + (called_len + calling_len + 1) / 2;
  127. }
/*
 *	Socket removal during an interrupt is now safe.
 */
static void x25_remove_socket(struct sock *sk)
{
	/* Unlink sk from the global x25_list; _bh lock because the list
	 * is also walked from softirq (packet receive) context. */
	write_lock_bh(&x25_list_lock);
	sk_del_node_init(sk);
	write_unlock_bh(&x25_list_lock);
}
/*
 *	Kill all bound sockets on a dropped device.
 */
static void x25_kill_by_device(struct net_device *dev)
{
	struct sock *s;
	struct hlist_node *node;

	write_lock_bh(&x25_list_lock);

	/* NOTE(review): x25_disconnect() runs with x25_list_lock held
	 * here - verify it never takes the list lock itself. */
	sk_for_each(s, node, &x25_list)
		if (x25_sk(s)->neighbour && x25_sk(s)->neighbour->dev == dev)
			x25_disconnect(s, ENETUNREACH, 0, 0);

	write_unlock_bh(&x25_list_lock);
}
  150. /*
  151. * Handle device status changes.
  152. */
  153. static int x25_device_event(struct notifier_block *this, unsigned long event,
  154. void *ptr)
  155. {
  156. struct net_device *dev = ptr;
  157. struct x25_neigh *nb;
  158. if (dev->type == ARPHRD_X25
  159. #if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
  160. || dev->type == ARPHRD_ETHER
  161. #endif
  162. ) {
  163. switch (event) {
  164. case NETDEV_UP:
  165. x25_link_device_up(dev);
  166. break;
  167. case NETDEV_GOING_DOWN:
  168. nb = x25_get_neigh(dev);
  169. if (nb) {
  170. x25_terminate_link(nb);
  171. x25_neigh_put(nb);
  172. }
  173. break;
  174. case NETDEV_DOWN:
  175. x25_kill_by_device(dev);
  176. x25_route_device_down(dev);
  177. x25_link_device_down(dev);
  178. break;
  179. }
  180. }
  181. return NOTIFY_DONE;
  182. }
/*
 *	Add a socket to the bound sockets list.
 */
static void x25_insert_socket(struct sock *sk)
{
	/* Counterpart of x25_remove_socket(); same locking discipline. */
	write_lock_bh(&x25_list_lock);
	sk_add_node(sk, &x25_list);
	write_unlock_bh(&x25_list_lock);
}
/*
 *	Find a socket that wants to accept the Call Request we just
 *	received. Check the full list for an address/cud match.
 *	If no cuds match return the next_best thing, an address match.
 *	Note: if a listening socket has cud set it must only get calls
 *	with matching cud.
 *
 *	Returns a held reference (sock_hold) or NULL; the caller must
 *	sock_put() the result when done.
 */
static struct sock *x25_find_listener(struct x25_address *addr,
				      struct sk_buff *skb)
{
	struct sock *s;
	struct sock *next_best;
	struct hlist_node *node;

	read_lock_bh(&x25_list_lock);
	next_best = NULL;

	/* A listener qualifies when bound to the called address or to the
	 * null (wildcard) address, and is actually in TCP_LISTEN. */
	sk_for_each(s, node, &x25_list)
		if ((!strcmp(addr->x25_addr,
			x25_sk(s)->source_addr.x25_addr) ||
			!strcmp(addr->x25_addr,
				null_x25_address.x25_addr)) &&
			s->sk_state == TCP_LISTEN) {
			/*
			 * Found a listening socket, now check the incoming
			 * call user data vs this sockets call user data
			 */
			if(skb->len > 0 && x25_sk(s)->cudmatchlength > 0) {
				if((memcmp(x25_sk(s)->calluserdata.cuddata,
					skb->data,
					x25_sk(s)->cudmatchlength)) == 0) {
					sock_hold(s);
					goto found;
				}
			} else
				/* Address-only match; remember it but keep
				 * scanning for an exact cud match. */
				next_best = s;
		}
	if (next_best) {
		s = next_best;
		sock_hold(s);
		goto found;
	}
	s = NULL;
found:
	read_unlock_bh(&x25_list_lock);
	return s;
}
  237. /*
  238. * Find a connected X.25 socket given my LCI and neighbour.
  239. */
  240. static struct sock *__x25_find_socket(unsigned int lci, struct x25_neigh *nb)
  241. {
  242. struct sock *s;
  243. struct hlist_node *node;
  244. sk_for_each(s, node, &x25_list)
  245. if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb) {
  246. sock_hold(s);
  247. goto found;
  248. }
  249. s = NULL;
  250. found:
  251. return s;
  252. }
/* Locked wrapper around __x25_find_socket(); returns a held reference
 * (caller must sock_put) or NULL. */
struct sock *x25_find_socket(unsigned int lci, struct x25_neigh *nb)
{
	struct sock *s;

	read_lock_bh(&x25_list_lock);
	s = __x25_find_socket(lci, nb);
	read_unlock_bh(&x25_list_lock);
	return s;
}
  261. /*
  262. * Find a unique LCI for a given device.
  263. */
  264. static unsigned int x25_new_lci(struct x25_neigh *nb)
  265. {
  266. unsigned int lci = 1;
  267. struct sock *sk;
  268. read_lock_bh(&x25_list_lock);
  269. while ((sk = __x25_find_socket(lci, nb)) != NULL) {
  270. sock_put(sk);
  271. if (++lci == 4096) {
  272. lci = 0;
  273. break;
  274. }
  275. }
  276. read_unlock_bh(&x25_list_lock);
  277. return lci;
  278. }
/*
 *	Deferred destroy.
 */
void x25_destroy_socket(struct sock *);

/*
 *	handler for deferred kills.
 */
static void x25_destroy_timer(unsigned long data)
{
	/* sk_timer callback: retry destruction once buffers drained. */
	x25_destroy_socket((struct sock *)data);
}
/*
 *	This is called from user mode and the timers. Thus it protects itself
 *	against interrupt users but doesn't worry about being called during
 *	work. Once it is removed from the queue no interrupt or bottom half
 *	will touch it and we are (fairly 8-) ) safe.
 *	Not static as it's used by the timer
 */
void x25_destroy_socket(struct sock *sk)
{
	struct sk_buff *skb;

	sock_hold(sk);
	lock_sock(sk);
	x25_stop_heartbeat(sk);
	x25_stop_timer(sk);

	x25_remove_socket(sk);
	x25_clear_queues(sk);		/* Flush the queues */

	/* Drain the receive queue; skbs whose sk differs from ours are
	 * embryonic (unaccepted) connections and must be reaped too. */
	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (skb->sk != sk) {		/* A pending connection */
			/*
			 * Queue the unaccepted socket for death
			 */
			sock_set_flag(skb->sk, SOCK_DEAD);
			x25_start_heartbeat(skb->sk);
			x25_sk(skb->sk)->state = X25_STATE_0;
		}
		kfree_skb(skb);
	}

	if (atomic_read(&sk->sk_wmem_alloc) ||
	    atomic_read(&sk->sk_rmem_alloc)) {
		/* Defer: outstanding buffers - retry via sk_timer in 10s. */
		sk->sk_timer.expires = jiffies + 10 * HZ;
		sk->sk_timer.function = x25_destroy_timer;
		sk->sk_timer.data = (unsigned long)sk;
		add_timer(&sk->sk_timer);
	} else {
		/* drop last reference so sock_put will free */
		__sock_put(sk);
	}

	release_sock(sk);
	sock_put(sk);	/* balances the sock_hold() above */
}
  331. /*
  332. * Handling for system calls applied via the various interfaces to a
  333. * X.25 socket object.
  334. */
  335. static int x25_setsockopt(struct socket *sock, int level, int optname,
  336. char __user *optval, int optlen)
  337. {
  338. int opt;
  339. struct sock *sk = sock->sk;
  340. int rc = -ENOPROTOOPT;
  341. if (level != SOL_X25 || optname != X25_QBITINCL)
  342. goto out;
  343. rc = -EINVAL;
  344. if (optlen < sizeof(int))
  345. goto out;
  346. rc = -EFAULT;
  347. if (get_user(opt, (int __user *)optval))
  348. goto out;
  349. x25_sk(sk)->qbitincl = !!opt;
  350. rc = 0;
  351. out:
  352. return rc;
  353. }
  354. static int x25_getsockopt(struct socket *sock, int level, int optname,
  355. char __user *optval, int __user *optlen)
  356. {
  357. struct sock *sk = sock->sk;
  358. int val, len, rc = -ENOPROTOOPT;
  359. if (level != SOL_X25 || optname != X25_QBITINCL)
  360. goto out;
  361. rc = -EFAULT;
  362. if (get_user(len, optlen))
  363. goto out;
  364. len = min_t(unsigned int, len, sizeof(int));
  365. rc = -EINVAL;
  366. if (len < 0)
  367. goto out;
  368. rc = -EFAULT;
  369. if (put_user(len, optlen))
  370. goto out;
  371. val = x25_sk(sk)->qbitincl;
  372. rc = copy_to_user(optval, &val, len) ? -EFAULT : 0;
  373. out:
  374. return rc;
  375. }
  376. static int x25_listen(struct socket *sock, int backlog)
  377. {
  378. struct sock *sk = sock->sk;
  379. int rc = -EOPNOTSUPP;
  380. if (sk->sk_state != TCP_LISTEN) {
  381. memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN);
  382. sk->sk_max_ack_backlog = backlog;
  383. sk->sk_state = TCP_LISTEN;
  384. rc = 0;
  385. }
  386. return rc;
  387. }
/* Protocol descriptor: obj_size makes sk_alloc() reserve room for the
 * whole struct x25_sock, not just struct sock. */
static struct proto x25_proto = {
	.name = "X25",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct x25_sock),
};
  393. static struct sock *x25_alloc_socket(void)
  394. {
  395. struct x25_sock *x25;
  396. struct sock *sk = sk_alloc(AF_X25, GFP_ATOMIC, &x25_proto, 1);
  397. if (!sk)
  398. goto out;
  399. sock_init_data(NULL, sk);
  400. x25 = x25_sk(sk);
  401. skb_queue_head_init(&x25->ack_queue);
  402. skb_queue_head_init(&x25->fragment_queue);
  403. skb_queue_head_init(&x25->interrupt_in_queue);
  404. skb_queue_head_init(&x25->interrupt_out_queue);
  405. out:
  406. return sk;
  407. }
void x25_init_timers(struct sock *sk);

/*
 *	socket(2) entry point for AF_X25.  Only connection oriented
 *	SOCK_SEQPACKET with protocol 0 is supported.
 */
static int x25_create(struct socket *sock, int protocol)
{
	struct sock *sk;
	struct x25_sock *x25;
	int rc = -ESOCKTNOSUPPORT;

	if (sock->type != SOCK_SEQPACKET || protocol)
		goto out;

	rc = -ENOMEM;
	if ((sk = x25_alloc_socket()) == NULL)
		goto out;

	x25 = x25_sk(sk);

	/* Re-run init against the real struct socket (x25_alloc_socket
	 * passed NULL because it had none to hand). */
	sock_init_data(sock, sk);

	x25_init_timers(sk);

	sock->ops = &x25_proto_ops;
	sk->sk_protocol = protocol;
	sk->sk_backlog_rcv = x25_backlog_rcv;

	/* Per-socket timer values start from the sysctl defaults. */
	x25->t21 = sysctl_x25_call_request_timeout;
	x25->t22 = sysctl_x25_reset_request_timeout;
	x25->t23 = sysctl_x25_clear_request_timeout;
	x25->t2 = sysctl_x25_ack_holdback_timeout;
	x25->state = X25_STATE_0;
	x25->cudmatchlength = 0;
	x25->accptapprv = X25_DENY_ACCPT_APPRV;	/* normally no cud */
						/* on call accept */

	/* Default (non-negotiated) facilities. */
	x25->facilities.winsize_in  = X25_DEFAULT_WINDOW_SIZE;
	x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE;
	x25->facilities.pacsize_in  = X25_DEFAULT_PACKET_SIZE;
	x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE;
	x25->facilities.throughput  = X25_DEFAULT_THROUGHPUT;
	x25->facilities.reverse     = X25_DEFAULT_REVERSE;
	rc = 0;
out:
	return rc;
}
/*
 *	Create the child socket for an incoming call, inheriting generic
 *	socket parameters and X.25 tunables from the listener osk.
 *	Returns NULL if osk is not SOCK_SEQPACKET or allocation fails.
 */
static struct sock *x25_make_new(struct sock *osk)
{
	struct sock *sk = NULL;
	struct x25_sock *x25, *ox25;

	if (osk->sk_type != SOCK_SEQPACKET)
		goto out;

	if ((sk = x25_alloc_socket()) == NULL)
		goto out;

	x25 = x25_sk(sk);

	/* Generic socket parameters copied from the listener. */
	sk->sk_type        = osk->sk_type;
	sk->sk_socket      = osk->sk_socket;
	sk->sk_priority    = osk->sk_priority;
	sk->sk_protocol    = osk->sk_protocol;
	sk->sk_rcvbuf      = osk->sk_rcvbuf;
	sk->sk_sndbuf      = osk->sk_sndbuf;
	sk->sk_state       = TCP_ESTABLISHED;
	sk->sk_sleep       = osk->sk_sleep;
	sk->sk_backlog_rcv = osk->sk_backlog_rcv;

	if (sock_flag(osk, SOCK_ZAPPED))
		sock_set_flag(sk, SOCK_ZAPPED);

	if (sock_flag(osk, SOCK_DBG))
		sock_set_flag(sk, SOCK_DBG);

	ox25 = x25_sk(osk);
	/* X.25 specific state: timers, facilities, accept behaviour. */
	x25->t21            = ox25->t21;
	x25->t22            = ox25->t22;
	x25->t23            = ox25->t23;
	x25->t2             = ox25->t2;
	x25->facilities     = ox25->facilities;
	x25->qbitincl       = ox25->qbitincl;
	x25->cudmatchlength = ox25->cudmatchlength;
	x25->accptapprv     = ox25->accptapprv;

	x25_init_timers(sk);
out:
	return sk;
}
/*
 *	close(2) path.  In the idle/clearing states (0, 2) the socket is
 *	destroyed immediately; in the active states a Clear Request is
 *	sent and the socket is flagged SOCK_DESTROY so it is reaped once
 *	the clear handshake (or T23) completes.  Always returns 0.
 */
static int x25_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct x25_sock *x25;

	if (!sk)
		goto out;

	x25 = x25_sk(sk);

	switch (x25->state) {
	case X25_STATE_0:
	case X25_STATE_2:
		x25_disconnect(sk, 0, 0, 0);
		x25_destroy_socket(sk);
		goto out;

	case X25_STATE_1:
	case X25_STATE_3:
	case X25_STATE_4:
		x25_clear_queues(sk);
		x25_write_internal(sk, X25_CLEAR_REQUEST);
		x25_start_t23timer(sk);
		x25->state = X25_STATE_2;
		sk->sk_state = TCP_CLOSE;
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
		sock_set_flag(sk, SOCK_DEAD);
		sock_set_flag(sk, SOCK_DESTROY);
		break;
	}

	/* Detach the struct socket from sk in either direction. */
	sock->sk = NULL;
	sk->sk_socket = NULL;	/* Not used, but we should do this */
out:
	return 0;
}
  510. static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
  511. {
  512. struct sock *sk = sock->sk;
  513. struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
  514. if (!sock_flag(sk, SOCK_ZAPPED) ||
  515. addr_len != sizeof(struct sockaddr_x25) ||
  516. addr->sx25_family != AF_X25)
  517. return -EINVAL;
  518. x25_sk(sk)->source_addr = addr->sx25_addr;
  519. x25_insert_socket(sk);
  520. sock_reset_flag(sk, SOCK_ZAPPED);
  521. SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
  522. return 0;
  523. }
/*
 *	Block until the socket reaches TCP_ESTABLISHED, a signal arrives,
 *	or a socket error is reported.  Called with the socket locked;
 *	the lock is dropped around schedule() so the state can advance.
 *	Returns 0 on success, -ERESTARTSYS on signal, or the pending
 *	socket error.
 */
static int x25_wait_for_connection_establishment(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int rc;

	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		rc = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		rc = sock_error(sk);
		if (rc) {
			/* Connect failed; reflect that to the socket layer. */
			sk->sk_socket->state = SS_UNCONNECTED;
			break;
		}
		rc = 0;
		if (sk->sk_state != TCP_ESTABLISHED) {
			release_sock(sk);
			schedule();
			lock_sock(sk);
		} else
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	return rc;
}
  551. static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
  552. int addr_len, int flags)
  553. {
  554. struct sock *sk = sock->sk;
  555. struct x25_sock *x25 = x25_sk(sk);
  556. struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
  557. struct x25_route *rt;
  558. int rc = 0;
  559. lock_sock(sk);
  560. if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
  561. sock->state = SS_CONNECTED;
  562. goto out; /* Connect completed during a ERESTARTSYS event */
  563. }
  564. rc = -ECONNREFUSED;
  565. if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
  566. sock->state = SS_UNCONNECTED;
  567. goto out;
  568. }
  569. rc = -EISCONN; /* No reconnect on a seqpacket socket */
  570. if (sk->sk_state == TCP_ESTABLISHED)
  571. goto out;
  572. sk->sk_state = TCP_CLOSE;
  573. sock->state = SS_UNCONNECTED;
  574. rc = -EINVAL;
  575. if (addr_len != sizeof(struct sockaddr_x25) ||
  576. addr->sx25_family != AF_X25)
  577. goto out;
  578. rc = -ENETUNREACH;
  579. rt = x25_get_route(&addr->sx25_addr);
  580. if (!rt)
  581. goto out;
  582. x25->neighbour = x25_get_neigh(rt->dev);
  583. if (!x25->neighbour)
  584. goto out_put_route;
  585. x25_limit_facilities(&x25->facilities, x25->neighbour);
  586. x25->lci = x25_new_lci(x25->neighbour);
  587. if (!x25->lci)
  588. goto out_put_neigh;
  589. rc = -EINVAL;
  590. if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
  591. goto out_put_neigh;
  592. if (!strcmp(x25->source_addr.x25_addr, null_x25_address.x25_addr))
  593. memset(&x25->source_addr, '\0', X25_ADDR_LEN);
  594. x25->dest_addr = addr->sx25_addr;
  595. /* Move to connecting socket, start sending Connect Requests */
  596. sock->state = SS_CONNECTING;
  597. sk->sk_state = TCP_SYN_SENT;
  598. x25->state = X25_STATE_1;
  599. x25_write_internal(sk, X25_CALL_REQUEST);
  600. x25_start_heartbeat(sk);
  601. x25_start_t21timer(sk);
  602. /* Now the loop */
  603. rc = -EINPROGRESS;
  604. if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
  605. goto out_put_neigh;
  606. rc = x25_wait_for_connection_establishment(sk);
  607. if (rc)
  608. goto out_put_neigh;
  609. sock->state = SS_CONNECTED;
  610. rc = 0;
  611. out_put_neigh:
  612. if (rc)
  613. x25_neigh_put(x25->neighbour);
  614. out_put_route:
  615. x25_route_put(rt);
  616. out:
  617. release_sock(sk);
  618. return rc;
  619. }
/*
 *	Wait until an skb appears on the receive queue, for at most
 *	`timeout' jiffies (0 means fail immediately).  Called with the
 *	socket locked; the lock is dropped around schedule_timeout().
 *	Returns 0 when data is available or the receive side was shut
 *	down, -EAGAIN when the timeout expires, -ERESTARTSYS on signal.
 */
static int x25_wait_for_data(struct sock *sk, int timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int rc = 0;

	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		/* NOTE: a shutdown breaks out with rc == 0 even though the
		 * queue may be empty - callers must cope with that. */
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			break;
		rc = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		rc = -EAGAIN;
		if (!timeout)
			break;
		rc = 0;
		if (skb_queue_empty(&sk->sk_receive_queue)) {
			release_sock(sk);
			timeout = schedule_timeout(timeout);
			lock_sock(sk);
		} else
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	return rc;
}
  647. static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
  648. {
  649. struct sock *sk = sock->sk;
  650. struct sock *newsk;
  651. struct sk_buff *skb;
  652. int rc = -EINVAL;
  653. if (!sk || sk->sk_state != TCP_LISTEN)
  654. goto out;
  655. rc = -EOPNOTSUPP;
  656. if (sk->sk_type != SOCK_SEQPACKET)
  657. goto out;
  658. lock_sock(sk);
  659. rc = x25_wait_for_data(sk, sk->sk_rcvtimeo);
  660. if (rc)
  661. goto out2;
  662. skb = skb_dequeue(&sk->sk_receive_queue);
  663. rc = -EINVAL;
  664. if (!skb->sk)
  665. goto out2;
  666. newsk = skb->sk;
  667. newsk->sk_socket = newsock;
  668. newsk->sk_sleep = &newsock->wait;
  669. /* Now attach up the new socket */
  670. skb->sk = NULL;
  671. kfree_skb(skb);
  672. sk->sk_ack_backlog--;
  673. newsock->sk = newsk;
  674. newsock->state = SS_CONNECTED;
  675. rc = 0;
  676. out2:
  677. release_sock(sk);
  678. out:
  679. return rc;
  680. }
  681. static int x25_getname(struct socket *sock, struct sockaddr *uaddr,
  682. int *uaddr_len, int peer)
  683. {
  684. struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)uaddr;
  685. struct sock *sk = sock->sk;
  686. struct x25_sock *x25 = x25_sk(sk);
  687. if (peer) {
  688. if (sk->sk_state != TCP_ESTABLISHED)
  689. return -ENOTCONN;
  690. sx25->sx25_addr = x25->dest_addr;
  691. } else
  692. sx25->sx25_addr = x25->source_addr;
  693. sx25->sx25_family = AF_X25;
  694. *uaddr_len = sizeof(*sx25);
  695. return 0;
  696. }
  697. int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
  698. unsigned int lci)
  699. {
  700. struct sock *sk;
  701. struct sock *make;
  702. struct x25_sock *makex25;
  703. struct x25_address source_addr, dest_addr;
  704. struct x25_facilities facilities;
  705. int len, rc;
  706. /*
  707. * Remove the LCI and frame type.
  708. */
  709. skb_pull(skb, X25_STD_MIN_LEN);
  710. /*
  711. * Extract the X.25 addresses and convert them to ASCII strings,
  712. * and remove them.
  713. */
  714. skb_pull(skb, x25_addr_ntoa(skb->data, &source_addr, &dest_addr));
  715. /*
  716. * Get the length of the facilities, skip past them for the moment
  717. * get the call user data because this is needed to determine
  718. * the correct listener
  719. */
  720. len = skb->data[0] + 1;
  721. skb_pull(skb,len);
  722. /*
  723. * Find a listener for the particular address/cud pair.
  724. */
  725. sk = x25_find_listener(&source_addr,skb);
  726. skb_push(skb,len);
  727. /*
  728. * We can't accept the Call Request.
  729. */
  730. if (sk == NULL || sk_acceptq_is_full(sk))
  731. goto out_clear_request;
  732. /*
  733. * Try to reach a compromise on the requested facilities.
  734. */
  735. if ((len = x25_negotiate_facilities(skb, sk, &facilities)) == -1)
  736. goto out_sock_put;
  737. /*
  738. * current neighbour/link might impose additional limits
  739. * on certain facilties
  740. */
  741. x25_limit_facilities(&facilities, nb);
  742. /*
  743. * Try to create a new socket.
  744. */
  745. make = x25_make_new(sk);
  746. if (!make)
  747. goto out_sock_put;
  748. /*
  749. * Remove the facilities
  750. */
  751. skb_pull(skb, len);
  752. skb->sk = make;
  753. make->sk_state = TCP_ESTABLISHED;
  754. makex25 = x25_sk(make);
  755. makex25->lci = lci;
  756. makex25->dest_addr = dest_addr;
  757. makex25->source_addr = source_addr;
  758. makex25->neighbour = nb;
  759. makex25->facilities = facilities;
  760. makex25->vc_facil_mask = x25_sk(sk)->vc_facil_mask;
  761. /* ensure no reverse facil on accept */
  762. makex25->vc_facil_mask &= ~X25_MASK_REVERSE;
  763. makex25->cudmatchlength = x25_sk(sk)->cudmatchlength;
  764. /* Normally all calls are accepted immediatly */
  765. if(makex25->accptapprv & X25_DENY_ACCPT_APPRV) {
  766. x25_write_internal(make, X25_CALL_ACCEPTED);
  767. makex25->state = X25_STATE_3;
  768. }
  769. /*
  770. * Incoming Call User Data.
  771. */
  772. if (skb->len >= 0) {
  773. memcpy(makex25->calluserdata.cuddata, skb->data, skb->len);
  774. makex25->calluserdata.cudlength = skb->len;
  775. }
  776. sk->sk_ack_backlog++;
  777. x25_insert_socket(make);
  778. skb_queue_head(&sk->sk_receive_queue, skb);
  779. x25_start_heartbeat(make);
  780. if (!sock_flag(sk, SOCK_DEAD))
  781. sk->sk_data_ready(sk, skb->len);
  782. rc = 1;
  783. sock_put(sk);
  784. out:
  785. return rc;
  786. out_sock_put:
  787. sock_put(sk);
  788. out_clear_request:
  789. rc = 0;
  790. x25_transmit_clear_request(nb, lci, 0x01);
  791. goto out;
  792. }
  793. static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
  794. struct msghdr *msg, size_t len)
  795. {
  796. struct sock *sk = sock->sk;
  797. struct x25_sock *x25 = x25_sk(sk);
  798. struct sockaddr_x25 *usx25 = (struct sockaddr_x25 *)msg->msg_name;
  799. struct sockaddr_x25 sx25;
  800. struct sk_buff *skb;
  801. unsigned char *asmptr;
  802. int noblock = msg->msg_flags & MSG_DONTWAIT;
  803. size_t size;
  804. int qbit = 0, rc = -EINVAL;
  805. if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_OOB|MSG_EOR|MSG_CMSG_COMPAT))
  806. goto out;
  807. /* we currently don't support segmented records at the user interface */
  808. if (!(msg->msg_flags & (MSG_EOR|MSG_OOB)))
  809. goto out;
  810. rc = -EADDRNOTAVAIL;
  811. if (sock_flag(sk, SOCK_ZAPPED))
  812. goto out;
  813. rc = -EPIPE;
  814. if (sk->sk_shutdown & SEND_SHUTDOWN) {
  815. send_sig(SIGPIPE, current, 0);
  816. goto out;
  817. }
  818. rc = -ENETUNREACH;
  819. if (!x25->neighbour)
  820. goto out;
  821. if (usx25) {
  822. rc = -EINVAL;
  823. if (msg->msg_namelen < sizeof(sx25))
  824. goto out;
  825. memcpy(&sx25, usx25, sizeof(sx25));
  826. rc = -EISCONN;
  827. if (strcmp(x25->dest_addr.x25_addr, sx25.sx25_addr.x25_addr))
  828. goto out;
  829. rc = -EINVAL;
  830. if (sx25.sx25_family != AF_X25)
  831. goto out;
  832. } else {
  833. /*
  834. * FIXME 1003.1g - if the socket is like this because
  835. * it has become closed (not started closed) we ought
  836. * to SIGPIPE, EPIPE;
  837. */
  838. rc = -ENOTCONN;
  839. if (sk->sk_state != TCP_ESTABLISHED)
  840. goto out;
  841. sx25.sx25_family = AF_X25;
  842. sx25.sx25_addr = x25->dest_addr;
  843. }
  844. SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n");
  845. /* Build a packet */
  846. SOCK_DEBUG(sk, "x25_sendmsg: sendto: building packet.\n");
  847. if ((msg->msg_flags & MSG_OOB) && len > 32)
  848. len = 32;
  849. size = len + X25_MAX_L2_LEN + X25_EXT_MIN_LEN;
  850. skb = sock_alloc_send_skb(sk, size, noblock, &rc);
  851. if (!skb)
  852. goto out;
  853. X25_SKB_CB(skb)->flags = msg->msg_flags;
  854. skb_reserve(skb, X25_MAX_L2_LEN + X25_EXT_MIN_LEN);
  855. /*
  856. * Put the data on the end
  857. */
  858. SOCK_DEBUG(sk, "x25_sendmsg: Copying user data\n");
  859. asmptr = skb->h.raw = skb_put(skb, len);
  860. rc = memcpy_fromiovec(asmptr, msg->msg_iov, len);
  861. if (rc)
  862. goto out_kfree_skb;
  863. /*
  864. * If the Q BIT Include socket option is in force, the first
  865. * byte of the user data is the logical value of the Q Bit.
  866. */
  867. if (x25->qbitincl) {
  868. qbit = skb->data[0];
  869. skb_pull(skb, 1);
  870. }
  871. /*
  872. * Push down the X.25 header
  873. */
  874. SOCK_DEBUG(sk, "x25_sendmsg: Building X.25 Header.\n");
  875. if (msg->msg_flags & MSG_OOB) {
  876. if (x25->neighbour->extended) {
  877. asmptr = skb_push(skb, X25_STD_MIN_LEN);
  878. *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ;
  879. *asmptr++ = (x25->lci >> 0) & 0xFF;
  880. *asmptr++ = X25_INTERRUPT;
  881. } else {
  882. asmptr = skb_push(skb, X25_STD_MIN_LEN);
  883. *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ;
  884. *asmptr++ = (x25->lci >> 0) & 0xFF;
  885. *asmptr++ = X25_INTERRUPT;
  886. }
  887. } else {
  888. if (x25->neighbour->extended) {
  889. /* Build an Extended X.25 header */
  890. asmptr = skb_push(skb, X25_EXT_MIN_LEN);
  891. *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ;
  892. *asmptr++ = (x25->lci >> 0) & 0xFF;
  893. *asmptr++ = X25_DATA;
  894. *asmptr++ = X25_DATA;
  895. } else {
  896. /* Build an Standard X.25 header */
  897. asmptr = skb_push(skb, X25_STD_MIN_LEN);
  898. *asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ;
  899. *asmptr++ = (x25->lci >> 0) & 0xFF;
  900. *asmptr++ = X25_DATA;
  901. }
  902. if (qbit)
  903. skb->data[0] |= X25_Q_BIT;
  904. }
  905. SOCK_DEBUG(sk, "x25_sendmsg: Built header.\n");
  906. SOCK_DEBUG(sk, "x25_sendmsg: Transmitting buffer\n");
  907. rc = -ENOTCONN;
  908. if (sk->sk_state != TCP_ESTABLISHED)
  909. goto out_kfree_skb;
  910. if (msg->msg_flags & MSG_OOB)
  911. skb_queue_tail(&x25->interrupt_out_queue, skb);
  912. else {
  913. len = x25_output(sk, skb);
  914. if (len < 0)
  915. kfree_skb(skb);
  916. else if (x25->qbitincl)
  917. len++;
  918. }
  919. /*
  920. * lock_sock() is currently only used to serialize this x25_kick()
  921. * against input-driven x25_kick() calls. It currently only blocks
  922. * incoming packets for this socket and does not protect against
  923. * any other socket state changes and is not called from anywhere
  924. * else. As x25_kick() cannot block and as long as all socket
  925. * operations are BKL-wrapped, we don't need take to care about
  926. * purging the backlog queue in x25_release().
  927. *
  928. * Using lock_sock() to protect all socket operations entirely
  929. * (and making the whole x25 stack SMP aware) unfortunately would
  930. * require major changes to {send,recv}msg and skb allocation methods.
  931. * -> 2.5 ;)
  932. */
  933. lock_sock(sk);
  934. x25_kick(sk);
  935. release_sock(sk);
  936. rc = len;
  937. out:
  938. return rc;
  939. out_kfree_skb:
  940. kfree_skb(skb);
  941. goto out;
  942. }
/*
 * Receive one complete X.25 record from an established socket.
 *
 * With MSG_OOB, dequeues an interrupt packet from interrupt_in_queue;
 * otherwise takes the next data record via skb_recv_datagram().  When the
 * SOCK_QBITINCL option is set, the received Q bit is prepended to the user
 * data as one leading byte (always 0 for interrupt data, which carries no
 * Q bit).  Returns the number of bytes copied, or a negative errno.
 */
static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct x25_sock *x25 = x25_sk(sk);
	struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name;
	size_t copied;
	int qbit;
	struct sk_buff *skb;
	unsigned char *asmptr;
	int rc = -ENOTCONN;

	/*
	 * This works for seqpacket too. The receiver has ordered the queue for
	 * us! We do one quick check first though
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	if (flags & MSG_OOB) {
		/* OOB reads are rejected when urgent data is inlined, or
		 * when no interrupt packet is pending. */
		rc = -EINVAL;
		if (sock_flag(sk, SOCK_URGINLINE) ||
		    !skb_peek(&x25->interrupt_in_queue))
			goto out;

		skb = skb_dequeue(&x25->interrupt_in_queue);
		/* Strip the 3-byte interrupt packet header. */
		skb_pull(skb, X25_STD_MIN_LEN);

		/*
		 * No Q bit information on Interrupt data.
		 */
		if (x25->qbitincl) {
			asmptr = skb_push(skb, 1);
			*asmptr = 0x00;
		}

		msg->msg_flags |= MSG_OOB;
	} else {
		/* Now we can treat all alike */
		skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
					flags & MSG_DONTWAIT, &rc);
		if (!skb)
			goto out;

		/* Q bit lives in the first header byte; sample it before
		 * stripping the header. */
		qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT;

		/* NOTE(review): x25->neighbour is dereferenced unchecked;
		 * presumably it is non-NULL whenever sk_state is
		 * TCP_ESTABLISHED — confirm against connect/disconnect
		 * paths. */
		skb_pull(skb, x25->neighbour->extended ?
				X25_EXT_MIN_LEN : X25_STD_MIN_LEN);

		if (x25->qbitincl) {
			asmptr = skb_push(skb, 1);
			*asmptr = qbit;
		}
	}

	skb->h.raw = skb->data;

	copied = skb->len;

	/* Truncate silently (with MSG_TRUNC) if the caller's buffer is
	 * smaller than the record. */
	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	/* Currently, each datagram always contains a complete record */
	msg->msg_flags |= MSG_EOR;

	rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (rc)
		goto out_free_dgram;

	/* Report the peer address if the caller supplied msg_name. */
	if (sx25) {
		sx25->sx25_family = AF_X25;
		sx25->sx25_addr   = x25->dest_addr;
	}

	/* NOTE(review): msg_namelen is set even when msg_name is NULL;
	 * later kernels only report a length when an address was actually
	 * written — harmless here, but worth aligning. */
	msg->msg_namelen = sizeof(struct sockaddr_x25);

	/* May trigger flow-control (RR) now that receive space freed up. */
	lock_sock(sk);
	x25_check_rbuf(sk);
	release_sock(sk);
	rc = copied;
out_free_dgram:
	skb_free_datagram(sk, skb);
out:
	return rc;
}
  1015. static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
  1016. {
  1017. struct sock *sk = sock->sk;
  1018. struct x25_sock *x25 = x25_sk(sk);
  1019. void __user *argp = (void __user *)arg;
  1020. int rc;
  1021. switch (cmd) {
  1022. case TIOCOUTQ: {
  1023. int amount = sk->sk_sndbuf -
  1024. atomic_read(&sk->sk_wmem_alloc);
  1025. if (amount < 0)
  1026. amount = 0;
  1027. rc = put_user(amount, (unsigned int __user *)argp);
  1028. break;
  1029. }
  1030. case TIOCINQ: {
  1031. struct sk_buff *skb;
  1032. int amount = 0;
  1033. /*
  1034. * These two are safe on a single CPU system as
  1035. * only user tasks fiddle here
  1036. */
  1037. if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
  1038. amount = skb->len;
  1039. rc = put_user(amount, (unsigned int __user *)argp);
  1040. break;
  1041. }
  1042. case SIOCGSTAMP:
  1043. rc = -EINVAL;
  1044. if (sk)
  1045. rc = sock_get_timestamp(sk,
  1046. (struct timeval __user *)argp);
  1047. break;
  1048. case SIOCGIFADDR:
  1049. case SIOCSIFADDR:
  1050. case SIOCGIFDSTADDR:
  1051. case SIOCSIFDSTADDR:
  1052. case SIOCGIFBRDADDR:
  1053. case SIOCSIFBRDADDR:
  1054. case SIOCGIFNETMASK:
  1055. case SIOCSIFNETMASK:
  1056. case SIOCGIFMETRIC:
  1057. case SIOCSIFMETRIC:
  1058. rc = -EINVAL;
  1059. break;
  1060. case SIOCADDRT:
  1061. case SIOCDELRT:
  1062. rc = -EPERM;
  1063. if (!capable(CAP_NET_ADMIN))
  1064. break;
  1065. rc = x25_route_ioctl(cmd, argp);
  1066. break;
  1067. case SIOCX25GSUBSCRIP:
  1068. rc = x25_subscr_ioctl(cmd, argp);
  1069. break;
  1070. case SIOCX25SSUBSCRIP:
  1071. rc = -EPERM;
  1072. if (!capable(CAP_NET_ADMIN))
  1073. break;
  1074. rc = x25_subscr_ioctl(cmd, argp);
  1075. break;
  1076. case SIOCX25GFACILITIES: {
  1077. struct x25_facilities fac = x25->facilities;
  1078. rc = copy_to_user(argp, &fac,
  1079. sizeof(fac)) ? -EFAULT : 0;
  1080. break;
  1081. }
  1082. case SIOCX25SFACILITIES: {
  1083. struct x25_facilities facilities;
  1084. rc = -EFAULT;
  1085. if (copy_from_user(&facilities, argp,
  1086. sizeof(facilities)))
  1087. break;
  1088. rc = -EINVAL;
  1089. if (sk->sk_state != TCP_LISTEN &&
  1090. sk->sk_state != TCP_CLOSE)
  1091. break;
  1092. if (facilities.pacsize_in < X25_PS16 ||
  1093. facilities.pacsize_in > X25_PS4096)
  1094. break;
  1095. if (facilities.pacsize_out < X25_PS16 ||
  1096. facilities.pacsize_out > X25_PS4096)
  1097. break;
  1098. if (facilities.winsize_in < 1 ||
  1099. facilities.winsize_in > 127)
  1100. break;
  1101. if (facilities.throughput < 0x03 ||
  1102. facilities.throughput > 0xDD)
  1103. break;
  1104. if (facilities.reverse &&
  1105. (facilities.reverse | 0x81)!= 0x81)
  1106. break;
  1107. x25->facilities = facilities;
  1108. rc = 0;
  1109. break;
  1110. }
  1111. case SIOCX25GCALLUSERDATA: {
  1112. struct x25_calluserdata cud = x25->calluserdata;
  1113. rc = copy_to_user(argp, &cud,
  1114. sizeof(cud)) ? -EFAULT : 0;
  1115. break;
  1116. }
  1117. case SIOCX25SCALLUSERDATA: {
  1118. struct x25_calluserdata calluserdata;
  1119. rc = -EFAULT;
  1120. if (copy_from_user(&calluserdata, argp,
  1121. sizeof(calluserdata)))
  1122. break;
  1123. rc = -EINVAL;
  1124. if (calluserdata.cudlength > X25_MAX_CUD_LEN)
  1125. break;
  1126. x25->calluserdata = calluserdata;
  1127. rc = 0;
  1128. break;
  1129. }
  1130. case SIOCX25GCAUSEDIAG: {
  1131. struct x25_causediag causediag;
  1132. causediag = x25->causediag;
  1133. rc = copy_to_user(argp, &causediag,
  1134. sizeof(causediag)) ? -EFAULT : 0;
  1135. break;
  1136. }
  1137. case SIOCX25SCUDMATCHLEN: {
  1138. struct x25_subaddr sub_addr;
  1139. rc = -EINVAL;
  1140. if(sk->sk_state != TCP_CLOSE)
  1141. break;
  1142. rc = -EFAULT;
  1143. if (copy_from_user(&sub_addr, argp,
  1144. sizeof(sub_addr)))
  1145. break;
  1146. rc = -EINVAL;
  1147. if(sub_addr.cudmatchlength > X25_MAX_CUD_LEN)
  1148. break;
  1149. x25->cudmatchlength = sub_addr.cudmatchlength;
  1150. rc = 0;
  1151. break;
  1152. }
  1153. case SIOCX25CALLACCPTAPPRV: {
  1154. rc = -EINVAL;
  1155. if (sk->sk_state != TCP_CLOSE)
  1156. break;
  1157. x25->accptapprv = X25_ALLOW_ACCPT_APPRV;
  1158. rc = 0;
  1159. break;
  1160. }
  1161. case SIOCX25SENDCALLACCPT: {
  1162. rc = -EINVAL;
  1163. if (sk->sk_state != TCP_ESTABLISHED)
  1164. break;
  1165. if (x25->accptapprv) /* must call accptapprv above */
  1166. break;
  1167. x25_write_internal(sk, X25_CALL_ACCEPTED);
  1168. x25->state = X25_STATE_3;
  1169. rc = 0;
  1170. break;
  1171. }
  1172. default:
  1173. rc = dev_ioctl(cmd, argp);
  1174. break;
  1175. }
  1176. return rc;
  1177. }
/* PF_X25 socket-creation hook, registered via sock_register() in x25_init(). */
static struct net_proto_family x25_family_ops = {
	.family =	AF_X25,
	.create =	x25_create,
	.owner	=	THIS_MODULE,
};
/*
 * Per-socket operations for AF_X25 sockets.  The sock_no_* stubs mark
 * operations the protocol does not support (socketpair, shutdown, mmap,
 * sendpage).  The SOCKOPS_WRAPPED/SOCKOPS_WRAP pair serializes these
 * entry points under the big kernel lock.
 */
static struct proto_ops SOCKOPS_WRAPPED(x25_proto_ops) = {
	.family =	AF_X25,
	.owner =	THIS_MODULE,
	.release =	x25_release,
	.bind =		x25_bind,
	.connect =	x25_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	x25_accept,
	.getname =	x25_getname,
	.poll =		datagram_poll,
	.ioctl =	x25_ioctl,
	.listen =	x25_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	x25_setsockopt,
	.getsockopt =	x25_getsockopt,
	.sendmsg =	x25_sendmsg,
	.recvmsg =	x25_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
  1203. #include <linux/smp_lock.h>
  1204. SOCKOPS_WRAP(x25_proto, AF_X25);
/* Deliver inbound ETH_P_X25 frames (from LAPB devices) to the packet layer. */
static struct packet_type x25_packet_type = {
	.type =	__constant_htons(ETH_P_X25),
	.func =	x25_lapb_receive_frame,
};
/* Netdevice notifier: lets the X.25 layer react to device state changes. */
static struct notifier_block x25_dev_notifier = {
	.notifier_call = x25_device_event,
};
  1212. void x25_kill_by_neigh(struct x25_neigh *nb)
  1213. {
  1214. struct sock *s;
  1215. struct hlist_node *node;
  1216. write_lock_bh(&x25_list_lock);
  1217. sk_for_each(s, node, &x25_list)
  1218. if (x25_sk(s)->neighbour == nb)
  1219. x25_disconnect(s, ENETUNREACH, 0, 0);
  1220. write_unlock_bh(&x25_list_lock);
  1221. }
  1222. static int __init x25_init(void)
  1223. {
  1224. int rc = proto_register(&x25_proto, 0);
  1225. if (rc != 0)
  1226. goto out;
  1227. sock_register(&x25_family_ops);
  1228. dev_add_pack(&x25_packet_type);
  1229. register_netdevice_notifier(&x25_dev_notifier);
  1230. printk(KERN_INFO "X.25 for Linux. Version 0.2 for Linux 2.1.15\n");
  1231. #ifdef CONFIG_SYSCTL
  1232. x25_register_sysctl();
  1233. #endif
  1234. x25_proc_init();
  1235. out:
  1236. return rc;
  1237. }
  1238. module_init(x25_init);
/*
 * Module exit: tear down everything x25_init() set up, in reverse
 * order — /proc entries first, then the link and route tables, sysctl,
 * the netdevice notifier, the packet hook, the socket family, and
 * finally the proto itself.  Order matters: the receive path must be
 * unhooked before the state it uses is freed.
 */
static void __exit x25_exit(void)
{
	x25_proc_exit();
	/* Free neighbour/link and route state owned by this module. */
	x25_link_free();
	x25_route_free();

#ifdef CONFIG_SYSCTL
	x25_unregister_sysctl();
#endif
	unregister_netdevice_notifier(&x25_dev_notifier);
	dev_remove_pack(&x25_packet_type);
	sock_unregister(AF_X25);
	proto_unregister(&x25_proto);
}
  1252. module_exit(x25_exit);
  1253. MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>");
  1254. MODULE_DESCRIPTION("The X.25 Packet Layer network layer protocol");
  1255. MODULE_LICENSE("GPL");
  1256. MODULE_ALIAS_NETPROTO(PF_X25);