transport.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847
  1. /* transport.c: Rx Transport routines
  2. *
  3. * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #include <linux/sched.h>
  12. #include <linux/slab.h>
  13. #include <linux/module.h>
  14. #include <rxrpc/transport.h>
  15. #include <rxrpc/peer.h>
  16. #include <rxrpc/connection.h>
  17. #include <rxrpc/call.h>
  18. #include <rxrpc/message.h>
  19. #include <rxrpc/krxiod.h>
  20. #include <rxrpc/krxsecd.h>
  21. #include <linux/udp.h>
  22. #include <linux/in.h>
  23. #include <linux/in6.h>
  24. #include <linux/icmp.h>
  25. #include <linux/skbuff.h>
  26. #include <net/sock.h>
  27. #include <net/ip.h>
  28. #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
  29. #include <linux/ipv6.h> /* this should _really_ be in errqueue.h.. */
  30. #endif
  31. #include <linux/errqueue.h>
  32. #include <asm/uaccess.h>
  33. #include "internal.h"
/* layout of a message read from the UDP socket's error queue — the control
 * data produced when IP_RECVERR is enabled (see ip(7)) */
struct errormsg {
	struct cmsghdr			cmsg;		/* control message header */
	struct sock_extended_err	ee;		/* extended error information */
	struct sockaddr_in		icmp_src;	/* ICMP packet source address */
};

/* list of all extant transport endpoints, guarded by rxrpc_transports_lock */
static DEFINE_SPINLOCK(rxrpc_transports_lock);
static struct list_head rxrpc_transports = LIST_HEAD_INIT(rxrpc_transports);

/* accounting counter — only compiled in when __RXACCT is enabled
 * (presumably defined in internal.h; verify there) */
__RXACCT_DECL(atomic_t rxrpc_transport_count);

/* transports exported through /proc, guarded by the rwsem below */
LIST_HEAD(rxrpc_proc_transports);
DECLARE_RWSEM(rxrpc_proc_transports_sem);

static void rxrpc_data_ready(struct sock *sk, int count);
static void rxrpc_error_report(struct sock *sk);
static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans,
					struct list_head *msgq);
static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans);
  49. /*****************************************************************************/
  50. /*
  51. * create a new transport endpoint using the specified UDP port
  52. */
  53. int rxrpc_create_transport(unsigned short port,
  54. struct rxrpc_transport **_trans)
  55. {
  56. struct rxrpc_transport *trans;
  57. struct sockaddr_in sin;
  58. mm_segment_t oldfs;
  59. struct sock *sock;
  60. int ret, opt;
  61. _enter("%hu", port);
  62. trans = kzalloc(sizeof(struct rxrpc_transport), GFP_KERNEL);
  63. if (!trans)
  64. return -ENOMEM;
  65. atomic_set(&trans->usage, 1);
  66. INIT_LIST_HEAD(&trans->services);
  67. INIT_LIST_HEAD(&trans->link);
  68. INIT_LIST_HEAD(&trans->krxiodq_link);
  69. spin_lock_init(&trans->lock);
  70. INIT_LIST_HEAD(&trans->peer_active);
  71. INIT_LIST_HEAD(&trans->peer_graveyard);
  72. spin_lock_init(&trans->peer_gylock);
  73. init_waitqueue_head(&trans->peer_gy_waitq);
  74. rwlock_init(&trans->peer_lock);
  75. atomic_set(&trans->peer_count, 0);
  76. trans->port = port;
  77. /* create a UDP socket to be my actual transport endpoint */
  78. ret = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &trans->socket);
  79. if (ret < 0)
  80. goto error;
  81. /* use the specified port */
  82. if (port) {
  83. memset(&sin, 0, sizeof(sin));
  84. sin.sin_family = AF_INET;
  85. sin.sin_port = htons(port);
  86. ret = trans->socket->ops->bind(trans->socket,
  87. (struct sockaddr *) &sin,
  88. sizeof(sin));
  89. if (ret < 0)
  90. goto error;
  91. }
  92. opt = 1;
  93. oldfs = get_fs();
  94. set_fs(KERNEL_DS);
  95. ret = trans->socket->ops->setsockopt(trans->socket, SOL_IP, IP_RECVERR,
  96. (char *) &opt, sizeof(opt));
  97. set_fs(oldfs);
  98. spin_lock(&rxrpc_transports_lock);
  99. list_add(&trans->link, &rxrpc_transports);
  100. spin_unlock(&rxrpc_transports_lock);
  101. /* set the socket up */
  102. sock = trans->socket->sk;
  103. sock->sk_user_data = trans;
  104. sock->sk_data_ready = rxrpc_data_ready;
  105. sock->sk_error_report = rxrpc_error_report;
  106. down_write(&rxrpc_proc_transports_sem);
  107. list_add_tail(&trans->proc_link, &rxrpc_proc_transports);
  108. up_write(&rxrpc_proc_transports_sem);
  109. __RXACCT(atomic_inc(&rxrpc_transport_count));
  110. *_trans = trans;
  111. _leave(" = 0 (%p)", trans);
  112. return 0;
  113. error:
  114. /* finish cleaning up the transport (not really needed here, but...) */
  115. if (trans->socket)
  116. trans->socket->ops->shutdown(trans->socket, 2);
  117. /* close the socket */
  118. if (trans->socket) {
  119. trans->socket->sk->sk_user_data = NULL;
  120. sock_release(trans->socket);
  121. trans->socket = NULL;
  122. }
  123. kfree(trans);
  124. _leave(" = %d", ret);
  125. return ret;
  126. } /* end rxrpc_create_transport() */
  127. /*****************************************************************************/
  128. /*
  129. * destroy a transport endpoint
  130. */
void rxrpc_put_transport(struct rxrpc_transport *trans)
{
	_enter("%p{u=%d p=%hu}",
	       trans, atomic_read(&trans->usage), trans->port);

	/* the caller must hold at least one reference */
	BUG_ON(atomic_read(&trans->usage) <= 0);

	/* to prevent a race, the decrement and the dequeue must be
	 * effectively atomic */
	spin_lock(&rxrpc_transports_lock);
	if (likely(!atomic_dec_and_test(&trans->usage))) {
		/* not the last reference — nothing more to do */
		spin_unlock(&rxrpc_transports_lock);
		_leave("");
		return;
	}

	/* last reference gone: unhook from the global list while still
	 * holding the lock, then tear the endpoint down */
	list_del(&trans->link);
	spin_unlock(&rxrpc_transports_lock);

	/* finish cleaning up the transport */
	if (trans->socket)
		trans->socket->ops->shutdown(trans->socket, 2);

	/* detach from the worker daemons before freeing anything */
	rxrpc_krxsecd_clear_transport(trans);
	rxrpc_krxiod_dequeue_transport(trans);

	/* discard all peer information */
	rxrpc_peer_clearall(trans);

	down_write(&rxrpc_proc_transports_sem);
	list_del(&trans->proc_link);
	up_write(&rxrpc_proc_transports_sem);
	__RXACCT(atomic_dec(&rxrpc_transport_count));

	/* close the socket */
	if (trans->socket) {
		trans->socket->sk->sk_user_data = NULL;
		sock_release(trans->socket);
		trans->socket = NULL;
	}

	kfree(trans);

	_leave("");
} /* end rxrpc_put_transport() */
  166. /*****************************************************************************/
  167. /*
  168. * add a service to a transport to be listened upon
  169. */
  170. int rxrpc_add_service(struct rxrpc_transport *trans,
  171. struct rxrpc_service *newsrv)
  172. {
  173. struct rxrpc_service *srv;
  174. struct list_head *_p;
  175. int ret = -EEXIST;
  176. _enter("%p{%hu},%p{%hu}",
  177. trans, trans->port, newsrv, newsrv->service_id);
  178. /* verify that the service ID is not already present */
  179. spin_lock(&trans->lock);
  180. list_for_each(_p, &trans->services) {
  181. srv = list_entry(_p, struct rxrpc_service, link);
  182. if (srv->service_id == newsrv->service_id)
  183. goto out;
  184. }
  185. /* okay - add the transport to the list */
  186. list_add_tail(&newsrv->link, &trans->services);
  187. rxrpc_get_transport(trans);
  188. ret = 0;
  189. out:
  190. spin_unlock(&trans->lock);
  191. _leave("= %d", ret);
  192. return ret;
  193. } /* end rxrpc_add_service() */
  194. /*****************************************************************************/
  195. /*
  196. * remove a service from a transport
  197. */
void rxrpc_del_service(struct rxrpc_transport *trans, struct rxrpc_service *srv)
{
	_enter("%p{%hu},%p{%hu}", trans, trans->port, srv, srv->service_id);

	/* unhook the service under the same lock that guards additions */
	spin_lock(&trans->lock);
	list_del(&srv->link);
	spin_unlock(&trans->lock);

	/* drop the reference taken by rxrpc_add_service() */
	rxrpc_put_transport(trans);

	_leave("");
} /* end rxrpc_del_service() */
  207. /*****************************************************************************/
  208. /*
  209. * INET callback when data has been received on the socket.
  210. */
  211. static void rxrpc_data_ready(struct sock *sk, int count)
  212. {
  213. struct rxrpc_transport *trans;
  214. _enter("%p{t=%p},%d", sk, sk->sk_user_data, count);
  215. /* queue the transport for attention by krxiod */
  216. trans = (struct rxrpc_transport *) sk->sk_user_data;
  217. if (trans)
  218. rxrpc_krxiod_queue_transport(trans);
  219. /* wake up anyone waiting on the socket */
  220. if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
  221. wake_up_interruptible(sk->sk_sleep);
  222. _leave("");
  223. } /* end rxrpc_data_ready() */
  224. /*****************************************************************************/
  225. /*
  226. * INET callback when an ICMP error packet is received
  227. * - sk->err is error (EHOSTUNREACH, EPROTO or EMSGSIZE)
  228. */
  229. static void rxrpc_error_report(struct sock *sk)
  230. {
  231. struct rxrpc_transport *trans;
  232. _enter("%p{t=%p}", sk, sk->sk_user_data);
  233. /* queue the transport for attention by krxiod */
  234. trans = (struct rxrpc_transport *) sk->sk_user_data;
  235. if (trans) {
  236. trans->error_rcvd = 1;
  237. rxrpc_krxiod_queue_transport(trans);
  238. }
  239. /* wake up anyone waiting on the socket */
  240. if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
  241. wake_up_interruptible(sk->sk_sleep);
  242. _leave("");
  243. } /* end rxrpc_error_report() */
  244. /*****************************************************************************/
  245. /*
  246. * split a message up, allocating message records and filling them in
  247. * from the contents of a socket buffer
  248. */
static int rxrpc_incoming_msg(struct rxrpc_transport *trans,
			      struct sk_buff *pkt,
			      struct list_head *msgq)
{
	struct rxrpc_message *msg;
	int ret;

	_enter("");

	msg = kzalloc(sizeof(struct rxrpc_message), GFP_KERNEL);
	if (!msg) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	atomic_set(&msg->usage, 1);
	/* queue the message immediately so the error path below can dispose
	 * of it together with any jumbo fragments already split off */
	list_add_tail(&msg->link,msgq);

	/* dig out the Rx routing parameters */
	if (skb_copy_bits(pkt, sizeof(struct udphdr),
			  &msg->hdr, sizeof(msg->hdr)) < 0) {
		ret = -EBADMSG;
		goto error;
	}

	msg->trans = trans;
	msg->state = RXRPC_MSG_RECEIVED;
	skb_get_timestamp(pkt, &msg->stamp);
	if (msg->stamp.tv_sec == 0) {
		/* no timestamp recorded on the skb — stamp it now and turn
		 * timestamping on for future packets on this socket */
		do_gettimeofday(&msg->stamp);
		if (pkt->sk)
			sock_enable_timestamp(pkt->sk);
	}
	msg->seq = ntohl(msg->hdr.seq);

	/* attach the packet (takes an extra skb reference) */
	skb_get(pkt);
	msg->pkt = pkt;

	/* the payload starts after the UDP and Rx headers */
	msg->offset = sizeof(struct udphdr) + sizeof(struct rxrpc_header);
	msg->dsize = msg->pkt->len - msg->offset;

	_net("Rx Received packet from %s (%08x;%08x,%1x,%d,%s,%02x,%d,%d)",
	     msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server",
	     ntohl(msg->hdr.epoch),
	     (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT,
	     ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK,
	     ntohl(msg->hdr.callNumber),
	     rxrpc_pkts[msg->hdr.type],
	     msg->hdr.flags,
	     ntohs(msg->hdr.serviceId),
	     msg->hdr.securityIndex);

	__RXACCT(atomic_inc(&rxrpc_message_count));

	/* split off jumbo packets: each iteration clips one
	 * RXRPC_JUMBO_DATALEN-sized fragment off the front of the current
	 * message and starts a new message record for the remainder */
	while (msg->hdr.type == RXRPC_PACKET_TYPE_DATA &&
	       msg->hdr.flags & RXRPC_JUMBO_PACKET
	       ) {
		struct rxrpc_jumbo_header jumbo;
		struct rxrpc_message *jumbomsg = msg;

		_debug("split jumbo packet");

		/* quick sanity check: a jumbo packet must carry at least a
		 * full fragment plus the secondary header, and cannot be
		 * flagged as the last packet of a call */
		ret = -EBADMSG;
		if (msg->dsize <
		    RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header))
			goto error;
		if (msg->hdr.flags & RXRPC_LAST_PACKET)
			goto error;

		/* dig out the secondary header */
		if (skb_copy_bits(pkt, msg->offset + RXRPC_JUMBO_DATALEN,
				  &jumbo, sizeof(jumbo)) < 0)
			goto error;

		/* allocate a new message record for the remainder */
		ret = -ENOMEM;
		msg = kmemdup(jumbomsg, sizeof(struct rxrpc_message), GFP_KERNEL);
		if (!msg)
			goto error;
		list_add_tail(&msg->link, msgq);

		/* adjust the jumbo packet: the front fragment now holds
		 * exactly one fragment's worth of data */
		jumbomsg->dsize = RXRPC_JUMBO_DATALEN;

		/* attach the packet here too (the duplicated record shares
		 * the same skb, so it needs its own reference) */
		skb_get(pkt);

		/* adjust the parameters: the remainder is sequenced one on
		 * from the fragment just split off */
		msg->seq++;
		msg->hdr.seq = htonl(msg->seq);
		msg->hdr.serial = htonl(ntohl(msg->hdr.serial) + 1);
		msg->offset += RXRPC_JUMBO_DATALEN +
			sizeof(struct rxrpc_jumbo_header);
		msg->dsize -= RXRPC_JUMBO_DATALEN +
			sizeof(struct rxrpc_jumbo_header);
		/* flags/_rsvd for the remainder come from the secondary
		 * header */
		msg->hdr.flags = jumbo.flags;
		msg->hdr._rsvd = jumbo._rsvd;

		_net("Rx Split jumbo packet from %s"
		     " (%08x;%08x,%1x,%d,%s,%02x,%d,%d)",
		     msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server",
		     ntohl(msg->hdr.epoch),
		     (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT,
		     ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK,
		     ntohl(msg->hdr.callNumber),
		     rxrpc_pkts[msg->hdr.type],
		     msg->hdr.flags,
		     ntohs(msg->hdr.serviceId),
		     msg->hdr.securityIndex);

		__RXACCT(atomic_inc(&rxrpc_message_count));
	}

	_leave(" = 0 #%d", atomic_read(&rxrpc_message_count));
	return 0;

 error:
	/* unwind: release every message record queued so far (each holds
	 * its own skb reference) */
	while (!list_empty(msgq)) {
		msg = list_entry(msgq->next, struct rxrpc_message, link);
		list_del_init(&msg->link);

		rxrpc_put_message(msg);
	}

	_leave(" = %d", ret);
	return ret;
} /* end rxrpc_incoming_msg() */
  356. /*****************************************************************************/
  357. /*
  358. * accept a new call
  359. * - called from krxiod in process context
  360. */
void rxrpc_trans_receive_packet(struct rxrpc_transport *trans)
{
	struct rxrpc_message *msg;
	struct rxrpc_peer *peer;
	struct sk_buff *pkt;
	int ret;
	__be32 addr;
	__be16 port;
	LIST_HEAD(msgq);

	_enter("%p{%d}", trans, trans->port);

	/* drain the socket until skb_recv_datagram() reports -EAGAIN */
	for (;;) {
		/* deal with outstanding errors first */
		if (trans->error_rcvd)
			rxrpc_trans_receive_error_report(trans);

		/* attempt to receive a packet (non-blocking) */
		pkt = skb_recv_datagram(trans->socket->sk, 0, 1, &ret);
		if (!pkt) {
			if (ret == -EAGAIN) {
				/* queue fully drained */
				_leave(" EAGAIN");
				return;
			}

			/* an icmp error may have occurred — requeue the
			 * transport so krxiod revisits it */
			rxrpc_krxiod_queue_transport(trans);
			_leave(" error %d\n", ret);
			return;
		}

		/* we'll probably need to checksum it (didn't call
		 * sock_recvmsg) */
		if (skb_checksum_complete(pkt)) {
			kfree_skb(pkt);
			rxrpc_krxiod_queue_transport(trans);
			_leave(" CSUM failed");
			return;
		}

		/* record the sender's address before the headers are
		 * consumed */
		addr = pkt->nh.iph->saddr;
		port = pkt->h.uh->source;

		_net("Rx Received UDP packet from %08x:%04hu",
		     ntohl(addr), ntohs(port));

		/* unmarshall the Rx parameters and split jumbo packets */
		ret = rxrpc_incoming_msg(trans, pkt, &msgq);
		if (ret < 0) {
			kfree_skb(pkt);
			rxrpc_krxiod_queue_transport(trans);
			_leave(" bad packet");
			return;
		}

		BUG_ON(list_empty(&msgq));

		msg = list_entry(msgq.next, struct rxrpc_message, link);

		/* locate the record for the peer from which it
		 * originated */
		ret = rxrpc_peer_lookup(trans, addr, &peer);
		if (ret < 0) {
			kdebug("Rx No connections from that peer");
			rxrpc_trans_immediate_abort(trans, msg, -EINVAL);
			goto finished_msg;
		}

		/* try and find a matching connection */
		ret = rxrpc_connection_lookup(peer, msg, &msg->conn);
		if (ret < 0) {
			kdebug("Rx Unknown Connection");
			rxrpc_trans_immediate_abort(trans, msg, -EINVAL);
			rxrpc_put_peer(peer);
			goto finished_msg;
		}
		/* the connection (held via msg->conn) pins the peer, so the
		 * local peer ref can be dropped — TODO confirm */
		rxrpc_put_peer(peer);

		/* deal with the first packet of a new call: a client-
		 * initiated DATA packet with sequence number 1 */
		if (msg->hdr.flags & RXRPC_CLIENT_INITIATED &&
		    msg->hdr.type == RXRPC_PACKET_TYPE_DATA &&
		    ntohl(msg->hdr.seq) == 1
		    ) {
			_debug("Rx New server call");
			rxrpc_trans_receive_new_call(trans, &msgq);
			goto finished_msg;
		}

		/* deal with subsequent packet(s) of call */
		_debug("Rx Call packet");
		while (!list_empty(&msgq)) {
			msg = list_entry(msgq.next, struct rxrpc_message, link);
			list_del_init(&msg->link);

			ret = rxrpc_conn_receive_call_packet(msg->conn, NULL, msg);
			if (ret < 0) {
				rxrpc_trans_immediate_abort(trans, msg, ret);
				rxrpc_put_message(msg);
				goto finished_msg;
			}

			rxrpc_put_message(msg);
		}

		goto finished_msg;

		/* dispose of the packets: drop any message records still on
		 * the queue, then the skb reference taken by
		 * skb_recv_datagram() */
	finished_msg:
		while (!list_empty(&msgq)) {
			msg = list_entry(msgq.next, struct rxrpc_message, link);
			list_del_init(&msg->link);

			rxrpc_put_message(msg);
		}
		kfree_skb(pkt);
	}

	_leave("");
} /* end rxrpc_trans_receive_packet() */
  460. /*****************************************************************************/
  461. /*
  462. * accept a new call from a client trying to connect to one of my services
  463. * - called in process context
  464. */
  465. static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans,
  466. struct list_head *msgq)
  467. {
  468. struct rxrpc_message *msg;
  469. _enter("");
  470. /* only bother with the first packet */
  471. msg = list_entry(msgq->next, struct rxrpc_message, link);
  472. list_del_init(&msg->link);
  473. rxrpc_krxsecd_queue_incoming_call(msg);
  474. rxrpc_put_message(msg);
  475. _leave(" = 0");
  476. return 0;
  477. } /* end rxrpc_trans_receive_new_call() */
  478. /*****************************************************************************/
  479. /*
  480. * perform an immediate abort without connection or call structures
  481. */
  482. int rxrpc_trans_immediate_abort(struct rxrpc_transport *trans,
  483. struct rxrpc_message *msg,
  484. int error)
  485. {
  486. struct rxrpc_header ahdr;
  487. struct sockaddr_in sin;
  488. struct msghdr msghdr;
  489. struct kvec iov[2];
  490. __be32 _error;
  491. int len, ret;
  492. _enter("%p,%p,%d", trans, msg, error);
  493. /* don't abort an abort packet */
  494. if (msg->hdr.type == RXRPC_PACKET_TYPE_ABORT) {
  495. _leave(" = 0");
  496. return 0;
  497. }
  498. _error = htonl(-error);
  499. /* set up the message to be transmitted */
  500. memcpy(&ahdr, &msg->hdr, sizeof(ahdr));
  501. ahdr.epoch = msg->hdr.epoch;
  502. ahdr.serial = htonl(1);
  503. ahdr.seq = 0;
  504. ahdr.type = RXRPC_PACKET_TYPE_ABORT;
  505. ahdr.flags = RXRPC_LAST_PACKET;
  506. ahdr.flags |= ~msg->hdr.flags & RXRPC_CLIENT_INITIATED;
  507. iov[0].iov_len = sizeof(ahdr);
  508. iov[0].iov_base = &ahdr;
  509. iov[1].iov_len = sizeof(_error);
  510. iov[1].iov_base = &_error;
  511. len = sizeof(ahdr) + sizeof(_error);
  512. memset(&sin,0,sizeof(sin));
  513. sin.sin_family = AF_INET;
  514. sin.sin_port = msg->pkt->h.uh->source;
  515. sin.sin_addr.s_addr = msg->pkt->nh.iph->saddr;
  516. msghdr.msg_name = &sin;
  517. msghdr.msg_namelen = sizeof(sin);
  518. msghdr.msg_control = NULL;
  519. msghdr.msg_controllen = 0;
  520. msghdr.msg_flags = MSG_DONTWAIT;
  521. _net("Sending message type %d of %d bytes to %08x:%d",
  522. ahdr.type,
  523. len,
  524. ntohl(sin.sin_addr.s_addr),
  525. ntohs(sin.sin_port));
  526. /* send the message */
  527. ret = kernel_sendmsg(trans->socket, &msghdr, iov, 2, len);
  528. _leave(" = %d", ret);
  529. return ret;
  530. } /* end rxrpc_trans_immediate_abort() */
  531. /*****************************************************************************/
  532. /*
  533. * receive an ICMP error report and percolate it to all connections
  534. * heading to the affected host or port
  535. */
static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans)
{
	struct rxrpc_connection *conn;
	struct sockaddr_in sin;
	struct rxrpc_peer *peer;
	struct list_head connq, *_p;
	struct errormsg emsg;
	struct msghdr msg;
	__be16 port;
	int local, err;

	_enter("%p", trans);

	/* drain the socket's error queue until -EAGAIN */
	for (;;) {
		trans->error_rcvd = 0;

		/* try and receive an error message */
		msg.msg_name = &sin;
		msg.msg_namelen = sizeof(sin);
		msg.msg_control = &emsg;
		msg.msg_controllen = sizeof(emsg);
		msg.msg_flags = 0;

		err = kernel_recvmsg(trans->socket, &msg, NULL, 0, 0,
				     MSG_ERRQUEUE | MSG_DONTWAIT | MSG_TRUNC);
		if (err == -EAGAIN) {
			/* error queue fully drained */
			_leave("");
			return;
		}

		if (err < 0) {
			printk("%s: unable to recv an error report: %d\n",
			       __FUNCTION__, err);
			_leave("");
			return;
		}

		/* kernel_recvmsg() advances msg_control past the data it
		 * wrote; convert that back into a byte count */
		msg.msg_controllen = (char *) msg.msg_control - (char *) &emsg;

		if (msg.msg_controllen < sizeof(emsg.cmsg) ||
		    msg.msg_namelen < sizeof(sin)) {
			printk("%s: short control message"
			       " (nlen=%u clen=%Zu fl=%x)\n",
			       __FUNCTION__,
			       msg.msg_namelen,
			       msg.msg_controllen,
			       msg.msg_flags);
			continue;
		}

		_net("Rx Received control message"
		     " { len=%Zu level=%u type=%u }",
		     emsg.cmsg.cmsg_len,
		     emsg.cmsg.cmsg_level,
		     emsg.cmsg.cmsg_type);

		/* only IPv4 errors are handled here */
		if (sin.sin_family != AF_INET) {
			printk("Rx Ignoring error report with non-INET address"
			       " (fam=%u)",
			       sin.sin_family);
			continue;
		}

		_net("Rx Received message pertaining to host addr=%x port=%hu",
		     ntohl(sin.sin_addr.s_addr), ntohs(sin.sin_port));

		if (emsg.cmsg.cmsg_level != SOL_IP ||
		    emsg.cmsg.cmsg_type != IP_RECVERR) {
			printk("Rx Ignoring unknown error report"
			       " { level=%u type=%u }",
			       emsg.cmsg.cmsg_level,
			       emsg.cmsg.cmsg_type);
			continue;
		}

		/* the extended error record must be complete */
		if (msg.msg_controllen < sizeof(emsg.cmsg) + sizeof(emsg.ee)) {
			printk("%s: short error message (%Zu)\n",
			       __FUNCTION__, msg.msg_controllen);
			_leave("");
			return;
		}

		/* translate the report into (err, port) — port is zeroed
		 * when the error affects the whole host, so that the
		 * connection scan below matches every port */
		port = sin.sin_port;

		switch (emsg.ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP:
			local = 0;
			switch (emsg.ee.ee_type) {
			case ICMP_DEST_UNREACH:
				switch (emsg.ee.ee_code) {
				case ICMP_NET_UNREACH:
					_net("Rx Received ICMP Network Unreachable");
					port = 0;
					err = -ENETUNREACH;
					break;
				case ICMP_HOST_UNREACH:
					_net("Rx Received ICMP Host Unreachable");
					port = 0;
					err = -EHOSTUNREACH;
					break;
				case ICMP_PORT_UNREACH:
					_net("Rx Received ICMP Port Unreachable");
					err = -ECONNREFUSED;
					break;
				case ICMP_NET_UNKNOWN:
					_net("Rx Received ICMP Unknown Network");
					port = 0;
					err = -ENETUNREACH;
					break;
				case ICMP_HOST_UNKNOWN:
					_net("Rx Received ICMP Unknown Host");
					port = 0;
					err = -EHOSTUNREACH;
					break;
				default:
					_net("Rx Received ICMP DestUnreach { code=%u }",
					     emsg.ee.ee_code);
					err = emsg.ee.ee_errno;
					break;
				}
				break;

			case ICMP_TIME_EXCEEDED:
				_net("Rx Received ICMP TTL Exceeded");
				err = emsg.ee.ee_errno;
				break;

			default:
				_proto("Rx Received ICMP error { type=%u code=%u }",
				       emsg.ee.ee_type, emsg.ee.ee_code);
				err = emsg.ee.ee_errno;
				break;
			}
			break;

		case SO_EE_ORIGIN_LOCAL:
			_proto("Rx Received local error { error=%d }",
			       emsg.ee.ee_errno);
			local = 1;
			err = emsg.ee.ee_errno;
			break;

		case SO_EE_ORIGIN_NONE:
		case SO_EE_ORIGIN_ICMP6:
		default:
			_proto("Rx Received error report { orig=%u }",
			       emsg.ee.ee_origin);
			local = 0;
			err = emsg.ee.ee_errno;
			break;
		}

		/* find all the connections between this transport and the
		 * affected destination */
		INIT_LIST_HEAD(&connq);
		if (rxrpc_peer_lookup(trans, sin.sin_addr.s_addr,
				      &peer) == 0) {
			/* collect matching connections under the read lock,
			 * taking a reference on each; err_link doubles as
			 * an "already collected" marker */
			read_lock(&peer->conn_lock);
			list_for_each(_p, &peer->conn_active) {
				conn = list_entry(_p, struct rxrpc_connection,
						  link);
				if (port && conn->addr.sin_port != port)
					continue;
				if (!list_empty(&conn->err_link))
					continue;
				rxrpc_get_connection(conn);
				list_add_tail(&conn->err_link, &connq);
			}
			read_unlock(&peer->conn_lock);

			/* service all those connections */
			while (!list_empty(&connq)) {
				conn = list_entry(connq.next,
						  struct rxrpc_connection,
						  err_link);
				list_del(&conn->err_link);
				rxrpc_conn_handle_error(conn, local, err);

				rxrpc_put_connection(conn);
			}

			rxrpc_put_peer(peer);
		}
	}

	_leave("");
	return;
} /* end rxrpc_trans_receive_error_report() */