/* transport.c: Rx Transport routines
 *
 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rxrpc/transport.h>
#include <rxrpc/peer.h>
#include <rxrpc/connection.h>
#include <rxrpc/call.h>
#include <rxrpc/message.h>
#include <rxrpc/krxiod.h>
#include <rxrpc/krxsecd.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/ip.h>
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
#include <linux/ipv6.h>	/* this should _really_ be in errqueue.h.. */
#endif
#include <linux/errqueue.h>
#include <asm/uaccess.h>
#include <asm/checksum.h>
#include "internal.h"

struct errormsg {
        struct cmsghdr                  cmsg;           /* control message header */
        struct sock_extended_err        ee;             /* extended error information */
        struct sockaddr_in              icmp_src;       /* ICMP packet source address */
};
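
/*
 * Note (added for clarity, not in the original): this structure is only used
 * as the msg_control buffer handed to kernel_recvmsg() in
 * rxrpc_trans_receive_error_report(). With IP_RECVERR enabled on the socket,
 * reading the error queue yields a cmsghdr followed by a sock_extended_err
 * and the offending (ICMP source) address, which is what the three members
 * are sized to hold.
 */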

static DEFINE_SPINLOCK(rxrpc_transports_lock);
static struct list_head rxrpc_transports = LIST_HEAD_INIT(rxrpc_transports);

__RXACCT_DECL(atomic_t rxrpc_transport_count);
LIST_HEAD(rxrpc_proc_transports);
DECLARE_RWSEM(rxrpc_proc_transports_sem);

static void rxrpc_data_ready(struct sock *sk, int count);
static void rxrpc_error_report(struct sock *sk);
static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans,
                                        struct list_head *msgq);
static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans);
  50. /*****************************************************************************/
  51. /*
  52. * create a new transport endpoint using the specified UDP port
  53. */
  54. int rxrpc_create_transport(unsigned short port,
  55. struct rxrpc_transport **_trans)
  56. {
  57. struct rxrpc_transport *trans;
  58. struct sockaddr_in sin;
  59. mm_segment_t oldfs;
  60. struct sock *sock;
  61. int ret, opt;
  62. _enter("%hu", port);
  63. trans = kzalloc(sizeof(struct rxrpc_transport), GFP_KERNEL);
  64. if (!trans)
  65. return -ENOMEM;
  66. atomic_set(&trans->usage, 1);
  67. INIT_LIST_HEAD(&trans->services);
  68. INIT_LIST_HEAD(&trans->link);
  69. INIT_LIST_HEAD(&trans->krxiodq_link);
  70. spin_lock_init(&trans->lock);
  71. INIT_LIST_HEAD(&trans->peer_active);
  72. INIT_LIST_HEAD(&trans->peer_graveyard);
  73. spin_lock_init(&trans->peer_gylock);
  74. init_waitqueue_head(&trans->peer_gy_waitq);
  75. rwlock_init(&trans->peer_lock);
  76. atomic_set(&trans->peer_count, 0);
  77. trans->port = port;
  78. /* create a UDP socket to be my actual transport endpoint */
  79. ret = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &trans->socket);
  80. if (ret < 0)
  81. goto error;
  82. /* use the specified port */
  83. if (port) {
  84. memset(&sin, 0, sizeof(sin));
  85. sin.sin_family = AF_INET;
  86. sin.sin_port = htons(port);
  87. ret = trans->socket->ops->bind(trans->socket,
  88. (struct sockaddr *) &sin,
  89. sizeof(sin));
  90. if (ret < 0)
  91. goto error;
  92. }
  93. opt = 1;
  94. oldfs = get_fs();
  95. set_fs(KERNEL_DS);
  96. ret = trans->socket->ops->setsockopt(trans->socket, SOL_IP, IP_RECVERR,
  97. (char *) &opt, sizeof(opt));
  98. set_fs(oldfs);
  99. spin_lock(&rxrpc_transports_lock);
  100. list_add(&trans->link, &rxrpc_transports);
  101. spin_unlock(&rxrpc_transports_lock);
  102. /* set the socket up */
  103. sock = trans->socket->sk;
  104. sock->sk_user_data = trans;
  105. sock->sk_data_ready = rxrpc_data_ready;
  106. sock->sk_error_report = rxrpc_error_report;
  107. down_write(&rxrpc_proc_transports_sem);
  108. list_add_tail(&trans->proc_link, &rxrpc_proc_transports);
  109. up_write(&rxrpc_proc_transports_sem);
  110. __RXACCT(atomic_inc(&rxrpc_transport_count));
  111. *_trans = trans;
  112. _leave(" = 0 (%p)", trans);
  113. return 0;
  114. error:
  115. /* finish cleaning up the transport (not really needed here, but...) */
  116. if (trans->socket)
  117. trans->socket->ops->shutdown(trans->socket, 2);
  118. /* close the socket */
  119. if (trans->socket) {
  120. trans->socket->sk->sk_user_data = NULL;
  121. sock_release(trans->socket);
  122. trans->socket = NULL;
  123. }
  124. kfree(trans);
  125. _leave(" = %d", ret);
  126. return ret;
  127. } /* end rxrpc_create_transport() */
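
/*
 * Illustrative sketch (not part of the original file): a caller, for instance
 * an in-kernel AFS client, might bring a transport up and down roughly like
 * this. The port number is just an example value and the error handling is
 * assumed, not taken from this file:
 *
 *	struct rxrpc_transport *trans;
 *	int ret;
 *
 *	ret = rxrpc_create_transport(7001, &trans);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	rxrpc_put_transport(trans);
 */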

/*****************************************************************************/
/*
 * destroy a transport endpoint
 */
void rxrpc_put_transport(struct rxrpc_transport *trans)
{
        _enter("%p{u=%d p=%hu}",
               trans, atomic_read(&trans->usage), trans->port);

        BUG_ON(atomic_read(&trans->usage) <= 0);

        /* to prevent a race, the decrement and the dequeue must be
         * effectively atomic */
        spin_lock(&rxrpc_transports_lock);
        if (likely(!atomic_dec_and_test(&trans->usage))) {
                spin_unlock(&rxrpc_transports_lock);
                _leave("");
                return;
        }

        list_del(&trans->link);
        spin_unlock(&rxrpc_transports_lock);

        /* finish cleaning up the transport */
        if (trans->socket)
                trans->socket->ops->shutdown(trans->socket, 2);

        rxrpc_krxsecd_clear_transport(trans);
        rxrpc_krxiod_dequeue_transport(trans);

        /* discard all peer information */
        rxrpc_peer_clearall(trans);

        down_write(&rxrpc_proc_transports_sem);
        list_del(&trans->proc_link);
        up_write(&rxrpc_proc_transports_sem);
        __RXACCT(atomic_dec(&rxrpc_transport_count));

        /* close the socket */
        if (trans->socket) {
                trans->socket->sk->sk_user_data = NULL;
                sock_release(trans->socket);
                trans->socket = NULL;
        }

        kfree(trans);

        _leave("");
} /* end rxrpc_put_transport() */

/*****************************************************************************/
/*
 * add a service to a transport to be listened upon
 */
int rxrpc_add_service(struct rxrpc_transport *trans,
                      struct rxrpc_service *newsrv)
{
        struct rxrpc_service *srv;
        struct list_head *_p;
        int ret = -EEXIST;

        _enter("%p{%hu},%p{%hu}",
               trans, trans->port, newsrv, newsrv->service_id);

        /* verify that the service ID is not already present */
        spin_lock(&trans->lock);

        list_for_each(_p, &trans->services) {
                srv = list_entry(_p, struct rxrpc_service, link);
                if (srv->service_id == newsrv->service_id)
                        goto out;
        }

        /* okay - add the service to the transport's list */
        list_add_tail(&newsrv->link, &trans->services);
        rxrpc_get_transport(trans);
        ret = 0;

 out:
        spin_unlock(&trans->lock);

        _leave("= %d", ret);
        return ret;
} /* end rxrpc_add_service() */
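
/*
 * Illustrative sketch (not part of the original file): registering a service
 * so that client-initiated calls carrying its service ID are accepted on this
 * transport. The service ID shown is a placeholder, and struct rxrpc_service
 * (defined elsewhere) has further fields a real caller would initialise:
 *
 *	srv->service_id = 2500;			// hypothetical service ID
 *	ret = rxrpc_add_service(trans, srv);
 *	...
 *	rxrpc_del_service(trans, srv);		// also drops the transport ref
 */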

/*****************************************************************************/
/*
 * remove a service from a transport
 */
void rxrpc_del_service(struct rxrpc_transport *trans, struct rxrpc_service *srv)
{
        _enter("%p{%hu},%p{%hu}", trans, trans->port, srv, srv->service_id);

        spin_lock(&trans->lock);
        list_del(&srv->link);
        spin_unlock(&trans->lock);

        rxrpc_put_transport(trans);

        _leave("");
} /* end rxrpc_del_service() */

/*****************************************************************************/
/*
 * INET callback when data has been received on the socket.
 */
static void rxrpc_data_ready(struct sock *sk, int count)
{
        struct rxrpc_transport *trans;

        _enter("%p{t=%p},%d", sk, sk->sk_user_data, count);

        /* queue the transport for attention by krxiod */
        trans = (struct rxrpc_transport *) sk->sk_user_data;
        if (trans)
                rxrpc_krxiod_queue_transport(trans);

        /* wake up anyone waiting on the socket */
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible(sk->sk_sleep);

        _leave("");
} /* end rxrpc_data_ready() */

/*****************************************************************************/
/*
 * INET callback when an ICMP error packet is received
 * - sk->err is error (EHOSTUNREACH, EPROTO or EMSGSIZE)
 */
static void rxrpc_error_report(struct sock *sk)
{
        struct rxrpc_transport *trans;

        _enter("%p{t=%p}", sk, sk->sk_user_data);

        /* queue the transport for attention by krxiod */
        trans = (struct rxrpc_transport *) sk->sk_user_data;
        if (trans) {
                trans->error_rcvd = 1;
                rxrpc_krxiod_queue_transport(trans);
        }

        /* wake up anyone waiting on the socket */
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible(sk->sk_sleep);

        _leave("");
} /* end rxrpc_error_report() */
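
/*
 * Note (added for clarity, not in the original): both callbacks above run
 * from the network receive path (softirq context), so they do no real work
 * themselves; they merely flag the transport (error_rcvd for errors) and
 * queue it for krxiod, which later calls rxrpc_trans_receive_packet() and
 * rxrpc_trans_receive_error_report() in process context to do the actual
 * reading.
 */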

/*****************************************************************************/
/*
 * split a message up, allocating message records and filling them in
 * from the contents of a socket buffer
 */
static int rxrpc_incoming_msg(struct rxrpc_transport *trans,
                              struct sk_buff *pkt,
                              struct list_head *msgq)
{
        struct rxrpc_message *msg;
        int ret;

        _enter("");

        msg = kzalloc(sizeof(struct rxrpc_message), GFP_KERNEL);
        if (!msg) {
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }

        atomic_set(&msg->usage, 1);
        list_add_tail(&msg->link, msgq);

        /* dig out the Rx routing parameters */
        if (skb_copy_bits(pkt, sizeof(struct udphdr),
                          &msg->hdr, sizeof(msg->hdr)) < 0) {
                ret = -EBADMSG;
                goto error;
        }

        msg->trans = trans;
        msg->state = RXRPC_MSG_RECEIVED;
        skb_get_timestamp(pkt, &msg->stamp);
        if (msg->stamp.tv_sec == 0) {
                do_gettimeofday(&msg->stamp);
                if (pkt->sk)
                        sock_enable_timestamp(pkt->sk);
        }
        msg->seq = ntohl(msg->hdr.seq);

        /* attach the packet */
        skb_get(pkt);
        msg->pkt = pkt;

        msg->offset = sizeof(struct udphdr) + sizeof(struct rxrpc_header);
        msg->dsize = msg->pkt->len - msg->offset;

        _net("Rx Received packet from %s (%08x;%08x,%1x,%d,%s,%02x,%d,%d)",
             msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server",
             ntohl(msg->hdr.epoch),
             (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT,
             ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK,
             ntohl(msg->hdr.callNumber),
             rxrpc_pkts[msg->hdr.type],
             msg->hdr.flags,
             ntohs(msg->hdr.serviceId),
             msg->hdr.securityIndex);

        __RXACCT(atomic_inc(&rxrpc_message_count));

        /* split off jumbo packets */
        while (msg->hdr.type == RXRPC_PACKET_TYPE_DATA &&
               msg->hdr.flags & RXRPC_JUMBO_PACKET
               ) {
                struct rxrpc_jumbo_header jumbo;
                struct rxrpc_message *jumbomsg = msg;

                _debug("split jumbo packet");

                /* quick sanity check */
                ret = -EBADMSG;
                if (msg->dsize <
                    RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header))
                        goto error;
                if (msg->hdr.flags & RXRPC_LAST_PACKET)
                        goto error;

                /* dig out the secondary header */
                if (skb_copy_bits(pkt, msg->offset + RXRPC_JUMBO_DATALEN,
                                  &jumbo, sizeof(jumbo)) < 0)
                        goto error;

                /* allocate a new message record */
                ret = -ENOMEM;
                msg = kmemdup(jumbomsg, sizeof(struct rxrpc_message), GFP_KERNEL);
                if (!msg)
                        goto error;

                list_add_tail(&msg->link, msgq);

                /* adjust the jumbo packet */
                jumbomsg->dsize = RXRPC_JUMBO_DATALEN;

                /* attach the packet here too */
                skb_get(pkt);

                /* adjust the parameters */
                msg->seq++;
                msg->hdr.seq = htonl(msg->seq);
                msg->hdr.serial = htonl(ntohl(msg->hdr.serial) + 1);
                msg->offset += RXRPC_JUMBO_DATALEN +
                        sizeof(struct rxrpc_jumbo_header);
                msg->dsize -= RXRPC_JUMBO_DATALEN +
                        sizeof(struct rxrpc_jumbo_header);
                msg->hdr.flags = jumbo.flags;
                msg->hdr._rsvd = jumbo._rsvd;

                _net("Rx Split jumbo packet from %s"
                     " (%08x;%08x,%1x,%d,%s,%02x,%d,%d)",
                     msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server",
                     ntohl(msg->hdr.epoch),
                     (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT,
                     ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK,
                     ntohl(msg->hdr.callNumber),
                     rxrpc_pkts[msg->hdr.type],
                     msg->hdr.flags,
                     ntohs(msg->hdr.serviceId),
                     msg->hdr.securityIndex);

                __RXACCT(atomic_inc(&rxrpc_message_count));
        }

        _leave(" = 0 #%d", atomic_read(&rxrpc_message_count));
        return 0;

 error:
        while (!list_empty(msgq)) {
                msg = list_entry(msgq->next, struct rxrpc_message, link);
                list_del_init(&msg->link);

                rxrpc_put_message(msg);
        }

        _leave(" = %d", ret);
        return ret;
} /* end rxrpc_incoming_msg() */
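
/*
 * Layout assumed by the jumbo-splitting loop above (a sketch derived from the
 * offset arithmetic in this file rather than from a protocol spec): after the
 * shared rxrpc_header, each non-final segment is RXRPC_JUMBO_DATALEN bytes of
 * data followed by a struct rxrpc_jumbo_header carrying the flags and _rsvd
 * of the next segment; the final segment simply runs to the end of the UDP
 * payload.
 *
 *	+--------------+---------------+--------------+---------------+----
 *	| rxrpc_header | DATALEN bytes | jumbo_header | DATALEN bytes | ...
 *	+--------------+---------------+--------------+---------------+----
 */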

/*****************************************************************************/
/*
 * receive and dispatch packets arriving on a transport endpoint
 * - called from krxiod in process context
 */
void rxrpc_trans_receive_packet(struct rxrpc_transport *trans)
{
        struct rxrpc_message *msg;
        struct rxrpc_peer *peer;
        struct sk_buff *pkt;
        int ret;
        __be32 addr;
        __be16 port;

        LIST_HEAD(msgq);

        _enter("%p{%d}", trans, trans->port);

        for (;;) {
                /* deal with outstanding errors first */
                if (trans->error_rcvd)
                        rxrpc_trans_receive_error_report(trans);

                /* attempt to receive a packet */
                pkt = skb_recv_datagram(trans->socket->sk, 0, 1, &ret);
                if (!pkt) {
                        if (ret == -EAGAIN) {
                                _leave(" EAGAIN");
                                return;
                        }

                        /* an icmp error may have occurred */
                        rxrpc_krxiod_queue_transport(trans);
                        _leave(" error %d\n", ret);
                        return;
                }

                /* we'll probably need to checksum it (didn't call
                 * sock_recvmsg) */
                if (skb_checksum_complete(pkt)) {
                        kfree_skb(pkt);
                        rxrpc_krxiod_queue_transport(trans);
                        _leave(" CSUM failed");
                        return;
                }

                addr = pkt->nh.iph->saddr;
                port = pkt->h.uh->source;

                _net("Rx Received UDP packet from %08x:%04hu",
                     ntohl(addr), ntohs(port));

                /* unmarshall the Rx parameters and split jumbo packets */
                ret = rxrpc_incoming_msg(trans, pkt, &msgq);
                if (ret < 0) {
                        kfree_skb(pkt);
                        rxrpc_krxiod_queue_transport(trans);
                        _leave(" bad packet");
                        return;
                }

                BUG_ON(list_empty(&msgq));

                msg = list_entry(msgq.next, struct rxrpc_message, link);

                /* locate the record for the peer from which it
                 * originated */
                ret = rxrpc_peer_lookup(trans, addr, &peer);
                if (ret < 0) {
                        kdebug("Rx No connections from that peer");
                        rxrpc_trans_immediate_abort(trans, msg, -EINVAL);
                        goto finished_msg;
                }

                /* try and find a matching connection */
                ret = rxrpc_connection_lookup(peer, msg, &msg->conn);
                if (ret < 0) {
                        kdebug("Rx Unknown Connection");
                        rxrpc_trans_immediate_abort(trans, msg, -EINVAL);
                        rxrpc_put_peer(peer);
                        goto finished_msg;
                }
                rxrpc_put_peer(peer);

                /* deal with the first packet of a new call */
                if (msg->hdr.flags & RXRPC_CLIENT_INITIATED &&
                    msg->hdr.type == RXRPC_PACKET_TYPE_DATA &&
                    ntohl(msg->hdr.seq) == 1
                    ) {
                        _debug("Rx New server call");
                        rxrpc_trans_receive_new_call(trans, &msgq);
                        goto finished_msg;
                }

                /* deal with subsequent packet(s) of call */
                _debug("Rx Call packet");
                while (!list_empty(&msgq)) {
                        msg = list_entry(msgq.next, struct rxrpc_message, link);
                        list_del_init(&msg->link);

                        ret = rxrpc_conn_receive_call_packet(msg->conn, NULL, msg);
                        if (ret < 0) {
                                rxrpc_trans_immediate_abort(trans, msg, ret);
                                rxrpc_put_message(msg);
                                goto finished_msg;
                        }

                        rxrpc_put_message(msg);
                }

                goto finished_msg;

                /* dispose of the packets */
        finished_msg:
                while (!list_empty(&msgq)) {
                        msg = list_entry(msgq.next, struct rxrpc_message, link);
                        list_del_init(&msg->link);

                        rxrpc_put_message(msg);
                }
                kfree_skb(pkt);
        }

        _leave("");
} /* end rxrpc_trans_receive_packet() */
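
/*
 * Note (added for clarity, not in the original): the receive loop above is
 * only left via its return statements, normally once skb_recv_datagram()
 * reports -EAGAIN and the socket queue is drained; a subsequent
 * sk_data_ready callback requeues the transport with krxiod so the loop is
 * entered again.
 */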

/*****************************************************************************/
/*
 * accept a new call from a client trying to connect to one of my services
 * - called in process context
 */
static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans,
                                        struct list_head *msgq)
{
        struct rxrpc_message *msg;

        _enter("");

        /* only bother with the first packet */
        msg = list_entry(msgq->next, struct rxrpc_message, link);
        list_del_init(&msg->link);
        rxrpc_krxsecd_queue_incoming_call(msg);
        rxrpc_put_message(msg);

        _leave(" = 0");
        return 0;
} /* end rxrpc_trans_receive_new_call() */

/*****************************************************************************/
/*
 * perform an immediate abort without connection or call structures
 */
int rxrpc_trans_immediate_abort(struct rxrpc_transport *trans,
                                struct rxrpc_message *msg,
                                int error)
{
        struct rxrpc_header ahdr;
        struct sockaddr_in sin;
        struct msghdr msghdr;
        struct kvec iov[2];
        __be32 _error;
        int len, ret;

        _enter("%p,%p,%d", trans, msg, error);

        /* don't abort an abort packet */
        if (msg->hdr.type == RXRPC_PACKET_TYPE_ABORT) {
                _leave(" = 0");
                return 0;
        }

        _error = htonl(-error);

        /* set up the message to be transmitted */
        memcpy(&ahdr, &msg->hdr, sizeof(ahdr));
        ahdr.epoch = msg->hdr.epoch;
        ahdr.serial = htonl(1);
        ahdr.seq = 0;
        ahdr.type = RXRPC_PACKET_TYPE_ABORT;
        ahdr.flags = RXRPC_LAST_PACKET;
        ahdr.flags |= ~msg->hdr.flags & RXRPC_CLIENT_INITIATED;

        iov[0].iov_len = sizeof(ahdr);
        iov[0].iov_base = &ahdr;
        iov[1].iov_len = sizeof(_error);
        iov[1].iov_base = &_error;

        len = sizeof(ahdr) + sizeof(_error);

        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_INET;
        sin.sin_port = msg->pkt->h.uh->source;
        sin.sin_addr.s_addr = msg->pkt->nh.iph->saddr;

        msghdr.msg_name = &sin;
        msghdr.msg_namelen = sizeof(sin);
        msghdr.msg_control = NULL;
        msghdr.msg_controllen = 0;
        msghdr.msg_flags = MSG_DONTWAIT;

        _net("Sending message type %d of %d bytes to %08x:%d",
             ahdr.type,
             len,
             ntohl(sin.sin_addr.s_addr),
             ntohs(sin.sin_port));

        /* send the message */
        ret = kernel_sendmsg(trans->socket, &msghdr, iov, 2, len);

        _leave(" = %d", ret);
        return ret;
} /* end rxrpc_trans_immediate_abort() */
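
/*
 * Wire format produced above (a sketch inferred from the two iovecs rather
 * than from a protocol spec): a copy of the offending packet's rxrpc_header
 * with the type set to ABORT, seq 0, serial 1 and the sense of
 * RXRPC_CLIENT_INITIATED inverted, followed by the abort code as a 32-bit
 * big-endian value (the negated errno passed in as 'error').
 */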

/*****************************************************************************/
/*
 * receive an ICMP error report and percolate it to all connections
 * heading to the affected host or port
 */
static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans)
{
        struct rxrpc_connection *conn;
        struct sockaddr_in sin;
        struct rxrpc_peer *peer;
        struct list_head connq, *_p;
        struct errormsg emsg;
        struct msghdr msg;
        __be16 port;
        int local, err;

        _enter("%p", trans);

        for (;;) {
                trans->error_rcvd = 0;

                /* try and receive an error message */
                msg.msg_name = &sin;
                msg.msg_namelen = sizeof(sin);
                msg.msg_control = &emsg;
                msg.msg_controllen = sizeof(emsg);
                msg.msg_flags = 0;

                err = kernel_recvmsg(trans->socket, &msg, NULL, 0, 0,
                                     MSG_ERRQUEUE | MSG_DONTWAIT | MSG_TRUNC);

                if (err == -EAGAIN) {
                        _leave("");
                        return;
                }

                if (err < 0) {
                        printk("%s: unable to recv an error report: %d\n",
                               __FUNCTION__, err);
                        _leave("");
                        return;
                }

                msg.msg_controllen = (char *) msg.msg_control - (char *) &emsg;

                if (msg.msg_controllen < sizeof(emsg.cmsg) ||
                    msg.msg_namelen < sizeof(sin)) {
                        printk("%s: short control message"
                               " (nlen=%u clen=%Zu fl=%x)\n",
                               __FUNCTION__,
                               msg.msg_namelen,
                               msg.msg_controllen,
                               msg.msg_flags);
                        continue;
                }

                _net("Rx Received control message"
                     " { len=%Zu level=%u type=%u }",
                     emsg.cmsg.cmsg_len,
                     emsg.cmsg.cmsg_level,
                     emsg.cmsg.cmsg_type);

                if (sin.sin_family != AF_INET) {
                        printk("Rx Ignoring error report with non-INET address"
                               " (fam=%u)",
                               sin.sin_family);
                        continue;
                }

                _net("Rx Received message pertaining to host addr=%x port=%hu",
                     ntohl(sin.sin_addr.s_addr), ntohs(sin.sin_port));

                if (emsg.cmsg.cmsg_level != SOL_IP ||
                    emsg.cmsg.cmsg_type != IP_RECVERR) {
                        printk("Rx Ignoring unknown error report"
                               " { level=%u type=%u }",
                               emsg.cmsg.cmsg_level,
                               emsg.cmsg.cmsg_type);
                        continue;
                }

                if (msg.msg_controllen < sizeof(emsg.cmsg) + sizeof(emsg.ee)) {
                        printk("%s: short error message (%Zu)\n",
                               __FUNCTION__, msg.msg_controllen);
                        _leave("");
                        return;
                }

                port = sin.sin_port;

                switch (emsg.ee.ee_origin) {
                case SO_EE_ORIGIN_ICMP:
                        local = 0;
                        switch (emsg.ee.ee_type) {
                        case ICMP_DEST_UNREACH:
                                switch (emsg.ee.ee_code) {
                                case ICMP_NET_UNREACH:
                                        _net("Rx Received ICMP Network Unreachable");
                                        port = 0;
                                        err = -ENETUNREACH;
                                        break;
                                case ICMP_HOST_UNREACH:
                                        _net("Rx Received ICMP Host Unreachable");
                                        port = 0;
                                        err = -EHOSTUNREACH;
                                        break;
                                case ICMP_PORT_UNREACH:
                                        _net("Rx Received ICMP Port Unreachable");
                                        err = -ECONNREFUSED;
                                        break;
                                case ICMP_NET_UNKNOWN:
                                        _net("Rx Received ICMP Unknown Network");
                                        port = 0;
                                        err = -ENETUNREACH;
                                        break;
                                case ICMP_HOST_UNKNOWN:
                                        _net("Rx Received ICMP Unknown Host");
                                        port = 0;
                                        err = -EHOSTUNREACH;
                                        break;
                                default:
                                        _net("Rx Received ICMP DestUnreach { code=%u }",
                                             emsg.ee.ee_code);
                                        err = emsg.ee.ee_errno;
                                        break;
                                }
                                break;

                        case ICMP_TIME_EXCEEDED:
                                _net("Rx Received ICMP TTL Exceeded");
                                err = emsg.ee.ee_errno;
                                break;

                        default:
                                _proto("Rx Received ICMP error { type=%u code=%u }",
                                       emsg.ee.ee_type, emsg.ee.ee_code);
                                err = emsg.ee.ee_errno;
                                break;
                        }
                        break;

                case SO_EE_ORIGIN_LOCAL:
                        _proto("Rx Received local error { error=%d }",
                               emsg.ee.ee_errno);
                        local = 1;
                        err = emsg.ee.ee_errno;
                        break;

                case SO_EE_ORIGIN_NONE:
                case SO_EE_ORIGIN_ICMP6:
                default:
                        _proto("Rx Received error report { orig=%u }",
                               emsg.ee.ee_origin);
                        local = 0;
                        err = emsg.ee.ee_errno;
                        break;
                }

                /* find all the connections between this transport and the
                 * affected destination */
                INIT_LIST_HEAD(&connq);

                if (rxrpc_peer_lookup(trans, sin.sin_addr.s_addr,
                                      &peer) == 0) {
                        read_lock(&peer->conn_lock);
                        list_for_each(_p, &peer->conn_active) {
                                conn = list_entry(_p, struct rxrpc_connection,
                                                  link);
                                if (port && conn->addr.sin_port != port)
                                        continue;
                                if (!list_empty(&conn->err_link))
                                        continue;
                                rxrpc_get_connection(conn);
                                list_add_tail(&conn->err_link, &connq);
                        }
                        read_unlock(&peer->conn_lock);

                        /* service all those connections */
                        while (!list_empty(&connq)) {
                                conn = list_entry(connq.next,
                                                  struct rxrpc_connection,
                                                  err_link);
                                list_del(&conn->err_link);

                                rxrpc_conn_handle_error(conn, local, err);

                                rxrpc_put_connection(conn);
                        }

                        rxrpc_put_peer(peer);
                }
        }

        _leave("");
        return;
} /* end rxrpc_trans_receive_error_report() */
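
/*
 * Summary (added for clarity, not in the original) of how the ICMP cases
 * above are mapped before being handed to rxrpc_conn_handle_error(); where
 * the port is cleared, the error applies to every connection to that host
 * rather than only to connections on the reported UDP port:
 *
 *	ICMP_NET_UNREACH / ICMP_NET_UNKNOWN	-> -ENETUNREACH,  port cleared
 *	ICMP_HOST_UNREACH / ICMP_HOST_UNKNOWN	-> -EHOSTUNREACH, port cleared
 *	ICMP_PORT_UNREACH			-> -ECONNREFUSED, port kept
 *	other ICMP / local / unknown origins	-> ee_errno as reported
 */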