/* (extraction residue removed: file-size banner and concatenated line numbers) */
  1. /* transport.c: Rx Transport routines
  2. *
  3. * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #include <linux/sched.h>
  12. #include <linux/slab.h>
  13. #include <linux/module.h>
  14. #include <rxrpc/transport.h>
  15. #include <rxrpc/peer.h>
  16. #include <rxrpc/connection.h>
  17. #include <rxrpc/call.h>
  18. #include <rxrpc/message.h>
  19. #include <rxrpc/krxiod.h>
  20. #include <rxrpc/krxsecd.h>
  21. #include <linux/udp.h>
  22. #include <linux/in.h>
  23. #include <linux/in6.h>
  24. #include <linux/icmp.h>
  25. #include <linux/skbuff.h>
  26. #include <net/sock.h>
  27. #include <net/ip.h>
  28. #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
  29. #include <linux/ipv6.h> /* this should _really_ be in errqueue.h.. */
  30. #endif
  31. #include <linux/errqueue.h>
  32. #include <asm/uaccess.h>
  33. #include <asm/checksum.h>
  34. #include "internal.h"
/*
 * Shape of the control message expected back from the socket error queue
 * once IP_RECVERR has been enabled on the transport's UDP socket.
 */
struct errormsg {
	struct cmsghdr cmsg;		/* control message header */
	struct sock_extended_err ee;	/* extended error information */
	struct sockaddr_in icmp_src;	/* ICMP packet source address */
};

/* all live transport endpoints, guarded by rxrpc_transports_lock */
static DEFINE_SPINLOCK(rxrpc_transports_lock);
static struct list_head rxrpc_transports = LIST_HEAD_INIT(rxrpc_transports);

/* accounting counter of allocated transports (only in __RXACCT builds) */
__RXACCT_DECL(atomic_t rxrpc_transport_count);

/* transports exposed through /proc, guarded by the rwsem below */
LIST_HEAD(rxrpc_proc_transports);
DECLARE_RWSEM(rxrpc_proc_transports_sem);

static void rxrpc_data_ready(struct sock *sk, int count);
static void rxrpc_error_report(struct sock *sk);
static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans,
					struct list_head *msgq);
static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans);
  50. /*****************************************************************************/
  51. /*
  52. * create a new transport endpoint using the specified UDP port
  53. */
  54. int rxrpc_create_transport(unsigned short port,
  55. struct rxrpc_transport **_trans)
  56. {
  57. struct rxrpc_transport *trans;
  58. struct sockaddr_in sin;
  59. mm_segment_t oldfs;
  60. struct sock *sock;
  61. int ret, opt;
  62. _enter("%hu", port);
  63. trans = kmalloc(sizeof(struct rxrpc_transport), GFP_KERNEL);
  64. if (!trans)
  65. return -ENOMEM;
  66. memset(trans, 0, sizeof(struct rxrpc_transport));
  67. atomic_set(&trans->usage, 1);
  68. INIT_LIST_HEAD(&trans->services);
  69. INIT_LIST_HEAD(&trans->link);
  70. INIT_LIST_HEAD(&trans->krxiodq_link);
  71. spin_lock_init(&trans->lock);
  72. INIT_LIST_HEAD(&trans->peer_active);
  73. INIT_LIST_HEAD(&trans->peer_graveyard);
  74. spin_lock_init(&trans->peer_gylock);
  75. init_waitqueue_head(&trans->peer_gy_waitq);
  76. rwlock_init(&trans->peer_lock);
  77. atomic_set(&trans->peer_count, 0);
  78. trans->port = port;
  79. /* create a UDP socket to be my actual transport endpoint */
  80. ret = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &trans->socket);
  81. if (ret < 0)
  82. goto error;
  83. /* use the specified port */
  84. if (port) {
  85. memset(&sin, 0, sizeof(sin));
  86. sin.sin_family = AF_INET;
  87. sin.sin_port = htons(port);
  88. ret = trans->socket->ops->bind(trans->socket,
  89. (struct sockaddr *) &sin,
  90. sizeof(sin));
  91. if (ret < 0)
  92. goto error;
  93. }
  94. opt = 1;
  95. oldfs = get_fs();
  96. set_fs(KERNEL_DS);
  97. ret = trans->socket->ops->setsockopt(trans->socket, SOL_IP, IP_RECVERR,
  98. (char *) &opt, sizeof(opt));
  99. set_fs(oldfs);
  100. spin_lock(&rxrpc_transports_lock);
  101. list_add(&trans->link, &rxrpc_transports);
  102. spin_unlock(&rxrpc_transports_lock);
  103. /* set the socket up */
  104. sock = trans->socket->sk;
  105. sock->sk_user_data = trans;
  106. sock->sk_data_ready = rxrpc_data_ready;
  107. sock->sk_error_report = rxrpc_error_report;
  108. down_write(&rxrpc_proc_transports_sem);
  109. list_add_tail(&trans->proc_link, &rxrpc_proc_transports);
  110. up_write(&rxrpc_proc_transports_sem);
  111. __RXACCT(atomic_inc(&rxrpc_transport_count));
  112. *_trans = trans;
  113. _leave(" = 0 (%p)", trans);
  114. return 0;
  115. error:
  116. /* finish cleaning up the transport (not really needed here, but...) */
  117. if (trans->socket)
  118. trans->socket->ops->shutdown(trans->socket, 2);
  119. /* close the socket */
  120. if (trans->socket) {
  121. trans->socket->sk->sk_user_data = NULL;
  122. sock_release(trans->socket);
  123. trans->socket = NULL;
  124. }
  125. kfree(trans);
  126. _leave(" = %d", ret);
  127. return ret;
  128. } /* end rxrpc_create_transport() */
  129. /*****************************************************************************/
  130. /*
  131. * destroy a transport endpoint
  132. */
/*
 * Drop a reference to a transport; on the final put, unlist it, tear down
 * the socket and free the structure.
 */
void rxrpc_put_transport(struct rxrpc_transport *trans)
{
	_enter("%p{u=%d p=%hu}",
	       trans, atomic_read(&trans->usage), trans->port);

	BUG_ON(atomic_read(&trans->usage) <= 0);

	/* to prevent a race, the decrement and the dequeue must be
	 * effectively atomic */
	spin_lock(&rxrpc_transports_lock);
	if (likely(!atomic_dec_and_test(&trans->usage))) {
		/* not the last reference - nothing more to do */
		spin_unlock(&rxrpc_transports_lock);
		_leave("");
		return;
	}
	list_del(&trans->link);
	spin_unlock(&rxrpc_transports_lock);

	/* finish cleaning up the transport: stop traffic, then detach it
	 * from the security and I/O daemons */
	if (trans->socket)
		trans->socket->ops->shutdown(trans->socket, 2);
	rxrpc_krxsecd_clear_transport(trans);
	rxrpc_krxiod_dequeue_transport(trans);

	/* discard all peer information */
	rxrpc_peer_clearall(trans);

	down_write(&rxrpc_proc_transports_sem);
	list_del(&trans->proc_link);
	up_write(&rxrpc_proc_transports_sem);
	__RXACCT(atomic_dec(&rxrpc_transport_count));

	/* close the socket; clear sk_user_data first so late socket
	 * callbacks can no longer reach the dying transport */
	if (trans->socket) {
		trans->socket->sk->sk_user_data = NULL;
		sock_release(trans->socket);
		trans->socket = NULL;
	}

	kfree(trans);

	_leave("");
} /* end rxrpc_put_transport() */
  168. /*****************************************************************************/
  169. /*
  170. * add a service to a transport to be listened upon
  171. */
  172. int rxrpc_add_service(struct rxrpc_transport *trans,
  173. struct rxrpc_service *newsrv)
  174. {
  175. struct rxrpc_service *srv;
  176. struct list_head *_p;
  177. int ret = -EEXIST;
  178. _enter("%p{%hu},%p{%hu}",
  179. trans, trans->port, newsrv, newsrv->service_id);
  180. /* verify that the service ID is not already present */
  181. spin_lock(&trans->lock);
  182. list_for_each(_p, &trans->services) {
  183. srv = list_entry(_p, struct rxrpc_service, link);
  184. if (srv->service_id == newsrv->service_id)
  185. goto out;
  186. }
  187. /* okay - add the transport to the list */
  188. list_add_tail(&newsrv->link, &trans->services);
  189. rxrpc_get_transport(trans);
  190. ret = 0;
  191. out:
  192. spin_unlock(&trans->lock);
  193. _leave("= %d", ret);
  194. return ret;
  195. } /* end rxrpc_add_service() */
  196. /*****************************************************************************/
  197. /*
  198. * remove a service from a transport
  199. */
/*
 * Unregister a service from a transport and release the reference taken
 * by rxrpc_add_service().
 */
void rxrpc_del_service(struct rxrpc_transport *trans, struct rxrpc_service *srv)
{
	_enter("%p{%hu},%p{%hu}", trans, trans->port, srv, srv->service_id);

	spin_lock(&trans->lock);
	list_del(&srv->link);
	spin_unlock(&trans->lock);

	/* drop the ref pinned by rxrpc_add_service() */
	rxrpc_put_transport(trans);

	_leave("");
} /* end rxrpc_del_service() */
  209. /*****************************************************************************/
  210. /*
  211. * INET callback when data has been received on the socket.
  212. */
  213. static void rxrpc_data_ready(struct sock *sk, int count)
  214. {
  215. struct rxrpc_transport *trans;
  216. _enter("%p{t=%p},%d", sk, sk->sk_user_data, count);
  217. /* queue the transport for attention by krxiod */
  218. trans = (struct rxrpc_transport *) sk->sk_user_data;
  219. if (trans)
  220. rxrpc_krxiod_queue_transport(trans);
  221. /* wake up anyone waiting on the socket */
  222. if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
  223. wake_up_interruptible(sk->sk_sleep);
  224. _leave("");
  225. } /* end rxrpc_data_ready() */
  226. /*****************************************************************************/
  227. /*
  228. * INET callback when an ICMP error packet is received
  229. * - sk->err is error (EHOSTUNREACH, EPROTO or EMSGSIZE)
  230. */
  231. static void rxrpc_error_report(struct sock *sk)
  232. {
  233. struct rxrpc_transport *trans;
  234. _enter("%p{t=%p}", sk, sk->sk_user_data);
  235. /* queue the transport for attention by krxiod */
  236. trans = (struct rxrpc_transport *) sk->sk_user_data;
  237. if (trans) {
  238. trans->error_rcvd = 1;
  239. rxrpc_krxiod_queue_transport(trans);
  240. }
  241. /* wake up anyone waiting on the socket */
  242. if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
  243. wake_up_interruptible(sk->sk_sleep);
  244. _leave("");
  245. } /* end rxrpc_error_report() */
  246. /*****************************************************************************/
  247. /*
  248. * split a message up, allocating message records and filling them in
  249. * from the contents of a socket buffer
  250. */
/*
 * Split a received UDP packet into one or more message records on *msgq,
 * filling each in from the socket buffer.  A jumbo DATA packet yields one
 * record per contained sub-packet, all sharing the same skb (each record
 * holds its own skb reference).  On error the whole queue is emptied and
 * a negative errno is returned.
 */
static int rxrpc_incoming_msg(struct rxrpc_transport *trans,
			      struct sk_buff *pkt,
			      struct list_head *msgq)
{
	struct rxrpc_message *msg;
	int ret;

	_enter("");

	msg = kmalloc(sizeof(struct rxrpc_message), GFP_KERNEL);
	if (!msg) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	memset(msg, 0, sizeof(*msg));
	atomic_set(&msg->usage, 1);
	/* queue immediately so the error path below can dispose of
	 * everything uniformly */
	list_add_tail(&msg->link, msgq);

	/* dig out the Rx routing parameters */
	if (skb_copy_bits(pkt, sizeof(struct udphdr),
			  &msg->hdr, sizeof(msg->hdr)) < 0) {
		ret = -EBADMSG;
		goto error;
	}

	msg->trans = trans;
	msg->state = RXRPC_MSG_RECEIVED;
	skb_get_timestamp(pkt, &msg->stamp);
	if (msg->stamp.tv_sec == 0) {
		/* no timestamp on the skb - take one now and switch
		 * timestamping on for subsequent packets */
		do_gettimeofday(&msg->stamp);
		if (pkt->sk)
			sock_enable_timestamp(pkt->sk);
	}
	msg->seq = ntohl(msg->hdr.seq);

	/* attach the packet (the record now owns a reference to it) */
	skb_get(pkt);
	msg->pkt = pkt;

	msg->offset = sizeof(struct udphdr) + sizeof(struct rxrpc_header);
	msg->dsize = msg->pkt->len - msg->offset;

	_net("Rx Received packet from %s (%08x;%08x,%1x,%d,%s,%02x,%d,%d)",
	     msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server",
	     ntohl(msg->hdr.epoch),
	     (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT,
	     ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK,
	     ntohl(msg->hdr.callNumber),
	     rxrpc_pkts[msg->hdr.type],
	     msg->hdr.flags,
	     ntohs(msg->hdr.serviceId),
	     msg->hdr.securityIndex);

	__RXACCT(atomic_inc(&rxrpc_message_count));

	/* split off jumbo packets: each iteration peels one sub-packet off
	 * the front of the current record into a fresh record */
	while (msg->hdr.type == RXRPC_PACKET_TYPE_DATA &&
	       msg->hdr.flags & RXRPC_JUMBO_PACKET
	       ) {
		struct rxrpc_jumbo_header jumbo;
		struct rxrpc_message *jumbomsg = msg;

		_debug("split jumbo packet");

		/* quick sanity check: there must be room for a full
		 * sub-packet plus a secondary header, and a jumbo packet
		 * cannot be the last packet of a call */
		ret = -EBADMSG;
		if (msg->dsize <
		    RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header))
			goto error;
		if (msg->hdr.flags & RXRPC_LAST_PACKET)
			goto error;

		/* dig out the secondary header */
		if (skb_copy_bits(pkt, msg->offset + RXRPC_JUMBO_DATALEN,
				  &jumbo, sizeof(jumbo)) < 0)
			goto error;

		/* allocate a new message record */
		ret = -ENOMEM;
		msg = kmalloc(sizeof(struct rxrpc_message), GFP_KERNEL);
		if (!msg)
			goto error;

		/* clone the previous record (usage count copies as 1) and
		 * queue it for the same uniform disposal */
		memcpy(msg, jumbomsg, sizeof(*msg));
		list_add_tail(&msg->link, msgq);

		/* adjust the jumbo packet: the prior record keeps only the
		 * first sub-packet's worth of data */
		jumbomsg->dsize = RXRPC_JUMBO_DATALEN;

		/* attach the packet here too - one skb ref per record */
		skb_get(pkt);

		/* adjust the parameters: the new record continues the
		 * sequence/serial numbering and starts past the secondary
		 * header */
		msg->seq++;
		msg->hdr.seq = htonl(msg->seq);
		msg->hdr.serial = htonl(ntohl(msg->hdr.serial) + 1);
		msg->offset += RXRPC_JUMBO_DATALEN +
			sizeof(struct rxrpc_jumbo_header);
		msg->dsize -= RXRPC_JUMBO_DATALEN +
			sizeof(struct rxrpc_jumbo_header);

		/* the secondary header supplies flags for the remainder */
		msg->hdr.flags = jumbo.flags;
		msg->hdr._rsvd = jumbo._rsvd;

		_net("Rx Split jumbo packet from %s"
		     " (%08x;%08x,%1x,%d,%s,%02x,%d,%d)",
		     msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server",
		     ntohl(msg->hdr.epoch),
		     (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT,
		     ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK,
		     ntohl(msg->hdr.callNumber),
		     rxrpc_pkts[msg->hdr.type],
		     msg->hdr.flags,
		     ntohs(msg->hdr.serviceId),
		     msg->hdr.securityIndex);

		__RXACCT(atomic_inc(&rxrpc_message_count));
	}

	_leave(" = 0 #%d", atomic_read(&rxrpc_message_count));
	return 0;

 error:
	/* unwind: discard every record queued so far (each put releases
	 * that record's skb reference) */
	while (!list_empty(msgq)) {
		msg = list_entry(msgq->next, struct rxrpc_message, link);
		list_del_init(&msg->link);

		rxrpc_put_message(msg);
	}

	_leave(" = %d", ret);
	return ret;
} /* end rxrpc_incoming_msg() */
  360. /*****************************************************************************/
  361. /*
  362. * accept a new call
  363. * - called from krxiod in process context
  364. */
/*
 * Drain the transport's socket, dispatching each received packet to the
 * appropriate connection or to the new-call path.
 * - called from krxiod in process context
 * - loops until skb_recv_datagram() reports -EAGAIN (queue empty)
 */
void rxrpc_trans_receive_packet(struct rxrpc_transport *trans)
{
	struct rxrpc_message *msg;
	struct rxrpc_peer *peer;
	struct sk_buff *pkt;
	int ret;
	__be32 addr;
	__be16 port;
	LIST_HEAD(msgq);

	_enter("%p{%d}", trans, trans->port);

	for (;;) {
		/* deal with outstanding errors first */
		if (trans->error_rcvd)
			rxrpc_trans_receive_error_report(trans);

		/* attempt to receive a packet (non-blocking) */
		pkt = skb_recv_datagram(trans->socket->sk, 0, 1, &ret);
		if (!pkt) {
			if (ret == -EAGAIN) {
				_leave(" EAGAIN");
				return;
			}

			/* an icmp error may have occurred - requeue the
			 * transport so krxiod revisits it */
			rxrpc_krxiod_queue_transport(trans);
			_leave(" error %d\n", ret);
			return;
		}

		/* we'll probably need to checksum it (didn't call
		 * sock_recvmsg) */
		if (skb_checksum_complete(pkt)) {
			kfree_skb(pkt);
			rxrpc_krxiod_queue_transport(trans);
			_leave(" CSUM failed");
			return;
		}

		addr = pkt->nh.iph->saddr;
		port = pkt->h.uh->source;

		_net("Rx Received UDP packet from %08x:%04hu",
		     ntohl(addr), ntohs(port));

		/* unmarshall the Rx parameters and split jumbo packets */
		ret = rxrpc_incoming_msg(trans, pkt, &msgq);
		if (ret < 0) {
			kfree_skb(pkt);
			rxrpc_krxiod_queue_transport(trans);
			_leave(" bad packet");
			return;
		}

		BUG_ON(list_empty(&msgq));

		msg = list_entry(msgq.next, struct rxrpc_message, link);

		/* locate the record for the peer from which it
		 * originated */
		ret = rxrpc_peer_lookup(trans, addr, &peer);
		if (ret < 0) {
			kdebug("Rx No connections from that peer");
			rxrpc_trans_immediate_abort(trans, msg, -EINVAL);
			goto finished_msg;
		}

		/* try and find a matching connection */
		ret = rxrpc_connection_lookup(peer, msg, &msg->conn);
		if (ret < 0) {
			kdebug("Rx Unknown Connection");
			rxrpc_trans_immediate_abort(trans, msg, -EINVAL);
			rxrpc_put_peer(peer);
			goto finished_msg;
		}
		rxrpc_put_peer(peer);

		/* deal with the first packet of a new call: client-initiated
		 * DATA packet with sequence number 1 */
		if (msg->hdr.flags & RXRPC_CLIENT_INITIATED &&
		    msg->hdr.type == RXRPC_PACKET_TYPE_DATA &&
		    ntohl(msg->hdr.seq) == 1
		    ) {
			_debug("Rx New server call");
			rxrpc_trans_receive_new_call(trans, &msgq);
			goto finished_msg;
		}

		/* deal with subsequent packet(s) of call */
		_debug("Rx Call packet");
		while (!list_empty(&msgq)) {
			msg = list_entry(msgq.next, struct rxrpc_message, link);
			list_del_init(&msg->link);

			ret = rxrpc_conn_receive_call_packet(msg->conn, NULL, msg);
			if (ret < 0) {
				rxrpc_trans_immediate_abort(trans, msg, ret);
				rxrpc_put_message(msg);
				goto finished_msg;
			}

			rxrpc_put_message(msg);
		}

		goto finished_msg;

		/* dispose of the packets: drop any message records still
		 * queued, then the skb reference this loop iteration holds */
	finished_msg:
		while (!list_empty(&msgq)) {
			msg = list_entry(msgq.next, struct rxrpc_message, link);
			list_del_init(&msg->link);

			rxrpc_put_message(msg);
		}
		kfree_skb(pkt);
	}

	_leave("");
} /* end rxrpc_trans_receive_packet() */
  464. /*****************************************************************************/
  465. /*
  466. * accept a new call from a client trying to connect to one of my services
  467. * - called in process context
  468. */
  469. static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans,
  470. struct list_head *msgq)
  471. {
  472. struct rxrpc_message *msg;
  473. _enter("");
  474. /* only bother with the first packet */
  475. msg = list_entry(msgq->next, struct rxrpc_message, link);
  476. list_del_init(&msg->link);
  477. rxrpc_krxsecd_queue_incoming_call(msg);
  478. rxrpc_put_message(msg);
  479. _leave(" = 0");
  480. return 0;
  481. } /* end rxrpc_trans_receive_new_call() */
  482. /*****************************************************************************/
  483. /*
  484. * perform an immediate abort without connection or call structures
  485. */
/*
 * Perform an immediate abort without connection or call structures: build
 * an ABORT packet mirroring msg's routing header and send it straight back
 * to the packet's source.  Returns the sendmsg result (or 0 when the
 * offending packet was itself an abort).
 */
int rxrpc_trans_immediate_abort(struct rxrpc_transport *trans,
				struct rxrpc_message *msg,
				int error)
{
	struct rxrpc_header ahdr;
	struct sockaddr_in sin;
	struct msghdr msghdr;
	struct kvec iov[2];
	__be32 _error;
	int len, ret;

	_enter("%p,%p,%d", trans, msg, error);

	/* don't abort an abort packet */
	if (msg->hdr.type == RXRPC_PACKET_TYPE_ABORT) {
		_leave(" = 0");
		return 0;
	}

	/* the abort code is the positive errno, in network byte order */
	_error = htonl(-error);

	/* set up the message to be transmitted: copy the incoming header,
	 * then turn it into a terminal ABORT flowing the other way */
	memcpy(&ahdr, &msg->hdr, sizeof(ahdr));
	ahdr.epoch = msg->hdr.epoch;
	ahdr.serial = htonl(1);
	ahdr.seq = 0;
	ahdr.type = RXRPC_PACKET_TYPE_ABORT;
	ahdr.flags = RXRPC_LAST_PACKET;
	/* invert the direction bit relative to the incoming packet */
	ahdr.flags |= ~msg->hdr.flags & RXRPC_CLIENT_INITIATED;

	/* payload = header + 32-bit abort code */
	iov[0].iov_len = sizeof(ahdr);
	iov[0].iov_base = &ahdr;
	iov[1].iov_len = sizeof(_error);
	iov[1].iov_base = &_error;

	len = sizeof(ahdr) + sizeof(_error);

	/* reply to the source address/port of the offending packet */
	memset(&sin,0,sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = msg->pkt->h.uh->source;
	sin.sin_addr.s_addr = msg->pkt->nh.iph->saddr;

	msghdr.msg_name = &sin;
	msghdr.msg_namelen = sizeof(sin);
	msghdr.msg_control = NULL;
	msghdr.msg_controllen = 0;
	msghdr.msg_flags = MSG_DONTWAIT;

	_net("Sending message type %d of %d bytes to %08x:%d",
	     ahdr.type,
	     len,
	     ntohl(sin.sin_addr.s_addr),
	     ntohs(sin.sin_port));

	/* send the message */
	ret = kernel_sendmsg(trans->socket, &msghdr, iov, 2, len);

	_leave(" = %d", ret);
	return ret;
} /* end rxrpc_trans_immediate_abort() */
  535. /*****************************************************************************/
  536. /*
  537. * receive an ICMP error report and percolate it to all connections
  538. * heading to the affected host or port
  539. */
/*
 * Receive ICMP error reports from the socket's error queue and percolate
 * each one to all connections heading to the affected host (or host+port).
 * Loops until the error queue is drained (-EAGAIN).
 */
static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans)
{
	struct rxrpc_connection *conn;
	struct sockaddr_in sin;
	struct rxrpc_peer *peer;
	struct list_head connq, *_p;
	struct errormsg emsg;
	struct msghdr msg;
	__be16 port;
	int local, err;

	_enter("%p", trans);

	for (;;) {
		/* clear the pending flag before reading, so a report
		 * arriving mid-loop re-marks the transport */
		trans->error_rcvd = 0;

		/* try and receive an error message */
		msg.msg_name = &sin;
		msg.msg_namelen = sizeof(sin);
		msg.msg_control = &emsg;
		msg.msg_controllen = sizeof(emsg);
		msg.msg_flags = 0;

		err = kernel_recvmsg(trans->socket, &msg, NULL, 0, 0,
				     MSG_ERRQUEUE | MSG_DONTWAIT | MSG_TRUNC);

		if (err == -EAGAIN) {
			/* error queue drained */
			_leave("");
			return;
		}

		if (err < 0) {
			printk("%s: unable to recv an error report: %d\n",
			       __FUNCTION__, err);
			_leave("");
			return;
		}

		/* work out how much control data was actually written
		 * (recvmsg advances msg_control past what it stored) */
		msg.msg_controllen = (char *) msg.msg_control - (char *) &emsg;

		if (msg.msg_controllen < sizeof(emsg.cmsg) ||
		    msg.msg_namelen < sizeof(sin)) {
			printk("%s: short control message"
			       " (nlen=%u clen=%Zu fl=%x)\n",
			       __FUNCTION__,
			       msg.msg_namelen,
			       msg.msg_controllen,
			       msg.msg_flags);
			continue;
		}

		_net("Rx Received control message"
		     " { len=%Zu level=%u type=%u }",
		     emsg.cmsg.cmsg_len,
		     emsg.cmsg.cmsg_level,
		     emsg.cmsg.cmsg_type);

		if (sin.sin_family != AF_INET) {
			printk("Rx Ignoring error report with non-INET address"
			       " (fam=%u)",
			       sin.sin_family);
			continue;
		}

		_net("Rx Received message pertaining to host addr=%x port=%hu",
		     ntohl(sin.sin_addr.s_addr), ntohs(sin.sin_port));

		/* only IP_RECVERR control messages are of interest */
		if (emsg.cmsg.cmsg_level != SOL_IP ||
		    emsg.cmsg.cmsg_type != IP_RECVERR) {
			printk("Rx Ignoring unknown error report"
			       " { level=%u type=%u }",
			       emsg.cmsg.cmsg_level,
			       emsg.cmsg.cmsg_type);
			continue;
		}

		if (msg.msg_controllen < sizeof(emsg.cmsg) + sizeof(emsg.ee)) {
			printk("%s: short error message (%Zu)\n",
			       __FUNCTION__, msg.msg_controllen);
			_leave("");
			return;
		}

		/* port stays set for port-specific errors; it is zeroed
		 * below for host/network-wide ones so that all this peer's
		 * connections get notified */
		port = sin.sin_port;

		/* translate the extended error into an errno and a scope */
		switch (emsg.ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP:
			local = 0;
			switch (emsg.ee.ee_type) {
			case ICMP_DEST_UNREACH:
				switch (emsg.ee.ee_code) {
				case ICMP_NET_UNREACH:
					_net("Rx Received ICMP Network Unreachable");
					port = 0;
					err = -ENETUNREACH;
					break;
				case ICMP_HOST_UNREACH:
					_net("Rx Received ICMP Host Unreachable");
					port = 0;
					err = -EHOSTUNREACH;
					break;
				case ICMP_PORT_UNREACH:
					_net("Rx Received ICMP Port Unreachable");
					err = -ECONNREFUSED;
					break;
				case ICMP_NET_UNKNOWN:
					_net("Rx Received ICMP Unknown Network");
					port = 0;
					err = -ENETUNREACH;
					break;
				case ICMP_HOST_UNKNOWN:
					_net("Rx Received ICMP Unknown Host");
					port = 0;
					err = -EHOSTUNREACH;
					break;
				default:
					_net("Rx Received ICMP DestUnreach { code=%u }",
					     emsg.ee.ee_code);
					err = emsg.ee.ee_errno;
					break;
				}
				break;

			case ICMP_TIME_EXCEEDED:
				_net("Rx Received ICMP TTL Exceeded");
				err = emsg.ee.ee_errno;
				break;

			default:
				_proto("Rx Received ICMP error { type=%u code=%u }",
				       emsg.ee.ee_type, emsg.ee.ee_code);
				err = emsg.ee.ee_errno;
				break;
			}
			break;

		case SO_EE_ORIGIN_LOCAL:
			_proto("Rx Received local error { error=%d }",
			       emsg.ee.ee_errno);
			local = 1;
			err = emsg.ee.ee_errno;
			break;

		case SO_EE_ORIGIN_NONE:
		case SO_EE_ORIGIN_ICMP6:
		default:
			_proto("Rx Received error report { orig=%u }",
			       emsg.ee.ee_origin);
			local = 0;
			err = emsg.ee.ee_errno;
			break;
		}

		/* find all the connections between this transport and the
		 * affected destination */
		INIT_LIST_HEAD(&connq);
		if (rxrpc_peer_lookup(trans, sin.sin_addr.s_addr,
				      &peer) == 0) {
			/* collect the matching connections under the lock,
			 * taking a ref on each; err_link doubles as an
			 * already-collected marker */
			read_lock(&peer->conn_lock);
			list_for_each(_p, &peer->conn_active) {
				conn = list_entry(_p, struct rxrpc_connection,
						  link);
				if (port && conn->addr.sin_port != port)
					continue;
				if (!list_empty(&conn->err_link))
					continue;
				rxrpc_get_connection(conn);
				list_add_tail(&conn->err_link, &connq);
			}
			read_unlock(&peer->conn_lock);

			/* service all those connections (outside the lock) */
			while (!list_empty(&connq)) {
				conn = list_entry(connq.next,
						  struct rxrpc_connection,
						  err_link);
				list_del(&conn->err_link);
				rxrpc_conn_handle_error(conn, local, err);

				rxrpc_put_connection(conn);
			}

			rxrpc_put_peer(peer);
		}
	}

	_leave("");
	return;
} /* end rxrpc_trans_receive_error_report() */