transport.c

/* transport.c: Rx Transport routines
 *
 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rxrpc/transport.h>
#include <rxrpc/peer.h>
#include <rxrpc/connection.h>
#include <rxrpc/call.h>
#include <rxrpc/message.h>
#include <rxrpc/krxiod.h>
#include <rxrpc/krxsecd.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <net/sock.h>
#include <net/ip.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <linux/ipv6.h> /* this should _really_ be in errqueue.h.. */
#endif
#include <linux/errqueue.h>
#include <asm/uaccess.h>
#include <asm/checksum.h>
#include "internal.h"
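
/* buffer layout for control messages drained from the socket's error queue
 * (enabled via IP_RECVERR); filled in by kernel_recvmsg(MSG_ERRQUEUE) in
 * rxrpc_trans_receive_error_report() below */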
struct errormsg {
        struct cmsghdr cmsg;            /* control message header */
        struct sock_extended_err ee;    /* extended error information */
        struct sockaddr_in icmp_src;    /* ICMP packet source address */
};

static DEFINE_SPINLOCK(rxrpc_transports_lock);
static struct list_head rxrpc_transports = LIST_HEAD_INIT(rxrpc_transports);

__RXACCT_DECL(atomic_t rxrpc_transport_count);
LIST_HEAD(rxrpc_proc_transports);
DECLARE_RWSEM(rxrpc_proc_transports_sem);

static void rxrpc_data_ready(struct sock *sk, int count);
static void rxrpc_error_report(struct sock *sk);
static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans,
                                        struct list_head *msgq);
static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans);

/*****************************************************************************/
/*
 * create a new transport endpoint using the specified UDP port
 */
int rxrpc_create_transport(unsigned short port,
                           struct rxrpc_transport **_trans)
{
        struct rxrpc_transport *trans;
        struct sockaddr_in sin;
        mm_segment_t oldfs;
        struct sock *sock;
        int ret, opt;

        _enter("%hu", port);

        trans = kmalloc(sizeof(struct rxrpc_transport), GFP_KERNEL);
        if (!trans)
                return -ENOMEM;

        memset(trans, 0, sizeof(struct rxrpc_transport));
        atomic_set(&trans->usage, 1);
        INIT_LIST_HEAD(&trans->services);
        INIT_LIST_HEAD(&trans->link);
        INIT_LIST_HEAD(&trans->krxiodq_link);
        spin_lock_init(&trans->lock);
        INIT_LIST_HEAD(&trans->peer_active);
        INIT_LIST_HEAD(&trans->peer_graveyard);
        spin_lock_init(&trans->peer_gylock);
        init_waitqueue_head(&trans->peer_gy_waitq);
        rwlock_init(&trans->peer_lock);
        atomic_set(&trans->peer_count, 0);
        trans->port = port;

        /* create a UDP socket to be my actual transport endpoint */
        ret = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &trans->socket);
        if (ret < 0)
                goto error;

        /* use the specified port */
        if (port) {
                memset(&sin, 0, sizeof(sin));
                sin.sin_family = AF_INET;
                sin.sin_port = htons(port);
                ret = trans->socket->ops->bind(trans->socket,
                                               (struct sockaddr *) &sin,
                                               sizeof(sin));
                if (ret < 0)
                        goto error;
        }
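
        /* enable ICMP error queueing (IP_RECVERR) so that network errors are
         * signalled through rxrpc_error_report() and can later be drained
         * with MSG_ERRQUEUE; the set_fs(KERNEL_DS) dance is needed because
         * the setsockopt path expects a userspace pointer for the option */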
        opt = 1;
        oldfs = get_fs();
        set_fs(KERNEL_DS);
        ret = trans->socket->ops->setsockopt(trans->socket, SOL_IP, IP_RECVERR,
                                             (char *) &opt, sizeof(opt));
        set_fs(oldfs);

        spin_lock(&rxrpc_transports_lock);
        list_add(&trans->link, &rxrpc_transports);
        spin_unlock(&rxrpc_transports_lock);

        /* set the socket up */
        sock = trans->socket->sk;
        sock->sk_user_data = trans;
        sock->sk_data_ready = rxrpc_data_ready;
        sock->sk_error_report = rxrpc_error_report;

        down_write(&rxrpc_proc_transports_sem);
        list_add_tail(&trans->proc_link, &rxrpc_proc_transports);
        up_write(&rxrpc_proc_transports_sem);

        __RXACCT(atomic_inc(&rxrpc_transport_count));

        *_trans = trans;
        _leave(" = 0 (%p)", trans);
        return 0;

 error:
        /* finish cleaning up the transport (not really needed here, but...) */
        if (trans->socket)
                trans->socket->ops->shutdown(trans->socket, 2);

        /* close the socket */
        if (trans->socket) {
                trans->socket->sk->sk_user_data = NULL;
                sock_release(trans->socket);
                trans->socket = NULL;
        }

        kfree(trans);

        _leave(" = %d", ret);
        return ret;
} /* end rxrpc_create_transport() */

/*****************************************************************************/
/*
 * destroy a transport endpoint
 */
void rxrpc_put_transport(struct rxrpc_transport *trans)
{
        _enter("%p{u=%d p=%hu}",
               trans, atomic_read(&trans->usage), trans->port);

        BUG_ON(atomic_read(&trans->usage) <= 0);

        /* to prevent a race, the decrement and the dequeue must be
         * effectively atomic */
        spin_lock(&rxrpc_transports_lock);
        if (likely(!atomic_dec_and_test(&trans->usage))) {
                spin_unlock(&rxrpc_transports_lock);
                _leave("");
                return;
        }
        list_del(&trans->link);
        spin_unlock(&rxrpc_transports_lock);

        /* finish cleaning up the transport */
        if (trans->socket)
                trans->socket->ops->shutdown(trans->socket, 2);

        rxrpc_krxsecd_clear_transport(trans);
        rxrpc_krxiod_dequeue_transport(trans);

        /* discard all peer information */
        rxrpc_peer_clearall(trans);

        down_write(&rxrpc_proc_transports_sem);
        list_del(&trans->proc_link);
        up_write(&rxrpc_proc_transports_sem);
        __RXACCT(atomic_dec(&rxrpc_transport_count));

        /* close the socket */
        if (trans->socket) {
                trans->socket->sk->sk_user_data = NULL;
                sock_release(trans->socket);
                trans->socket = NULL;
        }

        kfree(trans);

        _leave("");
} /* end rxrpc_put_transport() */

/*****************************************************************************/
/*
 * add a service to a transport to be listened upon
 */
int rxrpc_add_service(struct rxrpc_transport *trans,
                      struct rxrpc_service *newsrv)
{
        struct rxrpc_service *srv;
        struct list_head *_p;
        int ret = -EEXIST;

        _enter("%p{%hu},%p{%hu}",
               trans, trans->port, newsrv, newsrv->service_id);

        /* verify that the service ID is not already present */
        spin_lock(&trans->lock);

        list_for_each(_p, &trans->services) {
                srv = list_entry(_p, struct rxrpc_service, link);
                if (srv->service_id == newsrv->service_id)
                        goto out;
        }

        /* okay - add the service to the list */
        list_add_tail(&newsrv->link, &trans->services);
        rxrpc_get_transport(trans);
        ret = 0;

 out:
        spin_unlock(&trans->lock);

        _leave("= %d", ret);
        return ret;
} /* end rxrpc_add_service() */

/*****************************************************************************/
/*
 * remove a service from a transport
 */
void rxrpc_del_service(struct rxrpc_transport *trans, struct rxrpc_service *srv)
{
        _enter("%p{%hu},%p{%hu}", trans, trans->port, srv, srv->service_id);

        spin_lock(&trans->lock);
        list_del(&srv->link);
        spin_unlock(&trans->lock);

        rxrpc_put_transport(trans);

        _leave("");
} /* end rxrpc_del_service() */

/*****************************************************************************/
/*
 * INET callback when data has been received on the socket
 */
static void rxrpc_data_ready(struct sock *sk, int count)
{
        struct rxrpc_transport *trans;

        _enter("%p{t=%p},%d", sk, sk->sk_user_data, count);

        /* queue the transport for attention by krxiod */
        trans = (struct rxrpc_transport *) sk->sk_user_data;
        if (trans)
                rxrpc_krxiod_queue_transport(trans);

        /* wake up anyone waiting on the socket */
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible(sk->sk_sleep);

        _leave("");
} /* end rxrpc_data_ready() */

/*****************************************************************************/
/*
 * INET callback when an ICMP error packet is received
 * - sk->err is error (EHOSTUNREACH, EPROTO or EMSGSIZE)
 */
static void rxrpc_error_report(struct sock *sk)
{
        struct rxrpc_transport *trans;

        _enter("%p{t=%p}", sk, sk->sk_user_data);

        /* queue the transport for attention by krxiod */
        trans = (struct rxrpc_transport *) sk->sk_user_data;
        if (trans) {
                trans->error_rcvd = 1;
                rxrpc_krxiod_queue_transport(trans);
        }

        /* wake up anyone waiting on the socket */
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible(sk->sk_sleep);

        _leave("");
} /* end rxrpc_error_report() */

/*****************************************************************************/
/*
 * split a message up, allocating message records and filling them in
 * from the contents of a socket buffer
 */
static int rxrpc_incoming_msg(struct rxrpc_transport *trans,
                              struct sk_buff *pkt,
                              struct list_head *msgq)
{
        struct rxrpc_message *msg;
        int ret;

        _enter("");

        msg = kmalloc(sizeof(struct rxrpc_message), GFP_KERNEL);
        if (!msg) {
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }

        memset(msg, 0, sizeof(*msg));
        atomic_set(&msg->usage, 1);
        list_add_tail(&msg->link, msgq);

        /* dig out the Rx routing parameters */
        if (skb_copy_bits(pkt, sizeof(struct udphdr),
                          &msg->hdr, sizeof(msg->hdr)) < 0) {
                ret = -EBADMSG;
                goto error;
        }

        msg->trans = trans;
        msg->state = RXRPC_MSG_RECEIVED;
        skb_get_timestamp(pkt, &msg->stamp);
        if (msg->stamp.tv_sec == 0) {
                do_gettimeofday(&msg->stamp);
                if (pkt->sk)
                        sock_enable_timestamp(pkt->sk);
        }
        msg->seq = ntohl(msg->hdr.seq);

        /* attach the packet */
        skb_get(pkt);
        msg->pkt = pkt;

        msg->offset = sizeof(struct udphdr) + sizeof(struct rxrpc_header);
        msg->dsize = msg->pkt->len - msg->offset;

        _net("Rx Received packet from %s (%08x;%08x,%1x,%d,%s,%02x,%d,%d)",
             msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server",
             ntohl(msg->hdr.epoch),
             (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT,
             ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK,
             ntohl(msg->hdr.callNumber),
             rxrpc_pkts[msg->hdr.type],
             msg->hdr.flags,
             ntohs(msg->hdr.serviceId),
             msg->hdr.securityIndex);

        __RXACCT(atomic_inc(&rxrpc_message_count));
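
        /* a jumbo packet carries several DATA segments: each segment except
         * the last occupies exactly RXRPC_JUMBO_DATALEN bytes and is followed
         * by a struct rxrpc_jumbo_header describing the next one, so the
         * packet is peeled apart into one rxrpc_message per segment, all
         * sharing the same underlying sk_buff */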
        /* split off jumbo packets */
        while (msg->hdr.type == RXRPC_PACKET_TYPE_DATA &&
               msg->hdr.flags & RXRPC_JUMBO_PACKET
               ) {
                struct rxrpc_jumbo_header jumbo;
                struct rxrpc_message *jumbomsg = msg;

                _debug("split jumbo packet");

                /* quick sanity check */
                ret = -EBADMSG;
                if (msg->dsize <
                    RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header))
                        goto error;
                if (msg->hdr.flags & RXRPC_LAST_PACKET)
                        goto error;

                /* dig out the secondary header */
                if (skb_copy_bits(pkt, msg->offset + RXRPC_JUMBO_DATALEN,
                                  &jumbo, sizeof(jumbo)) < 0)
                        goto error;

                /* allocate a new message record */
                ret = -ENOMEM;
                msg = kmalloc(sizeof(struct rxrpc_message), GFP_KERNEL);
                if (!msg)
                        goto error;

                memcpy(msg, jumbomsg, sizeof(*msg));
                list_add_tail(&msg->link, msgq);

                /* adjust the jumbo packet */
                jumbomsg->dsize = RXRPC_JUMBO_DATALEN;

                /* attach the packet here too */
                skb_get(pkt);

                /* adjust the parameters */
                msg->seq++;
                msg->hdr.seq = htonl(msg->seq);
                msg->hdr.serial = htonl(ntohl(msg->hdr.serial) + 1);
                msg->offset += RXRPC_JUMBO_DATALEN +
                        sizeof(struct rxrpc_jumbo_header);
                msg->dsize -= RXRPC_JUMBO_DATALEN +
                        sizeof(struct rxrpc_jumbo_header);
                msg->hdr.flags = jumbo.flags;
                msg->hdr._rsvd = jumbo._rsvd;

                _net("Rx Split jumbo packet from %s"
                     " (%08x;%08x,%1x,%d,%s,%02x,%d,%d)",
                     msg->hdr.flags & RXRPC_CLIENT_INITIATED ? "client" : "server",
                     ntohl(msg->hdr.epoch),
                     (ntohl(msg->hdr.cid) & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT,
                     ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK,
                     ntohl(msg->hdr.callNumber),
                     rxrpc_pkts[msg->hdr.type],
                     msg->hdr.flags,
                     ntohs(msg->hdr.serviceId),
                     msg->hdr.securityIndex);

                __RXACCT(atomic_inc(&rxrpc_message_count));
        }

        _leave(" = 0 #%d", atomic_read(&rxrpc_message_count));
        return 0;

 error:
        while (!list_empty(msgq)) {
                msg = list_entry(msgq->next, struct rxrpc_message, link);
                list_del_init(&msg->link);

                rxrpc_put_message(msg);
        }

        _leave(" = %d", ret);
        return ret;
} /* end rxrpc_incoming_msg() */

/*****************************************************************************/
/*
 * accept a new call
 * - called from krxiod in process context
 */
void rxrpc_trans_receive_packet(struct rxrpc_transport *trans)
{
        struct rxrpc_message *msg;
        struct rxrpc_peer *peer;
        struct sk_buff *pkt;
        int ret;
        __be32 addr;
        __be16 port;
        LIST_HEAD(msgq);

        _enter("%p{%d}", trans, trans->port);
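
        /* drain the socket: loop non-blocking over skb_recv_datagram(),
         * returning when the receive queue is empty (-EAGAIN) and dealing
         * with any ICMP error reports flagged by rxrpc_error_report() on the
         * way round */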
        for (;;) {
                /* deal with outstanding errors first */
                if (trans->error_rcvd)
                        rxrpc_trans_receive_error_report(trans);

                /* attempt to receive a packet */
                pkt = skb_recv_datagram(trans->socket->sk, 0, 1, &ret);
                if (!pkt) {
                        if (ret == -EAGAIN) {
                                _leave(" EAGAIN");
                                return;
                        }

                        /* an icmp error may have occurred */
                        rxrpc_krxiod_queue_transport(trans);
                        _leave(" error %d\n", ret);
                        return;
                }

                /* we'll probably need to checksum it (didn't call
                 * sock_recvmsg) */
                if (pkt->ip_summed != CHECKSUM_UNNECESSARY) {
                        if ((unsigned short)
                            csum_fold(skb_checksum(pkt, 0, pkt->len,
                                                   pkt->csum))) {
                                kfree_skb(pkt);
                                rxrpc_krxiod_queue_transport(trans);
                                _leave(" CSUM failed");
                                return;
                        }
                }

                addr = pkt->nh.iph->saddr;
                port = pkt->h.uh->source;

                _net("Rx Received UDP packet from %08x:%04hu",
                     ntohl(addr), ntohs(port));

                /* unmarshall the Rx parameters and split jumbo packets */
                ret = rxrpc_incoming_msg(trans, pkt, &msgq);
                if (ret < 0) {
                        kfree_skb(pkt);
                        rxrpc_krxiod_queue_transport(trans);
                        _leave(" bad packet");
                        return;
                }

                BUG_ON(list_empty(&msgq));

                msg = list_entry(msgq.next, struct rxrpc_message, link);

                /* locate the record for the peer from which it
                 * originated */
                ret = rxrpc_peer_lookup(trans, addr, &peer);
                if (ret < 0) {
                        kdebug("Rx No connections from that peer");
                        rxrpc_trans_immediate_abort(trans, msg, -EINVAL);
                        goto finished_msg;
                }

                /* try and find a matching connection */
                ret = rxrpc_connection_lookup(peer, msg, &msg->conn);
                if (ret < 0) {
                        kdebug("Rx Unknown Connection");
                        rxrpc_trans_immediate_abort(trans, msg, -EINVAL);
                        rxrpc_put_peer(peer);
                        goto finished_msg;
                }
                rxrpc_put_peer(peer);

                /* deal with the first packet of a new call */
                if (msg->hdr.flags & RXRPC_CLIENT_INITIATED &&
                    msg->hdr.type == RXRPC_PACKET_TYPE_DATA &&
                    ntohl(msg->hdr.seq) == 1
                    ) {
                        _debug("Rx New server call");
                        rxrpc_trans_receive_new_call(trans, &msgq);
                        goto finished_msg;
                }

                /* deal with subsequent packet(s) of call */
                _debug("Rx Call packet");
                while (!list_empty(&msgq)) {
                        msg = list_entry(msgq.next, struct rxrpc_message, link);
                        list_del_init(&msg->link);

                        ret = rxrpc_conn_receive_call_packet(msg->conn, NULL, msg);
                        if (ret < 0) {
                                rxrpc_trans_immediate_abort(trans, msg, ret);
                                rxrpc_put_message(msg);
                                goto finished_msg;
                        }

                        rxrpc_put_message(msg);
                }

                goto finished_msg;

                /* dispose of the packets */
        finished_msg:
                while (!list_empty(&msgq)) {
                        msg = list_entry(msgq.next, struct rxrpc_message, link);
                        list_del_init(&msg->link);

                        rxrpc_put_message(msg);
                }
                kfree_skb(pkt);
        }

        _leave("");
} /* end rxrpc_trans_receive_packet() */

/*****************************************************************************/
/*
 * accept a new call from a client trying to connect to one of my services
 * - called in process context
 */
static int rxrpc_trans_receive_new_call(struct rxrpc_transport *trans,
                                        struct list_head *msgq)
{
        struct rxrpc_message *msg;

        _enter("");

        /* only bother with the first packet */
        msg = list_entry(msgq->next, struct rxrpc_message, link);
        list_del_init(&msg->link);
        rxrpc_krxsecd_queue_incoming_call(msg);
        rxrpc_put_message(msg);

        _leave(" = 0");
        return 0;
} /* end rxrpc_trans_receive_new_call() */

/*****************************************************************************/
/*
 * perform an immediate abort without connection or call structures
 */
int rxrpc_trans_immediate_abort(struct rxrpc_transport *trans,
                                struct rxrpc_message *msg,
                                int error)
{
        struct rxrpc_header ahdr;
        struct sockaddr_in sin;
        struct msghdr msghdr;
        struct kvec iov[2];
        __be32 _error;
        int len, ret;

        _enter("%p,%p,%d", trans, msg, error);

        /* don't abort an abort packet */
        if (msg->hdr.type == RXRPC_PACKET_TYPE_ABORT) {
                _leave(" = 0");
                return 0;
        }

        _error = htonl(-error);

        /* set up the message to be transmitted */
        memcpy(&ahdr, &msg->hdr, sizeof(ahdr));
        ahdr.epoch = msg->hdr.epoch;
        ahdr.serial = htonl(1);
        ahdr.seq = 0;
        ahdr.type = RXRPC_PACKET_TYPE_ABORT;
        ahdr.flags = RXRPC_LAST_PACKET;
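        /* the abort travels in the opposite direction to the offending
         * packet, so the client-initiated flag is inverted relative to the
         * header it was copied from */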
        ahdr.flags |= ~msg->hdr.flags & RXRPC_CLIENT_INITIATED;

        iov[0].iov_len = sizeof(ahdr);
        iov[0].iov_base = &ahdr;
        iov[1].iov_len = sizeof(_error);
        iov[1].iov_base = &_error;

        len = sizeof(ahdr) + sizeof(_error);

        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_INET;
        sin.sin_port = msg->pkt->h.uh->source;
        sin.sin_addr.s_addr = msg->pkt->nh.iph->saddr;

        msghdr.msg_name = &sin;
        msghdr.msg_namelen = sizeof(sin);
        msghdr.msg_control = NULL;
        msghdr.msg_controllen = 0;
        msghdr.msg_flags = MSG_DONTWAIT;

        _net("Sending message type %d of %d bytes to %08x:%d",
             ahdr.type,
             len,
             ntohl(sin.sin_addr.s_addr),
             ntohs(sin.sin_port));

        /* send the message */
        ret = kernel_sendmsg(trans->socket, &msghdr, iov, 2, len);

        _leave(" = %d", ret);
        return ret;
} /* end rxrpc_trans_immediate_abort() */

/*****************************************************************************/
/*
 * receive an ICMP error report and percolate it to all connections
 * heading to the affected host or port
 */
static void rxrpc_trans_receive_error_report(struct rxrpc_transport *trans)
{
        struct rxrpc_connection *conn;
        struct sockaddr_in sin;
        struct rxrpc_peer *peer;
        struct list_head connq, *_p;
        struct errormsg emsg;
        struct msghdr msg;
        __be16 port;
        int local, err;

        _enter("%p", trans);

        for (;;) {
                trans->error_rcvd = 0;

                /* try and receive an error message */
                msg.msg_name = &sin;
                msg.msg_namelen = sizeof(sin);
                msg.msg_control = &emsg;
                msg.msg_controllen = sizeof(emsg);
                msg.msg_flags = 0;

                err = kernel_recvmsg(trans->socket, &msg, NULL, 0, 0,
                                     MSG_ERRQUEUE | MSG_DONTWAIT | MSG_TRUNC);
                if (err == -EAGAIN) {
                        _leave("");
                        return;
                }

                if (err < 0) {
                        printk("%s: unable to recv an error report: %d\n",
                               __FUNCTION__, err);
                        _leave("");
                        return;
                }

                msg.msg_controllen = (char *) msg.msg_control - (char *) &emsg;

                if (msg.msg_controllen < sizeof(emsg.cmsg) ||
                    msg.msg_namelen < sizeof(sin)) {
                        printk("%s: short control message"
                               " (nlen=%u clen=%Zu fl=%x)\n",
                               __FUNCTION__,
                               msg.msg_namelen,
                               msg.msg_controllen,
                               msg.msg_flags);
                        continue;
                }

                _net("Rx Received control message"
                     " { len=%Zu level=%u type=%u }",
                     emsg.cmsg.cmsg_len,
                     emsg.cmsg.cmsg_level,
                     emsg.cmsg.cmsg_type);

                if (sin.sin_family != AF_INET) {
                        printk("Rx Ignoring error report with non-INET address"
                               " (fam=%u)",
                               sin.sin_family);
                        continue;
                }

                _net("Rx Received message pertaining to host addr=%x port=%hu",
                     ntohl(sin.sin_addr.s_addr), ntohs(sin.sin_port));

                if (emsg.cmsg.cmsg_level != SOL_IP ||
                    emsg.cmsg.cmsg_type != IP_RECVERR) {
                        printk("Rx Ignoring unknown error report"
                               " { level=%u type=%u }",
                               emsg.cmsg.cmsg_level,
                               emsg.cmsg.cmsg_type);
                        continue;
                }

                if (msg.msg_controllen < sizeof(emsg.cmsg) + sizeof(emsg.ee)) {
                        printk("%s: short error message (%Zu)\n",
                               __FUNCTION__, msg.msg_controllen);
                        _leave("");
                        return;
                }

                port = sin.sin_port;
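
                /* translate the report into an errno; where the error applies
                 * to the host as a whole (network/host unreachable), port is
                 * zeroed so the connection scan below matches every
                 * connection to that peer rather than just one port */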
                switch (emsg.ee.ee_origin) {
                case SO_EE_ORIGIN_ICMP:
                        local = 0;
                        switch (emsg.ee.ee_type) {
                        case ICMP_DEST_UNREACH:
                                switch (emsg.ee.ee_code) {
                                case ICMP_NET_UNREACH:
                                        _net("Rx Received ICMP Network Unreachable");
                                        port = 0;
                                        err = -ENETUNREACH;
                                        break;
                                case ICMP_HOST_UNREACH:
                                        _net("Rx Received ICMP Host Unreachable");
                                        port = 0;
                                        err = -EHOSTUNREACH;
                                        break;
                                case ICMP_PORT_UNREACH:
                                        _net("Rx Received ICMP Port Unreachable");
                                        err = -ECONNREFUSED;
                                        break;
                                case ICMP_NET_UNKNOWN:
                                        _net("Rx Received ICMP Unknown Network");
                                        port = 0;
                                        err = -ENETUNREACH;
                                        break;
                                case ICMP_HOST_UNKNOWN:
                                        _net("Rx Received ICMP Unknown Host");
                                        port = 0;
                                        err = -EHOSTUNREACH;
                                        break;
                                default:
                                        _net("Rx Received ICMP DestUnreach { code=%u }",
                                             emsg.ee.ee_code);
                                        err = emsg.ee.ee_errno;
                                        break;
                                }
                                break;

                        case ICMP_TIME_EXCEEDED:
                                _net("Rx Received ICMP TTL Exceeded");
                                err = emsg.ee.ee_errno;
                                break;

                        default:
                                _proto("Rx Received ICMP error { type=%u code=%u }",
                                       emsg.ee.ee_type, emsg.ee.ee_code);
                                err = emsg.ee.ee_errno;
                                break;
                        }
                        break;

                case SO_EE_ORIGIN_LOCAL:
                        _proto("Rx Received local error { error=%d }",
                               emsg.ee.ee_errno);
                        local = 1;
                        err = emsg.ee.ee_errno;
                        break;

                case SO_EE_ORIGIN_NONE:
                case SO_EE_ORIGIN_ICMP6:
                default:
                        _proto("Rx Received error report { orig=%u }",
                               emsg.ee.ee_origin);
                        local = 0;
                        err = emsg.ee.ee_errno;
                        break;
                }

                /* find all the connections between this transport and the
                 * affected destination */
                INIT_LIST_HEAD(&connq);

                if (rxrpc_peer_lookup(trans, sin.sin_addr.s_addr,
                                      &peer) == 0) {
                        read_lock(&peer->conn_lock);
                        list_for_each(_p, &peer->conn_active) {
                                conn = list_entry(_p, struct rxrpc_connection,
                                                  link);
                                if (port && conn->addr.sin_port != port)
                                        continue;
                                if (!list_empty(&conn->err_link))
                                        continue;
                                rxrpc_get_connection(conn);
                                list_add_tail(&conn->err_link, &connq);
                        }
                        read_unlock(&peer->conn_lock);

                        /* service all those connections */
                        while (!list_empty(&connq)) {
                                conn = list_entry(connq.next,
                                                  struct rxrpc_connection,
                                                  err_link);
                                list_del(&conn->err_link);

                                rxrpc_conn_handle_error(conn, local, err);

                                rxrpc_put_connection(conn);
                        }

                        rxrpc_put_peer(peer);
                }
        }

        _leave("");
        return;
} /* end rxrpc_trans_receive_error_report() */
  707. } /* end rxrpc_trans_receive_error_report() */