/*
 * net/tipc/port.c: TIPC port code
 *
 * Copyright (c) 1992-2006, Ericsson AB
 * Copyright (c) 2004-2005, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "core.h"
#include "config.h"
#include "dbg.h"
#include "port.h"
#include "addr.h"
#include "link.h"
#include "node.h"
#include "name_table.h"
#include "user_reg.h"
#include "msg.h"
#include "bcast.h"
/* Connection management: */
#define PROBING_INTERVAL 3600000	/* [ms] => 1 h */
#define CONFIRMED 0
#define PROBING 1

#define MAX_REJECT_SIZE 1024

static struct sk_buff *msg_queue_head = 0;
static struct sk_buff *msg_queue_tail = 0;

spinlock_t tipc_port_list_lock = SPIN_LOCK_UNLOCKED;
static spinlock_t queue_lock = SPIN_LOCK_UNLOCKED;

static LIST_HEAD(ports);
static void port_handle_node_down(unsigned long ref);
static struct sk_buff *port_build_self_abort_msg(struct port *, u32 err);
static struct sk_buff *port_build_peer_abort_msg(struct port *, u32 err);
static void port_timeout(unsigned long ref);

static inline u32 port_peernode(struct port *p_ptr)
{
        return msg_destnode(&p_ptr->publ.phdr);
}

static inline u32 port_peerport(struct port *p_ptr)
{
        return msg_destport(&p_ptr->publ.phdr);
}

static inline u32 port_out_seqno(struct port *p_ptr)
{
        return msg_transp_seqno(&p_ptr->publ.phdr);
}

static inline void port_set_out_seqno(struct port *p_ptr, u32 seqno)
{
        msg_set_transp_seqno(&p_ptr->publ.phdr, seqno);
}

static inline void port_incr_out_seqno(struct port *p_ptr)
{
        struct tipc_msg *m = &p_ptr->publ.phdr;

        if (likely(!msg_routed(m)))
                return;
        msg_set_transp_seqno(m, (msg_transp_seqno(m) + 1));
}
/**
 * tipc_multicast - send a multicast message to local and remote destinations
 */

int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
                   u32 num_sect, struct iovec const *msg_sect)
{
        struct tipc_msg *hdr;
        struct sk_buff *buf;
        struct sk_buff *ibuf = NULL;
        struct port_list dports = {0, NULL, };
        struct port *oport = tipc_port_deref(ref);
        int ext_targets;
        int res;

        if (unlikely(!oport))
                return -EINVAL;

        /* Create multicast message */

        hdr = &oport->publ.phdr;
        msg_set_type(hdr, TIPC_MCAST_MSG);
        msg_set_nametype(hdr, seq->type);
        msg_set_namelower(hdr, seq->lower);
        msg_set_nameupper(hdr, seq->upper);
        msg_set_hdr_sz(hdr, MCAST_H_SIZE);
        res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
                        !oport->user_port, &buf);
        if (unlikely(!buf))
                return res;

        /* Figure out where to send multicast message */

        ext_targets = tipc_nametbl_mc_translate(seq->type, seq->lower, seq->upper,
                                                TIPC_NODE_SCOPE, &dports);

        /* Send message to destinations (duplicate it only if necessary) */

        if (ext_targets) {
                if (dports.count != 0) {
                        ibuf = skb_copy(buf, GFP_ATOMIC);
                        if (ibuf == NULL) {
                                tipc_port_list_free(&dports);
                                buf_discard(buf);
                                return -ENOMEM;
                        }
                }
                res = tipc_bclink_send_msg(buf);
                if ((res < 0) && (dports.count != 0)) {
                        buf_discard(ibuf);
                }
        } else {
                ibuf = buf;
                res = TIPC_OK;
        }

        if (res >= 0) {
                if (ibuf)
                        tipc_port_recv_mcast(ibuf, &dports);
        } else {
                tipc_port_list_free(&dports);
        }
        return res;
}
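
/*
 * Usage sketch (illustrative only, kept out of the build): a hypothetical
 * native caller multicasting one iovec section to every port bound to
 * name type 1000, instances 0-99.  The port reference 'my_ref' and the
 * name values are example assumptions, not part of this file's API.
 */
#if 0
static void example_multicast(u32 my_ref)
{
        char data[] = "hello";
        struct iovec iov = { data, sizeof(data) };
        struct tipc_name_seq seq = { 1000, 0, 99 };

        /* domain 0 lets TIPC use its default lookup domain */
        tipc_multicast(my_ref, &seq, 0, 1, &iov);
}
#endif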

/**
 * tipc_port_recv_mcast - deliver multicast message to all destination ports
 *
 * If there is no port list, perform a lookup to create one
 */

void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
{
        struct tipc_msg *msg;
        struct port_list dports = {0, NULL, };
        struct port_list *item = dp;
        int cnt = 0;

        assert(buf);
        msg = buf_msg(buf);

        /* Create destination port list, if one wasn't supplied */

        if (dp == NULL) {
                tipc_nametbl_mc_translate(msg_nametype(msg),
                                          msg_namelower(msg),
                                          msg_nameupper(msg),
                                          TIPC_CLUSTER_SCOPE,
                                          &dports);
                item = dp = &dports;
        }

        /* Deliver a copy of message to each destination port */

        if (dp->count != 0) {
                if (dp->count == 1) {
                        msg_set_destport(msg, dp->ports[0]);
                        tipc_port_recv_msg(buf);
                        tipc_port_list_free(dp);
                        return;
                }
                for (; cnt < dp->count; cnt++) {
                        int index = cnt % PLSIZE;
                        struct sk_buff *b = skb_clone(buf, GFP_ATOMIC);

                        if (b == NULL) {
                                warn("Buffer allocation failure\n");
                                msg_dbg(msg, "LOST:");
                                goto exit;
                        }
                        if ((index == 0) && (cnt != 0)) {
                                item = item->next;
                        }
                        msg_set_destport(buf_msg(b), item->ports[index]);
                        tipc_port_recv_msg(b);
                }
        }
exit:
        buf_discard(buf);
        tipc_port_list_free(dp);
}

/**
 * tipc_createport_raw - create a native TIPC port
 *
 * Returns local port reference
 */

u32 tipc_createport_raw(void *usr_handle,
                        u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
                        void (*wakeup)(struct tipc_port *),
                        const u32 importance)
{
        struct port *p_ptr;
        struct tipc_msg *msg;
        u32 ref;

        p_ptr = kmalloc(sizeof(*p_ptr), GFP_ATOMIC);
        if (p_ptr == NULL) {
                warn("Memory squeeze; failed to create port\n");
                return 0;
        }
        memset(p_ptr, 0, sizeof(*p_ptr));
        ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock);
        if (!ref) {
                warn("Reference Table Exhausted\n");
                kfree(p_ptr);
                return 0;
        }

        tipc_port_lock(ref);
        p_ptr->publ.ref = ref;
        msg = &p_ptr->publ.phdr;
        msg_init(msg, DATA_LOW, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE, 0);
        msg_set_orignode(msg, tipc_own_addr);
        msg_set_prevnode(msg, tipc_own_addr);
        msg_set_origport(msg, ref);
        msg_set_importance(msg, importance);
        p_ptr->last_in_seqno = 41;
        p_ptr->sent = 1;
        p_ptr->publ.usr_handle = usr_handle;
        INIT_LIST_HEAD(&p_ptr->wait_list);
        INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
        p_ptr->congested_link = 0;
        p_ptr->max_pkt = MAX_PKT_DEFAULT;
        p_ptr->dispatcher = dispatcher;
        p_ptr->wakeup = wakeup;
        p_ptr->user_port = 0;
        k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref);
        spin_lock_bh(&tipc_port_list_lock);
        INIT_LIST_HEAD(&p_ptr->publications);
        INIT_LIST_HEAD(&p_ptr->port_list);
        list_add_tail(&p_ptr->port_list, &ports);
        spin_unlock_bh(&tipc_port_list_lock);
        tipc_port_unlock(p_ptr);
        return ref;
}

int tipc_deleteport(u32 ref)
{
        struct port *p_ptr;
        struct sk_buff *buf = 0;

        tipc_withdraw(ref, 0, 0);
        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;

        tipc_ref_discard(ref);
        tipc_port_unlock(p_ptr);

        k_cancel_timer(&p_ptr->timer);
        if (p_ptr->publ.connected) {
                buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
                tipc_nodesub_unsubscribe(&p_ptr->subscription);
        }
        if (p_ptr->user_port) {
                tipc_reg_remove_port(p_ptr->user_port);
                kfree(p_ptr->user_port);
        }

        spin_lock_bh(&tipc_port_list_lock);
        list_del(&p_ptr->port_list);
        list_del(&p_ptr->wait_list);
        spin_unlock_bh(&tipc_port_list_lock);
        k_term_timer(&p_ptr->timer);
        kfree(p_ptr);
        dbg("Deleted port %u\n", ref);
        tipc_net_route_msg(buf);
        return TIPC_OK;
}

/**
 * tipc_get_port - return port associated with 'ref'
 *
 * Note: Port is not locked.
 */

struct tipc_port *tipc_get_port(const u32 ref)
{
        return (struct tipc_port *)tipc_ref_deref(ref);
}

/**
 * tipc_get_handle - return user handle associated to port 'ref'
 */

void *tipc_get_handle(const u32 ref)
{
        struct port *p_ptr;
        void *handle;

        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return 0;
        handle = p_ptr->publ.usr_handle;
        tipc_port_unlock(p_ptr);
        return handle;
}

static inline int port_unreliable(struct port *p_ptr)
{
        return msg_src_droppable(&p_ptr->publ.phdr);
}

int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
{
        struct port *p_ptr;

        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
        *isunreliable = port_unreliable(p_ptr);
        spin_unlock_bh(p_ptr->publ.lock);
        return TIPC_OK;
}

int tipc_set_portunreliable(u32 ref, unsigned int isunreliable)
{
        struct port *p_ptr;

        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
        msg_set_src_droppable(&p_ptr->publ.phdr, (isunreliable != 0));
        tipc_port_unlock(p_ptr);
        return TIPC_OK;
}

static inline int port_unreturnable(struct port *p_ptr)
{
        return msg_dest_droppable(&p_ptr->publ.phdr);
}

int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
{
        struct port *p_ptr;

        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
        *isunrejectable = port_unreturnable(p_ptr);
        spin_unlock_bh(p_ptr->publ.lock);
        return TIPC_OK;
}

int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
{
        struct port *p_ptr;

        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
        msg_set_dest_droppable(&p_ptr->publ.phdr, (isunrejectable != 0));
        tipc_port_unlock(p_ptr);
        return TIPC_OK;
}

/*
 * port_build_proto_msg(): build a port level protocol
 * or a connection abortion message. Called with
 * tipc_port lock on.
 */

static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
                                            u32 origport, u32 orignode,
                                            u32 usr, u32 type, u32 err,
                                            u32 seqno, u32 ack)
{
        struct sk_buff *buf;
        struct tipc_msg *msg;

        buf = buf_acquire(LONG_H_SIZE);
        if (buf) {
                msg = buf_msg(buf);
                msg_init(msg, usr, type, err, LONG_H_SIZE, destnode);
                msg_set_destport(msg, destport);
                msg_set_origport(msg, origport);
                msg_set_destnode(msg, destnode);
                msg_set_orignode(msg, orignode);
                msg_set_transp_seqno(msg, seqno);
                msg_set_msgcnt(msg, ack);
                msg_dbg(msg, "PORT>SEND>:");
        }
        return buf;
}

int tipc_set_msg_option(struct tipc_port *tp_ptr, const char *opt, const u32 sz)
{
        msg_expand(&tp_ptr->phdr, msg_destnode(&tp_ptr->phdr));
        msg_set_options(&tp_ptr->phdr, opt, sz);
        return TIPC_OK;
}

int tipc_reject_msg(struct sk_buff *buf, u32 err)
{
        struct tipc_msg *msg = buf_msg(buf);
        struct sk_buff *rbuf;
        struct tipc_msg *rmsg;
        int hdr_sz;
        u32 imp = msg_importance(msg);
        u32 data_sz = msg_data_sz(msg);

        if (data_sz > MAX_REJECT_SIZE)
                data_sz = MAX_REJECT_SIZE;
        if (msg_connected(msg) && (imp < TIPC_CRITICAL_IMPORTANCE))
                imp++;
        msg_dbg(msg, "port->rej: ");

        /* discard rejected message if it shouldn't be returned to sender */

        if (msg_errcode(msg) || msg_dest_droppable(msg)) {
                buf_discard(buf);
                return data_sz;
        }

        /* construct rejected message */

        if (msg_mcast(msg))
                hdr_sz = MCAST_H_SIZE;
        else
                hdr_sz = LONG_H_SIZE;
        rbuf = buf_acquire(data_sz + hdr_sz);
        if (rbuf == NULL) {
                buf_discard(buf);
                return data_sz;
        }
        rmsg = buf_msg(rbuf);
        msg_init(rmsg, imp, msg_type(msg), err, hdr_sz, msg_orignode(msg));
        msg_set_destport(rmsg, msg_origport(msg));
        msg_set_prevnode(rmsg, tipc_own_addr);
        msg_set_origport(rmsg, msg_destport(msg));
        if (msg_short(msg))
                msg_set_orignode(rmsg, tipc_own_addr);
        else
                msg_set_orignode(rmsg, msg_destnode(msg));
        msg_set_size(rmsg, data_sz + hdr_sz);
        msg_set_nametype(rmsg, msg_nametype(msg));
        msg_set_nameinst(rmsg, msg_nameinst(msg));
        memcpy(rbuf->data + hdr_sz, msg_data(msg), data_sz);

        /* send self-abort message when rejecting on a connected port */

        if (msg_connected(msg)) {
                struct sk_buff *abuf = 0;
                struct port *p_ptr = tipc_port_lock(msg_destport(msg));

                if (p_ptr) {
                        if (p_ptr->publ.connected)
                                abuf = port_build_self_abort_msg(p_ptr, err);
                        tipc_port_unlock(p_ptr);
                }
                tipc_net_route_msg(abuf);
        }

        /* send rejected message */

        buf_discard(buf);
        tipc_net_route_msg(rbuf);
        return data_sz;
}

int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
                              struct iovec const *msg_sect, u32 num_sect,
                              int err)
{
        struct sk_buff *buf;
        int res;

        res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
                        !p_ptr->user_port, &buf);
        if (!buf)
                return res;

        return tipc_reject_msg(buf, err);
}

static void port_timeout(unsigned long ref)
{
        struct port *p_ptr = tipc_port_lock(ref);
        struct sk_buff *buf = 0;

        if (!p_ptr || !p_ptr->publ.connected)
                return;

        /* Last probe answered ? */

        if (p_ptr->probing_state == PROBING) {
                buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
        } else {
                buf = port_build_proto_msg(port_peerport(p_ptr),
                                           port_peernode(p_ptr),
                                           p_ptr->publ.ref,
                                           tipc_own_addr,
                                           CONN_MANAGER,
                                           CONN_PROBE,
                                           TIPC_OK,
                                           port_out_seqno(p_ptr),
                                           0);
                port_incr_out_seqno(p_ptr);
                p_ptr->probing_state = PROBING;
                k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
        }
        tipc_port_unlock(p_ptr);
        tipc_net_route_msg(buf);
}

static void port_handle_node_down(unsigned long ref)
{
        struct port *p_ptr = tipc_port_lock(ref);
        struct sk_buff *buf = 0;

        if (!p_ptr)
                return;
        buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_NODE);
        tipc_port_unlock(p_ptr);
        tipc_net_route_msg(buf);
}

static struct sk_buff *port_build_self_abort_msg(struct port *p_ptr, u32 err)
{
        u32 imp = msg_importance(&p_ptr->publ.phdr);

        if (!p_ptr->publ.connected)
                return 0;
        if (imp < TIPC_CRITICAL_IMPORTANCE)
                imp++;
        return port_build_proto_msg(p_ptr->publ.ref,
                                    tipc_own_addr,
                                    port_peerport(p_ptr),
                                    port_peernode(p_ptr),
                                    imp,
                                    TIPC_CONN_MSG,
                                    err,
                                    p_ptr->last_in_seqno + 1,
                                    0);
}

static struct sk_buff *port_build_peer_abort_msg(struct port *p_ptr, u32 err)
{
        u32 imp = msg_importance(&p_ptr->publ.phdr);

        if (!p_ptr->publ.connected)
                return 0;
        if (imp < TIPC_CRITICAL_IMPORTANCE)
                imp++;
        return port_build_proto_msg(port_peerport(p_ptr),
                                    port_peernode(p_ptr),
                                    p_ptr->publ.ref,
                                    tipc_own_addr,
                                    imp,
                                    TIPC_CONN_MSG,
                                    err,
                                    port_out_seqno(p_ptr),
                                    0);
}

void tipc_port_recv_proto_msg(struct sk_buff *buf)
{
        struct tipc_msg *msg = buf_msg(buf);
        struct port *p_ptr = tipc_port_lock(msg_destport(msg));
        u32 err = TIPC_OK;
        struct sk_buff *r_buf = 0;
        struct sk_buff *abort_buf = 0;

        msg_dbg(msg, "PORT<RECV<:");

        if (!p_ptr) {
                err = TIPC_ERR_NO_PORT;
        } else if (p_ptr->publ.connected) {
                if (port_peernode(p_ptr) != msg_orignode(msg))
                        err = TIPC_ERR_NO_PORT;
                if (port_peerport(p_ptr) != msg_origport(msg))
                        err = TIPC_ERR_NO_PORT;
                if (!err && msg_routed(msg)) {
                        u32 seqno = msg_transp_seqno(msg);
                        u32 myno = ++p_ptr->last_in_seqno;

                        if (seqno != myno) {
                                err = TIPC_ERR_NO_PORT;
                                abort_buf = port_build_self_abort_msg(p_ptr, err);
                        }
                }
                if (msg_type(msg) == CONN_ACK) {
                        int wakeup = tipc_port_congested(p_ptr) &&
                                     p_ptr->publ.congested &&
                                     p_ptr->wakeup;
                        p_ptr->acked += msg_msgcnt(msg);
                        if (tipc_port_congested(p_ptr))
                                goto exit;
                        p_ptr->publ.congested = 0;
                        if (!wakeup)
                                goto exit;
                        p_ptr->wakeup(&p_ptr->publ);
                        goto exit;
                }
        } else if (p_ptr->publ.published) {
                err = TIPC_ERR_NO_PORT;
        }
        if (err) {
                r_buf = port_build_proto_msg(msg_origport(msg),
                                             msg_orignode(msg),
                                             msg_destport(msg),
                                             tipc_own_addr,
                                             DATA_HIGH,
                                             TIPC_CONN_MSG,
                                             err,
                                             0,
                                             0);
                goto exit;
        }

        /* All is fine */

        if (msg_type(msg) == CONN_PROBE) {
                r_buf = port_build_proto_msg(msg_origport(msg),
                                             msg_orignode(msg),
                                             msg_destport(msg),
                                             tipc_own_addr,
                                             CONN_MANAGER,
                                             CONN_PROBE_REPLY,
                                             TIPC_OK,
                                             port_out_seqno(p_ptr),
                                             0);
        }
        p_ptr->probing_state = CONFIRMED;
        port_incr_out_seqno(p_ptr);
exit:
        if (p_ptr)
                tipc_port_unlock(p_ptr);
        tipc_net_route_msg(r_buf);
        tipc_net_route_msg(abort_buf);
        buf_discard(buf);
}

static void port_print(struct port *p_ptr, struct print_buf *buf, int full_id)
{
        struct publication *publ;

        if (full_id)
                tipc_printf(buf, "<%u.%u.%u:%u>:",
                            tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
                            tipc_node(tipc_own_addr), p_ptr->publ.ref);
        else
                tipc_printf(buf, "%-10u:", p_ptr->publ.ref);

        if (p_ptr->publ.connected) {
                u32 dport = port_peerport(p_ptr);
                u32 destnode = port_peernode(p_ptr);

                tipc_printf(buf, " connected to <%u.%u.%u:%u>",
                            tipc_zone(destnode), tipc_cluster(destnode),
                            tipc_node(destnode), dport);
                if (p_ptr->publ.conn_type != 0)
                        tipc_printf(buf, " via {%u,%u}",
                                    p_ptr->publ.conn_type,
                                    p_ptr->publ.conn_instance);
        } else if (p_ptr->publ.published) {
                tipc_printf(buf, " bound to");
                list_for_each_entry(publ, &p_ptr->publications, pport_list) {
                        if (publ->lower == publ->upper)
                                tipc_printf(buf, " {%u,%u}", publ->type,
                                            publ->lower);
                        else
                                tipc_printf(buf, " {%u,%u,%u}", publ->type,
                                            publ->lower, publ->upper);
                }
        }
        tipc_printf(buf, "\n");
}

#define MAX_PORT_QUERY 32768

struct sk_buff *tipc_port_get_ports(void)
{
        struct sk_buff *buf;
        struct tlv_desc *rep_tlv;
        struct print_buf pb;
        struct port *p_ptr;
        int str_len;

        buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_QUERY));
        if (!buf)
                return NULL;
        rep_tlv = (struct tlv_desc *)buf->data;

        tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_QUERY);
        spin_lock_bh(&tipc_port_list_lock);
        list_for_each_entry(p_ptr, &ports, port_list) {
                spin_lock_bh(p_ptr->publ.lock);
                port_print(p_ptr, &pb, 0);
                spin_unlock_bh(p_ptr->publ.lock);
        }
        spin_unlock_bh(&tipc_port_list_lock);
        str_len = tipc_printbuf_validate(&pb);

        skb_put(buf, TLV_SPACE(str_len));
        TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);

        return buf;
}

#if 0

#define MAX_PORT_STATS 2000

struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space)
{
        u32 ref;
        struct port *p_ptr;
        struct sk_buff *buf;
        struct tlv_desc *rep_tlv;
        struct print_buf pb;
        int str_len;

        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_PORT_REF))
                return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

        ref = *(u32 *)TLV_DATA(req_tlv_area);
        ref = ntohl(ref);

        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return cfg_reply_error_string("port not found");

        buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_STATS));
        if (!buf) {
                tipc_port_unlock(p_ptr);
                return NULL;
        }
        rep_tlv = (struct tlv_desc *)buf->data;

        tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_STATS);
        port_print(p_ptr, &pb, 1);
        /* NEED TO FILL IN ADDITIONAL PORT STATISTICS HERE */
        tipc_port_unlock(p_ptr);
        str_len = tipc_printbuf_validate(&pb);

        skb_put(buf, TLV_SPACE(str_len));
        TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);

        return buf;
}

#endif

void tipc_port_reinit(void)
{
        struct port *p_ptr;
        struct tipc_msg *msg;

        spin_lock_bh(&tipc_port_list_lock);
        list_for_each_entry(p_ptr, &ports, port_list) {
                msg = &p_ptr->publ.phdr;
                if (msg_orignode(msg) == tipc_own_addr)
                        break;
                msg_set_orignode(msg, tipc_own_addr);
        }
        spin_unlock_bh(&tipc_port_list_lock);
}

/*
 *  port_dispatcher_sigh(): Signal handler for messages destined for
 *                          the tipc_port interface.
 */

static void port_dispatcher_sigh(void *dummy)
{
        struct sk_buff *buf;

        spin_lock_bh(&queue_lock);
        buf = msg_queue_head;
        msg_queue_head = 0;
        spin_unlock_bh(&queue_lock);

        while (buf) {
                struct port *p_ptr;
                struct user_port *up_ptr;
                struct tipc_portid orig;
                struct tipc_name_seq dseq;
                void *usr_handle;
                int connected;
                int published;
                struct sk_buff *next = buf->next;
                struct tipc_msg *msg = buf_msg(buf);
                u32 dref = msg_destport(msg);

                p_ptr = tipc_port_lock(dref);
                if (!p_ptr) {
                        /* Port deleted while msg in queue */
                        tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
                        buf = next;
                        continue;
                }
                orig.ref = msg_origport(msg);
                orig.node = msg_orignode(msg);
                up_ptr = p_ptr->user_port;
                usr_handle = up_ptr->usr_handle;
                connected = p_ptr->publ.connected;
                published = p_ptr->publ.published;

                if (unlikely(msg_errcode(msg)))
                        goto err;

                switch (msg_type(msg)) {

                case TIPC_CONN_MSG:{
                                tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
                                u32 peer_port = port_peerport(p_ptr);
                                u32 peer_node = port_peernode(p_ptr);

                                spin_unlock_bh(p_ptr->publ.lock);
                                if (unlikely(!connected)) {
                                        if (unlikely(published))
                                                goto reject;
                                        tipc_connect2port(dref, &orig);
                                }
                                if (unlikely(msg_origport(msg) != peer_port))
                                        goto reject;
                                if (unlikely(msg_orignode(msg) != peer_node))
                                        goto reject;
                                if (unlikely(!cb))
                                        goto reject;
                                if (unlikely(++p_ptr->publ.conn_unacked >=
                                             TIPC_FLOW_CONTROL_WIN))
                                        tipc_acknowledge(dref,
                                                         p_ptr->publ.conn_unacked);
                                skb_pull(buf, msg_hdr_sz(msg));
                                cb(usr_handle, dref, &buf, msg_data(msg),
                                   msg_data_sz(msg));
                                break;
                        }
                case TIPC_DIRECT_MSG:{
                                tipc_msg_event cb = up_ptr->msg_cb;

                                spin_unlock_bh(p_ptr->publ.lock);
                                if (unlikely(connected))
                                        goto reject;
                                if (unlikely(!cb))
                                        goto reject;
                                skb_pull(buf, msg_hdr_sz(msg));
                                cb(usr_handle, dref, &buf, msg_data(msg),
                                   msg_data_sz(msg), msg_importance(msg),
                                   &orig);
                                break;
                        }
                case TIPC_NAMED_MSG:{
                                tipc_named_msg_event cb = up_ptr->named_msg_cb;

                                spin_unlock_bh(p_ptr->publ.lock);
                                if (unlikely(connected))
                                        goto reject;
                                if (unlikely(!cb))
                                        goto reject;
                                if (unlikely(!published))
                                        goto reject;
                                dseq.type = msg_nametype(msg);
                                dseq.lower = msg_nameinst(msg);
                                dseq.upper = dseq.lower;
                                skb_pull(buf, msg_hdr_sz(msg));
                                cb(usr_handle, dref, &buf, msg_data(msg),
                                   msg_data_sz(msg), msg_importance(msg),
                                   &orig, &dseq);
                                break;
                        }
                }
                if (buf)
                        buf_discard(buf);
                buf = next;
                continue;
err:
                switch (msg_type(msg)) {

                case TIPC_CONN_MSG:{
                                tipc_conn_shutdown_event cb =
                                        up_ptr->conn_err_cb;
                                u32 peer_port = port_peerport(p_ptr);
                                u32 peer_node = port_peernode(p_ptr);

                                spin_unlock_bh(p_ptr->publ.lock);
                                if (!connected || !cb)
                                        break;
                                if (msg_origport(msg) != peer_port)
                                        break;
                                if (msg_orignode(msg) != peer_node)
                                        break;
                                tipc_disconnect(dref);
                                skb_pull(buf, msg_hdr_sz(msg));
                                cb(usr_handle, dref, &buf, msg_data(msg),
                                   msg_data_sz(msg), msg_errcode(msg));
                                break;
                        }
                case TIPC_DIRECT_MSG:{
                                tipc_msg_err_event cb = up_ptr->err_cb;

                                spin_unlock_bh(p_ptr->publ.lock);
                                if (connected || !cb)
                                        break;
                                skb_pull(buf, msg_hdr_sz(msg));
                                cb(usr_handle, dref, &buf, msg_data(msg),
                                   msg_data_sz(msg), msg_errcode(msg), &orig);
                                break;
                        }
                case TIPC_NAMED_MSG:{
                                tipc_named_msg_err_event cb =
                                        up_ptr->named_err_cb;

                                spin_unlock_bh(p_ptr->publ.lock);
                                if (connected || !cb)
                                        break;
                                dseq.type = msg_nametype(msg);
                                dseq.lower = msg_nameinst(msg);
                                dseq.upper = dseq.lower;
                                skb_pull(buf, msg_hdr_sz(msg));
                                cb(usr_handle, dref, &buf, msg_data(msg),
                                   msg_data_sz(msg), msg_errcode(msg), &dseq);
                                break;
                        }
                }
                if (buf)
                        buf_discard(buf);
                buf = next;
                continue;
reject:
                tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
                buf = next;
        }
}

/*
 *  port_dispatcher(): Dispatcher for messages destined for
 *                     the tipc_port interface. Called with port locked.
 */

static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
{
        buf->next = NULL;
        spin_lock_bh(&queue_lock);
        if (msg_queue_head) {
                msg_queue_tail->next = buf;
                msg_queue_tail = buf;
        } else {
                msg_queue_tail = msg_queue_head = buf;
                tipc_k_signal((Handler)port_dispatcher_sigh, 0);
        }
        spin_unlock_bh(&queue_lock);
        return TIPC_OK;
}

/*
 * Wake up port after congestion: Called with port locked.
 */

static void port_wakeup_sh(unsigned long ref)
{
        struct port *p_ptr;
        struct user_port *up_ptr;
        tipc_continue_event cb = 0;
        void *uh = 0;

        p_ptr = tipc_port_lock(ref);
        if (p_ptr) {
                up_ptr = p_ptr->user_port;
                if (up_ptr) {
                        cb = up_ptr->continue_event_cb;
                        uh = up_ptr->usr_handle;
                }
                tipc_port_unlock(p_ptr);
        }
        if (cb)
                cb(uh, ref);
}

static void port_wakeup(struct tipc_port *p_ptr)
{
        tipc_k_signal((Handler)port_wakeup_sh, p_ptr->ref);
}

void tipc_acknowledge(u32 ref, u32 ack)
{
        struct port *p_ptr;
        struct sk_buff *buf = 0;

        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return;
        if (p_ptr->publ.connected) {
                p_ptr->publ.conn_unacked -= ack;
                buf = port_build_proto_msg(port_peerport(p_ptr),
                                           port_peernode(p_ptr),
                                           ref,
                                           tipc_own_addr,
                                           CONN_MANAGER,
                                           CONN_ACK,
                                           TIPC_OK,
                                           port_out_seqno(p_ptr),
                                           ack);
        }
        tipc_port_unlock(p_ptr);
        tipc_net_route_msg(buf);
}

/*
 * tipc_createport(): user level call. Will add port to
 *                    registry if non-zero user_ref.
 */

int tipc_createport(u32 user_ref,
                    void *usr_handle,
                    unsigned int importance,
                    tipc_msg_err_event error_cb,
                    tipc_named_msg_err_event named_error_cb,
                    tipc_conn_shutdown_event conn_error_cb,
                    tipc_msg_event msg_cb,
                    tipc_named_msg_event named_msg_cb,
                    tipc_conn_msg_event conn_msg_cb,
                    tipc_continue_event continue_event_cb, /* May be zero */
                    u32 *portref)
{
        struct user_port *up_ptr;
        struct port *p_ptr;
        u32 ref;

        up_ptr = (struct user_port *)kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
        if (up_ptr == NULL) {
                return -ENOMEM;
        }
        ref = tipc_createport_raw(0, port_dispatcher, port_wakeup, importance);
        p_ptr = tipc_port_lock(ref);
        if (!p_ptr) {
                kfree(up_ptr);
                return -ENOMEM;
        }

        p_ptr->user_port = up_ptr;
        up_ptr->user_ref = user_ref;
        up_ptr->usr_handle = usr_handle;
        up_ptr->ref = p_ptr->publ.ref;
        up_ptr->err_cb = error_cb;
        up_ptr->named_err_cb = named_error_cb;
        up_ptr->conn_err_cb = conn_error_cb;
        up_ptr->msg_cb = msg_cb;
        up_ptr->named_msg_cb = named_msg_cb;
        up_ptr->conn_msg_cb = conn_msg_cb;
        up_ptr->continue_event_cb = continue_event_cb;
        INIT_LIST_HEAD(&up_ptr->uport_list);
        tipc_reg_add_port(up_ptr);
        *portref = p_ptr->publ.ref;
        dbg(" tipc_createport: %x with ref %u\n", p_ptr, p_ptr->publ.ref);
        tipc_port_unlock(p_ptr);
        return TIPC_OK;
}
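
/*
 * Usage sketch (illustrative only, kept out of the build): creating a
 * user port with default settings.  A real caller passes handlers of the
 * tipc_*_event types invoked from port_dispatcher_sigh() above; with the
 * NULLs used here the port simply rejects whatever it receives.
 */
#if 0
static int example_createport(u32 *port_ref)
{
        return tipc_createport(0,                      /* no user registry entry */
                               NULL,                   /* usr_handle */
                               TIPC_LOW_IMPORTANCE,
                               NULL, NULL, NULL,       /* error callbacks */
                               NULL, NULL, NULL,       /* message callbacks */
                               NULL,                   /* continue callback */
                               port_ref);
}
#endif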

int tipc_ownidentity(u32 ref, struct tipc_portid *id)
{
        id->ref = ref;
        id->node = tipc_own_addr;
        return TIPC_OK;
}

int tipc_portimportance(u32 ref, unsigned int *importance)
{
        struct port *p_ptr;

        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
        *importance = (unsigned int)msg_importance(&p_ptr->publ.phdr);
        spin_unlock_bh(p_ptr->publ.lock);
        return TIPC_OK;
}

int tipc_set_portimportance(u32 ref, unsigned int imp)
{
        struct port *p_ptr;

        if (imp > TIPC_CRITICAL_IMPORTANCE)
                return -EINVAL;

        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
        msg_set_importance(&p_ptr->publ.phdr, (u32)imp);
        spin_unlock_bh(p_ptr->publ.lock);
        return TIPC_OK;
}

int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
{
        struct port *p_ptr;
        struct publication *publ;
        u32 key;
        int res = -EINVAL;

        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;

        dbg("tipc_publ %u, p_ptr = %x, conn = %x, scope = %x, "
            "lower = %u, upper = %u\n",
            ref, p_ptr, p_ptr->publ.connected, scope, seq->lower, seq->upper);
        if (p_ptr->publ.connected)
                goto exit;
        if (seq->lower > seq->upper)
                goto exit;
        if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE))
                goto exit;
        key = ref + p_ptr->pub_count + 1;
        if (key == ref) {
                res = -EADDRINUSE;
                goto exit;
        }
        publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
                                    scope, p_ptr->publ.ref, key);
        if (publ) {
                list_add(&publ->pport_list, &p_ptr->publications);
                p_ptr->pub_count++;
                p_ptr->publ.published = 1;
                res = TIPC_OK;
        }
exit:
        tipc_port_unlock(p_ptr);
        return res;
}

int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
{
        struct port *p_ptr;
        struct publication *publ;
        struct publication *tpubl;
        int res = -EINVAL;

        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
        if (!p_ptr->publ.published)
                goto exit;
        if (!seq) {
                list_for_each_entry_safe(publ, tpubl,
                                         &p_ptr->publications, pport_list) {
                        tipc_nametbl_withdraw(publ->type, publ->lower,
                                              publ->ref, publ->key);
                }
                res = TIPC_OK;
        } else {
                list_for_each_entry_safe(publ, tpubl,
                                         &p_ptr->publications, pport_list) {
                        if (publ->scope != scope)
                                continue;
                        if (publ->type != seq->type)
                                continue;
                        if (publ->lower != seq->lower)
                                continue;
                        if (publ->upper != seq->upper)
                                break;
                        tipc_nametbl_withdraw(publ->type, publ->lower,
                                              publ->ref, publ->key);
                        res = TIPC_OK;
                        break;
                }
        }
        if (list_empty(&p_ptr->publications))
                p_ptr->publ.published = 0;
exit:
        tipc_port_unlock(p_ptr);
        return res;
}
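
/*
 * Usage sketch (illustrative only, kept out of the build): binding a port
 * to the single name {1000,1} with node-local scope and withdrawing it
 * again.  The type/instance values are arbitrary example choices.
 */
#if 0
static void example_bind_unbind(u32 port_ref)
{
        struct tipc_name_seq seq = { 1000, 1, 1 };

        tipc_publish(port_ref, TIPC_NODE_SCOPE, &seq);
        /* ... receive named messages via the port's callbacks ... */
        tipc_withdraw(port_ref, TIPC_NODE_SCOPE, &seq);
}
#endif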

int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
{
        struct port *p_ptr;
        struct tipc_msg *msg;
        int res = -EINVAL;

        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
        if (p_ptr->publ.published || p_ptr->publ.connected)
                goto exit;
        if (!peer->ref)
                goto exit;

        msg = &p_ptr->publ.phdr;
        msg_set_destnode(msg, peer->node);
        msg_set_destport(msg, peer->ref);
        msg_set_orignode(msg, tipc_own_addr);
        msg_set_origport(msg, p_ptr->publ.ref);
        msg_set_transp_seqno(msg, 42);
        msg_set_type(msg, TIPC_CONN_MSG);
        if (!may_route(peer->node))
                msg_set_hdr_sz(msg, SHORT_H_SIZE);
        else
                msg_set_hdr_sz(msg, LONG_H_SIZE);

        p_ptr->probing_interval = PROBING_INTERVAL;
        p_ptr->probing_state = CONFIRMED;
        p_ptr->publ.connected = 1;
        k_start_timer(&p_ptr->timer, p_ptr->probing_interval);

        tipc_nodesub_subscribe(&p_ptr->subscription, peer->node,
                               (void *)(unsigned long)ref,
                               (net_ev_handler)port_handle_node_down);
        res = TIPC_OK;
exit:
        tipc_port_unlock(p_ptr);
        p_ptr->max_pkt = tipc_link_get_max_pkt(peer->node, ref);
        return res;
}

/*
 * tipc_disconnect(): Disconnect port from peer.
 *                    This is a node local operation.
 */

int tipc_disconnect(u32 ref)
{
        struct port *p_ptr;
        int res = -ENOTCONN;

        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
        if (p_ptr->publ.connected) {
                p_ptr->publ.connected = 0;
                /* let timer expire on its own to avoid deadlock! */
                tipc_nodesub_unsubscribe(&p_ptr->subscription);
                res = TIPC_OK;
        }
        tipc_port_unlock(p_ptr);
        return res;
}

/*
 * tipc_shutdown(): Send a SHUTDOWN msg to peer and disconnect
 */

int tipc_shutdown(u32 ref)
{
        struct port *p_ptr;
        struct sk_buff *buf = 0;

        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;

        if (p_ptr->publ.connected) {
                u32 imp = msg_importance(&p_ptr->publ.phdr);

                if (imp < TIPC_CRITICAL_IMPORTANCE)
                        imp++;
                buf = port_build_proto_msg(port_peerport(p_ptr),
                                           port_peernode(p_ptr),
                                           ref,
                                           tipc_own_addr,
                                           imp,
                                           TIPC_CONN_MSG,
                                           TIPC_CONN_SHUTDOWN,
                                           port_out_seqno(p_ptr),
                                           0);
        }
        tipc_port_unlock(p_ptr);
        tipc_net_route_msg(buf);
        return tipc_disconnect(ref);
}

int tipc_isconnected(u32 ref, int *isconnected)
{
        struct port *p_ptr;

        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
        *isconnected = p_ptr->publ.connected;
        tipc_port_unlock(p_ptr);
        return TIPC_OK;
}

int tipc_peer(u32 ref, struct tipc_portid *peer)
{
        struct port *p_ptr;
        int res;

        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
        if (p_ptr->publ.connected) {
                peer->ref = port_peerport(p_ptr);
                peer->node = port_peernode(p_ptr);
                res = TIPC_OK;
        } else
                res = -ENOTCONN;
        tipc_port_unlock(p_ptr);
        return res;
}

int tipc_ref_valid(u32 ref)
{
        /* Works irrespective of type */
        return !!tipc_ref_deref(ref);
}

/*
 * tipc_port_recv_sections(): Concatenate and deliver sectioned
 *                            message for this node.
 */

int tipc_port_recv_sections(struct port *sender, unsigned int num_sect,
                            struct iovec const *msg_sect)
{
        struct sk_buff *buf;
        int res;

        res = msg_build(&sender->publ.phdr, msg_sect, num_sect,
                        MAX_MSG_SIZE, !sender->user_port, &buf);
        if (likely(buf))
                tipc_port_recv_msg(buf);
        return res;
}

/**
 * tipc_send - send message sections on connection
 */

int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
{
        struct port *p_ptr;
        u32 destnode;
        int res;

        p_ptr = tipc_port_deref(ref);
        if (!p_ptr || !p_ptr->publ.connected)
                return -EINVAL;

        p_ptr->publ.congested = 1;
        if (!tipc_port_congested(p_ptr)) {
                destnode = port_peernode(p_ptr);
                if (likely(destnode != tipc_own_addr))
                        res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
                                                           destnode);
                else
                        res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect);

                if (likely(res != -ELINKCONG)) {
                        port_incr_out_seqno(p_ptr);
                        p_ptr->publ.congested = 0;
                        p_ptr->sent++;
                        return res;
                }
        }
        if (port_unreliable(p_ptr)) {
                p_ptr->publ.congested = 0;
                /* Just calculate msg length and return */
                return msg_calc_data_size(msg_sect, num_sect);
        }
        return -ELINKCONG;
}
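
/*
 * Usage sketch (illustrative only, kept out of the build): connecting a
 * port to a known peer port id and sending one section on the connection.
 * An -ELINKCONG return from tipc_send() means the caller should wait for
 * its continue/wakeup callback and retry.
 */
#if 0
static int example_connect_and_send(u32 port_ref, struct tipc_portid const *peer)
{
        char data[] = "ping";
        struct iovec iov = { data, sizeof(data) };
        int res;

        res = tipc_connect2port(port_ref, peer);
        if (res != TIPC_OK)
                return res;
        return tipc_send(port_ref, 1, &iov);
}
#endif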

/**
 * tipc_send_buf - send message buffer on connection
 */

int tipc_send_buf(u32 ref, struct sk_buff *buf, unsigned int dsz)
{
        struct port *p_ptr;
        struct tipc_msg *msg;
        u32 destnode;
        u32 hsz;
        u32 sz;
        u32 res;

        p_ptr = tipc_port_deref(ref);
        if (!p_ptr || !p_ptr->publ.connected)
                return -EINVAL;

        msg = &p_ptr->publ.phdr;
        hsz = msg_hdr_sz(msg);
        sz = hsz + dsz;
        msg_set_size(msg, sz);
        if (skb_cow(buf, hsz))
                return -ENOMEM;

        skb_push(buf, hsz);
        memcpy(buf->data, (unchar *)msg, hsz);
        destnode = msg_destnode(msg);
        p_ptr->publ.congested = 1;
        if (!tipc_port_congested(p_ptr)) {
                if (likely(destnode != tipc_own_addr))
                        res = tipc_send_buf_fast(buf, destnode);
                else {
                        tipc_port_recv_msg(buf);
                        res = sz;
                }
                if (likely(res != -ELINKCONG)) {
                        port_incr_out_seqno(p_ptr);
                        p_ptr->sent++;
                        p_ptr->publ.congested = 0;
                        return res;
                }
        }
        if (port_unreliable(p_ptr)) {
                p_ptr->publ.congested = 0;
                return dsz;
        }
        return -ELINKCONG;
}

/**
 * tipc_forward2name - forward message sections to port name
 */

int tipc_forward2name(u32 ref,
                      struct tipc_name const *name,
                      u32 domain,
                      u32 num_sect,
                      struct iovec const *msg_sect,
                      struct tipc_portid const *orig,
                      unsigned int importance)
{
        struct port *p_ptr;
        struct tipc_msg *msg;
        u32 destnode = domain;
        u32 destport = 0;
        int res;

        p_ptr = tipc_port_deref(ref);
        if (!p_ptr || p_ptr->publ.connected)
                return -EINVAL;

        msg = &p_ptr->publ.phdr;
        msg_set_type(msg, TIPC_NAMED_MSG);
        msg_set_orignode(msg, orig->node);
        msg_set_origport(msg, orig->ref);
        msg_set_hdr_sz(msg, LONG_H_SIZE);
        msg_set_nametype(msg, name->type);
        msg_set_nameinst(msg, name->instance);
        msg_set_lookup_scope(msg, addr_scope(domain));
        if (importance <= TIPC_CRITICAL_IMPORTANCE)
                msg_set_importance(msg, importance);
        destport = tipc_nametbl_translate(name->type, name->instance, &destnode);
        msg_set_destnode(msg, destnode);
        msg_set_destport(msg, destport);

        if (likely(destport || destnode)) {
                p_ptr->sent++;
                if (likely(destnode == tipc_own_addr))
                        return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
                res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
                                                   destnode);
                if (likely(res != -ELINKCONG))
                        return res;
                if (port_unreliable(p_ptr)) {
                        /* Just calculate msg length and return */
                        return msg_calc_data_size(msg_sect, num_sect);
                }
                return -ELINKCONG;
        }
        return tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
                                         TIPC_ERR_NO_NAME);
}

/**
 * tipc_send2name - send message sections to port name
 */

int tipc_send2name(u32 ref,
                   struct tipc_name const *name,
                   unsigned int domain,
                   unsigned int num_sect,
                   struct iovec const *msg_sect)
{
        struct tipc_portid orig;

        orig.ref = ref;
        orig.node = tipc_own_addr;
        return tipc_forward2name(ref, name, domain, num_sect, msg_sect, &orig,
                                 TIPC_PORT_IMPORTANCE);
}
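
/*
 * Usage sketch (illustrative only, kept out of the build): connectionless
 * send of one section to whichever port is bound to name {1000,1}; the
 * name table lookup above selects the destination.  The name values and
 * the domain of 0 (default lookup domain) are example assumptions.
 */
#if 0
static int example_send_to_name(u32 port_ref)
{
        char data[] = "hello";
        struct iovec iov = { data, sizeof(data) };
        struct tipc_name name = { 1000, 1 };

        return tipc_send2name(port_ref, &name, 0, 1, &iov);
}
#endif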

/**
 * tipc_forward_buf2name - forward message buffer to port name
 */

int tipc_forward_buf2name(u32 ref,
                          struct tipc_name const *name,
                          u32 domain,
                          struct sk_buff *buf,
                          unsigned int dsz,
                          struct tipc_portid const *orig,
                          unsigned int importance)
{
        struct port *p_ptr;
        struct tipc_msg *msg;
        u32 destnode = domain;
        u32 destport = 0;
        int res;

        p_ptr = (struct port *)tipc_ref_deref(ref);
        if (!p_ptr || p_ptr->publ.connected)
                return -EINVAL;

        msg = &p_ptr->publ.phdr;
        if (importance <= TIPC_CRITICAL_IMPORTANCE)
                msg_set_importance(msg, importance);
        msg_set_type(msg, TIPC_NAMED_MSG);
        msg_set_orignode(msg, orig->node);
        msg_set_origport(msg, orig->ref);
        msg_set_nametype(msg, name->type);
        msg_set_nameinst(msg, name->instance);
        msg_set_lookup_scope(msg, addr_scope(domain));
        msg_set_hdr_sz(msg, LONG_H_SIZE);
        msg_set_size(msg, LONG_H_SIZE + dsz);
        destport = tipc_nametbl_translate(name->type, name->instance, &destnode);
        msg_set_destnode(msg, destnode);
        msg_set_destport(msg, destport);
        msg_dbg(msg, "forw2name ==> ");
        if (skb_cow(buf, LONG_H_SIZE))
                return -ENOMEM;
        skb_push(buf, LONG_H_SIZE);
        memcpy(buf->data, (unchar *)msg, LONG_H_SIZE);
        msg_dbg(buf_msg(buf), "PREP:");
        if (likely(destport || destnode)) {
                p_ptr->sent++;
                if (destnode == tipc_own_addr)
                        return tipc_port_recv_msg(buf);
                res = tipc_send_buf_fast(buf, destnode);
                if (likely(res != -ELINKCONG))
                        return res;
                if (port_unreliable(p_ptr))
                        return dsz;
                return -ELINKCONG;
        }
        return tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
}

/**
 * tipc_send_buf2name - send message buffer to port name
 */

int tipc_send_buf2name(u32 ref,
                       struct tipc_name const *dest,
                       u32 domain,
                       struct sk_buff *buf,
                       unsigned int dsz)
{
        struct tipc_portid orig;

        orig.ref = ref;
        orig.node = tipc_own_addr;
        return tipc_forward_buf2name(ref, dest, domain, buf, dsz, &orig,
                                     TIPC_PORT_IMPORTANCE);
}

/**
 * tipc_forward2port - forward message sections to port identity
 */

int tipc_forward2port(u32 ref,
                      struct tipc_portid const *dest,
                      unsigned int num_sect,
                      struct iovec const *msg_sect,
                      struct tipc_portid const *orig,
                      unsigned int importance)
{
        struct port *p_ptr;
        struct tipc_msg *msg;
        int res;

        p_ptr = tipc_port_deref(ref);
        if (!p_ptr || p_ptr->publ.connected)
                return -EINVAL;

        msg = &p_ptr->publ.phdr;
        msg_set_type(msg, TIPC_DIRECT_MSG);
        msg_set_orignode(msg, orig->node);
        msg_set_origport(msg, orig->ref);
        msg_set_destnode(msg, dest->node);
        msg_set_destport(msg, dest->ref);
        msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
        if (importance <= TIPC_CRITICAL_IMPORTANCE)
                msg_set_importance(msg, importance);
        p_ptr->sent++;
        if (dest->node == tipc_own_addr)
                return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
        res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, dest->node);
        if (likely(res != -ELINKCONG))
                return res;
        if (port_unreliable(p_ptr)) {
                /* Just calculate msg length and return */
                return msg_calc_data_size(msg_sect, num_sect);
        }
        return -ELINKCONG;
}

/**
 * tipc_send2port - send message sections to port identity
 */

int tipc_send2port(u32 ref,
                   struct tipc_portid const *dest,
                   unsigned int num_sect,
                   struct iovec const *msg_sect)
{
        struct tipc_portid orig;

        orig.ref = ref;
        orig.node = tipc_own_addr;
        return tipc_forward2port(ref, dest, num_sect, msg_sect, &orig,
                                 TIPC_PORT_IMPORTANCE);
}

/**
 * tipc_forward_buf2port - forward message buffer to port identity
 */

int tipc_forward_buf2port(u32 ref,
                          struct tipc_portid const *dest,
                          struct sk_buff *buf,
                          unsigned int dsz,
                          struct tipc_portid const *orig,
                          unsigned int importance)
{
        struct port *p_ptr;
        struct tipc_msg *msg;
        int res;

        p_ptr = (struct port *)tipc_ref_deref(ref);
        if (!p_ptr || p_ptr->publ.connected)
                return -EINVAL;

        msg = &p_ptr->publ.phdr;
        msg_set_type(msg, TIPC_DIRECT_MSG);
        msg_set_orignode(msg, orig->node);
        msg_set_origport(msg, orig->ref);
        msg_set_destnode(msg, dest->node);
        msg_set_destport(msg, dest->ref);
        msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
        if (importance <= TIPC_CRITICAL_IMPORTANCE)
                msg_set_importance(msg, importance);
        msg_set_size(msg, DIR_MSG_H_SIZE + dsz);
        if (skb_cow(buf, DIR_MSG_H_SIZE))
                return -ENOMEM;

        skb_push(buf, DIR_MSG_H_SIZE);
        memcpy(buf->data, (unchar *)msg, DIR_MSG_H_SIZE);
        msg_dbg(msg, "buf2port: ");
        p_ptr->sent++;
        if (dest->node == tipc_own_addr)
                return tipc_port_recv_msg(buf);
        res = tipc_send_buf_fast(buf, dest->node);
        if (likely(res != -ELINKCONG))
                return res;
        if (port_unreliable(p_ptr))
                return dsz;
        return -ELINKCONG;
}

/**
 * tipc_send_buf2port - send message buffer to port identity
 */

int tipc_send_buf2port(u32 ref,
                       struct tipc_portid const *dest,
                       struct sk_buff *buf,
                       unsigned int dsz)
{
        struct tipc_portid orig;

        orig.ref = ref;
        orig.node = tipc_own_addr;
        return tipc_forward_buf2port(ref, dest, buf, dsz, &orig,
                                     TIPC_PORT_IMPORTANCE);
}