xpc_channel.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) channel support.
 *
 *	This is the part of XPC that manages the channels and
 *	sends/receives messages across them to/from other partitions.
 *
 */

#include <linux/device.h>
#include "xpc.h"
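
/*
 * Overview sketch (added commentary, not part of the original file): from a
 * client's point of view, the channel lifecycle this file implements is,
 * roughly:
 *
 *	xpc_connect(ch_number, func, key, payload_size, nentries,
 *		    assigned_limit, idle_limit)
 *		-> XPC exchanges OPENREQUEST/OPENREPLY with each active
 *		   partition and calls func(xpConnected, ...) per connection.
 *	xpc_send() / xpc_send_notify()
 *		-> queue a payload on a connected channel.
 *	func(xpMsgReceived, ..., payload, key), then xpc_received()
 *		-> delivery and acknowledgment on the receiving side.
 *	xpc_disconnect(ch_number)
 *		-> CLOSEREQUEST/CLOSEREPLY handshake tears the channel down.
 *
 * The xpc_connect()/xpc_send()/xpc_received()/xpc_disconnect() wrappers are
 * declared in xp.h; the code below implements XPC's side of that contract.
 */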

/*
 * Process a connect message from a remote partition.
 *
 * Note: xpc_process_connect() is expected to be called with the channel's
 * lock held (taken via spin_lock_irqsave()) and will leave it locked upon
 * return.
 */
static void
xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	enum xp_retval ret;

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_OPENREQUEST) ||
	    !(ch->flags & XPC_C_ROPENREQUEST)) {
		/* nothing more to do for now */
		return;
	}
	DBUG_ON(!(ch->flags & XPC_C_CONNECTING));

	if (!(ch->flags & XPC_C_SETUP)) {
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		ret = xpc_setup_msg_structures(ch);
		spin_lock_irqsave(&ch->lock, *irq_flags);

		if (ret != xpSuccess)
			XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);

		ch->flags |= XPC_C_SETUP;

		if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
			return;

		DBUG_ON(ch->local_msgqueue == NULL);
		DBUG_ON(ch->remote_msgqueue == NULL);
	}

	if (!(ch->flags & XPC_C_OPENREPLY)) {
		ch->flags |= XPC_C_OPENREPLY;
		xpc_send_chctl_openreply(ch, irq_flags);
	}

	if (!(ch->flags & XPC_C_ROPENREPLY))
		return;

	ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP);	/* clear all else */

	dev_info(xpc_chan, "channel %d to partition %d connected\n",
		 ch->number, ch->partid);

	spin_unlock_irqrestore(&ch->lock, *irq_flags);
	xpc_create_kthreads(ch, 1, 0);
	spin_lock_irqsave(&ch->lock, *irq_flags);
}

/*
 * The channel's lock (taken via spin_lock_irqsave()) is expected to be held
 * on entry.
 */
static void
xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_DISCONNECTING))
		return;

	DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

	/* make sure all activity has settled down first */

	if (atomic_read(&ch->kthreads_assigned) > 0 ||
	    atomic_read(&ch->references) > 0) {
		return;
	}
	DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));

	if (part->act_state == XPC_P_AS_DEACTIVATING) {
		/* can't proceed until the other side disengages from us */
		if (xpc_partition_engaged(ch->partid))
			return;

	} else {

		/* as long as the other side is up do the full protocol */

		if (!(ch->flags & XPC_C_RCLOSEREQUEST))
			return;

		if (!(ch->flags & XPC_C_CLOSEREPLY)) {
			ch->flags |= XPC_C_CLOSEREPLY;
			xpc_send_chctl_closereply(ch, irq_flags);
		}

		if (!(ch->flags & XPC_C_RCLOSEREPLY))
			return;
	}

	/* wake those waiting for notify completion */

	if (atomic_read(&ch->n_to_notify) > 0) {
		/* we do callout while holding ch->lock, callout can't block */
		xpc_notify_senders_of_disconnect(ch);
	}

	/* both sides are disconnected now */

	if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) {
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		xpc_disconnect_callout(ch, xpDisconnected);
		spin_lock_irqsave(&ch->lock, *irq_flags);
	}

	DBUG_ON(atomic_read(&ch->n_to_notify) != 0);

	/* it's now safe to free the channel's message queues */
	xpc_teardown_msg_structures(ch);

	ch->func = NULL;
	ch->key = NULL;
	ch->entry_size = 0;
	ch->local_nentries = 0;
	ch->remote_nentries = 0;
	ch->kthreads_assigned_limit = 0;
	ch->kthreads_idle_limit = 0;

	/*
	 * Mark the channel disconnected and clear all other flags, including
	 * XPC_C_SETUP (because of call to xpc_teardown_msg_structures()) but
	 * not including XPC_C_WDISCONNECT (if it was set).
	 */
	ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));

	atomic_dec(&part->nchannels_active);

	if (channel_was_connected) {
		dev_info(xpc_chan, "channel %d to partition %d disconnected, "
			 "reason=%d\n", ch->number, ch->partid, ch->reason);
	}

	if (ch->flags & XPC_C_WDISCONNECT) {
		/* we won't lose the CPU since we're holding ch->lock */
		complete(&ch->wdisconnect_wait);
	} else if (ch->delayed_chctl_flags) {
		if (part->act_state != XPC_P_AS_DEACTIVATING) {
			/* time to take action on any delayed chctl flags */
			spin_lock(&part->chctl_lock);
			part->chctl.flags[ch->number] |=
			    ch->delayed_chctl_flags;
			spin_unlock(&part->chctl_lock);
		}
		ch->delayed_chctl_flags = 0;
	}
}

/*
 * Process a change in the channel's remote connection state.
 */
static void
xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number,
				  u8 chctl_flags)
{
	unsigned long irq_flags;
	struct xpc_openclose_args *args =
	    &part->remote_openclose_args[ch_number];
	struct xpc_channel *ch = &part->channels[ch_number];
	enum xp_retval reason;

	spin_lock_irqsave(&ch->lock, irq_flags);

again:

	if ((ch->flags & XPC_C_DISCONNECTED) &&
	    (ch->flags & XPC_C_WDISCONNECT)) {
		/*
		 * Delay processing chctl flags until thread waiting disconnect
		 * has had a chance to see that the channel is disconnected.
		 */
		ch->delayed_chctl_flags |= chctl_flags;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	if (chctl_flags & XPC_CHCTL_CLOSEREQUEST) {

		dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREQUEST (reason=%d) received "
			"from partid=%d, channel=%d\n", args->reason,
			ch->partid, ch->number);

		/*
		 * If RCLOSEREQUEST is set, we're probably waiting for
		 * RCLOSEREPLY. We should find it and a ROPENREQUEST packed
		 * with this RCLOSEREQUEST in the chctl_flags.
		 */

		if (ch->flags & XPC_C_RCLOSEREQUEST) {
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
			DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);

			DBUG_ON(!(chctl_flags & XPC_CHCTL_CLOSEREPLY));
			chctl_flags &= ~XPC_CHCTL_CLOSEREPLY;
			ch->flags |= XPC_C_RCLOSEREPLY;

			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
			goto again;
		}

		if (ch->flags & XPC_C_DISCONNECTED) {
			if (!(chctl_flags & XPC_CHCTL_OPENREQUEST)) {
				if (part->chctl.flags[ch_number] &
				    XPC_CHCTL_OPENREQUEST) {

					DBUG_ON(ch->delayed_chctl_flags != 0);
					spin_lock(&part->chctl_lock);
					part->chctl.flags[ch_number] |=
					    XPC_CHCTL_CLOSEREQUEST;
					spin_unlock(&part->chctl_lock);
				}
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				return;
			}

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
			ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
		}

		chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY);

		/*
		 * The meaningful CLOSEREQUEST connection state fields are:
		 *      reason = reason connection is to be closed
		 */

		ch->flags |= XPC_C_RCLOSEREQUEST;

		if (!(ch->flags & XPC_C_DISCONNECTING)) {
			reason = args->reason;
			if (reason <= xpSuccess || reason > xpUnknownReason)
				reason = xpUnknownReason;
			else if (reason == xpUnregistering)
				reason = xpOtherUnregistering;

			XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

			DBUG_ON(chctl_flags & XPC_CHCTL_CLOSEREPLY);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		xpc_process_disconnect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_CLOSEREPLY) {

		dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREPLY received from partid="
			"%d, channel=%d\n", ch->partid, ch->number);

		if (ch->flags & XPC_C_DISCONNECTED) {
			DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

		if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
			if (part->chctl.flags[ch_number] &
			    XPC_CHCTL_CLOSEREQUEST) {

				DBUG_ON(ch->delayed_chctl_flags != 0);
				spin_lock(&part->chctl_lock);
				part->chctl.flags[ch_number] |=
				    XPC_CHCTL_CLOSEREPLY;
				spin_unlock(&part->chctl_lock);
			}
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		ch->flags |= XPC_C_RCLOSEREPLY;

		if (ch->flags & XPC_C_CLOSEREPLY) {
			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
		}
	}

	if (chctl_flags & XPC_CHCTL_OPENREQUEST) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENREQUEST (entry_size=%d, "
			"local_nentries=%d) received from partid=%d, "
			"channel=%d\n", args->entry_size, args->local_nentries,
			ch->partid, ch->number);

		if (part->act_state == XPC_P_AS_DEACTIVATING ||
		    (ch->flags & XPC_C_ROPENREQUEST)) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) {
			ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST;
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}
		DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
				       XPC_C_OPENREQUEST)));
		DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
				     XPC_C_OPENREPLY | XPC_C_CONNECTED));

		/*
		 * The meaningful OPENREQUEST connection state fields are:
		 *      entry_size = size of channel's messages in bytes
		 *      local_nentries = remote partition's local_nentries
		 */

		if (args->entry_size == 0 || args->local_nentries == 0) {
			/* assume OPENREQUEST was delayed by mistake */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
		ch->remote_nentries = args->local_nentries;

		if (ch->flags & XPC_C_OPENREQUEST) {
			if (args->entry_size != ch->entry_size) {
				XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
						       &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				return;
			}
		} else {
			ch->entry_size = args->entry_size;

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
		}

		xpc_process_connect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_OPENREPLY) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY (local_msgqueue_pa="
			"0x%lx, local_nentries=%d, remote_nentries=%d) "
			"received from partid=%d, channel=%d\n",
			args->local_msgqueue_pa, args->local_nentries,
			args->remote_nentries, ch->partid, ch->number);

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}
		if (!(ch->flags & XPC_C_OPENREQUEST)) {
			XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
					       &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
		DBUG_ON(ch->flags & XPC_C_CONNECTED);

		/*
		 * The meaningful OPENREPLY connection state fields are:
		 *      local_msgqueue_pa = physical address of remote
		 *                          partition's local_msgqueue
		 *      local_nentries = remote partition's local_nentries
		 *      remote_nentries = remote partition's remote_nentries
		 */
		DBUG_ON(args->local_msgqueue_pa == 0);
		DBUG_ON(args->local_nentries == 0);
		DBUG_ON(args->remote_nentries == 0);

		ch->flags |= XPC_C_ROPENREPLY;
		xpc_save_remote_msgqueue_pa(ch, args->local_msgqueue_pa);

		if (args->local_nentries < ch->remote_nentries) {
			dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
				"remote_nentries=%d, old remote_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->local_nentries, ch->remote_nentries,
				ch->partid, ch->number);

			ch->remote_nentries = args->local_nentries;
		}
		if (args->remote_nentries < ch->local_nentries) {
			dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
				"local_nentries=%d, old local_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->remote_nentries, ch->local_nentries,
				ch->partid, ch->number);

			ch->local_nentries = args->remote_nentries;
		}

		xpc_process_connect(ch, &irq_flags);
	}

	spin_unlock_irqrestore(&ch->lock, irq_flags);
}
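
/*
 * Compact summary of the handshake processed above (added commentary,
 * derived from the logic of xpc_process_openclose_chctl_flags() and
 * xpc_process_connect(); not normative documentation):
 *
 *	local side			remote side
 *	----------			-----------
 *	send OPENREQUEST    ------->	sets XPC_C_ROPENREQUEST
 *	sets XPC_C_ROPENREQUEST <---	send OPENREQUEST
 *	send OPENREPLY      ------->	sets XPC_C_ROPENREPLY
 *	sets XPC_C_ROPENREPLY   <---	send OPENREPLY
 *	=> both sides set XPC_C_CONNECTED in xpc_process_connect()
 *
 * Closing mirrors this with CLOSEREQUEST/CLOSEREPLY, completing in
 * xpc_process_disconnect() once both XPC_C_CLOSEREPLY and XPC_C_RCLOSEREPLY
 * are set.
 */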

/*
 * Attempt to establish a channel connection to a remote partition.
 */
static enum xp_retval
xpc_connect_channel(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	struct xpc_registration *registration = &xpc_registrations[ch->number];

	if (mutex_trylock(&registration->mutex) == 0)
		return xpRetry;

	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
		mutex_unlock(&registration->mutex);
		return xpUnregistered;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	DBUG_ON(ch->flags & XPC_C_CONNECTED);
	DBUG_ON(ch->flags & XPC_C_OPENREQUEST);

	if (ch->flags & XPC_C_DISCONNECTING) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		mutex_unlock(&registration->mutex);
		return ch->reason;
	}

	/* add info from the channel connect registration to the channel */

	ch->kthreads_assigned_limit = registration->assigned_limit;
	ch->kthreads_idle_limit = registration->idle_limit;
	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_active) != 0);

	ch->func = registration->func;
	DBUG_ON(registration->func == NULL);
	ch->key = registration->key;

	ch->local_nentries = registration->nentries;

	if (ch->flags & XPC_C_ROPENREQUEST) {
		if (registration->entry_size != ch->entry_size) {
			/* the local and remote sides aren't the same */

			/*
			 * Because XPC_DISCONNECT_CHANNEL() can block we're
			 * forced to up the registration sema before we unlock
			 * the channel lock. But that's okay here because we're
			 * done with the part that required the registration
			 * sema. XPC_DISCONNECT_CHANNEL() requires that the
			 * channel lock be locked and will unlock and relock
			 * the channel lock as needed.
			 */
			mutex_unlock(&registration->mutex);
			XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
					       &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return xpUnequalMsgSizes;
		}
	} else {
		ch->entry_size = registration->entry_size;

		XPC_SET_REASON(ch, 0, 0);
		ch->flags &= ~XPC_C_DISCONNECTED;

		atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
	}

	mutex_unlock(&registration->mutex);

	/* initiate the connection */

	ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
	xpc_send_chctl_openrequest(ch, &irq_flags);

	xpc_process_connect(ch, &irq_flags);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return xpSuccess;
}

void
xpc_process_sent_chctl_flags(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;
	struct xpc_channel *ch;
	int ch_number;
	u32 ch_flags;

	chctl.all_flags = xpc_get_chctl_all_flags(part);

	/*
	 * Initiate channel connections for registered channels.
	 *
	 * For each connected channel that has pending messages activate idle
	 * kthreads and/or create new kthreads as needed.
	 */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		/*
		 * Process any open or close related chctl flags, and then deal
		 * with connecting or disconnecting the channel as required.
		 */

		if (chctl.flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS) {
			xpc_process_openclose_chctl_flags(part, ch_number,
							chctl.flags[ch_number]);
		}

		ch_flags = ch->flags;	/* need an atomic snapshot of flags */

		if (ch_flags & XPC_C_DISCONNECTING) {
			spin_lock_irqsave(&ch->lock, irq_flags);
			xpc_process_disconnect(ch, &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			continue;
		}

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			continue;

		if (!(ch_flags & XPC_C_CONNECTED)) {
			if (!(ch_flags & XPC_C_OPENREQUEST)) {
				DBUG_ON(ch_flags & XPC_C_SETUP);
				(void)xpc_connect_channel(ch);
			} else {
				spin_lock_irqsave(&ch->lock, irq_flags);
				xpc_process_connect(ch, &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			continue;
		}

		/*
		 * Process any message related chctl flags, this may involve
		 * the activation of kthreads to deliver any pending messages
		 * sent from the other partition.
		 */

		if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
			xpc_process_msg_chctl_flags(part, ch_number);
	}
}

/*
 * XPC's heartbeat code calls this function to inform XPC that a partition is
 * going down. XPC responds by tearing down the XPartition Communication
 * infrastructure used for the just downed partition.
 *
 * XPC's heartbeat code will never call this function and xpc_partition_up()
 * at the same time. Nor will it ever make multiple calls to either function
 * at the same time.
 */
void
xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason)
{
	unsigned long irq_flags;
	int ch_number;
	struct xpc_channel *ch;

	dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
		XPC_PARTID(part), reason);

	if (!xpc_part_ref(part)) {
		/* infrastructure for this partition isn't currently set up */
		return;
	}

	/* disconnect channels associated with the partition going down */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		xpc_msgqueue_ref(ch);
		spin_lock_irqsave(&ch->lock, irq_flags);

		XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		xpc_msgqueue_deref(ch);
	}

	xpc_wakeup_channel_mgr(part);

	xpc_part_deref(part);
}

/*
 * Called by XP at the time of channel connection registration to cause
 * XPC to establish connections to all currently active partitions.
 */
void
xpc_initiate_connect(int ch_number)
{
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			ch = &part->channels[ch_number];

			/*
			 * Initiate the establishment of a connection on the
			 * newly registered channel to the remote partition.
			 */
			xpc_wakeup_channel_mgr(part);
			xpc_part_deref(part);
		}
	}
}
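
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * clients reach xpc_initiate_connect() indirectly, by registering a channel
 * through the xpc_connect() wrapper declared in xp.h. MY_CHANNEL,
 * my_channel_func, and the numeric arguments below are hypothetical; the
 * parameters are ch_number, func, key, payload_size, nentries,
 * assigned_limit, and idle_limit:
 *
 *	static void
 *	my_channel_func(enum xp_retval reason, short partid, int ch_number,
 *			void *data, void *key)
 *	{
 *		if (reason == xpConnected)
 *			pr_info("channel %d to partid %d connected\n",
 *				ch_number, partid);
 *	}
 *
 *	enum xp_retval ret;
 *
 *	ret = xpc_connect(MY_CHANNEL, my_channel_func, NULL, 128, 64, 4, 2);
 *	if (ret != xpSuccess)
 *		pr_err("xpc_connect() failed, ret=%d\n", ret);
 */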

void
xpc_connected_callout(struct xpc_channel *ch)
{
	/* let the registerer know that a connection has been established */

	if (ch->func != NULL) {
		dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, "
			"partid=%d, channel=%d\n", ch->partid, ch->number);

		ch->func(xpConnected, ch->partid, ch->number,
			 (void *)(u64)ch->local_nentries, ch->key);

		dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, "
			"partid=%d, channel=%d\n", ch->partid, ch->number);
	}
}

/*
 * Called by XP at the time of channel connection unregistration to cause
 * XPC to tear down all current connections for the specified channel.
 *
 * Before returning xpc_initiate_disconnect() will wait until all connections
 * on the specified channel have been closed/torn down. So the caller can be
 * assured that they will not be receiving any more callouts from XPC to the
 * function they registered via xpc_connect().
 *
 * Arguments:
 *
 *	ch_number - channel # to unregister.
 */
void
xpc_initiate_disconnect(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

	/* initiate the channel disconnect for every active partition */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			ch = &part->channels[ch_number];
			xpc_msgqueue_ref(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);

			if (!(ch->flags & XPC_C_DISCONNECTED)) {
				ch->flags |= XPC_C_WDISCONNECT;

				XPC_DISCONNECT_CHANNEL(ch, xpUnregistering,
						       &irq_flags);
			}

			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);
		}
	}

	xpc_disconnect_wait(ch_number);
}
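
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * unregistration normally goes through the xpc_disconnect() wrapper in xp.h,
 * typically from a client module's exit path. The call blocks until all
 * connections on the channel are torn down, after which the callback
 * registered with xpc_connect() will not be invoked again. MY_CHANNEL is
 * hypothetical:
 *
 *	static void __exit
 *	my_module_exit(void)
 *	{
 *		xpc_disconnect(MY_CHANNEL);
 *	}
 */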

/*
 * To disconnect a channel, and reflect it back to all who may be waiting.
 *
 * An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
 * xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by
 * xpc_disconnect_wait().
 *
 * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
 */
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
		       enum xp_retval reason, unsigned long *irq_flags)
{
	u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
		return;

	DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));

	dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
		reason, line, ch->partid, ch->number);

	XPC_SET_REASON(ch, reason, line);

	ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
	/* some of these may not have been set */
	ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
		       XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
		       XPC_C_CONNECTING | XPC_C_CONNECTED);

	xpc_send_chctl_closerequest(ch, irq_flags);

	if (channel_was_connected)
		ch->flags |= XPC_C_WASCONNECTED;

	spin_unlock_irqrestore(&ch->lock, *irq_flags);

	/* wake all idle kthreads so they can exit */
	if (atomic_read(&ch->kthreads_idle) > 0) {
		wake_up_all(&ch->idle_wq);

	} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		   !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		/* start a kthread that will do the xpDisconnecting callout */
		xpc_create_kthreads(ch, 1, 1);
	}

	/* wake those waiting to allocate an entry from the local msg queue */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);

	spin_lock_irqsave(&ch->lock, *irq_flags);
}

void
xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason)
{
	/*
	 * Let the channel's registerer know that the channel is being
	 * disconnected. We don't want to do this if the registerer was never
	 * informed of a connection being made.
	 */

	if (ch->func != NULL) {
		dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
			"channel=%d\n", reason, ch->partid, ch->number);

		ch->func(reason, ch->partid, ch->number, NULL, ch->key);

		dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
			"channel=%d\n", reason, ch->partid, ch->number);
	}
}

/*
 * Wait for a message entry to become available for the specified channel,
 * but don't wait any longer than 1 jiffy.
 */
enum xp_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
	enum xp_retval ret;

	if (ch->flags & XPC_C_DISCONNECTING) {
		DBUG_ON(ch->reason == xpInterrupted);
		return ch->reason;
	}

	atomic_inc(&ch->n_on_msg_allocate_wq);
	ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
	atomic_dec(&ch->n_on_msg_allocate_wq);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		DBUG_ON(ch->reason == xpInterrupted);
	} else if (ret == 0) {
		ret = xpTimeout;
	} else {
		ret = xpInterrupted;
	}

	return ret;
}

/*
 * Send a message that contains the user's payload on the specified channel
 * connected to the specified partition.
 *
 * NOTE that this routine can sleep waiting for a message entry to become
 * available. To not sleep, pass in the XPC_NOWAIT flag.
 *
 * Once sent, this routine will not wait for the message to be received, nor
 * will notification be given when it does happen.
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # to send message on.
 *	flags - see xp.h for valid flags.
 *	payload - pointer to the payload which is to be sent.
 *	payload_size - size of the payload in bytes.
 */
enum xp_retval
xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload,
		  u16 payload_size)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	enum xp_retval ret = xpUnknownReason;

	dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
		partid, ch_number);

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
	DBUG_ON(payload == NULL);

	if (xpc_part_ref(part)) {
		ret = xpc_send_payload(&part->channels[ch_number], flags,
				       payload, payload_size, 0, NULL, NULL);
		xpc_part_deref(part);
	}

	return ret;
}
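
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * callers reach xpc_initiate_send() through the xpc_send() wrapper in xp.h.
 * The struct and channel number are hypothetical; the payload is copied into
 * a message queue entry, so a stack buffer is fine here:
 *
 *	struct my_msg {
 *		u32 opcode;
 *		u32 arg;
 *	};
 *
 *	static enum xp_retval
 *	my_send(short partid)
 *	{
 *		struct my_msg msg = { .opcode = 1, .arg = 42 };
 *
 *		return xpc_send(partid, MY_CHANNEL, XPC_NOWAIT, &msg,
 *				sizeof(msg));
 *	}
 *
 * With XPC_NOWAIT a non-xpSuccess return means no message entry was
 * immediately available (or the channel isn't connected); the caller may
 * retry later.
 */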

/*
 * Send a message that contains the user's payload on the specified channel
 * connected to the specified partition.
 *
 * NOTE that this routine can sleep waiting for a message entry to become
 * available. To not sleep, pass in the XPC_NOWAIT flag.
 *
 * This routine will not wait for the message to be sent or received.
 *
 * Once the remote end of the channel has received the message, the function
 * passed as an argument to xpc_initiate_send_notify() will be called. This
 * allows the sender to free up or re-use any buffers referenced by the
 * message, but does NOT mean the message has been processed at the remote
 * end by a receiver.
 *
 * If this routine returns an error, the caller's function will NOT be called.
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # to send message on.
 *	flags - see xp.h for valid flags.
 *	payload - pointer to the payload which is to be sent.
 *	payload_size - size of the payload in bytes.
 *	func - function to call with asynchronous notification of message
 *	       receipt. THIS FUNCTION MUST BE NON-BLOCKING.
 *	key - user-defined key to be passed to the function when it's called.
 */
enum xp_retval
xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload,
			 u16 payload_size, xpc_notify_func func, void *key)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	enum xp_retval ret = xpUnknownReason;

	dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
		partid, ch_number);

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
	DBUG_ON(payload == NULL);
	DBUG_ON(func == NULL);

	if (xpc_part_ref(part)) {
		ret = xpc_send_payload(&part->channels[ch_number], flags,
				       payload, payload_size, XPC_N_CALL, func,
				       key);
		xpc_part_deref(part);
	}
	return ret;
}
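
/*
 * Illustrative sketch (added commentary, not part of the original file): a
 * non-blocking notify callback paired with the xpc_send_notify() wrapper
 * from xp.h, assuming the xpMsgDelivered reason code from xp.h; other names
 * are hypothetical. complete() is safe from non-blocking context:
 *
 *	static void
 *	my_notify_func(enum xp_retval reason, short partid, int ch_number,
 *		       void *key)
 *	{
 *		if (reason == xpMsgDelivered)
 *			complete((struct completion *)key);
 *	}
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *	enum xp_retval ret;
 *
 *	ret = xpc_send_notify(partid, MY_CHANNEL, XPC_WAIT, &msg, sizeof(msg),
 *			      my_notify_func, &done);
 *	if (ret == xpSuccess)
 *		wait_for_completion(&done);
 */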

/*
 * Deliver a message's payload to its intended recipient.
 */
void
xpc_deliver_payload(struct xpc_channel *ch)
{
	void *payload;

	payload = xpc_get_deliverable_payload(ch);
	if (payload != NULL) {

		/*
		 * This ref is taken to protect the payload itself from being
		 * freed before the user is finished with it, which the user
		 * indicates by calling xpc_initiate_received().
		 */
		xpc_msgqueue_ref(ch);

		atomic_inc(&ch->kthreads_active);

		if (ch->func != NULL) {
			dev_dbg(xpc_chan, "ch->func() called, payload=0x%p "
				"partid=%d channel=%d\n", payload, ch->partid,
				ch->number);

			/* deliver the message to its intended recipient */
			ch->func(xpMsgReceived, ch->partid, ch->number, payload,
				 ch->key);

			dev_dbg(xpc_chan, "ch->func() returned, payload=0x%p "
				"partid=%d channel=%d\n", payload, ch->partid,
				ch->number);
		}

		atomic_dec(&ch->kthreads_active);
	}
}

/*
 * Acknowledge receipt of a delivered message's payload.
 *
 * This function, although called by users, does not call xpc_part_ref() to
 * ensure that the partition infrastructure is in place. It relies on the
 * fact that we called xpc_msgqueue_ref() in xpc_deliver_payload().
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # message received on.
 *	payload - pointer to the payload area allocated via
 *			xpc_initiate_send() or xpc_initiate_send_notify().
 */
void
xpc_initiate_received(short partid, int ch_number, void *payload)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);

	ch = &part->channels[ch_number];
	xpc_received_payload(ch, payload);

	/* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload() */
	xpc_msgqueue_deref(ch);
}
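
/*
 * Illustrative sketch (added commentary, not part of the original file): on
 * the receiving side, the channel function registered via xpc_connect() is
 * invoked with xpMsgReceived (see xpc_deliver_payload() above) and must
 * eventually acknowledge the payload via the xpc_received() wrapper in xp.h
 * so the message entry can be recycled. struct my_msg is hypothetical:
 *
 *	static void
 *	my_channel_func(enum xp_retval reason, short partid, int ch_number,
 *			void *data, void *key)
 *	{
 *		if (reason == xpMsgReceived) {
 *			struct my_msg *msg = data;
 *
 *			pr_info("opcode=%u arg=%u\n", msg->opcode, msg->arg);
 *			xpc_received(partid, ch_number, data);
 *		}
 *	}
 */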