xpc_channel.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) channel support.
 *
 *	This is the part of XPC that manages the channels and
 *	sends/receives messages across them to/from other partitions.
 *
 */

#include <linux/device.h>
#include "xpc.h"
/*
 * Process a connect message from a remote partition.
 *
 * Note: xpc_process_connect() expects to be called with the channel's lock
 * held (via spin_lock_irqsave()) and will leave it locked upon return.
 */
static void
xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	enum xp_retval ret;

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_OPENREQUEST) ||
	    !(ch->flags & XPC_C_ROPENREQUEST)) {
		/* nothing more to do for now */
		return;
	}
	DBUG_ON(!(ch->flags & XPC_C_CONNECTING));

	if (!(ch->flags & XPC_C_SETUP)) {
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		ret = xpc_setup_msg_structures(ch);
		spin_lock_irqsave(&ch->lock, *irq_flags);

		if (ret != xpSuccess)
			XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);

		ch->flags |= XPC_C_SETUP;

		if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
			return;
	}

	if (!(ch->flags & XPC_C_OPENREPLY)) {
		ch->flags |= XPC_C_OPENREPLY;
		xpc_send_chctl_openreply(ch, irq_flags);
	}

	if (!(ch->flags & XPC_C_ROPENREPLY))
		return;

	ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP);	/* clear all else */

	dev_info(xpc_chan, "channel %d to partition %d connected\n",
		 ch->number, ch->partid);

	spin_unlock_irqrestore(&ch->lock, *irq_flags);
	xpc_create_kthreads(ch, 1, 0);
	spin_lock_irqsave(&ch->lock, *irq_flags);
}
/*
 * spin_lock_irqsave() is expected to be held on entry.
 */
static void
xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_DISCONNECTING))
		return;

	DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

	/* make sure all activity has settled down first */

	if (atomic_read(&ch->kthreads_assigned) > 0 ||
	    atomic_read(&ch->references) > 0) {
		return;
	}
	DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));

	if (part->act_state == XPC_P_AS_DEACTIVATING) {
		/* can't proceed until the other side disengages from us */
		if (xpc_partition_engaged(ch->partid))
			return;

	} else {

		/* as long as the other side is up do the full protocol */

		if (!(ch->flags & XPC_C_RCLOSEREQUEST))
			return;

		if (!(ch->flags & XPC_C_CLOSEREPLY)) {
			ch->flags |= XPC_C_CLOSEREPLY;
			xpc_send_chctl_closereply(ch, irq_flags);
		}

		if (!(ch->flags & XPC_C_RCLOSEREPLY))
			return;
	}

	/* wake those waiting for notify completion */
	if (atomic_read(&ch->n_to_notify) > 0) {
		/* we do callout while holding ch->lock, callout can't block */
		xpc_notify_senders_of_disconnect(ch);
	}

	/* both sides are disconnected now */

	if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) {
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		xpc_disconnect_callout(ch, xpDisconnected);
		spin_lock_irqsave(&ch->lock, *irq_flags);
	}

	DBUG_ON(atomic_read(&ch->n_to_notify) != 0);

	/* it's now safe to free the channel's message queues */
	xpc_teardown_msg_structures(ch);

	ch->func = NULL;
	ch->key = NULL;
	ch->entry_size = 0;
	ch->local_nentries = 0;
	ch->remote_nentries = 0;
	ch->kthreads_assigned_limit = 0;
	ch->kthreads_idle_limit = 0;

	/*
	 * Mark the channel disconnected and clear all other flags, including
	 * XPC_C_SETUP (because of call to xpc_teardown_msg_structures()) but
	 * not including XPC_C_WDISCONNECT (if it was set).
	 */
	ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));

	atomic_dec(&part->nchannels_active);

	if (channel_was_connected) {
		dev_info(xpc_chan, "channel %d to partition %d disconnected, "
			 "reason=%d\n", ch->number, ch->partid, ch->reason);
	}

	if (ch->flags & XPC_C_WDISCONNECT) {
		/* we won't lose the CPU since we're holding ch->lock */
		complete(&ch->wdisconnect_wait);
	} else if (ch->delayed_chctl_flags) {
		if (part->act_state != XPC_P_AS_DEACTIVATING) {
			/* time to take action on any delayed chctl flags */
			spin_lock(&part->chctl_lock);
			part->chctl.flags[ch->number] |=
			    ch->delayed_chctl_flags;
			spin_unlock(&part->chctl_lock);
		}
		ch->delayed_chctl_flags = 0;
	}
}
/*
 * Process a change in the channel's remote connection state.
 */
static void
xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number,
				  u8 chctl_flags)
{
	unsigned long irq_flags;
	struct xpc_openclose_args *args =
	    &part->remote_openclose_args[ch_number];
	struct xpc_channel *ch = &part->channels[ch_number];
	enum xp_retval reason;
	enum xp_retval ret;

	spin_lock_irqsave(&ch->lock, irq_flags);

again:

	if ((ch->flags & XPC_C_DISCONNECTED) &&
	    (ch->flags & XPC_C_WDISCONNECT)) {
		/*
		 * Delay processing chctl flags until thread waiting disconnect
		 * has had a chance to see that the channel is disconnected.
		 */
		ch->delayed_chctl_flags |= chctl_flags;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	if (chctl_flags & XPC_CHCTL_CLOSEREQUEST) {

		dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREQUEST (reason=%d) received "
			"from partid=%d, channel=%d\n", args->reason,
			ch->partid, ch->number);

		/*
		 * If RCLOSEREQUEST is set, we're probably waiting for
		 * RCLOSEREPLY. We should find it and a ROPENREQUEST packed
		 * with this RCLOSEREQUEST in the chctl_flags.
		 */

		if (ch->flags & XPC_C_RCLOSEREQUEST) {
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
			DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);

			DBUG_ON(!(chctl_flags & XPC_CHCTL_CLOSEREPLY));
			chctl_flags &= ~XPC_CHCTL_CLOSEREPLY;
			ch->flags |= XPC_C_RCLOSEREPLY;

			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
			goto again;
		}

		if (ch->flags & XPC_C_DISCONNECTED) {
			if (!(chctl_flags & XPC_CHCTL_OPENREQUEST)) {
				if (part->chctl.flags[ch_number] &
				    XPC_CHCTL_OPENREQUEST) {

					DBUG_ON(ch->delayed_chctl_flags != 0);
					spin_lock(&part->chctl_lock);
					part->chctl.flags[ch_number] |=
					    XPC_CHCTL_CLOSEREQUEST;
					spin_unlock(&part->chctl_lock);
				}
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				return;
			}

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
			ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
		}

		chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY);

		/*
		 * The meaningful CLOSEREQUEST connection state fields are:
		 *      reason = reason connection is to be closed
		 */

		ch->flags |= XPC_C_RCLOSEREQUEST;

		if (!(ch->flags & XPC_C_DISCONNECTING)) {
			reason = args->reason;
			if (reason <= xpSuccess || reason > xpUnknownReason)
				reason = xpUnknownReason;
			else if (reason == xpUnregistering)
				reason = xpOtherUnregistering;

			XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

			DBUG_ON(chctl_flags & XPC_CHCTL_CLOSEREPLY);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		xpc_process_disconnect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_CLOSEREPLY) {

		dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREPLY received from partid="
			"%d, channel=%d\n", ch->partid, ch->number);

		if (ch->flags & XPC_C_DISCONNECTED) {
			DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

		if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
			if (part->chctl.flags[ch_number] &
			    XPC_CHCTL_CLOSEREQUEST) {

				DBUG_ON(ch->delayed_chctl_flags != 0);
				spin_lock(&part->chctl_lock);
				part->chctl.flags[ch_number] |=
				    XPC_CHCTL_CLOSEREPLY;
				spin_unlock(&part->chctl_lock);
			}
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		ch->flags |= XPC_C_RCLOSEREPLY;

		if (ch->flags & XPC_C_CLOSEREPLY) {
			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
		}
	}

	if (chctl_flags & XPC_CHCTL_OPENREQUEST) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENREQUEST (entry_size=%d, "
			"local_nentries=%d) received from partid=%d, "
			"channel=%d\n", args->entry_size, args->local_nentries,
			ch->partid, ch->number);

		if (part->act_state == XPC_P_AS_DEACTIVATING ||
		    (ch->flags & XPC_C_ROPENREQUEST)) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) {
			ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST;
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}
		DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
				       XPC_C_OPENREQUEST)));
		DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
				     XPC_C_OPENREPLY | XPC_C_CONNECTED));

		/*
		 * The meaningful OPENREQUEST connection state fields are:
		 *      entry_size = size of channel's messages in bytes
		 *      local_nentries = remote partition's local_nentries
		 */
		if (args->entry_size == 0 || args->local_nentries == 0) {
			/* assume OPENREQUEST was delayed by mistake */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
		ch->remote_nentries = args->local_nentries;

		if (ch->flags & XPC_C_OPENREQUEST) {
			if (args->entry_size != ch->entry_size) {
				XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
						       &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				return;
			}
		} else {
			ch->entry_size = args->entry_size;

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
		}

		xpc_process_connect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_OPENREPLY) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY (local_msgqueue_pa="
			"0x%lx, local_nentries=%d, remote_nentries=%d) "
			"received from partid=%d, channel=%d\n",
			args->local_msgqueue_pa, args->local_nentries,
			args->remote_nentries, ch->partid, ch->number);

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}
		if (!(ch->flags & XPC_C_OPENREQUEST)) {
			XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
					       &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
		DBUG_ON(ch->flags & XPC_C_CONNECTED);

		/*
		 * The meaningful OPENREPLY connection state fields are:
		 *      local_msgqueue_pa = physical address of remote
		 *                          partition's local_msgqueue
		 *      local_nentries = remote partition's local_nentries
		 *      remote_nentries = remote partition's remote_nentries
		 */
		DBUG_ON(args->local_msgqueue_pa == 0);
		DBUG_ON(args->local_nentries == 0);
		DBUG_ON(args->remote_nentries == 0);

		ret = xpc_save_remote_msgqueue_pa(ch, args->local_msgqueue_pa);
		if (ret != xpSuccess) {
			XPC_DISCONNECT_CHANNEL(ch, ret, &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}
		ch->flags |= XPC_C_ROPENREPLY;

		if (args->local_nentries < ch->remote_nentries) {
			dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
				"remote_nentries=%d, old remote_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->local_nentries, ch->remote_nentries,
				ch->partid, ch->number);

			ch->remote_nentries = args->local_nentries;
		}
		if (args->remote_nentries < ch->local_nentries) {
			dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
				"local_nentries=%d, old local_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->remote_nentries, ch->local_nentries,
				ch->partid, ch->number);

			ch->local_nentries = args->remote_nentries;
		}

		xpc_process_connect(ch, &irq_flags);
	}

	spin_unlock_irqrestore(&ch->lock, irq_flags);
}
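/*
 * For reference, the open handshake driven by the code above together with
 * xpc_process_connect() proceeds roughly as follows (a summary derived from
 * the XPC_C_* flag handling; see xpc.h for the flag definitions):
 *
 *	side A				side B
 *	------				------
 *	sets OPENREQUEST     ---->	sets ROPENREQUEST
 *	sets ROPENREQUEST    <----	sets OPENREQUEST
 *	sets OPENREPLY       ---->	sets ROPENREPLY
 *	sets ROPENREPLY      <----	sets OPENREPLY
 *	   both sides now CONNECTED
 *
 * The close handshake mirrors this with CLOSEREQUEST/CLOSEREPLY and their
 * remote counterparts, as handled by xpc_process_disconnect().
 */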
/*
 * Attempt to establish a channel connection to a remote partition.
 */
static enum xp_retval
xpc_connect_channel(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	struct xpc_registration *registration = &xpc_registrations[ch->number];

	if (mutex_trylock(&registration->mutex) == 0)
		return xpRetry;

	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
		mutex_unlock(&registration->mutex);
		return xpUnregistered;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	DBUG_ON(ch->flags & XPC_C_CONNECTED);
	DBUG_ON(ch->flags & XPC_C_OPENREQUEST);

	if (ch->flags & XPC_C_DISCONNECTING) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		mutex_unlock(&registration->mutex);
		return ch->reason;
	}

	/* add info from the channel connect registration to the channel */

	ch->kthreads_assigned_limit = registration->assigned_limit;
	ch->kthreads_idle_limit = registration->idle_limit;
	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_active) != 0);

	ch->func = registration->func;
	DBUG_ON(registration->func == NULL);
	ch->key = registration->key;

	ch->local_nentries = registration->nentries;

	if (ch->flags & XPC_C_ROPENREQUEST) {
		if (registration->entry_size != ch->entry_size) {
			/* the local and remote sides aren't the same */

			/*
			 * Because XPC_DISCONNECT_CHANNEL() can block we're
			 * forced to release the registration mutex before we
			 * unlock the channel lock. But that's okay here
			 * because we're done with the part that required the
			 * registration mutex. XPC_DISCONNECT_CHANNEL()
			 * requires that the channel lock be locked and will
			 * unlock and relock the channel lock as needed.
			 */
			mutex_unlock(&registration->mutex);
			XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
					       &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return xpUnequalMsgSizes;
		}
	} else {
		ch->entry_size = registration->entry_size;

		XPC_SET_REASON(ch, 0, 0);
		ch->flags &= ~XPC_C_DISCONNECTED;

		atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
	}

	mutex_unlock(&registration->mutex);

	/* initiate the connection */

	ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
	xpc_send_chctl_openrequest(ch, &irq_flags);

	xpc_process_connect(ch, &irq_flags);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return xpSuccess;
}
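/*
 * Process the channel control flags that were most recently sent to us by
 * the remote partition: handle any open/close related flags, then connect,
 * disconnect, or deliver pending messages on each channel as indicated.
 */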
void
xpc_process_sent_chctl_flags(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;
	struct xpc_channel *ch;
	int ch_number;
	u32 ch_flags;

	chctl.all_flags = xpc_get_chctl_all_flags(part);

	/*
	 * Initiate channel connections for registered channels.
	 *
	 * For each connected channel that has pending messages activate idle
	 * kthreads and/or create new kthreads as needed.
	 */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		/*
		 * Process any open or close related chctl flags, and then deal
		 * with connecting or disconnecting the channel as required.
		 */

		if (chctl.flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS) {
			xpc_process_openclose_chctl_flags(part, ch_number,
							chctl.flags[ch_number]);
		}

		ch_flags = ch->flags;	/* need an atomic snapshot of flags */

		if (ch_flags & XPC_C_DISCONNECTING) {
			spin_lock_irqsave(&ch->lock, irq_flags);
			xpc_process_disconnect(ch, &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			continue;
		}

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			continue;

		if (!(ch_flags & XPC_C_CONNECTED)) {
			if (!(ch_flags & XPC_C_OPENREQUEST)) {
				DBUG_ON(ch_flags & XPC_C_SETUP);
				(void)xpc_connect_channel(ch);
			} else {
				spin_lock_irqsave(&ch->lock, irq_flags);
				xpc_process_connect(ch, &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			continue;
		}

		/*
		 * Process any message related chctl flags, this may involve
		 * the activation of kthreads to deliver any pending messages
		 * sent from the other partition.
		 */

		if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
			xpc_process_msg_chctl_flags(part, ch_number);
	}
}
/*
 * XPC's heartbeat code calls this function to inform XPC that a partition is
 * going down.  XPC responds by tearing down the XPartition Communication
 * infrastructure used for the just-downed partition.
 *
 * XPC's heartbeat code will never call this function and xpc_partition_up()
 * at the same time. Nor will it ever make multiple calls to either function
 * at the same time.
 */
void
xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason)
{
	unsigned long irq_flags;
	int ch_number;
	struct xpc_channel *ch;

	dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
		XPC_PARTID(part), reason);

	if (!xpc_part_ref(part)) {
		/* infrastructure for this partition isn't currently set up */
		return;
	}

	/* disconnect channels associated with the partition going down */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		xpc_msgqueue_ref(ch);
		spin_lock_irqsave(&ch->lock, irq_flags);

		XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		xpc_msgqueue_deref(ch);
	}

	xpc_wakeup_channel_mgr(part);

	xpc_part_deref(part);
}
/*
 * Called by XP at the time of channel connection registration to cause
 * XPC to establish connections to all currently active partitions.
 */
void
xpc_initiate_connect(int ch_number)
{
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			ch = &part->channels[ch_number];

			/*
			 * Initiate the establishment of a connection on the
			 * newly registered channel to the remote partition.
			 */
			xpc_wakeup_channel_mgr(part);
			xpc_part_deref(part);
		}
	}
}
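/*
 * Let the channel's registerer know that a connection has been established,
 * by calling the function it registered via xpc_connect().
 */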
void
xpc_connected_callout(struct xpc_channel *ch)
{
	/* let the registerer know that a connection has been established */

	if (ch->func != NULL) {
		dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, "
			"partid=%d, channel=%d\n", ch->partid, ch->number);

		ch->func(xpConnected, ch->partid, ch->number,
			 (void *)(u64)ch->local_nentries, ch->key);

		dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, "
			"partid=%d, channel=%d\n", ch->partid, ch->number);
	}
}
/*
 * Called by XP at the time of channel connection unregistration to cause
 * XPC to tear down all current connections for the specified channel.
 *
 * Before returning xpc_initiate_disconnect() will wait until all connections
 * on the specified channel have been closed/torn down. So the caller can be
 * assured that they will not be receiving any more callouts from XPC to the
 * function they registered via xpc_connect().
 *
 * Arguments:
 *
 *	ch_number - channel # to unregister.
 */
void
xpc_initiate_disconnect(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

	/* initiate the channel disconnect for every active partition */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			ch = &part->channels[ch_number];
			xpc_msgqueue_ref(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);

			if (!(ch->flags & XPC_C_DISCONNECTED)) {
				ch->flags |= XPC_C_WDISCONNECT;

				XPC_DISCONNECT_CHANNEL(ch, xpUnregistering,
						       &irq_flags);
			}

			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);
		}
	}

	xpc_disconnect_wait(ch_number);
}
/*
 * To disconnect a channel, and reflect it back to all who may be waiting.
 *
 * An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
 * xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by
 * xpc_disconnect_wait().
 *
 * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
 */
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
		       enum xp_retval reason, unsigned long *irq_flags)
{
	u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
		return;

	DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));

	dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
		reason, line, ch->partid, ch->number);

	XPC_SET_REASON(ch, reason, line);

	ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
	/* some of these may not have been set */
	ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
		       XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
		       XPC_C_CONNECTING | XPC_C_CONNECTED);

	xpc_send_chctl_closerequest(ch, irq_flags);

	if (channel_was_connected)
		ch->flags |= XPC_C_WASCONNECTED;

	spin_unlock_irqrestore(&ch->lock, *irq_flags);

	/* wake all idle kthreads so they can exit */
	if (atomic_read(&ch->kthreads_idle) > 0) {
		wake_up_all(&ch->idle_wq);

	} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		   !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		/* start a kthread that will do the xpDisconnecting callout */
		xpc_create_kthreads(ch, 1, 1);
	}

	/* wake those waiting to allocate an entry from the local msg queue */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);

	spin_lock_irqsave(&ch->lock, *irq_flags);
}
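/*
 * Make the xpDisconnecting/xpDisconnected callout to the channel's
 * registerer, passing along the reason for the disconnect.
 */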
void
xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason)
{
	/*
	 * Let the channel's registerer know that the channel is being
	 * disconnected. We don't want to do this if the registerer was never
	 * informed of a connection being made.
	 */

	if (ch->func != NULL) {
		dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
			"channel=%d\n", reason, ch->partid, ch->number);

		ch->func(reason, ch->partid, ch->number, NULL, ch->key);

		dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
			"channel=%d\n", reason, ch->partid, ch->number);
	}
}
/*
 * Wait for a message entry to become available for the specified channel,
 * but don't wait any longer than 1 jiffy.
 */
enum xp_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
	enum xp_retval ret;

	if (ch->flags & XPC_C_DISCONNECTING) {
		DBUG_ON(ch->reason == xpInterrupted);
		return ch->reason;
	}

	atomic_inc(&ch->n_on_msg_allocate_wq);
	ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
	atomic_dec(&ch->n_on_msg_allocate_wq);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		DBUG_ON(ch->reason == xpInterrupted);
	} else if (ret == 0) {
		ret = xpTimeout;
	} else {
		ret = xpInterrupted;
	}

	return ret;
}
/*
 * Send a message that contains the user's payload on the specified channel
 * connected to the specified partition.
 *
 * NOTE that this routine can sleep waiting for a message entry to become
 * available. To not sleep, pass in the XPC_NOWAIT flag.
 *
 * Once sent, this routine will not wait for the message to be received, nor
 * will notification be given when it does happen.
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # to send message on.
 *	flags - see xp.h for valid flags.
 *	payload - pointer to the payload which is to be sent.
 *	payload_size - size of the payload in bytes.
 */
enum xp_retval
xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload,
		  u16 payload_size)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	enum xp_retval ret = xpUnknownReason;

	dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
		partid, ch_number);

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
	DBUG_ON(payload == NULL);

	if (xpc_part_ref(part)) {
		ret = xpc_send_payload(&part->channels[ch_number], flags,
				       payload, payload_size, 0, NULL, NULL);
		xpc_part_deref(part);
	}

	return ret;
}
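/*
 * A minimal caller's sketch (hypothetical, not part of this driver): a user
 * that registered the channel via xpc_connect() with 64-byte entries might
 * send a payload like this, passing XPC_NOWAIT (see xp.h) to avoid sleeping
 * for a free message entry:
 *
 *	char buf[64];
 *	enum xp_retval ret;
 *
 *	memset(buf, 0, sizeof(buf));
 *	ret = xpc_initiate_send(partid, ch_number, XPC_NOWAIT, buf,
 *				sizeof(buf));
 *	if (ret != xpSuccess)
 *		pr_debug("xpc_initiate_send() failed, reason=%d\n", ret);
 */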
/*
 * Send a message that contains the user's payload on the specified channel
 * connected to the specified partition.
 *
 * NOTE that this routine can sleep waiting for a message entry to become
 * available. To not sleep, pass in the XPC_NOWAIT flag.
 *
 * This routine will not wait for the message to be sent or received.
 *
 * Once the remote end of the channel has received the message, the function
 * passed as an argument to xpc_initiate_send_notify() will be called. This
 * allows the sender to free up or re-use any buffers referenced by the
 * message, but does NOT mean the message has been processed at the remote
 * end by a receiver.
 *
 * If this routine returns an error, the caller's function will NOT be called.
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # to send message on.
 *	flags - see xp.h for valid flags.
 *	payload - pointer to the payload which is to be sent.
 *	payload_size - size of the payload in bytes.
 *	func - function to call with asynchronous notification of message
 *	       receipt. THIS FUNCTION MUST BE NON-BLOCKING.
 *	key - user-defined key to be passed to the function when it's called.
 */
enum xp_retval
xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload,
			 u16 payload_size, xpc_notify_func func, void *key)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	enum xp_retval ret = xpUnknownReason;

	dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
		partid, ch_number);

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
	DBUG_ON(payload == NULL);
	DBUG_ON(func == NULL);

	if (xpc_part_ref(part)) {
		ret = xpc_send_payload(&part->channels[ch_number], flags,
				       payload, payload_size, XPC_N_CALL, func,
				       key);
		xpc_part_deref(part);
	}
	return ret;
}
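/*
 * Since the notify function passed to xpc_initiate_send_notify() must not
 * block, a common pattern is to simply signal a waiter. A hypothetical
 * sketch (names illustrative only, not part of this driver):
 *
 *	static void
 *	example_notify(enum xp_retval reason, short partid, int ch_number,
 *		       void *key)
 *	{
 *		if (reason == xpMsgDelivered)
 *			complete((struct completion *)key);
 *	}
 */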
/*
 * Deliver a message's payload to its intended recipient.
 */
void
xpc_deliver_payload(struct xpc_channel *ch)
{
	void *payload;

	payload = xpc_get_deliverable_payload(ch);
	if (payload != NULL) {

		/*
		 * This ref is taken to protect the payload itself from being
		 * freed before the user is finished with it, which the user
		 * indicates by calling xpc_initiate_received().
		 */
		xpc_msgqueue_ref(ch);

		atomic_inc(&ch->kthreads_active);

		if (ch->func != NULL) {
			dev_dbg(xpc_chan, "ch->func() called, payload=0x%p "
				"partid=%d channel=%d\n", payload, ch->partid,
				ch->number);

			/* deliver the message to its intended recipient */
			ch->func(xpMsgReceived, ch->partid, ch->number,
				 payload, ch->key);

			dev_dbg(xpc_chan, "ch->func() returned, payload=0x%p "
				"partid=%d channel=%d\n", payload, ch->partid,
				ch->number);
		}

		atomic_dec(&ch->kthreads_active);
	}
}
/*
 * Acknowledge receipt of a delivered message's payload.
 *
 * This function, although called by users, does not call xpc_part_ref() to
 * ensure that the partition infrastructure is in place. It relies on the
 * fact that we called xpc_msgqueue_ref() in xpc_deliver_payload().
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # message received on.
 *	payload - pointer to the payload area allocated via
 *		  xpc_initiate_send() or xpc_initiate_send_notify().
 */
void
xpc_initiate_received(short partid, int ch_number, void *payload)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);

	ch = &part->channels[ch_number];
	xpc_received_payload(ch, payload);

	/* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload() */
	xpc_msgqueue_deref(ch);
}
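/*
 * End-to-end receive/acknowledge flow, as a hypothetical registerer sketch
 * (names illustrative only, not part of this driver): the xpc_channel_func
 * supplied to xpc_connect() is invoked by xpc_deliver_payload() above with
 * reason xpMsgReceived, and must eventually acknowledge the payload so its
 * message entry can be re-used:
 *
 *	static void
 *	example_ch_func(enum xp_retval reason, short partid, int ch_number,
 *			void *data, void *key)
 *	{
 *		if (reason == xpMsgReceived) {
 *			... process the payload pointed to by data, then ...
 *			xpc_initiate_received(partid, ch_number, data);
 *		}
 *	}
 *
 * The xpc_initiate_received() call drops the reference taken in
 * xpc_deliver_payload(), allowing the message entry to be reclaimed.
 */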