xpc_channel.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) channel support.
 *
 *	This is the part of XPC that manages the channels and
 *	sends/receives messages across them to/from other partitions.
 */

#include <linux/device.h>
#include "xpc.h"

/*
 * Process a connect message from a remote partition.
 *
 * Note: xpc_process_connect() is expecting to be called with the
 * spin_lock_irqsave held and will leave it locked upon return.
 */
static void
xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	enum xp_retval ret;

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_OPENREQUEST) ||
	    !(ch->flags & XPC_C_ROPENREQUEST)) {
		/* nothing more to do for now */
		return;
	}
	DBUG_ON(!(ch->flags & XPC_C_CONNECTING));

	if (!(ch->flags & XPC_C_SETUP)) {
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		ret = xpc_setup_msg_structures(ch);
		spin_lock_irqsave(&ch->lock, *irq_flags);

		if (ret != xpSuccess)
			XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);

		ch->flags |= XPC_C_SETUP;

		if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
			return;
	}

	if (!(ch->flags & XPC_C_OPENREPLY)) {
		ch->flags |= XPC_C_OPENREPLY;
		xpc_send_chctl_openreply(ch, irq_flags);
	}

	if (!(ch->flags & XPC_C_ROPENREPLY))
		return;

	ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP);	/* clear all else */

	dev_info(xpc_chan, "channel %d to partition %d connected\n",
		 ch->number, ch->partid);

	spin_unlock_irqrestore(&ch->lock, *irq_flags);
	xpc_create_kthreads(ch, 1, 0);
	spin_lock_irqsave(&ch->lock, *irq_flags);
}

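/*
 * For orientation, the open handshake that xpc_process_connect() advances
 * (a summary inferred from the flag handling in this file, not a separate
 * protocol spec): each side sends an OPENREQUEST, which the peer records as
 * ROPENREQUEST; once both are present, each side answers with an OPENREPLY,
 * which the peer records as ROPENREPLY. Only when OPENREQUEST/ROPENREQUEST
 * and OPENREPLY/ROPENREPLY have all been seen does the channel transition
 * to XPC_C_CONNECTED.
 */
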
/*
 * spin_lock_irqsave() is expected to be held on entry.
 */
static void
xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_DISCONNECTING))
		return;

	DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

	/* make sure all activity has settled down first */

	if (atomic_read(&ch->kthreads_assigned) > 0 ||
	    atomic_read(&ch->references) > 0) {
		return;
	}
	DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));

	if (part->act_state == XPC_P_AS_DEACTIVATING) {
		/* can't proceed until the other side disengages from us */
		if (xpc_partition_engaged(ch->partid))
			return;

	} else {

		/* as long as the other side is up do the full protocol */

		if (!(ch->flags & XPC_C_RCLOSEREQUEST))
			return;

		if (!(ch->flags & XPC_C_CLOSEREPLY)) {
			ch->flags |= XPC_C_CLOSEREPLY;
			xpc_send_chctl_closereply(ch, irq_flags);
		}

		if (!(ch->flags & XPC_C_RCLOSEREPLY))
			return;
	}

	/* wake those waiting for notify completion */
	if (atomic_read(&ch->n_to_notify) > 0) {
		/* we do callout while holding ch->lock, callout can't block */
		xpc_notify_senders_of_disconnect(ch);
	}

	/* both sides are disconnected now */

	if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) {
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		xpc_disconnect_callout(ch, xpDisconnected);
		spin_lock_irqsave(&ch->lock, *irq_flags);
	}

	DBUG_ON(atomic_read(&ch->n_to_notify) != 0);

	/* it's now safe to free the channel's message queues */
	xpc_teardown_msg_structures(ch);

	ch->func = NULL;
	ch->key = NULL;
	ch->entry_size = 0;
	ch->local_nentries = 0;
	ch->remote_nentries = 0;
	ch->kthreads_assigned_limit = 0;
	ch->kthreads_idle_limit = 0;

	/*
	 * Mark the channel disconnected and clear all other flags, including
	 * XPC_C_SETUP (because of the call to xpc_teardown_msg_structures())
	 * but not including XPC_C_WDISCONNECT (if it was set).
	 */
	ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));

	atomic_dec(&part->nchannels_active);

	if (channel_was_connected) {
		dev_info(xpc_chan, "channel %d to partition %d disconnected, "
			 "reason=%d\n", ch->number, ch->partid, ch->reason);
	}

	if (ch->flags & XPC_C_WDISCONNECT) {
		/* we won't lose the CPU since we're holding ch->lock */
		complete(&ch->wdisconnect_wait);
	} else if (ch->delayed_chctl_flags) {
		if (part->act_state != XPC_P_AS_DEACTIVATING) {
			/* time to take action on any delayed chctl flags */
			spin_lock(&part->chctl_lock);
			part->chctl.flags[ch->number] |=
			    ch->delayed_chctl_flags;
			spin_unlock(&part->chctl_lock);
		}
		ch->delayed_chctl_flags = 0;
	}
}

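/*
 * The close handshake mirrors the open one (again inferred from the flag
 * handling above): each side sends a CLOSEREQUEST, recorded by the peer as
 * RCLOSEREQUEST, then answers with a CLOSEREPLY, recorded as RCLOSEREPLY.
 * Teardown proceeds once all four have been seen, except when the remote
 * partition is deactivating, in which case only its disengagement is
 * waited for.
 */
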
/*
 * Process a change in the channel's remote connection state.
 */
static void
xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number,
				  u8 chctl_flags)
{
	unsigned long irq_flags;
	struct xpc_openclose_args *args =
	    &part->remote_openclose_args[ch_number];
	struct xpc_channel *ch = &part->channels[ch_number];
	enum xp_retval reason;

	spin_lock_irqsave(&ch->lock, irq_flags);

again:

	if ((ch->flags & XPC_C_DISCONNECTED) &&
	    (ch->flags & XPC_C_WDISCONNECT)) {
		/*
		 * Delay processing chctl flags until thread waiting disconnect
		 * has had a chance to see that the channel is disconnected.
		 */
		ch->delayed_chctl_flags |= chctl_flags;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	if (chctl_flags & XPC_CHCTL_CLOSEREQUEST) {

		dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREQUEST (reason=%d) received "
			"from partid=%d, channel=%d\n", args->reason,
			ch->partid, ch->number);

		/*
		 * If RCLOSEREQUEST is set, we're probably waiting for
		 * RCLOSEREPLY. We should find it and a ROPENREQUEST packed
		 * with this RCLOSEREQUEST in the chctl_flags.
		 */

		if (ch->flags & XPC_C_RCLOSEREQUEST) {
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
			DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);

			DBUG_ON(!(chctl_flags & XPC_CHCTL_CLOSEREPLY));
			chctl_flags &= ~XPC_CHCTL_CLOSEREPLY;
			ch->flags |= XPC_C_RCLOSEREPLY;

			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
			goto again;
		}

		if (ch->flags & XPC_C_DISCONNECTED) {
			if (!(chctl_flags & XPC_CHCTL_OPENREQUEST)) {
				if (part->chctl.flags[ch_number] &
				    XPC_CHCTL_OPENREQUEST) {

					DBUG_ON(ch->delayed_chctl_flags != 0);
					spin_lock(&part->chctl_lock);
					part->chctl.flags[ch_number] |=
					    XPC_CHCTL_CLOSEREQUEST;
					spin_unlock(&part->chctl_lock);
				}
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				return;
			}

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
			ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
		}

		chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY);

		/*
		 * The meaningful CLOSEREQUEST connection state fields are:
		 *	reason = reason connection is to be closed
		 */

		ch->flags |= XPC_C_RCLOSEREQUEST;

		if (!(ch->flags & XPC_C_DISCONNECTING)) {
			reason = args->reason;
			if (reason <= xpSuccess || reason > xpUnknownReason)
				reason = xpUnknownReason;
			else if (reason == xpUnregistering)
				reason = xpOtherUnregistering;

			XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

			DBUG_ON(chctl_flags & XPC_CHCTL_CLOSEREPLY);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		xpc_process_disconnect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_CLOSEREPLY) {

		dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREPLY received from partid="
			"%d, channel=%d\n", ch->partid, ch->number);

		if (ch->flags & XPC_C_DISCONNECTED) {
			DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

		if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
			if (part->chctl.flags[ch_number] &
			    XPC_CHCTL_CLOSEREQUEST) {

				DBUG_ON(ch->delayed_chctl_flags != 0);
				spin_lock(&part->chctl_lock);
				part->chctl.flags[ch_number] |=
				    XPC_CHCTL_CLOSEREPLY;
				spin_unlock(&part->chctl_lock);
			}
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		ch->flags |= XPC_C_RCLOSEREPLY;

		if (ch->flags & XPC_C_CLOSEREPLY) {
			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
		}
	}

	if (chctl_flags & XPC_CHCTL_OPENREQUEST) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENREQUEST (entry_size=%d, "
			"local_nentries=%d) received from partid=%d, "
			"channel=%d\n", args->entry_size, args->local_nentries,
			ch->partid, ch->number);

		if (part->act_state == XPC_P_AS_DEACTIVATING ||
		    (ch->flags & XPC_C_ROPENREQUEST)) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) {
			ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST;
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}
		DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
				       XPC_C_OPENREQUEST)));
		DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
				     XPC_C_OPENREPLY | XPC_C_CONNECTED));

		/*
		 * The meaningful OPENREQUEST connection state fields are:
		 *	entry_size = size of channel's messages in bytes
		 *	local_nentries = remote partition's local_nentries
		 */
		if (args->entry_size == 0 || args->local_nentries == 0) {
			/* assume OPENREQUEST was delayed by mistake */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
		ch->remote_nentries = args->local_nentries;

		if (ch->flags & XPC_C_OPENREQUEST) {
			if (args->entry_size != ch->entry_size) {
				XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
						       &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				return;
			}
		} else {
			ch->entry_size = args->entry_size;

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
		}

		xpc_process_connect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_OPENREPLY) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY (local_msgqueue_pa="
			"0x%lx, local_nentries=%d, remote_nentries=%d) "
			"received from partid=%d, channel=%d\n",
			args->local_msgqueue_pa, args->local_nentries,
			args->remote_nentries, ch->partid, ch->number);

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}
		if (!(ch->flags & XPC_C_OPENREQUEST)) {
			XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
					       &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
		DBUG_ON(ch->flags & XPC_C_CONNECTED);

		/*
		 * The meaningful OPENREPLY connection state fields are:
		 *	local_msgqueue_pa = physical address of remote
		 *			    partition's local_msgqueue
		 *	local_nentries = remote partition's local_nentries
		 *	remote_nentries = remote partition's remote_nentries
		 */
		DBUG_ON(args->local_msgqueue_pa == 0);
		DBUG_ON(args->local_nentries == 0);
		DBUG_ON(args->remote_nentries == 0);

		ch->flags |= XPC_C_ROPENREPLY;
		xpc_save_remote_msgqueue_pa(ch, args->local_msgqueue_pa);

		if (args->local_nentries < ch->remote_nentries) {
			dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
				"remote_nentries=%d, old remote_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->local_nentries, ch->remote_nentries,
				ch->partid, ch->number);

			ch->remote_nentries = args->local_nentries;
		}
		if (args->remote_nentries < ch->local_nentries) {
			dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
				"local_nentries=%d, old local_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->remote_nentries, ch->local_nentries,
				ch->partid, ch->number);

			ch->local_nentries = args->remote_nentries;
		}

		xpc_process_connect(ch, &irq_flags);
	}

	spin_unlock_irqrestore(&ch->lock, irq_flags);
}

/*
 * Attempt to establish a channel connection to a remote partition.
 */
static enum xp_retval
xpc_connect_channel(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	struct xpc_registration *registration = &xpc_registrations[ch->number];

	if (mutex_trylock(&registration->mutex) == 0)
		return xpRetry;

	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
		mutex_unlock(&registration->mutex);
		return xpUnregistered;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	DBUG_ON(ch->flags & XPC_C_CONNECTED);
	DBUG_ON(ch->flags & XPC_C_OPENREQUEST);

	if (ch->flags & XPC_C_DISCONNECTING) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		mutex_unlock(&registration->mutex);
		return ch->reason;
	}

	/* add info from the channel connect registration to the channel */

	ch->kthreads_assigned_limit = registration->assigned_limit;
	ch->kthreads_idle_limit = registration->idle_limit;
	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_active) != 0);

	ch->func = registration->func;
	DBUG_ON(registration->func == NULL);
	ch->key = registration->key;

	ch->local_nentries = registration->nentries;

	if (ch->flags & XPC_C_ROPENREQUEST) {
		if (registration->entry_size != ch->entry_size) {
			/* the local and remote sides aren't the same */

			/*
			 * Because XPC_DISCONNECT_CHANNEL() can block we're
			 * forced to release the registration mutex before we
			 * unlock the channel lock. But that's okay here
			 * because we're done with the part that required the
			 * registration mutex. XPC_DISCONNECT_CHANNEL()
			 * requires that the channel lock be locked and will
			 * unlock and relock the channel lock as needed.
			 */
			mutex_unlock(&registration->mutex);
			XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
					       &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return xpUnequalMsgSizes;
		}
	} else {
		ch->entry_size = registration->entry_size;

		XPC_SET_REASON(ch, 0, 0);
		ch->flags &= ~XPC_C_DISCONNECTED;

		atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
	}

	mutex_unlock(&registration->mutex);

	/* initiate the connection */

	ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
	xpc_send_chctl_openrequest(ch, &irq_flags);

	xpc_process_connect(ch, &irq_flags);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return xpSuccess;
}

void
xpc_process_sent_chctl_flags(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;
	struct xpc_channel *ch;
	int ch_number;
	u32 ch_flags;

	chctl.all_flags = xpc_get_chctl_all_flags(part);

	/*
	 * Initiate channel connections for registered channels.
	 *
	 * For each connected channel that has pending messages activate idle
	 * kthreads and/or create new kthreads as needed.
	 */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		/*
		 * Process any open or close related chctl flags, and then deal
		 * with connecting or disconnecting the channel as required.
		 */

		if (chctl.flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS) {
			xpc_process_openclose_chctl_flags(part, ch_number,
							chctl.flags[ch_number]);
		}

		ch_flags = ch->flags;	/* need an atomic snapshot of flags */

		if (ch_flags & XPC_C_DISCONNECTING) {
			spin_lock_irqsave(&ch->lock, irq_flags);
			xpc_process_disconnect(ch, &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			continue;
		}

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			continue;

		if (!(ch_flags & XPC_C_CONNECTED)) {
			if (!(ch_flags & XPC_C_OPENREQUEST)) {
				DBUG_ON(ch_flags & XPC_C_SETUP);
				(void)xpc_connect_channel(ch);
			} else {
				spin_lock_irqsave(&ch->lock, irq_flags);
				xpc_process_connect(ch, &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			continue;
		}

		/*
		 * Process any message related chctl flags, this may involve
		 * the activation of kthreads to deliver any pending messages
		 * sent from the other partition.
		 */

		if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
			xpc_process_msg_chctl_flags(part, ch_number);
	}
}

/*
 * XPC's heartbeat code calls this function to inform XPC that a partition is
 * going down. XPC responds by tearing down the XPartition Communication
 * infrastructure used for the just downed partition.
 *
 * XPC's heartbeat code will never call this function and xpc_partition_up()
 * at the same time. Nor will it ever make multiple calls to either function
 * at the same time.
 */
void
xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason)
{
	unsigned long irq_flags;
	int ch_number;
	struct xpc_channel *ch;

	dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
		XPC_PARTID(part), reason);

	if (!xpc_part_ref(part)) {
		/* infrastructure for this partition isn't currently set up */
		return;
	}

	/* disconnect channels associated with the partition going down */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		xpc_msgqueue_ref(ch);
		spin_lock_irqsave(&ch->lock, irq_flags);

		XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		xpc_msgqueue_deref(ch);
	}

	xpc_wakeup_channel_mgr(part);

	xpc_part_deref(part);
}

/*
 * Called by XP at the time of channel connection registration to cause
 * XPC to establish connections to all currently active partitions.
 */
void
xpc_initiate_connect(int ch_number)
{
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			ch = &part->channels[ch_number];

			/*
			 * Initiate the establishment of a connection on the
			 * newly registered channel to the remote partition.
			 */
			xpc_wakeup_channel_mgr(part);
			xpc_part_deref(part);
		}
	}
}

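/*
 * Illustrative registration sketch, not built with this file: how a user of
 * XPC typically ends up in xpc_initiate_connect(). The xpc_connect() wrapper
 * and the xpc_channel_func typedef are assumed from xp.h; the "foo" names,
 * channel number, and sizing parameters are hypothetical.
 */
#if 0
#define FOO_CHANNEL	0	/* hypothetical channel number */

static void foo_channel_func(enum xp_retval reason, short partid,
			     int ch_number, void *data, void *key)
{
	/*
	 * Reasons seen here include xpConnected, xpMsgReceived, and the
	 * disconnect reasons passed to xpc_disconnect_callout().
	 */
}

static enum xp_retval foo_register(void)
{
	/* 128-byte payloads, 64 message entries, kthread limits of 4 and 2 */
	return xpc_connect(FOO_CHANNEL, foo_channel_func, NULL, 128, 64, 4, 2);
}
#endif
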
void
xpc_connected_callout(struct xpc_channel *ch)
{
	/* let the registerer know that a connection has been established */

	if (ch->func != NULL) {
		dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, "
			"partid=%d, channel=%d\n", ch->partid, ch->number);

		ch->func(xpConnected, ch->partid, ch->number,
			 (void *)(u64)ch->local_nentries, ch->key);

		dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, "
			"partid=%d, channel=%d\n", ch->partid, ch->number);
	}
}

/*
 * Called by XP at the time of channel connection unregistration to cause
 * XPC to teardown all current connections for the specified channel.
 *
 * Before returning xpc_initiate_disconnect() will wait until all connections
 * on the specified channel have been closed and torn down. So the caller can
 * be assured that they will not be receiving any more callouts from XPC to
 * the function they registered via xpc_connect().
 *
 * Arguments:
 *
 *	ch_number - channel # to unregister.
 */
void
xpc_initiate_disconnect(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

	/* initiate the channel disconnect for every active partition */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			ch = &part->channels[ch_number];
			xpc_msgqueue_ref(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);

			if (!(ch->flags & XPC_C_DISCONNECTED)) {
				ch->flags |= XPC_C_WDISCONNECT;
				XPC_DISCONNECT_CHANNEL(ch, xpUnregistering,
						       &irq_flags);
			}

			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);
		}
	}

	xpc_disconnect_wait(ch_number);
}

/*
 * To disconnect a channel, and reflect it back to all who may be waiting.
 *
 * An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
 * xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by
 * xpc_disconnect_wait().
 *
 * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
 */
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
		       enum xp_retval reason, unsigned long *irq_flags)
{
	u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
		return;

	DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));

	dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
		reason, line, ch->partid, ch->number);

	XPC_SET_REASON(ch, reason, line);

	ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
	/* some of these may not have been set */
	ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
		       XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
		       XPC_C_CONNECTING | XPC_C_CONNECTED);

	xpc_send_chctl_closerequest(ch, irq_flags);

	if (channel_was_connected)
		ch->flags |= XPC_C_WASCONNECTED;

	spin_unlock_irqrestore(&ch->lock, *irq_flags);

	/* wake all idle kthreads so they can exit */
	if (atomic_read(&ch->kthreads_idle) > 0) {
		wake_up_all(&ch->idle_wq);

	} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		   !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		/* start a kthread that will do the xpDisconnecting callout */
		xpc_create_kthreads(ch, 1, 1);
	}

	/* wake those waiting to allocate an entry from the local msg queue */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);

	spin_lock_irqsave(&ch->lock, *irq_flags);
}

void
xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason)
{
	/*
	 * Let the channel's registerer know that the channel is being
	 * disconnected. We don't want to do this if the registerer was never
	 * informed of a connection being made.
	 */

	if (ch->func != NULL) {
		dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
			"channel=%d\n", reason, ch->partid, ch->number);

		ch->func(reason, ch->partid, ch->number, NULL, ch->key);

		dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
			"channel=%d\n", reason, ch->partid, ch->number);
	}
}

/*
 * Wait for a message entry to become available for the specified channel,
 * but don't wait any longer than 1 jiffy.
 */
enum xp_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
	enum xp_retval ret;

	if (ch->flags & XPC_C_DISCONNECTING) {
		DBUG_ON(ch->reason == xpInterrupted);
		return ch->reason;
	}

	atomic_inc(&ch->n_on_msg_allocate_wq);
	ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
	atomic_dec(&ch->n_on_msg_allocate_wq);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		DBUG_ON(ch->reason == xpInterrupted);
	} else if (ret == 0) {
		ret = xpTimeout;
	} else {
		ret = xpInterrupted;
	}

	return ret;
}

/*
 * Send a message that contains the user's payload on the specified channel
 * connected to the specified partition.
 *
 * NOTE that this routine can sleep waiting for a message entry to become
 * available. To not sleep, pass in the XPC_NOWAIT flag.
 *
 * Once sent, this routine will not wait for the message to be received, nor
 * will notification be given when it does happen.
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # to send message on.
 *	flags - see xp.h for valid flags.
 *	payload - pointer to the payload which is to be sent.
 *	payload_size - size of the payload in bytes.
 */
enum xp_retval
xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload,
		  u16 payload_size)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	enum xp_retval ret = xpUnknownReason;

	dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
		partid, ch_number);

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
	DBUG_ON(payload == NULL);

	if (xpc_part_ref(part)) {
		ret = xpc_send_payload(&part->channels[ch_number], flags,
				       payload, payload_size, 0, NULL, NULL);
		xpc_part_deref(part);
	}

	return ret;
}

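/*
 * Illustrative usage sketch, not built with this file: a fire-and-forget
 * send on an already connected channel. XPC_NOWAIT comes from xp.h; the
 * "foo" names and channel number are hypothetical.
 */
#if 0
#define FOO_CHANNEL	0	/* hypothetical channel number */

static enum xp_retval foo_send(short partid, void *buf, u16 len)
{
	/*
	 * With XPC_NOWAIT, xpc_initiate_send() returns instead of sleeping
	 * in xpc_allocate_msg_wait() when no message entry is available.
	 */
	return xpc_initiate_send(partid, FOO_CHANNEL, XPC_NOWAIT, buf, len);
}
#endif
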
/*
 * Send a message that contains the user's payload on the specified channel
 * connected to the specified partition.
 *
 * NOTE that this routine can sleep waiting for a message entry to become
 * available. To not sleep, pass in the XPC_NOWAIT flag.
 *
 * This routine will not wait for the message to be sent or received.
 *
 * Once the remote end of the channel has received the message, the function
 * passed as an argument to xpc_initiate_send_notify() will be called. This
 * allows the sender to free up or re-use any buffers referenced by the
 * message, but does NOT mean the message has been processed at the remote
 * end by a receiver.
 *
 * If this routine returns an error, the caller's function will NOT be called.
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # to send message on.
 *	flags - see xp.h for valid flags.
 *	payload - pointer to the payload which is to be sent.
 *	payload_size - size of the payload in bytes.
 *	func - function to call with asynchronous notification of message
 *	       receipt. THIS FUNCTION MUST BE NON-BLOCKING.
 *	key - user-defined key to be passed to the function when it's called.
 */
enum xp_retval
xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload,
			 u16 payload_size, xpc_notify_func func, void *key)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	enum xp_retval ret = xpUnknownReason;

	dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
		partid, ch_number);

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
	DBUG_ON(payload == NULL);
	DBUG_ON(func == NULL);

	if (xpc_part_ref(part)) {
		ret = xpc_send_payload(&part->channels[ch_number], flags,
				       payload, payload_size, XPC_N_CALL, func,
				       key);
		xpc_part_deref(part);
	}

	return ret;
}

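/*
 * Illustrative usage sketch, not built with this file: a send with
 * asynchronous delivery notification. The xpc_notify_func signature is
 * assumed from xp.h and the "foo" names are hypothetical. The callout only
 * means the remote partition received the message, not that it processed
 * it, so freeing the send buffer is the typical action (kfree() requires
 * <linux/slab.h>).
 */
#if 0
#define FOO_CHANNEL	0	/* hypothetical channel number */

/* must be non-blocking; called once the remote side has received the msg */
static void foo_msg_delivered(enum xp_retval reason, short partid,
			      int ch_number, void *key)
{
	kfree(key);	/* 'key' was set to the buffer pointer below */
}

static enum xp_retval foo_send_notify(short partid, void *buf, u16 len)
{
	return xpc_initiate_send_notify(partid, FOO_CHANNEL, XPC_NOWAIT,
					buf, len, foo_msg_delivered, buf);
}
#endif
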
/*
 * Deliver a message's payload to its intended recipient.
 */
void
xpc_deliver_payload(struct xpc_channel *ch)
{
	void *payload;

	payload = xpc_get_deliverable_payload(ch);
	if (payload != NULL) {

		/*
		 * This ref is taken to protect the payload itself from being
		 * freed before the user is finished with it, which the user
		 * indicates by calling xpc_initiate_received().
		 */
		xpc_msgqueue_ref(ch);

		atomic_inc(&ch->kthreads_active);

		if (ch->func != NULL) {
			dev_dbg(xpc_chan, "ch->func() called, payload=0x%p "
				"partid=%d channel=%d\n", payload, ch->partid,
				ch->number);

			/* deliver the message to its intended recipient */
			ch->func(xpMsgReceived, ch->partid, ch->number, payload,
				 ch->key);

			dev_dbg(xpc_chan, "ch->func() returned, payload=0x%p "
				"partid=%d channel=%d\n", payload, ch->partid,
				ch->number);
		}

		atomic_dec(&ch->kthreads_active);
	}
}

/*
 * Acknowledge receipt of a delivered message's payload.
 *
 * This function, although called by users, does not call xpc_part_ref() to
 * ensure that the partition infrastructure is in place. It relies on the
 * fact that we called xpc_msgqueue_ref() in xpc_deliver_payload().
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # message received on.
 *	payload - pointer to the payload area allocated via
 *		  xpc_initiate_send() or xpc_initiate_send_notify().
 */
void
xpc_initiate_received(short partid, int ch_number, void *payload)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);

	ch = &part->channels[ch_number];
	xpc_received_payload(ch, payload);

	/* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload() */
	xpc_msgqueue_deref(ch);
}

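/*
 * Illustrative usage sketch, not built with this file: the receive side of
 * a registerer's channel function. On xpMsgReceived the data argument is
 * the payload handed out by xpc_deliver_payload(); on xpConnected it
 * carries local_nentries (see xpc_connected_callout()). The "foo" names
 * are hypothetical and the callback signature is assumed from xp.h.
 */
#if 0
static void foo_handle_msg(void *payload);	/* hypothetical consumer */

static void foo_recv_func(enum xp_retval reason, short partid,
			  int ch_number, void *data, void *key)
{
	if (reason == xpMsgReceived) {
		foo_handle_msg(data);

		/* hand the payload area back to XPC once we're done with it */
		xpc_initiate_received(partid, ch_number, data);
	}
}
#endif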