xpc_channel.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) channel support.
 *
 *	This is the part of XPC that manages the channels and
 *	sends/receives messages across them to/from other partitions.
 *
 */

#include <linux/device.h>
#include "xpc.h"
/*
 * Process a connect message from a remote partition.
 *
 * Note: xpc_process_connect() is expecting to be called with the
 * spin_lock_irqsave held and will leave it locked upon return.
 */
static void
xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	enum xp_retval ret;

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_OPENREQUEST) ||
	    !(ch->flags & XPC_C_ROPENREQUEST)) {
		/* nothing more to do for now */
		return;
	}
	DBUG_ON(!(ch->flags & XPC_C_CONNECTING));

	if (!(ch->flags & XPC_C_SETUP)) {
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		ret = xpc_allocate_msgqueues(ch);
		spin_lock_irqsave(&ch->lock, *irq_flags);

		if (ret != xpSuccess)
			XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);

		ch->flags |= XPC_C_SETUP;

		if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
			return;

		DBUG_ON(ch->local_msgqueue == NULL);
		DBUG_ON(ch->remote_msgqueue == NULL);
	}

	if (!(ch->flags & XPC_C_OPENREPLY)) {
		ch->flags |= XPC_C_OPENREPLY;
		xpc_send_chctl_openreply(ch, irq_flags);
	}

	if (!(ch->flags & XPC_C_ROPENREPLY))
		return;

	DBUG_ON(ch->remote_msgqueue_pa == 0);

	ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP);	/* clear all else */

	dev_info(xpc_chan, "channel %d to partition %d connected\n",
		 ch->number, ch->partid);

	spin_unlock_irqrestore(&ch->lock, *irq_flags);
	xpc_create_kthreads(ch, 1, 0);
	spin_lock_irqsave(&ch->lock, *irq_flags);
}
/*
 * spin_lock_irqsave() is expected to be held on entry.
 */
static void
xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_DISCONNECTING))
		return;

	DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

	/* make sure all activity has settled down first */

	if (atomic_read(&ch->kthreads_assigned) > 0 ||
	    atomic_read(&ch->references) > 0) {
		return;
	}
	DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));

	if (part->act_state == XPC_P_DEACTIVATING) {
		/* can't proceed until the other side disengages from us */
		if (xpc_partition_engaged(ch->partid))
			return;

	} else {

		/* as long as the other side is up do the full protocol */

		if (!(ch->flags & XPC_C_RCLOSEREQUEST))
			return;

		if (!(ch->flags & XPC_C_CLOSEREPLY)) {
			ch->flags |= XPC_C_CLOSEREPLY;
			xpc_send_chctl_closereply(ch, irq_flags);
		}

		if (!(ch->flags & XPC_C_RCLOSEREPLY))
			return;
	}

	/* wake those waiting for notify completion */
	if (atomic_read(&ch->n_to_notify) > 0) {
		/* we do callout while holding ch->lock, callout can't block */
		xpc_notify_senders_of_disconnect(ch);
	}

	/* both sides are disconnected now */

	if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) {
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		xpc_disconnect_callout(ch, xpDisconnected);
		spin_lock_irqsave(&ch->lock, *irq_flags);
	}

	/* it's now safe to free the channel's message queues */
	xpc_free_msgqueues(ch);

	/*
	 * Mark the channel disconnected and clear all other flags, including
	 * XPC_C_SETUP (because of call to xpc_free_msgqueues()) but not
	 * including XPC_C_WDISCONNECT (if it was set).
	 */
	ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));

	atomic_dec(&part->nchannels_active);

	if (channel_was_connected) {
		dev_info(xpc_chan, "channel %d to partition %d disconnected, "
			 "reason=%d\n", ch->number, ch->partid, ch->reason);
	}

	if (ch->flags & XPC_C_WDISCONNECT) {
		/* we won't lose the CPU since we're holding ch->lock */
		complete(&ch->wdisconnect_wait);
	} else if (ch->delayed_chctl_flags) {
		if (part->act_state != XPC_P_DEACTIVATING) {
			/* time to take action on any delayed chctl flags */
			spin_lock(&part->chctl_lock);
			part->chctl.flags[ch->number] |=
			    ch->delayed_chctl_flags;
			spin_unlock(&part->chctl_lock);
		}
		ch->delayed_chctl_flags = 0;
	}
}
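/*
 * Connecting and disconnecting are both two-phase handshakes driven by
 * the chctl flags processed below: an exchange of OPENREQUESTs followed
 * by an exchange of OPENREPLYs establishes a connection, and an exchange
 * of CLOSEREQUESTs followed by an exchange of CLOSEREPLYs completes a
 * disconnect.
 */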
/*
 * Process a change in the channel's remote connection state.
 */
static void
xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number,
				  u8 chctl_flags)
{
	unsigned long irq_flags;
	struct xpc_openclose_args *args =
	    &part->remote_openclose_args[ch_number];
	struct xpc_channel *ch = &part->channels[ch_number];
	enum xp_retval reason;

	spin_lock_irqsave(&ch->lock, irq_flags);

again:

	if ((ch->flags & XPC_C_DISCONNECTED) &&
	    (ch->flags & XPC_C_WDISCONNECT)) {
		/*
		 * Delay processing chctl flags until thread waiting disconnect
		 * has had a chance to see that the channel is disconnected.
		 */
		ch->delayed_chctl_flags |= chctl_flags;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	if (chctl_flags & XPC_CHCTL_CLOSEREQUEST) {

		dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREQUEST (reason=%d) received "
			"from partid=%d, channel=%d\n", args->reason,
			ch->partid, ch->number);

		/*
		 * If RCLOSEREQUEST is set, we're probably waiting for
		 * RCLOSEREPLY. We should find it and a ROPENREQUEST packed
		 * with this RCLOSEREQUEST in the chctl_flags.
		 */

		if (ch->flags & XPC_C_RCLOSEREQUEST) {
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
			DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);

			DBUG_ON(!(chctl_flags & XPC_CHCTL_CLOSEREPLY));
			chctl_flags &= ~XPC_CHCTL_CLOSEREPLY;
			ch->flags |= XPC_C_RCLOSEREPLY;

			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
			goto again;
		}

		if (ch->flags & XPC_C_DISCONNECTED) {
			if (!(chctl_flags & XPC_CHCTL_OPENREQUEST)) {
				if (part->chctl.flags[ch_number] &
				    XPC_CHCTL_OPENREQUEST) {

					DBUG_ON(ch->delayed_chctl_flags != 0);
					spin_lock(&part->chctl_lock);
					part->chctl.flags[ch_number] |=
					    XPC_CHCTL_CLOSEREQUEST;
					spin_unlock(&part->chctl_lock);
				}
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				return;
			}

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
			ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
		}

		chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY);

		/*
		 * The meaningful CLOSEREQUEST connection state fields are:
		 *      reason = reason connection is to be closed
		 */

		ch->flags |= XPC_C_RCLOSEREQUEST;

		if (!(ch->flags & XPC_C_DISCONNECTING)) {
			reason = args->reason;
			if (reason <= xpSuccess || reason > xpUnknownReason)
				reason = xpUnknownReason;
			else if (reason == xpUnregistering)
				reason = xpOtherUnregistering;

			XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

			DBUG_ON(chctl_flags & XPC_CHCTL_CLOSEREPLY);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		xpc_process_disconnect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_CLOSEREPLY) {

		dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREPLY received from partid="
			"%d, channel=%d\n", ch->partid, ch->number);

		if (ch->flags & XPC_C_DISCONNECTED) {
			DBUG_ON(part->act_state != XPC_P_DEACTIVATING);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

		if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
			if (part->chctl.flags[ch_number] &
			    XPC_CHCTL_CLOSEREQUEST) {

				DBUG_ON(ch->delayed_chctl_flags != 0);
				spin_lock(&part->chctl_lock);
				part->chctl.flags[ch_number] |=
				    XPC_CHCTL_CLOSEREPLY;
				spin_unlock(&part->chctl_lock);
			}
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		ch->flags |= XPC_C_RCLOSEREPLY;

		if (ch->flags & XPC_C_CLOSEREPLY) {
			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
		}
	}

	if (chctl_flags & XPC_CHCTL_OPENREQUEST) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENREQUEST (msg_size=%d, "
			"local_nentries=%d) received from partid=%d, "
			"channel=%d\n", args->msg_size, args->local_nentries,
			ch->partid, ch->number);

		if (part->act_state == XPC_P_DEACTIVATING ||
		    (ch->flags & XPC_C_ROPENREQUEST)) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) {
			ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST;
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}
		DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
				       XPC_C_OPENREQUEST)));
		DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
				     XPC_C_OPENREPLY | XPC_C_CONNECTED));

		/*
		 * The meaningful OPENREQUEST connection state fields are:
		 *      msg_size = size of channel's messages in bytes
		 *      local_nentries = remote partition's local_nentries
		 */

		if (args->msg_size == 0 || args->local_nentries == 0) {
			/* assume OPENREQUEST was delayed by mistake */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
		ch->remote_nentries = args->local_nentries;

		if (ch->flags & XPC_C_OPENREQUEST) {
			if (args->msg_size != ch->msg_size) {
				XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
						       &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				return;
			}
		} else {
			ch->msg_size = args->msg_size;

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
		}

		xpc_process_connect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_OPENREPLY) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY (local_msgqueue_pa="
			"0x%lx, local_nentries=%d, remote_nentries=%d) "
			"received from partid=%d, channel=%d\n",
			(unsigned long)args->local_msgqueue_pa,
			args->local_nentries, args->remote_nentries,
			ch->partid, ch->number);

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}
		if (!(ch->flags & XPC_C_OPENREQUEST)) {
			XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
					       &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
		DBUG_ON(ch->flags & XPC_C_CONNECTED);

		/*
		 * The meaningful OPENREPLY connection state fields are:
		 *      local_msgqueue_pa = physical address of remote
		 *                          partition's local_msgqueue
		 *      local_nentries = remote partition's local_nentries
		 *      remote_nentries = remote partition's remote_nentries
		 */
		DBUG_ON(args->local_msgqueue_pa == 0);
		DBUG_ON(args->local_nentries == 0);
		DBUG_ON(args->remote_nentries == 0);

		ch->flags |= XPC_C_ROPENREPLY;
		ch->remote_msgqueue_pa = args->local_msgqueue_pa;

		if (args->local_nentries < ch->remote_nentries) {
			dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
				"remote_nentries=%d, old remote_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->local_nentries, ch->remote_nentries,
				ch->partid, ch->number);

			ch->remote_nentries = args->local_nentries;
		}
		if (args->remote_nentries < ch->local_nentries) {
			dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
				"local_nentries=%d, old local_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->remote_nentries, ch->local_nentries,
				ch->partid, ch->number);

			ch->local_nentries = args->remote_nentries;
		}

		xpc_process_connect(ch, &irq_flags);
	}

	spin_unlock_irqrestore(&ch->lock, irq_flags);
}
/*
 * Attempt to establish a channel connection to a remote partition.
 */
static enum xp_retval
xpc_connect_channel(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	struct xpc_registration *registration = &xpc_registrations[ch->number];

	if (mutex_trylock(&registration->mutex) == 0)
		return xpRetry;

	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
		mutex_unlock(&registration->mutex);
		return xpUnregistered;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	DBUG_ON(ch->flags & XPC_C_CONNECTED);
	DBUG_ON(ch->flags & XPC_C_OPENREQUEST);

	if (ch->flags & XPC_C_DISCONNECTING) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		mutex_unlock(&registration->mutex);
		return ch->reason;
	}

	/* add info from the channel connect registration to the channel */

	ch->kthreads_assigned_limit = registration->assigned_limit;
	ch->kthreads_idle_limit = registration->idle_limit;
	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_active) != 0);

	ch->func = registration->func;
	DBUG_ON(registration->func == NULL);
	ch->key = registration->key;

	ch->local_nentries = registration->nentries;

	if (ch->flags & XPC_C_ROPENREQUEST) {
		if (registration->msg_size != ch->msg_size) {
			/* the local and remote sides aren't the same */

			/*
			 * Because XPC_DISCONNECT_CHANNEL() can block we're
			 * forced to up the registration sema before we unlock
			 * the channel lock. But that's okay here because we're
			 * done with the part that required the registration
			 * sema. XPC_DISCONNECT_CHANNEL() requires that the
			 * channel lock be locked and will unlock and relock
			 * the channel lock as needed.
			 */
			mutex_unlock(&registration->mutex);
			XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
					       &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return xpUnequalMsgSizes;
		}
	} else {
		ch->msg_size = registration->msg_size;

		XPC_SET_REASON(ch, 0, 0);
		ch->flags &= ~XPC_C_DISCONNECTED;

		atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
	}

	mutex_unlock(&registration->mutex);

	/* initiate the connection */

	ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
	xpc_send_chctl_openrequest(ch, &irq_flags);

	xpc_process_connect(ch, &irq_flags);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return xpSuccess;
}
void
xpc_process_sent_chctl_flags(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;
	struct xpc_channel *ch;
	int ch_number;
	u32 ch_flags;

	chctl.all_flags = xpc_get_chctl_all_flags(part);

	/*
	 * Initiate channel connections for registered channels.
	 *
	 * For each connected channel that has pending messages activate idle
	 * kthreads and/or create new kthreads as needed.
	 */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		/*
		 * Process any open or close related chctl flags, and then deal
		 * with connecting or disconnecting the channel as required.
		 */

		if (chctl.flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS) {
			xpc_process_openclose_chctl_flags(part, ch_number,
							chctl.flags[ch_number]);
		}

		ch_flags = ch->flags;	/* need an atomic snapshot of flags */

		if (ch_flags & XPC_C_DISCONNECTING) {
			spin_lock_irqsave(&ch->lock, irq_flags);
			xpc_process_disconnect(ch, &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			continue;
		}

		if (part->act_state == XPC_P_DEACTIVATING)
			continue;

		if (!(ch_flags & XPC_C_CONNECTED)) {
			if (!(ch_flags & XPC_C_OPENREQUEST)) {
				DBUG_ON(ch_flags & XPC_C_SETUP);
				(void)xpc_connect_channel(ch);
			} else {
				spin_lock_irqsave(&ch->lock, irq_flags);
				xpc_process_connect(ch, &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			continue;
		}

		/*
		 * Process any message related chctl flags, this may involve
		 * the activation of kthreads to deliver any pending messages
		 * sent from the other partition.
		 */

		if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
			xpc_process_msg_chctl_flags(part, ch_number);
	}
}
/*
 * XPC's heartbeat code calls this function to inform XPC that a partition is
 * going down. XPC responds by tearing down the XPartition Communication
 * infrastructure used for the just downed partition.
 *
 * XPC's heartbeat code will never call this function and xpc_partition_up()
 * at the same time. Nor will it ever make multiple calls to either function
 * at the same time.
 */
void
xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason)
{
	unsigned long irq_flags;
	int ch_number;
	struct xpc_channel *ch;

	dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
		XPC_PARTID(part), reason);

	if (!xpc_part_ref(part)) {
		/* infrastructure for this partition isn't currently set up */
		return;
	}

	/* disconnect channels associated with the partition going down */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		xpc_msgqueue_ref(ch);
		spin_lock_irqsave(&ch->lock, irq_flags);

		XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		xpc_msgqueue_deref(ch);
	}

	xpc_wakeup_channel_mgr(part);

	xpc_part_deref(part);
}
/*
 * Called by XP at the time of channel connection registration to cause
 * XPC to establish connections to all currently active partitions.
 */
void
xpc_initiate_connect(int ch_number)
{
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			ch = &part->channels[ch_number];

			/*
			 * Initiate the establishment of a connection on the
			 * newly registered channel to the remote partition.
			 */
			xpc_wakeup_channel_mgr(part);
			xpc_part_deref(part);
		}
	}
}
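/*
 * Note: the registration itself is done on the XP side via xpc_connect(),
 * which records the channel's func, key, msg_size, and nentries in
 * xpc_registrations[] before this routine runs; xpc_connect_channel()
 * above picks those values up when the connection is actually made. The
 * sketch of a channel function under xpc_initiate_received() below shows
 * what a registered func typically looks like.
 */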
void
xpc_connected_callout(struct xpc_channel *ch)
{
	/* let the registerer know that a connection has been established */

	if (ch->func != NULL) {
		dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, "
			"partid=%d, channel=%d\n", ch->partid, ch->number);

		ch->func(xpConnected, ch->partid, ch->number,
			 (void *)(u64)ch->local_nentries, ch->key);

		dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, "
			"partid=%d, channel=%d\n", ch->partid, ch->number);
	}
}
/*
 * Called by XP at the time of channel connection unregistration to cause
 * XPC to tear down all current connections for the specified channel.
 *
 * Before returning xpc_initiate_disconnect() will wait until all connections
 * on the specified channel have been closed/torn down. So the caller can be
 * assured that they will not be receiving any more callouts from XPC to the
 * function they registered via xpc_connect().
 *
 * Arguments:
 *
 *	ch_number - channel # to unregister.
 */
void
xpc_initiate_disconnect(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

	/* initiate the channel disconnect for every active partition */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			ch = &part->channels[ch_number];
			xpc_msgqueue_ref(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);

			if (!(ch->flags & XPC_C_DISCONNECTED)) {
				ch->flags |= XPC_C_WDISCONNECT;

				XPC_DISCONNECT_CHANNEL(ch, xpUnregistering,
						       &irq_flags);
			}

			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);
		}
	}

	xpc_disconnect_wait(ch_number);
}
/*
 * To disconnect a channel, and reflect it back to all who may be waiting.
 *
 * An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
 * xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by
 * xpc_disconnect_wait().
 *
 * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
 */
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
		       enum xp_retval reason, unsigned long *irq_flags)
{
	u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
		return;

	DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));

	dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
		reason, line, ch->partid, ch->number);

	XPC_SET_REASON(ch, reason, line);

	ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
	/* some of these may not have been set */
	ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
		       XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
		       XPC_C_CONNECTING | XPC_C_CONNECTED);

	xpc_send_chctl_closerequest(ch, irq_flags);

	if (channel_was_connected)
		ch->flags |= XPC_C_WASCONNECTED;

	spin_unlock_irqrestore(&ch->lock, *irq_flags);

	/* wake all idle kthreads so they can exit */
	if (atomic_read(&ch->kthreads_idle) > 0) {
		wake_up_all(&ch->idle_wq);

	} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		   !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		/* start a kthread that will do the xpDisconnecting callout */
		xpc_create_kthreads(ch, 1, 1);
	}

	/* wake those waiting to allocate an entry from the local msg queue */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);

	spin_lock_irqsave(&ch->lock, *irq_flags);
}
void
xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason)
{
	/*
	 * Let the channel's registerer know that the channel is being
	 * disconnected. We don't want to do this if the registerer was never
	 * informed of a connection being made.
	 */

	if (ch->func != NULL) {
		dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
			"channel=%d\n", reason, ch->partid, ch->number);

		ch->func(reason, ch->partid, ch->number, NULL, ch->key);

		dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
			"channel=%d\n", reason, ch->partid, ch->number);
	}
}
/*
 * Wait for a message entry to become available for the specified channel,
 * but don't wait any longer than 1 jiffy.
 */
enum xp_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
	enum xp_retval ret;

	if (ch->flags & XPC_C_DISCONNECTING) {
		DBUG_ON(ch->reason == xpInterrupted);
		return ch->reason;
	}

	atomic_inc(&ch->n_on_msg_allocate_wq);
	ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
	atomic_dec(&ch->n_on_msg_allocate_wq);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		DBUG_ON(ch->reason == xpInterrupted);
	} else if (ret == 0) {
		ret = xpTimeout;
	} else {
		ret = xpInterrupted;
	}

	return ret;
}
/*
 * Send a message that contains the user's payload on the specified channel
 * connected to the specified partition.
 *
 * NOTE that this routine can sleep waiting for a message entry to become
 * available. To not sleep, pass in the XPC_NOWAIT flag.
 *
 * Once sent, this routine will not wait for the message to be received, nor
 * will notification be given when it does happen.
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # to send message on.
 *	flags - see xp.h for valid flags.
 *	payload - pointer to the payload which is to be sent.
 *	payload_size - size of the payload in bytes.
 */
enum xp_retval
xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload,
		  u16 payload_size)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	enum xp_retval ret = xpUnknownReason;

	dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
		partid, ch_number);

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
	DBUG_ON(payload == NULL);

	if (xpc_part_ref(part)) {
		ret = xpc_send_msg(&part->channels[ch_number], flags, payload,
				   payload_size, 0, NULL, NULL);
		xpc_part_deref(part);
	}

	return ret;
}
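/*
 * A minimal caller-side sketch (the partition/channel numbers and the
 * payload struct are hypothetical, not part of this file):
 *
 *	struct my_payload pl = { ... };
 *	enum xp_retval ret;
 *
 *	ret = xpc_initiate_send(remote_partid, MY_CH_NUMBER, XPC_NOWAIT,
 *				&pl, sizeof(pl));
 *	if (ret != xpSuccess)
 *		... message was not queued; handle the error or retry ...
 */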
/*
 * Send a message that contains the user's payload on the specified channel
 * connected to the specified partition.
 *
 * NOTE that this routine can sleep waiting for a message entry to become
 * available. To not sleep, pass in the XPC_NOWAIT flag.
 *
 * This routine will not wait for the message to be sent or received.
 *
 * Once the remote end of the channel has received the message, the function
 * passed as an argument to xpc_initiate_send_notify() will be called. This
 * allows the sender to free up or re-use any buffers referenced by the
 * message, but does NOT mean the message has been processed at the remote
 * end by a receiver.
 *
 * If this routine returns an error, the caller's function will NOT be called.
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # to send message on.
 *	flags - see xp.h for valid flags.
 *	payload - pointer to the payload which is to be sent.
 *	payload_size - size of the payload in bytes.
 *	func - function to call with asynchronous notification of message
 *	       receipt. THIS FUNCTION MUST BE NON-BLOCKING.
 *	key - user-defined key to be passed to the function when it's called.
 */
enum xp_retval
xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload,
			 u16 payload_size, xpc_notify_func func, void *key)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	enum xp_retval ret = xpUnknownReason;

	dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
		partid, ch_number);

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
	DBUG_ON(payload == NULL);
	DBUG_ON(func == NULL);

	if (xpc_part_ref(part)) {
		ret = xpc_send_msg(&part->channels[ch_number], flags, payload,
				   payload_size, XPC_N_CALL, func, key);
		xpc_part_deref(part);
	}
	return ret;
}
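/*
 * A minimal notify-side sketch (hypothetical names; this assumes the
 * xpc_notify_func prototype from xp.h takes reason/partid/ch_number/key,
 * and remember the callback must not block):
 *
 *	static void my_notify(enum xp_retval reason, short partid,
 *			      int ch_number, void *key)
 *	{
 *		... buffers referenced by the message may now be reused ...
 *	}
 *
 *	ret = xpc_initiate_send_notify(remote_partid, MY_CH_NUMBER,
 *				       XPC_NOWAIT, &pl, sizeof(pl),
 *				       my_notify, NULL);
 */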
/*
 * Deliver a message to its intended recipient.
 */
void
xpc_deliver_msg(struct xpc_channel *ch)
{
	struct xpc_msg *msg;

	msg = xpc_get_deliverable_msg(ch);
	if (msg != NULL) {

		/*
		 * This ref is taken to protect the payload itself from being
		 * freed before the user is finished with it, which the user
		 * indicates by calling xpc_initiate_received().
		 */
		xpc_msgqueue_ref(ch);

		atomic_inc(&ch->kthreads_active);

		if (ch->func != NULL) {
			dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, "
				"msg_number=%ld, partid=%d, channel=%d\n",
				msg, (signed long)msg->number, ch->partid,
				ch->number);

			/* deliver the message to its intended recipient */
			ch->func(xpMsgReceived, ch->partid, ch->number,
				 &msg->payload, ch->key);

			dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, "
				"msg_number=%ld, partid=%d, channel=%d\n",
				msg, (signed long)msg->number, ch->partid,
				ch->number);
		}

		atomic_dec(&ch->kthreads_active);
	}
}
/*
 * Acknowledge receipt of a delivered message.
 *
 * If a message has XPC_M_INTERRUPT set, send an interrupt to the partition
 * that sent the message.
 *
 * This function, although called by users, does not call xpc_part_ref() to
 * ensure that the partition infrastructure is in place. It relies on the
 * fact that we called xpc_msgqueue_ref() in xpc_deliver_msg().
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # message received on.
 *	payload - pointer to the payload area allocated via
 *		  xpc_initiate_send() or xpc_initiate_send_notify().
 */
void
xpc_initiate_received(short partid, int ch_number, void *payload)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;
	struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);

	ch = &part->channels[ch_number];
	xpc_received_msg(ch, msg);

	/* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */
	xpc_msgqueue_deref(ch);
}
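/*
 * A minimal receive-side sketch (hypothetical channel function; the
 * parameter order matches the ch->func() callout in xpc_deliver_msg()
 * above). The payload pointer handed to the function is the one that
 * must be passed back to xpc_initiate_received():
 *
 *	static void my_ch_func(enum xp_retval reason, short partid,
 *			       int ch_number, void *data, void *key)
 *	{
 *		if (reason == xpMsgReceived) {
 *			... consume the payload at data ...
 *			xpc_initiate_received(partid, ch_number, data);
 *		}
 *	}
 */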