/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) channel support.
 *
 *	This is the part of XPC that manages the channels and
 *	sends/receives messages across them to/from other partitions.
 *
 */

#include <linux/device.h>
#include "xpc.h"
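
/*
 * Connection handshake overview: a channel comes up through an exchange of
 * channel control ("chctl") flags with the remote partition.  Each side
 * sends an OPENREQUEST and answers the remote's request with an OPENREPLY;
 * xpc_process_connect() completes the connection once all four of
 * OPENREQUEST, ROPENREQUEST, OPENREPLY, and ROPENREPLY have been seen.
 * Teardown mirrors this with CLOSEREQUEST/CLOSEREPLY, handled by
 * xpc_process_disconnect().
 */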

/*
 * Process a connect message from a remote partition.
 *
 * Note: xpc_process_connect() expects to be called with the channel's lock
 * held (taken via spin_lock_irqsave()) and will leave it locked upon return.
 */
static void
xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	enum xp_retval ret;

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_OPENREQUEST) ||
	    !(ch->flags & XPC_C_ROPENREQUEST)) {
		/* nothing more to do for now */
		return;
	}
	DBUG_ON(!(ch->flags & XPC_C_CONNECTING));

	if (!(ch->flags & XPC_C_SETUP)) {
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		ret = xpc_allocate_msgqueues(ch);
		spin_lock_irqsave(&ch->lock, *irq_flags);

		if (ret != xpSuccess)
			XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);

		ch->flags |= XPC_C_SETUP;

		if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
			return;

		DBUG_ON(ch->local_msgqueue == NULL);
		DBUG_ON(ch->remote_msgqueue == NULL);
	}

	if (!(ch->flags & XPC_C_OPENREPLY)) {
		ch->flags |= XPC_C_OPENREPLY;
		xpc_send_chctl_openreply(ch, irq_flags);
	}

	if (!(ch->flags & XPC_C_ROPENREPLY))
		return;

	DBUG_ON(ch->remote_msgqueue_pa == 0);

	ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP);	/* clear all else */

	dev_info(xpc_chan, "channel %d to partition %d connected\n",
		 ch->number, ch->partid);

	spin_unlock_irqrestore(&ch->lock, *irq_flags);
	xpc_create_kthreads(ch, 1, 0);
	spin_lock_irqsave(&ch->lock, *irq_flags);
}

/*
 * The channel's lock, taken via spin_lock_irqsave(), is expected to be held
 * on entry.
 */
static void
xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_DISCONNECTING))
		return;

	DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

	/* make sure all activity has settled down first */

	if (atomic_read(&ch->kthreads_assigned) > 0 ||
	    atomic_read(&ch->references) > 0) {
		return;
	}
	DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));

	if (part->act_state == XPC_P_AS_DEACTIVATING) {
		/* can't proceed until the other side disengages from us */
		if (xpc_partition_engaged(ch->partid))
			return;

	} else {

		/* as long as the other side is up do the full protocol */

		if (!(ch->flags & XPC_C_RCLOSEREQUEST))
			return;

		if (!(ch->flags & XPC_C_CLOSEREPLY)) {
			ch->flags |= XPC_C_CLOSEREPLY;
			xpc_send_chctl_closereply(ch, irq_flags);
		}

		if (!(ch->flags & XPC_C_RCLOSEREPLY))
			return;
	}

	/* wake those waiting for notify completion */
	if (atomic_read(&ch->n_to_notify) > 0) {
		/* we do callout while holding ch->lock, callout can't block */
		xpc_notify_senders_of_disconnect(ch);
	}

	/* both sides are disconnected now */

	if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) {
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		xpc_disconnect_callout(ch, xpDisconnected);
		spin_lock_irqsave(&ch->lock, *irq_flags);
	}

	/* it's now safe to free the channel's message queues */
	xpc_free_msgqueues(ch);

	/*
	 * Mark the channel disconnected and clear all other flags, including
	 * XPC_C_SETUP (because of the call to xpc_free_msgqueues()) but not
	 * including XPC_C_WDISCONNECT (if it was set).
	 */
	ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));

	atomic_dec(&part->nchannels_active);

	if (channel_was_connected) {
		dev_info(xpc_chan, "channel %d to partition %d disconnected, "
			 "reason=%d\n", ch->number, ch->partid, ch->reason);
	}

	if (ch->flags & XPC_C_WDISCONNECT) {
		/* we won't lose the CPU since we're holding ch->lock */
		complete(&ch->wdisconnect_wait);
	} else if (ch->delayed_chctl_flags) {
		if (part->act_state != XPC_P_AS_DEACTIVATING) {
			/* time to take action on any delayed chctl flags */
			spin_lock(&part->chctl_lock);
			part->chctl.flags[ch->number] |=
			    ch->delayed_chctl_flags;
			spin_unlock(&part->chctl_lock);
		}
		ch->delayed_chctl_flags = 0;
	}
}

/*
 * Process a change in the channel's remote connection state.
 */
static void
xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number,
				  u8 chctl_flags)
{
	unsigned long irq_flags;
	struct xpc_openclose_args *args =
	    &part->remote_openclose_args[ch_number];
	struct xpc_channel *ch = &part->channels[ch_number];
	enum xp_retval reason;

	spin_lock_irqsave(&ch->lock, irq_flags);

again:

	if ((ch->flags & XPC_C_DISCONNECTED) &&
	    (ch->flags & XPC_C_WDISCONNECT)) {
		/*
		 * Delay processing chctl flags until the thread waiting on
		 * the disconnect has had a chance to see that the channel is
		 * disconnected.
		 */
		ch->delayed_chctl_flags |= chctl_flags;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	if (chctl_flags & XPC_CHCTL_CLOSEREQUEST) {

		dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREQUEST (reason=%d) received "
			"from partid=%d, channel=%d\n", args->reason,
			ch->partid, ch->number);

		/*
		 * If RCLOSEREQUEST is set, we're probably waiting for
		 * RCLOSEREPLY, which should arrive packed with this
		 * RCLOSEREQUEST in chctl_flags (possibly along with a new
		 * ROPENREQUEST).
		 */
		if (ch->flags & XPC_C_RCLOSEREQUEST) {
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
			DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);

			DBUG_ON(!(chctl_flags & XPC_CHCTL_CLOSEREPLY));
			chctl_flags &= ~XPC_CHCTL_CLOSEREPLY;
			ch->flags |= XPC_C_RCLOSEREPLY;

			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
			goto again;
		}

		if (ch->flags & XPC_C_DISCONNECTED) {
			if (!(chctl_flags & XPC_CHCTL_OPENREQUEST)) {
				if (part->chctl.flags[ch_number] &
				    XPC_CHCTL_OPENREQUEST) {

					DBUG_ON(ch->delayed_chctl_flags != 0);
					spin_lock(&part->chctl_lock);
					part->chctl.flags[ch_number] |=
					    XPC_CHCTL_CLOSEREQUEST;
					spin_unlock(&part->chctl_lock);
				}
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				return;
			}

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
			ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
		}

		chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY);

		/*
		 * The meaningful CLOSEREQUEST connection state fields are:
		 *      reason = reason connection is to be closed
		 */

		ch->flags |= XPC_C_RCLOSEREQUEST;

		if (!(ch->flags & XPC_C_DISCONNECTING)) {
			reason = args->reason;
			if (reason <= xpSuccess || reason > xpUnknownReason)
				reason = xpUnknownReason;
			else if (reason == xpUnregistering)
				reason = xpOtherUnregistering;

			XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

			DBUG_ON(chctl_flags & XPC_CHCTL_CLOSEREPLY);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		xpc_process_disconnect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_CLOSEREPLY) {

		dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREPLY received from partid="
			"%d, channel=%d\n", ch->partid, ch->number);

		if (ch->flags & XPC_C_DISCONNECTED) {
			DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

		if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
			if (part->chctl.flags[ch_number] &
			    XPC_CHCTL_CLOSEREQUEST) {

				DBUG_ON(ch->delayed_chctl_flags != 0);
				spin_lock(&part->chctl_lock);
				part->chctl.flags[ch_number] |=
				    XPC_CHCTL_CLOSEREPLY;
				spin_unlock(&part->chctl_lock);
			}
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		ch->flags |= XPC_C_RCLOSEREPLY;

		if (ch->flags & XPC_C_CLOSEREPLY) {
			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
		}
	}

	if (chctl_flags & XPC_CHCTL_OPENREQUEST) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENREQUEST (msg_size=%d, "
			"local_nentries=%d) received from partid=%d, "
			"channel=%d\n", args->msg_size, args->local_nentries,
			ch->partid, ch->number);

		if (part->act_state == XPC_P_AS_DEACTIVATING ||
		    (ch->flags & XPC_C_ROPENREQUEST)) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) {
			ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST;
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}
		DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
				       XPC_C_OPENREQUEST)));
		DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
				     XPC_C_OPENREPLY | XPC_C_CONNECTED));

		/*
		 * The meaningful OPENREQUEST connection state fields are:
		 *      msg_size = size of channel's messages in bytes
		 *      local_nentries = remote partition's local_nentries
		 */
		if (args->msg_size == 0 || args->local_nentries == 0) {
			/* assume OPENREQUEST was delayed by mistake */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
		ch->remote_nentries = args->local_nentries;

		if (ch->flags & XPC_C_OPENREQUEST) {
			if (args->msg_size != ch->msg_size) {
				XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
						       &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				return;
			}
		} else {
			ch->msg_size = args->msg_size;

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
		}

		xpc_process_connect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_OPENREPLY) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY (local_msgqueue_pa="
			"0x%lx, local_nentries=%d, remote_nentries=%d) "
			"received from partid=%d, channel=%d\n",
			args->local_msgqueue_pa, args->local_nentries,
			args->remote_nentries, ch->partid, ch->number);

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}
		if (!(ch->flags & XPC_C_OPENREQUEST)) {
			XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
					       &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
		DBUG_ON(ch->flags & XPC_C_CONNECTED);

		/*
		 * The meaningful OPENREPLY connection state fields are:
		 *      local_msgqueue_pa = physical address of remote
		 *                          partition's local_msgqueue
		 *      local_nentries = remote partition's local_nentries
		 *      remote_nentries = remote partition's remote_nentries
		 */
		DBUG_ON(args->local_msgqueue_pa == 0);
		DBUG_ON(args->local_nentries == 0);
		DBUG_ON(args->remote_nentries == 0);

		ch->flags |= XPC_C_ROPENREPLY;
		ch->remote_msgqueue_pa = args->local_msgqueue_pa;

		if (args->local_nentries < ch->remote_nentries) {
			dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
				"remote_nentries=%d, old remote_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->local_nentries, ch->remote_nentries,
				ch->partid, ch->number);

			ch->remote_nentries = args->local_nentries;
		}
		if (args->remote_nentries < ch->local_nentries) {
			dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
				"local_nentries=%d, old local_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->remote_nentries, ch->local_nentries,
				ch->partid, ch->number);

			ch->local_nentries = args->remote_nentries;
		}

		xpc_process_connect(ch, &irq_flags);
	}

	spin_unlock_irqrestore(&ch->lock, irq_flags);
}

/*
 * Attempt to establish a channel connection to a remote partition.
 */
static enum xp_retval
xpc_connect_channel(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	struct xpc_registration *registration = &xpc_registrations[ch->number];

	if (mutex_trylock(&registration->mutex) == 0)
		return xpRetry;

	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
		mutex_unlock(&registration->mutex);
		return xpUnregistered;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	DBUG_ON(ch->flags & XPC_C_CONNECTED);
	DBUG_ON(ch->flags & XPC_C_OPENREQUEST);

	if (ch->flags & XPC_C_DISCONNECTING) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		mutex_unlock(&registration->mutex);
		return ch->reason;
	}

	/* add info from the channel connect registration to the channel */

	ch->kthreads_assigned_limit = registration->assigned_limit;
	ch->kthreads_idle_limit = registration->idle_limit;
	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_active) != 0);

	ch->func = registration->func;
	DBUG_ON(registration->func == NULL);
	ch->key = registration->key;

	ch->local_nentries = registration->nentries;

	if (ch->flags & XPC_C_ROPENREQUEST) {
		if (registration->msg_size != ch->msg_size) {
			/* the local and remote sides aren't the same */

			/*
			 * Because XPC_DISCONNECT_CHANNEL() can block, we're
			 * forced to release the registration mutex before we
			 * unlock the channel lock. That's okay here because
			 * we're done with the part that required the
			 * registration mutex. XPC_DISCONNECT_CHANNEL()
			 * requires that the channel lock be held and will
			 * unlock and relock it as needed.
			 */
			mutex_unlock(&registration->mutex);
			XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
					       &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return xpUnequalMsgSizes;
		}
	} else {
		ch->msg_size = registration->msg_size;

		XPC_SET_REASON(ch, 0, 0);
		ch->flags &= ~XPC_C_DISCONNECTED;

		atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
	}

	mutex_unlock(&registration->mutex);

	/* initiate the connection */

	ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
	xpc_send_chctl_openrequest(ch, &irq_flags);

	xpc_process_connect(ch, &irq_flags);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return xpSuccess;
}

void
xpc_process_sent_chctl_flags(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;
	struct xpc_channel *ch;
	int ch_number;
	u32 ch_flags;

	chctl.all_flags = xpc_get_chctl_all_flags(part);

	/*
	 * Initiate channel connections for registered channels.
	 *
	 * For each connected channel that has pending messages, activate idle
	 * kthreads and/or create new kthreads as needed.
	 */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		/*
		 * Process any open or close related chctl flags, and then deal
		 * with connecting or disconnecting the channel as required.
		 */

		if (chctl.flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS) {
			xpc_process_openclose_chctl_flags(part, ch_number,
							chctl.flags[ch_number]);
		}

		ch_flags = ch->flags;	/* need an atomic snapshot of flags */

		if (ch_flags & XPC_C_DISCONNECTING) {
			spin_lock_irqsave(&ch->lock, irq_flags);
			xpc_process_disconnect(ch, &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			continue;
		}

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			continue;

		if (!(ch_flags & XPC_C_CONNECTED)) {
			if (!(ch_flags & XPC_C_OPENREQUEST)) {
				DBUG_ON(ch_flags & XPC_C_SETUP);
				(void)xpc_connect_channel(ch);
			} else {
				spin_lock_irqsave(&ch->lock, irq_flags);
				xpc_process_connect(ch, &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			continue;
		}

		/*
		 * Process any message-related chctl flags; this may involve
		 * activating kthreads to deliver any pending messages sent
		 * from the other partition.
		 */

		if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
			xpc_process_msg_chctl_flags(part, ch_number);
	}
}

/*
 * XPC's heartbeat code calls this function to inform XPC that a partition is
 * going down.  XPC responds by tearing down the XPartition Communication
 * infrastructure used for the just downed partition.
 *
 * XPC's heartbeat code will never call this function and xpc_partition_up()
 * at the same time.  Nor will it ever make multiple calls to either function
 * at the same time.
 */
void
xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason)
{
	unsigned long irq_flags;
	int ch_number;
	struct xpc_channel *ch;

	dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
		XPC_PARTID(part), reason);

	if (!xpc_part_ref(part)) {
		/* infrastructure for this partition isn't currently set up */
		return;
	}

	/* disconnect channels associated with the partition going down */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		xpc_msgqueue_ref(ch);
		spin_lock_irqsave(&ch->lock, irq_flags);

		XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		xpc_msgqueue_deref(ch);
	}

	xpc_wakeup_channel_mgr(part);

	xpc_part_deref(part);
}

/*
 * Called by XP at the time of channel connection registration to cause
 * XPC to establish connections to all currently active partitions.
 */
void
xpc_initiate_connect(int ch_number)
{
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			ch = &part->channels[ch_number];

			/*
			 * Initiate the establishment of a connection on the
			 * newly registered channel to the remote partition.
			 */
			xpc_wakeup_channel_mgr(part);
			xpc_part_deref(part);
		}
	}
}

void
xpc_connected_callout(struct xpc_channel *ch)
{
	/* let the registerer know that a connection has been established */

	if (ch->func != NULL) {
		dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, "
			"partid=%d, channel=%d\n", ch->partid, ch->number);

		ch->func(xpConnected, ch->partid, ch->number,
			 (void *)(u64)ch->local_nentries, ch->key);

		dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, "
			"partid=%d, channel=%d\n", ch->partid, ch->number);
	}
}
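
/*
 * For reference, a minimal sketch of a registerer's channel function, using
 * the callout signature seen in this file (reason, partid, ch_number, data,
 * key).  The function name and dispatch shown here are hypothetical, not
 * part of XPC itself:
 *
 *	static void
 *	example_channel_func(enum xp_retval reason, short partid,
 *			     int ch_number, void *data, void *key)
 *	{
 *		switch (reason) {
 *		case xpConnected:
 *			// data carries the channel's local_nentries
 *			break;
 *		case xpMsgReceived:
 *			// data points at the delivered payload; see
 *			// xpc_initiate_received() at the bottom of this file
 *			break;
 *		default:
 *			// everything else is a disconnect reason
 *			break;
 *		}
 *	}
 */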

/*
 * Called by XP at the time of channel connection unregistration to cause
 * XPC to teardown all current connections for the specified channel.
 *
 * Before returning xpc_initiate_disconnect() will wait until all connections
 * on the specified channel have been closed/torn down. So the caller can be
 * assured that they will not be receiving any more callouts from XPC to the
 * function they registered via xpc_connect().
 *
 * Arguments:
 *
 *	ch_number - channel # to unregister.
 */
void
xpc_initiate_disconnect(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

	/* initiate the channel disconnect for every active partition */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			ch = &part->channels[ch_number];
			xpc_msgqueue_ref(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);

			if (!(ch->flags & XPC_C_DISCONNECTED)) {
				ch->flags |= XPC_C_WDISCONNECT;

				XPC_DISCONNECT_CHANNEL(ch, xpUnregistering,
						       &irq_flags);
			}

			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);
		}
	}

	xpc_disconnect_wait(ch_number);
}

/*
 * Disconnect a channel, and reflect the disconnect back to all who may be
 * waiting.
 *
 * An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
 * xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by
 * xpc_disconnect_wait().
 *
 * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
 */
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
		       enum xp_retval reason, unsigned long *irq_flags)
{
	u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
		return;

	DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));

	dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
		reason, line, ch->partid, ch->number);

	XPC_SET_REASON(ch, reason, line);

	ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
	/* some of these may not have been set */
	ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
		       XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
		       XPC_C_CONNECTING | XPC_C_CONNECTED);

	xpc_send_chctl_closerequest(ch, irq_flags);

	if (channel_was_connected)
		ch->flags |= XPC_C_WASCONNECTED;

	spin_unlock_irqrestore(&ch->lock, *irq_flags);

	/* wake all idle kthreads so they can exit */
	if (atomic_read(&ch->kthreads_idle) > 0) {
		wake_up_all(&ch->idle_wq);

	} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		   !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		/* start a kthread that will do the xpDisconnecting callout */
		xpc_create_kthreads(ch, 1, 1);
	}

	/* wake those waiting to allocate an entry from the local msg queue */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);

	spin_lock_irqsave(&ch->lock, *irq_flags);
}

void
xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason)
{
	/*
	 * Let the channel's registerer know that the channel is being
	 * disconnected. We don't want to do this if the registerer was never
	 * informed of a connection being made.
	 */

	if (ch->func != NULL) {
		dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
			"channel=%d\n", reason, ch->partid, ch->number);

		ch->func(reason, ch->partid, ch->number, NULL, ch->key);

		dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
			"channel=%d\n", reason, ch->partid, ch->number);
	}
}

/*
 * Wait for a message entry to become available for the specified channel,
 * but don't wait any longer than 1 jiffy.
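 *
 * Returns ch->reason if the channel starts disconnecting, xpTimeout if the
 * jiffy elapses with no entry freed, or xpInterrupted if the sleep was
 * interrupted (see the mapping at the bottom of the function).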
 */
enum xp_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
	enum xp_retval ret;

	if (ch->flags & XPC_C_DISCONNECTING) {
		DBUG_ON(ch->reason == xpInterrupted);
		return ch->reason;
	}

	atomic_inc(&ch->n_on_msg_allocate_wq);
	ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
	atomic_dec(&ch->n_on_msg_allocate_wq);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		DBUG_ON(ch->reason == xpInterrupted);
	} else if (ret == 0) {
		ret = xpTimeout;
	} else {
		ret = xpInterrupted;
	}

	return ret;
}

/*
 * Send a message that contains the user's payload on the specified channel
 * connected to the specified partition.
 *
 * NOTE that this routine can sleep waiting for a message entry to become
 * available. To not sleep, pass in the XPC_NOWAIT flag.
 *
 * Once sent, this routine will not wait for the message to be received, nor
 * will notification be given when it does happen.
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # to send message on.
 *	flags - see xp.h for valid flags.
 *	payload - pointer to the payload which is to be sent.
 *	payload_size - size of the payload in bytes.
 */
enum xp_retval
xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload,
		  u16 payload_size)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	enum xp_retval ret = xpUnknownReason;

	dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
		partid, ch_number);

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
	DBUG_ON(payload == NULL);

	if (xpc_part_ref(part)) {
		ret = xpc_send_msg(&part->channels[ch_number], flags, payload,
				   payload_size, 0, NULL, NULL);
		xpc_part_deref(part);
	}

	return ret;
}
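
/*
 * A minimal usage sketch (hypothetical caller code, not part of XPC): send a
 * payload without blocking, retrying later if no message entry is free.
 *
 *	enum xp_retval ret;
 *
 *	ret = xpc_initiate_send(partid, ch_number, XPC_NOWAIT,
 *				&my_payload, sizeof(my_payload));
 *	if (ret != xpSuccess)
 *		// no entry was available or the channel isn't usable;
 *		// the caller might retry later
 */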

/*
 * Send a message that contains the user's payload on the specified channel
 * connected to the specified partition.
 *
 * NOTE that this routine can sleep waiting for a message entry to become
 * available. To not sleep, pass in the XPC_NOWAIT flag.
 *
 * This routine will not wait for the message to be sent or received.
 *
 * Once the remote end of the channel has received the message, the function
 * passed as an argument to xpc_initiate_send_notify() will be called. This
 * allows the sender to free up or re-use any buffers referenced by the
 * message, but does NOT mean the message has been processed at the remote
 * end by a receiver.
 *
 * If this routine returns an error, the caller's function will NOT be called.
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # to send message on.
 *	flags - see xp.h for valid flags.
 *	payload - pointer to the payload which is to be sent.
 *	payload_size - size of the payload in bytes.
 *	func - function to call with asynchronous notification of message
 *		receipt. THIS FUNCTION MUST BE NON-BLOCKING.
 *	key - user-defined key to be passed to the function when it's called.
 */
enum xp_retval
xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload,
			 u16 payload_size, xpc_notify_func func, void *key)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	enum xp_retval ret = xpUnknownReason;

	dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
		partid, ch_number);

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
	DBUG_ON(payload == NULL);
	DBUG_ON(func == NULL);

	if (xpc_part_ref(part)) {
		ret = xpc_send_msg(&part->channels[ch_number], flags, payload,
				   payload_size, XPC_N_CALL, func, key);
		xpc_part_deref(part);
	}
	return ret;
}
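
/*
 * A minimal usage sketch (hypothetical caller code): send with a completion
 * callback. The callback must not block, and its parameter list here is an
 * assumption based on the xpc_notify_func typedef in xp.h:
 *
 *	static void
 *	my_notify_func(enum xp_retval reason, short partid, int ch_number,
 *		       void *key)
 *	{
 *		// the buffer passed as payload may now be freed or reused
 *	}
 *
 *	ret = xpc_initiate_send_notify(partid, ch_number, XPC_NOWAIT,
 *				       buf, buf_size, my_notify_func, buf);
 */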

/*
 * Deliver a message to its intended recipient.
 */
void
xpc_deliver_msg(struct xpc_channel *ch)
{
	struct xpc_msg *msg;

	msg = xpc_get_deliverable_msg(ch);
	if (msg != NULL) {

		/*
		 * This ref is taken to protect the payload itself from being
		 * freed before the user is finished with it, which the user
		 * indicates by calling xpc_initiate_received().
		 */
		xpc_msgqueue_ref(ch);

		atomic_inc(&ch->kthreads_active);

		if (ch->func != NULL) {
			dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, "
				"msg_number=%ld, partid=%d, channel=%d\n",
				msg, (signed long)msg->number, ch->partid,
				ch->number);

			/* deliver the message to its intended recipient */
			ch->func(xpMsgReceived, ch->partid, ch->number,
				 &msg->payload, ch->key);

			dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, "
				"msg_number=%ld, partid=%d, channel=%d\n",
				msg, (signed long)msg->number, ch->partid,
				ch->number);
		}

		atomic_dec(&ch->kthreads_active);
	}
}

/*
 * Acknowledge receipt of a delivered message.
 *
 * If a message has XPC_M_INTERRUPT set, send an interrupt to the partition
 * that sent the message.
 *
 * This function, although called by users, does not call xpc_part_ref() to
 * ensure that the partition infrastructure is in place. It relies on the
 * fact that we called xpc_msgqueue_ref() in xpc_deliver_msg().
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # message received on.
 *	payload - pointer to the payload area allocated via
 *			xpc_initiate_send() or xpc_initiate_send_notify().
 */
void
xpc_initiate_received(short partid, int ch_number, void *payload)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;
	struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);

	ch = &part->channels[ch_number];
	xpc_received_msg(ch, msg);

	/* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */
	xpc_msgqueue_deref(ch);
}
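
/*
 * Receive-side round trip, for reference: xpc_deliver_msg() above hands the
 * registerer's channel function a pointer to the message payload with reason
 * xpMsgReceived; once the registerer is done with that payload it must hand
 * it back via xpc_initiate_received() so the message entry can be recycled.
 * A hypothetical sketch, where process_payload() is caller-defined:
 *
 *	static void
 *	my_channel_func(enum xp_retval reason, short partid, int ch_number,
 *			void *data, void *key)
 *	{
 *		if (reason == xpMsgReceived) {
 *			process_payload(data);
 *			xpc_initiate_received(partid, ch_number, data);
 *		}
 *	}
 */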