xpc_channel.c 60 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
  7. */
  8. /*
  9. * Cross Partition Communication (XPC) channel support.
  10. *
  11. * This is the part of XPC that manages the channels and
  12. * sends/receives messages across them to/from other partitions.
  13. *
  14. */
  15. #include <linux/kernel.h>
  16. #include <linux/init.h>
  17. #include <linux/sched.h>
  18. #include <linux/cache.h>
  19. #include <linux/interrupt.h>
  20. #include <linux/slab.h>
  21. #include <linux/mutex.h>
  22. #include <linux/completion.h>
  23. #include <asm/sn/bte.h>
  24. #include <asm/sn/sn_sal.h>
  25. #include <asm/sn/xpc.h>
  26. /*
  27. * Set up the initial values for the XPartition Communication channels.
  28. */
  29. static void
  30. xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
  31. {
  32. int ch_number;
  33. struct xpc_channel *ch;
  34. for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
  35. ch = &part->channels[ch_number];
  36. ch->partid = partid;
  37. ch->number = ch_number;
  38. ch->flags = XPC_C_DISCONNECTED;
  39. ch->local_GP = &part->local_GPs[ch_number];
  40. ch->local_openclose_args =
  41. &part->local_openclose_args[ch_number];
  42. atomic_set(&ch->kthreads_assigned, 0);
  43. atomic_set(&ch->kthreads_idle, 0);
  44. atomic_set(&ch->kthreads_active, 0);
  45. atomic_set(&ch->references, 0);
  46. atomic_set(&ch->n_to_notify, 0);
  47. spin_lock_init(&ch->lock);
  48. mutex_init(&ch->msg_to_pull_mutex);
  49. init_completion(&ch->wdisconnect_wait);
  50. atomic_set(&ch->n_on_msg_allocate_wq, 0);
  51. init_waitqueue_head(&ch->msg_allocate_wq);
  52. init_waitqueue_head(&ch->idle_wq);
  53. }
  54. }
  55. /*
  56. * Setup the infrastructure necessary to support XPartition Communication
  57. * between the specified remote partition and the local one.
  58. */
  59. enum xpc_retval
  60. xpc_setup_infrastructure(struct xpc_partition *part)
  61. {
  62. int ret, cpuid;
  63. struct timer_list *timer;
  64. partid_t partid = XPC_PARTID(part);
  65. /*
  66. * Zero out MOST of the entry for this partition. Only the fields
  67. * starting with `nchannels' will be zeroed. The preceding fields must
  68. * remain `viable' across partition ups and downs, since they may be
  69. * referenced during this memset() operation.
  70. */
  71. memset(&part->nchannels, 0, sizeof(struct xpc_partition) -
  72. offsetof(struct xpc_partition, nchannels));
  73. /*
  74. * Allocate all of the channel structures as a contiguous chunk of
  75. * memory.
  76. */
  77. part->channels = kmalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS,
  78. GFP_KERNEL);
  79. if (part->channels == NULL) {
  80. dev_err(xpc_chan, "can't get memory for channels\n");
  81. return xpcNoMemory;
  82. }
  83. memset(part->channels, 0, sizeof(struct xpc_channel) * XPC_NCHANNELS);
  84. part->nchannels = XPC_NCHANNELS;
  85. /* allocate all the required GET/PUT values */
  86. part->local_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE,
  87. GFP_KERNEL, &part->local_GPs_base);
  88. if (part->local_GPs == NULL) {
  89. kfree(part->channels);
  90. part->channels = NULL;
  91. dev_err(xpc_chan, "can't get memory for local get/put "
  92. "values\n");
  93. return xpcNoMemory;
  94. }
  95. memset(part->local_GPs, 0, XPC_GP_SIZE);
  96. part->remote_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE,
  97. GFP_KERNEL, &part->remote_GPs_base);
  98. if (part->remote_GPs == NULL) {
  99. kfree(part->channels);
  100. part->channels = NULL;
  101. kfree(part->local_GPs_base);
  102. part->local_GPs = NULL;
  103. dev_err(xpc_chan, "can't get memory for remote get/put "
  104. "values\n");
  105. return xpcNoMemory;
  106. }
  107. memset(part->remote_GPs, 0, XPC_GP_SIZE);
  108. /* allocate all the required open and close args */
  109. part->local_openclose_args = xpc_kmalloc_cacheline_aligned(
  110. XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
  111. &part->local_openclose_args_base);
  112. if (part->local_openclose_args == NULL) {
  113. kfree(part->channels);
  114. part->channels = NULL;
  115. kfree(part->local_GPs_base);
  116. part->local_GPs = NULL;
  117. kfree(part->remote_GPs_base);
  118. part->remote_GPs = NULL;
  119. dev_err(xpc_chan, "can't get memory for local connect args\n");
  120. return xpcNoMemory;
  121. }
  122. memset(part->local_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE);
  123. part->remote_openclose_args = xpc_kmalloc_cacheline_aligned(
  124. XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
  125. &part->remote_openclose_args_base);
  126. if (part->remote_openclose_args == NULL) {
  127. kfree(part->channels);
  128. part->channels = NULL;
  129. kfree(part->local_GPs_base);
  130. part->local_GPs = NULL;
  131. kfree(part->remote_GPs_base);
  132. part->remote_GPs = NULL;
  133. kfree(part->local_openclose_args_base);
  134. part->local_openclose_args = NULL;
  135. dev_err(xpc_chan, "can't get memory for remote connect args\n");
  136. return xpcNoMemory;
  137. }
  138. memset(part->remote_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE);
  139. xpc_initialize_channels(part, partid);
  140. atomic_set(&part->nchannels_active, 0);
  141. atomic_set(&part->nchannels_engaged, 0);
  142. /* local_IPI_amo were set to 0 by an earlier memset() */
  143. /* Initialize this partitions AMO_t structure */
  144. part->local_IPI_amo_va = xpc_IPI_init(partid);
  145. spin_lock_init(&part->IPI_lock);
  146. atomic_set(&part->channel_mgr_requests, 1);
  147. init_waitqueue_head(&part->channel_mgr_wq);
  148. sprintf(part->IPI_owner, "xpc%02d", partid);
  149. ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, SA_SHIRQ,
  150. part->IPI_owner, (void *) (u64) partid);
  151. if (ret != 0) {
  152. kfree(part->channels);
  153. part->channels = NULL;
  154. kfree(part->local_GPs_base);
  155. part->local_GPs = NULL;
  156. kfree(part->remote_GPs_base);
  157. part->remote_GPs = NULL;
  158. kfree(part->local_openclose_args_base);
  159. part->local_openclose_args = NULL;
  160. kfree(part->remote_openclose_args_base);
  161. part->remote_openclose_args = NULL;
  162. dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
  163. "errno=%d\n", -ret);
  164. return xpcLackOfResources;
  165. }
  166. /* Setup a timer to check for dropped IPIs */
  167. timer = &part->dropped_IPI_timer;
  168. init_timer(timer);
  169. timer->function = (void (*)(unsigned long)) xpc_dropped_IPI_check;
  170. timer->data = (unsigned long) part;
  171. timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT;
  172. add_timer(timer);
  173. /*
  174. * With the setting of the partition setup_state to XPC_P_SETUP, we're
  175. * declaring that this partition is ready to go.
  176. */
  177. part->setup_state = XPC_P_SETUP;
  178. /*
  179. * Setup the per partition specific variables required by the
  180. * remote partition to establish channel connections with us.
  181. *
  182. * The setting of the magic # indicates that these per partition
  183. * specific variables are ready to be used.
  184. */
  185. xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs);
  186. xpc_vars_part[partid].openclose_args_pa =
  187. __pa(part->local_openclose_args);
  188. xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va);
  189. cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */
  190. xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid);
  191. xpc_vars_part[partid].IPI_phys_cpuid = cpu_physical_id(cpuid);
  192. xpc_vars_part[partid].nchannels = part->nchannels;
  193. xpc_vars_part[partid].magic = XPC_VP_MAGIC1;
  194. return xpcSuccess;
  195. }
  196. /*
  197. * Create a wrapper that hides the underlying mechanism for pulling a cacheline
  198. * (or multiple cachelines) from a remote partition.
  199. *
  200. * src must be a cacheline aligned physical address on the remote partition.
  201. * dst must be a cacheline aligned virtual address on this partition.
  202. * cnt must be an cacheline sized
  203. */
  204. static enum xpc_retval
  205. xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
  206. const void *src, size_t cnt)
  207. {
  208. bte_result_t bte_ret;
  209. DBUG_ON((u64) src != L1_CACHE_ALIGN((u64) src));
  210. DBUG_ON((u64) dst != L1_CACHE_ALIGN((u64) dst));
  211. DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));
  212. if (part->act_state == XPC_P_DEACTIVATING) {
  213. return part->reason;
  214. }
  215. bte_ret = xp_bte_copy((u64) src, (u64) ia64_tpa((u64) dst),
  216. (u64) cnt, (BTE_NORMAL | BTE_WACQUIRE), NULL);
  217. if (bte_ret == BTE_SUCCESS) {
  218. return xpcSuccess;
  219. }
  220. dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n",
  221. XPC_PARTID(part), bte_ret);
  222. return xpc_map_bte_errors(bte_ret);
  223. }
/*
 * Pull the remote per partition specific variables from the specified
 * partition.
 *
 * Returns xpcSuccess once both sides have published and pulled each
 * other's variables, xpcRetry while the handshake is still in progress,
 * or an error (xpcBadMagic / xpcInvalidAddress / pull failure).
 */
enum xpc_retval
xpc_pull_remote_vars_part(struct xpc_partition *part)
{
	/* oversized buffer guarantees a cacheline-aligned region inside it */
	u8 buffer[L1_CACHE_BYTES * 2];
	struct xpc_vars_part *pulled_entry_cacheline =
			(struct xpc_vars_part *) L1_CACHE_ALIGN((u64) buffer);
	struct xpc_vars_part *pulled_entry;
	u64 remote_entry_cacheline_pa, remote_entry_pa;
	partid_t partid = XPC_PARTID(part);
	enum xpc_retval ret;

	/* pull the cacheline that contains the variables we're interested in */

	DBUG_ON(part->remote_vars_part_pa !=
				L1_CACHE_ALIGN(part->remote_vars_part_pa));
	DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2);

	/* our entry within the remote partition's xpc_vars_part[] array */
	remote_entry_pa = part->remote_vars_part_pa +
			sn_partition_id * sizeof(struct xpc_vars_part);

	/* round down to the start of the containing cacheline */
	remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));

	/* corresponding offset of the entry within the pulled cacheline */
	pulled_entry = (struct xpc_vars_part *) ((u64) pulled_entry_cacheline +
				(remote_entry_pa & (L1_CACHE_BYTES - 1)));

	ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline,
				(void *) remote_entry_cacheline_pa,
				L1_CACHE_BYTES);
	if (ret != xpcSuccess) {
		dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
			"partition %d, ret=%d\n", partid, ret);
		return ret;
	}

	/* see if they've been set up yet */

	if (pulled_entry->magic != XPC_VP_MAGIC1 &&
				pulled_entry->magic != XPC_VP_MAGIC2) {

		if (pulled_entry->magic != 0) {
			/* anything other than 0 or a magic # is corruption */
			dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
				"partition %d has bad magic value (=0x%lx)\n",
				partid, sn_partition_id, pulled_entry->magic);
			return xpcBadMagic;
		}

		/* they've not been initialized yet */
		return xpcRetry;
	}

	if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) {

		/* validate the variables */

		if (pulled_entry->GPs_pa == 0 ||
				pulled_entry->openclose_args_pa == 0 ||
					pulled_entry->IPI_amo_pa == 0) {

			dev_err(xpc_chan, "partition %d's XPC vars_part for "
				"partition %d are not valid\n", partid,
				sn_partition_id);
			return xpcInvalidAddress;
		}

		/* the variables we imported look to be valid */

		part->remote_GPs_pa = pulled_entry->GPs_pa;
		part->remote_openclose_args_pa =
					pulled_entry->openclose_args_pa;
		part->remote_IPI_amo_va =
				(AMO_t *) __va(pulled_entry->IPI_amo_pa);
		part->remote_IPI_nasid = pulled_entry->IPI_nasid;
		part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid;

		/* both sides agree on the smaller of the two channel counts */
		if (part->nchannels > pulled_entry->nchannels) {
			part->nchannels = pulled_entry->nchannels;
		}

		/* let the other side know that we've pulled their variables */

		xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
	}

	if (pulled_entry->magic == XPC_VP_MAGIC1) {
		/* the other side hasn't pulled our variables yet; retry */
		return xpcRetry;
	}

	return xpcSuccess;
}
  296. /*
  297. * Get the IPI flags and pull the openclose args and/or remote GPs as needed.
  298. */
  299. static u64
  300. xpc_get_IPI_flags(struct xpc_partition *part)
  301. {
  302. unsigned long irq_flags;
  303. u64 IPI_amo;
  304. enum xpc_retval ret;
  305. /*
  306. * See if there are any IPI flags to be handled.
  307. */
  308. spin_lock_irqsave(&part->IPI_lock, irq_flags);
  309. if ((IPI_amo = part->local_IPI_amo) != 0) {
  310. part->local_IPI_amo = 0;
  311. }
  312. spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
  313. if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) {
  314. ret = xpc_pull_remote_cachelines(part,
  315. part->remote_openclose_args,
  316. (void *) part->remote_openclose_args_pa,
  317. XPC_OPENCLOSE_ARGS_SIZE);
  318. if (ret != xpcSuccess) {
  319. XPC_DEACTIVATE_PARTITION(part, ret);
  320. dev_dbg(xpc_chan, "failed to pull openclose args from "
  321. "partition %d, ret=%d\n", XPC_PARTID(part),
  322. ret);
  323. /* don't bother processing IPIs anymore */
  324. IPI_amo = 0;
  325. }
  326. }
  327. if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) {
  328. ret = xpc_pull_remote_cachelines(part, part->remote_GPs,
  329. (void *) part->remote_GPs_pa,
  330. XPC_GP_SIZE);
  331. if (ret != xpcSuccess) {
  332. XPC_DEACTIVATE_PARTITION(part, ret);
  333. dev_dbg(xpc_chan, "failed to pull GPs from partition "
  334. "%d, ret=%d\n", XPC_PARTID(part), ret);
  335. /* don't bother processing IPIs anymore */
  336. IPI_amo = 0;
  337. }
  338. }
  339. return IPI_amo;
  340. }
  341. /*
  342. * Allocate the local message queue and the notify queue.
  343. */
  344. static enum xpc_retval
  345. xpc_allocate_local_msgqueue(struct xpc_channel *ch)
  346. {
  347. unsigned long irq_flags;
  348. int nentries;
  349. size_t nbytes;
  350. // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
  351. // >>> iterations of the for-loop, bail if set?
  352. // >>> should we impose a minumum #of entries? like 4 or 8?
  353. for (nentries = ch->local_nentries; nentries > 0; nentries--) {
  354. nbytes = nentries * ch->msg_size;
  355. ch->local_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
  356. GFP_KERNEL,
  357. &ch->local_msgqueue_base);
  358. if (ch->local_msgqueue == NULL) {
  359. continue;
  360. }
  361. memset(ch->local_msgqueue, 0, nbytes);
  362. nbytes = nentries * sizeof(struct xpc_notify);
  363. ch->notify_queue = kmalloc(nbytes, GFP_KERNEL);
  364. if (ch->notify_queue == NULL) {
  365. kfree(ch->local_msgqueue_base);
  366. ch->local_msgqueue = NULL;
  367. continue;
  368. }
  369. memset(ch->notify_queue, 0, nbytes);
  370. spin_lock_irqsave(&ch->lock, irq_flags);
  371. if (nentries < ch->local_nentries) {
  372. dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, "
  373. "partid=%d, channel=%d\n", nentries,
  374. ch->local_nentries, ch->partid, ch->number);
  375. ch->local_nentries = nentries;
  376. }
  377. spin_unlock_irqrestore(&ch->lock, irq_flags);
  378. return xpcSuccess;
  379. }
  380. dev_dbg(xpc_chan, "can't get memory for local message queue and notify "
  381. "queue, partid=%d, channel=%d\n", ch->partid, ch->number);
  382. return xpcNoMemory;
  383. }
  384. /*
  385. * Allocate the cached remote message queue.
  386. */
  387. static enum xpc_retval
  388. xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
  389. {
  390. unsigned long irq_flags;
  391. int nentries;
  392. size_t nbytes;
  393. DBUG_ON(ch->remote_nentries <= 0);
  394. // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
  395. // >>> iterations of the for-loop, bail if set?
  396. // >>> should we impose a minumum #of entries? like 4 or 8?
  397. for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
  398. nbytes = nentries * ch->msg_size;
  399. ch->remote_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
  400. GFP_KERNEL,
  401. &ch->remote_msgqueue_base);
  402. if (ch->remote_msgqueue == NULL) {
  403. continue;
  404. }
  405. memset(ch->remote_msgqueue, 0, nbytes);
  406. spin_lock_irqsave(&ch->lock, irq_flags);
  407. if (nentries < ch->remote_nentries) {
  408. dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, "
  409. "partid=%d, channel=%d\n", nentries,
  410. ch->remote_nentries, ch->partid, ch->number);
  411. ch->remote_nentries = nentries;
  412. }
  413. spin_unlock_irqrestore(&ch->lock, irq_flags);
  414. return xpcSuccess;
  415. }
  416. dev_dbg(xpc_chan, "can't get memory for cached remote message queue, "
  417. "partid=%d, channel=%d\n", ch->partid, ch->number);
  418. return xpcNoMemory;
  419. }
  420. /*
  421. * Allocate message queues and other stuff associated with a channel.
  422. *
  423. * Note: Assumes all of the channel sizes are filled in.
  424. */
  425. static enum xpc_retval
  426. xpc_allocate_msgqueues(struct xpc_channel *ch)
  427. {
  428. unsigned long irq_flags;
  429. enum xpc_retval ret;
  430. DBUG_ON(ch->flags & XPC_C_SETUP);
  431. if ((ret = xpc_allocate_local_msgqueue(ch)) != xpcSuccess) {
  432. return ret;
  433. }
  434. if ((ret = xpc_allocate_remote_msgqueue(ch)) != xpcSuccess) {
  435. kfree(ch->local_msgqueue_base);
  436. ch->local_msgqueue = NULL;
  437. kfree(ch->notify_queue);
  438. ch->notify_queue = NULL;
  439. return ret;
  440. }
  441. spin_lock_irqsave(&ch->lock, irq_flags);
  442. ch->flags |= XPC_C_SETUP;
  443. spin_unlock_irqrestore(&ch->lock, irq_flags);
  444. return xpcSuccess;
  445. }
/*
 * Process a connect message from a remote partition.
 *
 * Note: xpc_process_connect() is expecting to be called with the
 * spin_lock_irqsave held and will leave it locked upon return.
 * The lock IS dropped and reacquired around the msgqueue allocation and
 * the kthread creation, so channel flags must be re-examined after each
 * reacquisition.
 */
static void
xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	enum xpc_retval ret;

	DBUG_ON(!spin_is_locked(&ch->lock));

	/* both sides must have requested the open before we can proceed */
	if (!(ch->flags & XPC_C_OPENREQUEST) ||
				!(ch->flags & XPC_C_ROPENREQUEST)) {
		/* nothing more to do for now */
		return;
	}
	DBUG_ON(!(ch->flags & XPC_C_CONNECTING));

	if (!(ch->flags & XPC_C_SETUP)) {
		/* allocation may sleep, so drop the lock around it */
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		ret = xpc_allocate_msgqueues(ch);
		spin_lock_irqsave(&ch->lock, *irq_flags);

		if (ret != xpcSuccess) {
			XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
		}
		/* state may have changed while the lock was dropped */
		if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) {
			return;
		}

		DBUG_ON(!(ch->flags & XPC_C_SETUP));
		DBUG_ON(ch->local_msgqueue == NULL);
		DBUG_ON(ch->remote_msgqueue == NULL);
	}

	/* tell the other side we're ready (only once) */
	if (!(ch->flags & XPC_C_OPENREPLY)) {
		ch->flags |= XPC_C_OPENREPLY;
		xpc_IPI_send_openreply(ch, irq_flags);
	}

	/* wait until the other side has also replied */
	if (!(ch->flags & XPC_C_ROPENREPLY)) {
		return;
	}

	DBUG_ON(ch->remote_msgqueue_pa == 0);

	ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP);	/* clear all else */

	dev_info(xpc_chan, "channel %d to partition %d connected\n",
		ch->number, ch->partid);

	/* kthread creation may sleep; drop the lock around it */
	spin_unlock_irqrestore(&ch->lock, *irq_flags);
	xpc_create_kthreads(ch, 1);
	spin_lock_irqsave(&ch->lock, *irq_flags);
}
/*
 * Notify those who wanted to be notified upon delivery of their message.
 *
 * Walks the notify_queue entries for message numbers in the range
 * [ch->w_remote_GP.get, put) and invokes the registered callback (if any)
 * with `reason' for each entry it wins ownership of.
 */
static void
xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
{
	struct xpc_notify *notify;
	u8 notify_type;
	s64 get = ch->w_remote_GP.get - 1;

	while (++get < put && atomic_read(&ch->n_to_notify) > 0) {

		notify = &ch->notify_queue[get % ch->local_nentries];

		/*
		 * See if the notify entry indicates it was associated with
		 * a message who's sender wants to be notified. It is possible
		 * that it is, but someone else is doing or has done the
		 * notification.
		 */
		notify_type = notify->type;
		if (notify_type == 0 ||
				cmpxchg(&notify->type, notify_type, 0) !=
								notify_type) {
			/* lost the race; someone else owns this notification */
			continue;
		}

		DBUG_ON(notify_type != XPC_N_CALL);

		atomic_dec(&ch->n_to_notify);

		if (notify->func != NULL) {
			dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
				"msg_number=%ld, partid=%d, channel=%d\n",
				(void *) notify, get, ch->partid, ch->number);

			notify->func(reason, ch->partid, ch->number,
								notify->key);

			dev_dbg(xpc_chan, "notify->func() returned, "
				"notify=0x%p, msg_number=%ld, partid=%d, "
				"channel=%d\n", (void *) notify, get,
				ch->partid, ch->number);
		}
	}
}
/*
 * Free up message queues and other stuff that were allocated for the specified
 * channel.
 *
 * Note: ch->reason and ch->reason_line are left set for debugging purposes,
 * they're cleared when XPC_C_DISCONNECTED is cleared.
 *
 * Called with ch->lock held and with no notifications outstanding.
 */
static void
xpc_free_msgqueues(struct xpc_channel *ch)
{
	DBUG_ON(!spin_is_locked(&ch->lock));
	DBUG_ON(atomic_read(&ch->n_to_notify) != 0);

	/* forget the remote queue and the user's callback registration */
	ch->remote_msgqueue_pa = 0;
	ch->func = NULL;
	ch->key = NULL;

	/* reset the negotiated sizes and kthread limits */
	ch->msg_size = 0;
	ch->local_nentries = 0;
	ch->remote_nentries = 0;
	ch->kthreads_assigned_limit = 0;
	ch->kthreads_idle_limit = 0;

	/* rewind every GET/PUT value (actual and cached/working copies) */
	ch->local_GP->get = 0;
	ch->local_GP->put = 0;
	ch->remote_GP.get = 0;
	ch->remote_GP.put = 0;
	ch->w_local_GP.get = 0;
	ch->w_local_GP.put = 0;
	ch->w_remote_GP.get = 0;
	ch->w_remote_GP.put = 0;
	ch->next_msg_to_pull = 0;

	/* only free the queues if they were actually allocated (SETUP set) */
	if (ch->flags & XPC_C_SETUP) {
		ch->flags &= ~XPC_C_SETUP;

		dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n",
			ch->flags, ch->partid, ch->number);

		kfree(ch->local_msgqueue_base);
		ch->local_msgqueue = NULL;
		kfree(ch->remote_msgqueue_base);
		ch->remote_msgqueue = NULL;
		kfree(ch->notify_queue);
		ch->notify_queue = NULL;
	}
}
/*
 * Drive a disconnecting channel through the close protocol and, once both
 * sides have closed, tear down its resources.
 *
 * spin_lock_irqsave() is expected to be held on entry.
 * The lock is dropped and reacquired around the disconnect callout.
 */
static void
xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_DISCONNECTING)) {
		return;
	}

	DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

	/* make sure all activity has settled down first */

	if (atomic_read(&ch->references) > 0 ||
			((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
			!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE))) {
		return;
	}
	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);

	if (part->act_state == XPC_P_DEACTIVATING) {
		/* can't proceed until the other side disengages from us */
		if (xpc_partition_engaged(1UL << ch->partid)) {
			return;
		}

	} else {

		/* as long as the other side is up do the full protocol */

		if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
			return;
		}

		if (!(ch->flags & XPC_C_CLOSEREPLY)) {
			ch->flags |= XPC_C_CLOSEREPLY;
			xpc_IPI_send_closereply(ch, irq_flags);
		}

		if (!(ch->flags & XPC_C_RCLOSEREPLY)) {
			return;
		}
	}

	/* wake those waiting for notify completion */

	if (atomic_read(&ch->n_to_notify) > 0) {
		/* >>> we do callout while holding ch->lock */
		xpc_notify_senders(ch, ch->reason, ch->w_local_GP.put);
	}

	/* both sides are disconnected now */

	if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) {
		/* the callout may sleep, so drop the lock around it */
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		xpc_disconnect_callout(ch, xpcDisconnected);
		spin_lock_irqsave(&ch->lock, *irq_flags);
	}

	/* it's now safe to free the channel's message queues */
	xpc_free_msgqueues(ch);

	/* mark disconnected, clear all other flags except XPC_C_WDISCONNECT */
	ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));

	atomic_dec(&part->nchannels_active);

	if (channel_was_connected) {
		dev_info(xpc_chan, "channel %d to partition %d disconnected, "
			"reason=%d\n", ch->number, ch->partid, ch->reason);
	}

	if (ch->flags & XPC_C_WDISCONNECT) {
		/* we won't lose the CPU since we're holding ch->lock */
		complete(&ch->wdisconnect_wait);
	} else if (ch->delayed_IPI_flags) {
		if (part->act_state != XPC_P_DEACTIVATING) {
			/* time to take action on any delayed IPI flags */
			spin_lock(&part->IPI_lock);
			XPC_SET_IPI_FLAGS(part->local_IPI_amo, ch->number,
							ch->delayed_IPI_flags);
			spin_unlock(&part->IPI_lock);
		}
		ch->delayed_IPI_flags = 0;
	}
}
/*
 * Process a change in the channel's remote connection state, as signalled
 * by the open/close IPI flags for this channel.
 *
 * Takes and releases ch->lock; any early return path releases it first.
 */
static void
xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
				u8 IPI_flags)
{
	unsigned long irq_flags;
	struct xpc_openclose_args *args =
			&part->remote_openclose_args[ch_number];
	struct xpc_channel *ch = &part->channels[ch_number];
	enum xpc_retval reason;

	spin_lock_irqsave(&ch->lock, irq_flags);

again:

	if ((ch->flags & XPC_C_DISCONNECTED) &&
				(ch->flags & XPC_C_WDISCONNECT)) {
		/*
		 * Delay processing IPI flags until thread waiting disconnect
		 * has had a chance to see that the channel is disconnected.
		 */
		ch->delayed_IPI_flags |= IPI_flags;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	if (IPI_flags & XPC_IPI_CLOSEREQUEST) {

		dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received "
			"from partid=%d, channel=%d\n", args->reason,
			ch->partid, ch->number);

		/*
		 * If RCLOSEREQUEST is set, we're probably waiting for
		 * RCLOSEREPLY. We should find it and a ROPENREQUEST packed
		 * with this RCLOSEREQUEST in the IPI_flags.
		 */

		if (ch->flags & XPC_C_RCLOSEREQUEST) {
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
			DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);

			DBUG_ON(!(IPI_flags & XPC_IPI_CLOSEREPLY));
			IPI_flags &= ~XPC_IPI_CLOSEREPLY;
			ch->flags |= XPC_C_RCLOSEREPLY;

			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
			/* reprocess remaining flags now that we're disconnected */
			goto again;
		}

		if (ch->flags & XPC_C_DISCONNECTED) {
			if (!(IPI_flags & XPC_IPI_OPENREQUEST)) {
				if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo,
					 ch_number) & XPC_IPI_OPENREQUEST)) {

					DBUG_ON(ch->delayed_IPI_flags != 0);
					spin_lock(&part->IPI_lock);
					XPC_SET_IPI_FLAGS(part->local_IPI_amo,
							ch_number,
							XPC_IPI_CLOSEREQUEST);
					spin_unlock(&part->IPI_lock);
				}
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				return;
			}

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
			ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
		}

		IPI_flags &= ~(XPC_IPI_OPENREQUEST | XPC_IPI_OPENREPLY);

		/*
		 * The meaningful CLOSEREQUEST connection state fields are:
		 *      reason = reason connection is to be closed
		 */

		ch->flags |= XPC_C_RCLOSEREQUEST;

		if (!(ch->flags & XPC_C_DISCONNECTING)) {
			reason = args->reason;
			/* sanitize the remote-supplied reason code */
			if (reason <= xpcSuccess || reason > xpcUnknownReason) {
				reason = xpcUnknownReason;
			} else if (reason == xpcUnregistering) {
				reason = xpcOtherUnregistering;
			}

			XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

			DBUG_ON(IPI_flags & XPC_IPI_CLOSEREPLY);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		xpc_process_disconnect(ch, &irq_flags);
	}

	if (IPI_flags & XPC_IPI_CLOSEREPLY) {

		dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d,"
			" channel=%d\n", ch->partid, ch->number);

		if (ch->flags & XPC_C_DISCONNECTED) {
			DBUG_ON(part->act_state != XPC_P_DEACTIVATING);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

		if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
			if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo, ch_number)
						& XPC_IPI_CLOSEREQUEST)) {

				DBUG_ON(ch->delayed_IPI_flags != 0);
				spin_lock(&part->IPI_lock);
				XPC_SET_IPI_FLAGS(part->local_IPI_amo,
						ch_number, XPC_IPI_CLOSEREPLY);
				spin_unlock(&part->IPI_lock);
			}
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		ch->flags |= XPC_C_RCLOSEREPLY;

		if (ch->flags & XPC_C_CLOSEREPLY) {
			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
		}
	}

	if (IPI_flags & XPC_IPI_OPENREQUEST) {

		dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, "
			"local_nentries=%d) received from partid=%d, "
			"channel=%d\n", args->msg_size, args->local_nentries,
			ch->partid, ch->number);

		if (part->act_state == XPC_P_DEACTIVATING ||
					(ch->flags & XPC_C_ROPENREQUEST)) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) {
			/* defer until the current disconnect completes */
			ch->delayed_IPI_flags |= XPC_IPI_OPENREQUEST;
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}
		DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
							XPC_C_OPENREQUEST)));
		DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
					XPC_C_OPENREPLY | XPC_C_CONNECTED));

		/*
		 * The meaningful OPENREQUEST connection state fields are:
		 *      msg_size = size of channel's messages in bytes
		 *      local_nentries = remote partition's local_nentries
		 */
		if (args->msg_size == 0 || args->local_nentries == 0) {
			/* assume OPENREQUEST was delayed by mistake */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
		ch->remote_nentries = args->local_nentries;

		if (ch->flags & XPC_C_OPENREQUEST) {
			if (args->msg_size != ch->msg_size) {
				/* both sides must agree on the message size */
				XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
								&irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				return;
			}
		} else {
			ch->msg_size = args->msg_size;

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
		}

		xpc_process_connect(ch, &irq_flags);
	}

	if (IPI_flags & XPC_IPI_OPENREPLY) {

		dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, "
			"local_nentries=%d, remote_nentries=%d) received from "
			"partid=%d, channel=%d\n", args->local_msgqueue_pa,
			args->local_nentries, args->remote_nentries,
			ch->partid, ch->number);

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}
		if (!(ch->flags & XPC_C_OPENREQUEST)) {
			/* an OPENREPLY with no outstanding OPENREQUEST */
			XPC_DISCONNECT_CHANNEL(ch, xpcOpenCloseError,
								&irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return;
		}

		DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
		DBUG_ON(ch->flags & XPC_C_CONNECTED);

		/*
		 * The meaningful OPENREPLY connection state fields are:
		 *      local_msgqueue_pa = physical address of remote
		 *                          partition's local_msgqueue
		 *      local_nentries = remote partition's local_nentries
		 *      remote_nentries = remote partition's remote_nentries
		 */
		DBUG_ON(args->local_msgqueue_pa == 0);
		DBUG_ON(args->local_nentries == 0);
		DBUG_ON(args->remote_nentries == 0);

		ch->flags |= XPC_C_ROPENREPLY;
		ch->remote_msgqueue_pa = args->local_msgqueue_pa;

		if (args->local_nentries < ch->remote_nentries) {
			dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new "
				"remote_nentries=%d, old remote_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->local_nentries, ch->remote_nentries,
				ch->partid, ch->number);

			ch->remote_nentries = args->local_nentries;
		}
		if (args->remote_nentries < ch->local_nentries) {
			dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new "
				"local_nentries=%d, old local_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->remote_nentries, ch->local_nentries,
				ch->partid, ch->number);

			ch->local_nentries = args->remote_nentries;
		}

		xpc_process_connect(ch, &irq_flags);
	}

	spin_unlock_irqrestore(&ch->lock, irq_flags);
}
/*
 * Attempt to establish a channel connection to a remote partition.
 *
 * Returns xpcSuccess once the OPENREQUEST IPI has been sent, xpcRetry if
 * the registration mutex could not be taken without blocking,
 * xpcUnregistered if nothing is registered on this channel number, the
 * channel's disconnect reason if it is already disconnecting, or
 * xpcUnequalMsgSizes if the two sides registered different message sizes.
 */
static enum xpc_retval
xpc_connect_channel(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	struct xpc_registration *registration = &xpc_registrations[ch->number];

	/* don't block here; the channel manager will simply retry later */
	if (mutex_trylock(&registration->mutex) == 0) {
		return xpcRetry;
	}

	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
		mutex_unlock(&registration->mutex);
		return xpcUnregistered;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	DBUG_ON(ch->flags & XPC_C_CONNECTED);
	DBUG_ON(ch->flags & XPC_C_OPENREQUEST);

	if (ch->flags & XPC_C_DISCONNECTING) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		mutex_unlock(&registration->mutex);
		return ch->reason;
	}

	/* add info from the channel connect registration to the channel */

	ch->kthreads_assigned_limit = registration->assigned_limit;
	ch->kthreads_idle_limit = registration->idle_limit;
	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_active) != 0);

	ch->func = registration->func;
	DBUG_ON(registration->func == NULL);
	ch->key = registration->key;

	ch->local_nentries = registration->nentries;

	if (ch->flags & XPC_C_ROPENREQUEST) {
		if (registration->msg_size != ch->msg_size) {
			/* the local and remote sides aren't the same */

			/*
			 * Because XPC_DISCONNECT_CHANNEL() can block we're
			 * forced to up the registration sema before we unlock
			 * the channel lock. But that's okay here because we're
			 * done with the part that required the registration
			 * sema. XPC_DISCONNECT_CHANNEL() requires that the
			 * channel lock be locked and will unlock and relock
			 * the channel lock as needed.
			 */
			mutex_unlock(&registration->mutex);
			XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
								&irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return xpcUnequalMsgSizes;
		}
	} else {
		ch->msg_size = registration->msg_size;

		XPC_SET_REASON(ch, 0, 0);
		ch->flags &= ~XPC_C_DISCONNECTED;

		atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
	}

	mutex_unlock(&registration->mutex);

	/* initiate the connection */

	ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
	xpc_IPI_send_openrequest(ch, &irq_flags);

	xpc_process_connect(ch, &irq_flags);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return xpcSuccess;
}
  916. /*
  917. * Clear some of the msg flags in the local message queue.
  918. */
  919. static inline void
  920. xpc_clear_local_msgqueue_flags(struct xpc_channel *ch)
  921. {
  922. struct xpc_msg *msg;
  923. s64 get;
  924. get = ch->w_remote_GP.get;
  925. do {
  926. msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
  927. (get % ch->local_nentries) * ch->msg_size);
  928. msg->flags = 0;
  929. } while (++get < (volatile s64) ch->remote_GP.get);
  930. }
  931. /*
  932. * Clear some of the msg flags in the remote message queue.
  933. */
  934. static inline void
  935. xpc_clear_remote_msgqueue_flags(struct xpc_channel *ch)
  936. {
  937. struct xpc_msg *msg;
  938. s64 put;
  939. put = ch->w_remote_GP.put;
  940. do {
  941. msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
  942. (put % ch->remote_nentries) * ch->msg_size);
  943. msg->flags = 0;
  944. } while (++put < (volatile s64) ch->remote_GP.put);
  945. }
/*
 * Pull the latest remote Get/Put values for one channel and react to any
 * changes: notify senders whose messages were received, free up consumed
 * queue entries, and activate kthreads to deliver newly arrived messages.
 */
static void
xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	int nmsgs_sent;

	ch->remote_GP = part->remote_GPs[ch_number];

	/* See what, if anything, has changed for each connected channel */

	xpc_msgqueue_ref(ch);

	if (ch->w_remote_GP.get == ch->remote_GP.get &&
				ch->w_remote_GP.put == ch->remote_GP.put) {
		/* nothing changed since GPs were last pulled */
		xpc_msgqueue_deref(ch);
		return;
	}

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/*
	 * First check to see if messages recently sent by us have been
	 * received by the other side. (The remote GET value will have
	 * changed since we last looked at it.)
	 */

	if (ch->w_remote_GP.get != ch->remote_GP.get) {

		/*
		 * We need to notify any senders that want to be notified
		 * that their sent messages have been received by their
		 * intended recipients. We need to do this before updating
		 * w_remote_GP.get so that we don't allocate the same message
		 * queue entries prematurely (see xpc_allocate_msg()).
		 */
		if (atomic_read(&ch->n_to_notify) > 0) {
			/*
			 * Notify senders that messages sent have been
			 * received and delivered by the other side.
			 */
			xpc_notify_senders(ch, xpcMsgDelivered,
							ch->remote_GP.get);
		}

		/*
		 * Clear msg->flags in previously sent messages, so that
		 * they're ready for xpc_allocate_msg().
		 */
		xpc_clear_local_msgqueue_flags(ch);

		ch->w_remote_GP.get = ch->remote_GP.get;

		dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, "
			"channel=%d\n", ch->w_remote_GP.get, ch->partid,
			ch->number);

		/*
		 * If anyone was waiting for message queue entries to become
		 * available, wake them up.
		 */
		if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
			wake_up(&ch->msg_allocate_wq);
		}
	}

	/*
	 * Now check for newly sent messages by the other side. (The remote
	 * PUT value will have changed since we last looked at it.)
	 */

	if (ch->w_remote_GP.put != ch->remote_GP.put) {

		/*
		 * Clear msg->flags in previously received messages, so that
		 * they're ready for xpc_get_deliverable_msg().
		 */
		xpc_clear_remote_msgqueue_flags(ch);

		ch->w_remote_GP.put = ch->remote_GP.put;

		dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
			"channel=%d\n", ch->w_remote_GP.put, ch->partid,
			ch->number);

		nmsgs_sent = ch->w_remote_GP.put - ch->w_local_GP.get;
		if (nmsgs_sent > 0) {
			dev_dbg(xpc_chan, "msgs waiting to be copied and "
				"delivered=%d, partid=%d, channel=%d\n",
				nmsgs_sent, ch->partid, ch->number);

			/* only kick kthreads once the connect callout is done */
			if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
				xpc_activate_kthreads(ch, nmsgs_sent);
			}
		}
	}

	xpc_msgqueue_deref(ch);
}
/*
 * Process all pending channel activity for the specified partition: any
 * open/close IPI flags, connect/disconnect progression, and message IPI
 * flags for each channel.
 */
void
xpc_process_channel_activity(struct xpc_partition *part)
{
	unsigned long irq_flags;
	u64 IPI_amo, IPI_flags;
	struct xpc_channel *ch;
	int ch_number;
	u32 ch_flags;

	IPI_amo = xpc_get_IPI_flags(part);

	/*
	 * Initiate channel connections for registered channels.
	 *
	 * For each connected channel that has pending messages activate idle
	 * kthreads and/or create new kthreads as needed.
	 */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		/*
		 * Process any open or close related IPI flags, and then deal
		 * with connecting or disconnecting the channel as required.
		 */

		IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number);

		if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags)) {
			xpc_process_openclose_IPI(part, ch_number, IPI_flags);
		}

		ch_flags = ch->flags;	/* need an atomic snapshot of flags */

		if (ch_flags & XPC_C_DISCONNECTING) {
			spin_lock_irqsave(&ch->lock, irq_flags);
			xpc_process_disconnect(ch, &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			continue;
		}

		if (part->act_state == XPC_P_DEACTIVATING) {
			continue;
		}

		if (!(ch_flags & XPC_C_CONNECTED)) {
			if (!(ch_flags & XPC_C_OPENREQUEST)) {
				DBUG_ON(ch_flags & XPC_C_SETUP);
				(void) xpc_connect_channel(ch);
			} else {
				spin_lock_irqsave(&ch->lock, irq_flags);
				xpc_process_connect(ch, &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			continue;
		}

		/*
		 * Process any message related IPI flags, this may involve the
		 * activation of kthreads to deliver any pending messages sent
		 * from the other partition.
		 */

		if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags)) {
			xpc_process_msg_IPI(part, ch_number);
		}
	}
}
  1084. /*
  1085. * XPC's heartbeat code calls this function to inform XPC that a partition is
  1086. * going down. XPC responds by tearing down the XPartition Communication
  1087. * infrastructure used for the just downed partition.
  1088. *
  1089. * XPC's heartbeat code will never call this function and xpc_partition_up()
  1090. * at the same time. Nor will it ever make multiple calls to either function
  1091. * at the same time.
  1092. */
void
xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason)
{
	unsigned long irq_flags;
	int ch_number;
	struct xpc_channel *ch;

	dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
		XPC_PARTID(part), reason);

	if (!xpc_part_ref(part)) {
		/* infrastructure for this partition isn't currently set up */
		return;
	}

	/* disconnect channels associated with the partition going down */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		/* hold a msgqueue reference so the channel's queues stay
		 * valid while we initiate the disconnect */
		xpc_msgqueue_ref(ch);
		spin_lock_irqsave(&ch->lock, irq_flags);

		XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		xpc_msgqueue_deref(ch);
	}

	/* let the channel manager finish the disconnect processing */
	xpc_wakeup_channel_mgr(part);

	xpc_part_deref(part);
}
  1117. /*
  1118. * Teardown the infrastructure necessary to support XPartition Communication
  1119. * between the specified remote partition and the local one.
  1120. */
void
xpc_teardown_infrastructure(struct xpc_partition *part)
{
	partid_t partid = XPC_PARTID(part);

	/*
	 * We start off by making this partition inaccessible to local
	 * processes by marking it as no longer setup. Then we make it
	 * inaccessible to remote processes by clearing the XPC per partition
	 * specific variable's magic # (which indicates that these variables
	 * are no longer valid) and by ignoring all XPC notify IPIs sent to
	 * this partition.
	 */

	DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
	DBUG_ON(atomic_read(&part->nchannels_active) != 0);
	DBUG_ON(part->setup_state != XPC_P_SETUP);
	part->setup_state = XPC_P_WTEARDOWN;

	xpc_vars_part[partid].magic = 0;

	/* stop fielding notify IPIs sent to this partition */
	free_irq(SGI_XPC_NOTIFY, (void *) (u64) partid);

	/*
	 * Before proceding with the teardown we have to wait until all
	 * existing references cease.
	 */
	wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));

	/* now we can begin tearing down the infrastructure */

	part->setup_state = XPC_P_TORNDOWN;

	/* in case we've still got outstanding timers registered... */
	del_timer_sync(&part->dropped_IPI_timer);

	/*
	 * The *_base pointers are the ones that were allocated; the non-base
	 * aliases are only cleared, never freed (same pattern as
	 * xpc_free_msgqueues()).
	 */
	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
	kfree(part->local_openclose_args_base);
	part->local_openclose_args = NULL;
	kfree(part->remote_GPs_base);
	part->remote_GPs = NULL;
	kfree(part->local_GPs_base);
	part->local_GPs = NULL;
	kfree(part->channels);
	part->channels = NULL;
	part->local_IPI_amo_va = NULL;
}
  1160. /*
  1161. * Called by XP at the time of channel connection registration to cause
  1162. * XPC to establish connections to all currently active partitions.
  1163. */
  1164. void
  1165. xpc_initiate_connect(int ch_number)
  1166. {
  1167. partid_t partid;
  1168. struct xpc_partition *part;
  1169. struct xpc_channel *ch;
  1170. DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
  1171. for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
  1172. part = &xpc_partitions[partid];
  1173. if (xpc_part_ref(part)) {
  1174. ch = &part->channels[ch_number];
  1175. /*
  1176. * Initiate the establishment of a connection on the
  1177. * newly registered channel to the remote partition.
  1178. */
  1179. xpc_wakeup_channel_mgr(part);
  1180. xpc_part_deref(part);
  1181. }
  1182. }
  1183. }
  1184. void
  1185. xpc_connected_callout(struct xpc_channel *ch)
  1186. {
  1187. /* let the registerer know that a connection has been established */
  1188. if (ch->func != NULL) {
  1189. dev_dbg(xpc_chan, "ch->func() called, reason=xpcConnected, "
  1190. "partid=%d, channel=%d\n", ch->partid, ch->number);
  1191. ch->func(xpcConnected, ch->partid, ch->number,
  1192. (void *) (u64) ch->local_nentries, ch->key);
  1193. dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, "
  1194. "partid=%d, channel=%d\n", ch->partid, ch->number);
  1195. }
  1196. }
  1197. /*
  1198. * Called by XP at the time of channel connection unregistration to cause
  1199. * XPC to teardown all current connections for the specified channel.
  1200. *
  1201. * Before returning xpc_initiate_disconnect() will wait until all connections
  1202. * on the specified channel have been closed/torndown. So the caller can be
  1203. * assured that they will not be receiving any more callouts from XPC to the
  1204. * function they registered via xpc_connect().
  1205. *
  1206. * Arguments:
  1207. *
  1208. * ch_number - channel # to unregister.
  1209. */
void
xpc_initiate_disconnect(int ch_number)
{
	unsigned long irq_flags;
	partid_t partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);

	/* initiate the channel disconnect for every active partition */
	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			ch = &part->channels[ch_number];
			xpc_msgqueue_ref(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);

			if (!(ch->flags & XPC_C_DISCONNECTED)) {
				/*
				 * WDISCONNECT makes xpc_disconnect_wait()
				 * below wait for this channel's disconnect
				 * to fully complete.
				 */
				ch->flags |= XPC_C_WDISCONNECT;

				XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering,
								&irq_flags);
			}

			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);
		}
	}

	/* block until every partition's channel has finished disconnecting */
	xpc_disconnect_wait(ch_number);
}
  1237. /*
  1238. * To disconnect a channel, and reflect it back to all who may be waiting.
  1239. *
  1240. * An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
  1241. * xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by
  1242. * xpc_disconnect_wait().
  1243. *
  1244. * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
  1245. */
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
			enum xpc_retval reason, unsigned long *irq_flags)
{
	u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	/* nothing to do if a disconnect is already underway or complete */
	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
		return;
	}
	DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));

	dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
		reason, line, ch->partid, ch->number);

	XPC_SET_REASON(ch, reason, line);

	ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
	/* some of these may not have been set */
	ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
			XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
			XPC_C_CONNECTING | XPC_C_CONNECTED);

	xpc_IPI_send_closerequest(ch, irq_flags);

	if (channel_was_connected) {
		ch->flags |= XPC_C_WASCONNECTED;
	}

	/* drop the lock while waking waiters; it is reacquired on return */
	spin_unlock_irqrestore(&ch->lock, *irq_flags);

	/* wake all idle kthreads so they can exit */
	if (atomic_read(&ch->kthreads_idle) > 0) {
		wake_up_all(&ch->idle_wq);
	}

	/* wake those waiting to allocate an entry from the local msg queue */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
		wake_up(&ch->msg_allocate_wq);
	}

	spin_lock_irqsave(&ch->lock, *irq_flags);
}
  1279. void
  1280. xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason)
  1281. {
  1282. /*
  1283. * Let the channel's registerer know that the channel is being
  1284. * disconnected. We don't want to do this if the registerer was never
  1285. * informed of a connection being made.
  1286. */
  1287. if (ch->func != NULL) {
  1288. dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
  1289. "channel=%d\n", reason, ch->partid, ch->number);
  1290. ch->func(reason, ch->partid, ch->number, NULL, ch->key);
  1291. dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
  1292. "channel=%d\n", reason, ch->partid, ch->number);
  1293. }
  1294. }
  1295. /*
  1296. * Wait for a message entry to become available for the specified channel,
  1297. * but don't wait any longer than 1 jiffy.
  1298. */
/*
 * Wait for a message entry to become available for the specified channel,
 * but don't wait any longer than 1 jiffy.
 *
 * Returns xpcTimeout if the wait timed out, xpcInterrupted if it was
 * interrupted, or the channel's disconnect reason if it started
 * disconnecting.
 */
static enum xpc_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
	enum xpc_retval ret;

	if (ch->flags & XPC_C_DISCONNECTING) {
		DBUG_ON(ch->reason == xpcInterrupted);  // >>> Is this true?
		return ch->reason;
	}

	atomic_inc(&ch->n_on_msg_allocate_wq);
	/*
	 * NOTE(review): interruptible_sleep_on_timeout() is inherently racy
	 * (a wakeup can occur before the sleep starts) and was later removed
	 * from the kernel; the 1-jiffy timeout bounds the lost-wakeup window.
	 */
	ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
	atomic_dec(&ch->n_on_msg_allocate_wq);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		DBUG_ON(ch->reason == xpcInterrupted);  // >>> Is this true?
	} else if (ret == 0) {
		/* timed out: caller will refresh GP values and retry */
		ret = xpcTimeout;
	} else {
		ret = xpcInterrupted;
	}

	return ret;
}
  1320. /*
  1321. * Allocate an entry for a message from the message queue associated with the
  1322. * specified channel.
  1323. */
/*
 * Allocate an entry for a message from the message queue associated with
 * the specified channel.
 *
 * On success the entry is returned via address_of_msg and a msgqueue
 * reference is held (dropped later in xpc_send_msg()). On failure the
 * reference is dropped and *address_of_msg is left UNTOUCHED -- callers
 * must not read it unless xpcSuccess was returned.
 */
static enum xpc_retval
xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
			struct xpc_msg **address_of_msg)
{
	struct xpc_msg *msg;
	enum xpc_retval ret;
	s64 put;

	/* this reference will be dropped in xpc_send_msg() */
	xpc_msgqueue_ref(ch);

	if (ch->flags & XPC_C_DISCONNECTING) {
		xpc_msgqueue_deref(ch);
		return ch->reason;
	}
	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return xpcNotConnected;
	}

	/*
	 * Get the next available message entry from the local message queue.
	 * If none are available, we'll make sure that we grab the latest
	 * GP values.
	 */
	ret = xpcTimeout;

	while (1) {

		put = (volatile s64) ch->w_local_GP.put;
		if (put - (volatile s64) ch->w_remote_GP.get <
							ch->local_nentries) {

			/* There are available message entries. We need to try
			 * to secure one for ourselves. We'll do this by trying
			 * to increment w_local_GP.put as long as someone else
			 * doesn't beat us to it. If they do, we'll have to
			 * try again.
			 */
			if (cmpxchg(&ch->w_local_GP.put, put, put + 1) ==
									put) {
				/* we got the entry referenced by put */
				break;
			}
			continue;	/* try again */
		}

		/*
		 * There aren't any available msg entries at this time.
		 *
		 * In waiting for a message entry to become available,
		 * we set a timeout in case the other side is not
		 * sending completion IPIs. This lets us fake an IPI
		 * that will cause the IPI handler to fetch the latest
		 * GP values as if an IPI was sent by the other side.
		 */
		if (ret == xpcTimeout) {
			xpc_IPI_send_local_msgrequest(ch);
		}

		if (flags & XPC_NOWAIT) {
			xpc_msgqueue_deref(ch);
			return xpcNoWait;
		}

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpcInterrupted && ret != xpcTimeout) {
			/* channel is disconnecting; give up */
			xpc_msgqueue_deref(ch);
			return ret;
		}
	}

	/* get the message's address and initialize it */
	msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
		(put % ch->local_nentries) * ch->msg_size);

	DBUG_ON(msg->flags != 0);
	msg->number = put;

	dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, "
		"msg_number=%ld, partid=%d, channel=%d\n", put + 1,
		(void *) msg, msg->number, ch->partid, ch->number);

	*address_of_msg = msg;

	return xpcSuccess;
}
  1397. /*
  1398. * Allocate an entry for a message from the message queue associated with the
  1399. * specified channel. NOTE that this routine can sleep waiting for a message
  1400. * entry to become available. To not sleep, pass in the XPC_NOWAIT flag.
  1401. *
  1402. * Arguments:
  1403. *
  1404. * partid - ID of partition to which the channel is connected.
  1405. * ch_number - channel #.
  1406. * flags - see xpc.h for valid flags.
  1407. * payload - address of the allocated payload area pointer (filled in on
  1408. * return) in which the user-defined message is constructed.
  1409. */
  1410. enum xpc_retval
  1411. xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
  1412. {
  1413. struct xpc_partition *part = &xpc_partitions[partid];
  1414. enum xpc_retval ret = xpcUnknownReason;
  1415. struct xpc_msg *msg;
  1416. DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
  1417. DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
  1418. *payload = NULL;
  1419. if (xpc_part_ref(part)) {
  1420. ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg);
  1421. xpc_part_deref(part);
  1422. if (msg != NULL) {
  1423. *payload = &msg->payload;
  1424. }
  1425. }
  1426. return ret;
  1427. }
  1428. /*
  1429. * Now we actually send the messages that are ready to be sent by advancing
  1430. * the local message queue's Put value and then send an IPI to the recipient
  1431. * partition.
  1432. */
static void
xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
{
	struct xpc_msg *msg;
	s64 put = initial_put + 1;
	int send_IPI = 0;

	while (1) {

		/* advance past every consecutive entry marked XPC_M_READY */
		while (1) {
			if (put == (volatile s64) ch->w_local_GP.put) {
				break;
			}

			msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
				(put % ch->local_nentries) * ch->msg_size);

			if (!(msg->flags & XPC_M_READY)) {
				break;
			}

			put++;
		}

		if (put == initial_put) {
			/* nothing's changed */
			break;
		}

		/* publish the new Put with release semantics; only one
		 * thread can win this race */
		if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) !=
								initial_put) {
			/* someone else beat us to it */
			DBUG_ON((volatile s64) ch->local_GP->put < initial_put);
			break;
		}

		/* we just set the new value of local_GP->put */

		dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, "
			"channel=%d\n", put, ch->partid, ch->number);

		send_IPI = 1;

		/*
		 * We need to ensure that the message referenced by
		 * local_GP->put is not XPC_M_READY or that local_GP->put
		 * equals w_local_GP.put, so we'll go have a look.
		 */
		initial_put = put;
	}

	if (send_IPI) {
		xpc_IPI_send_msgrequest(ch);
	}
}
/*
 * Common code that does the actual sending of the message by advancing the
 * local message queue's Put value and sends an IPI to the partition the
 * message is being sent to.
 */
static enum xpc_retval
xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
			xpc_notify_func func, void *key)
{
	enum xpc_retval ret = xpcSuccess;
	/* self-initialization silences a "may be used uninitialized"
	 * warning; notify is only assigned/used when notify_type != 0 */
	struct xpc_notify *notify = notify;
	s64 put, msg_number = msg->number;

	DBUG_ON(notify_type == XPC_N_CALL && func == NULL);
	/* msg must live in the slot implied by its own message number */
	DBUG_ON((((u64) msg - (u64) ch->local_msgqueue) / ch->msg_size) !=
					msg_number % ch->local_nentries);
	DBUG_ON(msg->flags & XPC_M_READY);

	if (ch->flags & XPC_C_DISCONNECTING) {
		/* drop the reference grabbed in xpc_allocate_msg() */
		xpc_msgqueue_deref(ch);
		return ch->reason;
	}

	if (notify_type != 0) {
		/*
		 * Tell the remote side to send an ACK interrupt when the
		 * message has been delivered.
		 */
		msg->flags |= XPC_M_INTERRUPT;

		atomic_inc(&ch->n_to_notify);

		/* notify entry shares the message's queue slot index */
		notify = &ch->notify_queue[msg_number % ch->local_nentries];
		notify->func = func;
		notify->key = key;
		notify->type = notify_type;

		// >>> is a mb() needed here?

		if (ch->flags & XPC_C_DISCONNECTING) {
			/*
			 * An error occurred between our last error check and
			 * this one. We will try to clear the type field from
			 * the notify entry. If we succeed then
			 * xpc_disconnect_channel() didn't already process
			 * the notify entry.
			 */
			if (cmpxchg(&notify->type, notify_type, 0) ==
								notify_type) {
				atomic_dec(&ch->n_to_notify);
				ret = ch->reason;
			}

			/* drop the reference grabbed in xpc_allocate_msg() */
			xpc_msgqueue_deref(ch);
			return ret;
		}
	}

	msg->flags |= XPC_M_READY;

	/*
	 * The preceding store of msg->flags must occur before the following
	 * load of ch->local_GP->put.
	 */
	mb();

	/* see if the message is next in line to be sent, if so send it */

	put = ch->local_GP->put;
	if (put == msg_number) {
		xpc_send_msgs(ch, put);
	}

	/* drop the reference grabbed in xpc_allocate_msg() */
	xpc_msgqueue_deref(ch);

	return ret;
}
  1542. /*
  1543. * Send a message previously allocated using xpc_initiate_allocate() on the
  1544. * specified channel connected to the specified partition.
  1545. *
  1546. * This routine will not wait for the message to be received, nor will
  1547. * notification be given when it does happen. Once this routine has returned
  1548. * the message entry allocated via xpc_initiate_allocate() is no longer
 * accessible to the caller.
  1550. *
  1551. * This routine, although called by users, does not call xpc_part_ref() to
  1552. * ensure that the partition infrastructure is in place. It relies on the
  1553. * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg().
  1554. *
  1555. * Arguments:
  1556. *
  1557. * partid - ID of partition to which the channel is connected.
  1558. * ch_number - channel # to send message on.
  1559. * payload - pointer to the payload area allocated via
  1560. * xpc_initiate_allocate().
  1561. */
  1562. enum xpc_retval
  1563. xpc_initiate_send(partid_t partid, int ch_number, void *payload)
  1564. {
  1565. struct xpc_partition *part = &xpc_partitions[partid];
  1566. struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
  1567. enum xpc_retval ret;
  1568. dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
  1569. partid, ch_number);
  1570. DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
  1571. DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
  1572. DBUG_ON(msg == NULL);
  1573. ret = xpc_send_msg(&part->channels[ch_number], msg, 0, NULL, NULL);
  1574. return ret;
  1575. }
  1576. /*
  1577. * Send a message previously allocated using xpc_initiate_allocate on the
  1578. * specified channel connected to the specified partition.
  1579. *
  1580. * This routine will not wait for the message to be sent. Once this routine
  1581. * has returned the message entry allocated via xpc_initiate_allocate() is no
 * longer accessible to the caller.
  1583. *
  1584. * Once the remote end of the channel has received the message, the function
  1585. * passed as an argument to xpc_initiate_send_notify() will be called. This
  1586. * allows the sender to free up or re-use any buffers referenced by the
  1587. * message, but does NOT mean the message has been processed at the remote
  1588. * end by a receiver.
  1589. *
  1590. * If this routine returns an error, the caller's function will NOT be called.
  1591. *
  1592. * This routine, although called by users, does not call xpc_part_ref() to
  1593. * ensure that the partition infrastructure is in place. It relies on the
  1594. * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg().
  1595. *
  1596. * Arguments:
  1597. *
  1598. * partid - ID of partition to which the channel is connected.
  1599. * ch_number - channel # to send message on.
  1600. * payload - pointer to the payload area allocated via
  1601. * xpc_initiate_allocate().
  1602. * func - function to call with asynchronous notification of message
  1603. * receipt. THIS FUNCTION MUST BE NON-BLOCKING.
  1604. * key - user-defined key to be passed to the function when it's called.
  1605. */
  1606. enum xpc_retval
  1607. xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload,
  1608. xpc_notify_func func, void *key)
  1609. {
  1610. struct xpc_partition *part = &xpc_partitions[partid];
  1611. struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
  1612. enum xpc_retval ret;
  1613. dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
  1614. partid, ch_number);
  1615. DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
  1616. DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
  1617. DBUG_ON(msg == NULL);
  1618. DBUG_ON(func == NULL);
  1619. ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL,
  1620. func, key);
  1621. return ret;
  1622. }
/*
 * Pull messages from the remote partition's message queue into our local
 * cached copy, up to and including the message indexed by `get`, then
 * return the local address of that message.  Returns NULL if interrupted
 * by a signal or if the pull from the remote partition failed.
 */
static struct xpc_msg *
xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct xpc_msg *remote_msg, *msg;
	u32 msg_index, nmsgs;
	u64 msg_offset;
	enum xpc_retval ret;

	/* only one puller at a time per channel */
	if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) {
		/* we were interrupted by a signal */
		return NULL;
	}

	while (get >= ch->next_msg_to_pull) {

		/* pull as many messages as are ready and able to be pulled */

		msg_index = ch->next_msg_to_pull % ch->remote_nentries;

		DBUG_ON(ch->next_msg_to_pull >=
					(volatile s64) ch->w_remote_GP.put);
		nmsgs =  (volatile s64) ch->w_remote_GP.put -
						ch->next_msg_to_pull;
		if (msg_index + nmsgs > ch->remote_nentries) {
			/* ignore the ones that wrap the msg queue for now */
			nmsgs = ch->remote_nentries - msg_index;
		}

		/* local cache slot and its counterpart in remote memory */
		msg_offset = msg_index * ch->msg_size;
		msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
								msg_offset);
		remote_msg = (struct xpc_msg *) (ch->remote_msgqueue_pa +
								msg_offset);

		if ((ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
				nmsgs * ch->msg_size)) != xpcSuccess) {

			dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
				" msg %ld from partition %d, channel=%d, "
				"ret=%d\n", nmsgs, ch->next_msg_to_pull,
				ch->partid, ch->number, ret);

			XPC_DEACTIVATE_PARTITION(part, ret);

			mutex_unlock(&ch->msg_to_pull_mutex);
			return NULL;
		}

		mb();	/* >>> this may not be needed, we're not sure */

		ch->next_msg_to_pull += nmsgs;
	}

	mutex_unlock(&ch->msg_to_pull_mutex);

	/* return the message we were looking for */

	msg_offset = (get % ch->remote_nentries) * ch->msg_size;
	msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + msg_offset);

	return msg;
}
/*
 * Get a message to be delivered.
 */
static struct xpc_msg *
xpc_get_deliverable_msg(struct xpc_channel *ch)
{
	struct xpc_msg *msg = NULL;
	s64 get;

	do {
		if ((volatile u32) ch->flags & XPC_C_DISCONNECTING) {
			break;
		}

		/* no deliverable messages if Get has caught up with Put */
		get = (volatile s64) ch->w_local_GP.get;
		if (get == (volatile s64) ch->w_remote_GP.put) {
			break;
		}

		/* There are messages waiting to be pulled and delivered.
		 * We need to try to secure one for ourselves. We'll do this
		 * by trying to increment w_local_GP.get and hope that no one
		 * else beats us to it. If they do, we'll simply have
		 * to try again for the next one.
		 */

		if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) {
			/* we got the entry referenced by get */

			dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, "
				"partid=%d, channel=%d\n", get + 1,
				ch->partid, ch->number);

			/* pull the message from the remote partition */

			msg = xpc_pull_remote_msg(ch, get);

			/* a pulled message must match the slot we claimed */
			DBUG_ON(msg != NULL && msg->number != get);
			DBUG_ON(msg != NULL && (msg->flags & XPC_M_DONE));
			DBUG_ON(msg != NULL && !(msg->flags & XPC_M_READY));

			break;
		}

	} while (1);

	return msg;
}
  1707. /*
  1708. * Deliver a message to its intended recipient.
  1709. */
  1710. void
  1711. xpc_deliver_msg(struct xpc_channel *ch)
  1712. {
  1713. struct xpc_msg *msg;
  1714. if ((msg = xpc_get_deliverable_msg(ch)) != NULL) {
  1715. /*
  1716. * This ref is taken to protect the payload itself from being
  1717. * freed before the user is finished with it, which the user
  1718. * indicates by calling xpc_initiate_received().
  1719. */
  1720. xpc_msgqueue_ref(ch);
  1721. atomic_inc(&ch->kthreads_active);
  1722. if (ch->func != NULL) {
  1723. dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, "
  1724. "msg_number=%ld, partid=%d, channel=%d\n",
  1725. (void *) msg, msg->number, ch->partid,
  1726. ch->number);
  1727. /* deliver the message to its intended recipient */
  1728. ch->func(xpcMsgReceived, ch->partid, ch->number,
  1729. &msg->payload, ch->key);
  1730. dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, "
  1731. "msg_number=%ld, partid=%d, channel=%d\n",
  1732. (void *) msg, msg->number, ch->partid,
  1733. ch->number);
  1734. }
  1735. atomic_dec(&ch->kthreads_active);
  1736. }
  1737. }
/*
 * Now we actually acknowledge the messages that have been delivered and ack'd
 * by advancing the cached remote message queue's Get value and if requested
 * send an IPI to the message sender's partition.
 */
static void
xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
{
	struct xpc_msg *msg;
	s64 get = initial_get + 1;
	int send_IPI = 0;

	while (1) {

		/*
		 * Scan forward from initial_get for a contiguous run of
		 * messages marked XPC_M_DONE, accumulating their flags so
		 * we know whether any sender requested an ACK interrupt.
		 */
		while (1) {
			if (get == (volatile s64) ch->w_local_GP.get) {
				break;
			}

			msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
			       (get % ch->remote_nentries) * ch->msg_size);

			if (!(msg->flags & XPC_M_DONE)) {
				break;
			}

			msg_flags |= msg->flags;
			get++;
		}

		if (get == initial_get) {
			/* nothing's changed */
			break;
		}

		/*
		 * Publish the advanced Get value with release semantics;
		 * only one CPU can win the cmpxchg, so a loser leaves the
		 * advance to the winner.
		 */
		if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) !=
								initial_get) {
			/* someone else beat us to it */
			DBUG_ON((volatile s64) ch->local_GP->get <=
								initial_get);
			break;
		}

		/* we just set the new value of local_GP->get */

		dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, "
			"channel=%d\n", get, ch->partid, ch->number);

		send_IPI = (msg_flags & XPC_M_INTERRUPT);

		/*
		 * We need to ensure that the message referenced by
		 * local_GP->get is not XPC_M_DONE or that local_GP->get
		 * equals w_local_GP.get, so we'll go have a look.
		 */
		initial_get = get;
	}

	/* interrupt the sender only if some acked message asked for it */
	if (send_IPI) {
		xpc_IPI_send_msgrequest(ch);
	}
}
/*
 * Acknowledge receipt of a delivered message.
 *
 * If a message has XPC_M_INTERRUPT set, send an interrupt to the partition
 * that sent the message.
 *
 * This function, although called by users, does not call xpc_part_ref() to
 * ensure that the partition infrastructure is in place. It relies on the
 * fact that we called xpc_msgqueue_ref() in xpc_deliver_msg().
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # message received on.
 *	payload - pointer to the payload area allocated via
 *			xpc_initiate_allocate().
 */
void
xpc_initiate_received(partid_t partid, int ch_number, void *payload)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;
	/* recover the message header surrounding the user's payload area */
	struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
	s64 get, msg_number = msg->number;

	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);

	ch = &part->channels[ch_number];

	dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n",
		(void *) msg, msg_number, ch->partid, ch->number);

	/* msg must live in the cached-queue slot implied by its number */
	DBUG_ON((((u64) msg - (u64) ch->remote_msgqueue) / ch->msg_size) !=
					msg_number % ch->remote_nentries);
	DBUG_ON(msg->flags & XPC_M_DONE);

	msg->flags |= XPC_M_DONE;

	/*
	 * The preceding store of msg->flags must occur before the following
	 * load of ch->local_GP->get.
	 */
	mb();

	/*
	 * See if this message is next in line to be acknowledged as having
	 * been delivered.
	 */

	get = ch->local_GP->get;
	if (get == msg_number) {
		xpc_acknowledge_msgs(ch, get, msg->flags);
	}

	/* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg()  */
	xpc_msgqueue_deref(ch);
}