xpc_uv.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) uv-based functions.
 *
 *	Architecture specific implementation of common functions.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
#include <asm/uv/uv_irq.h>
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#endif
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
struct uv_IO_APIC_route_entry {
	__u64	vector		:  8,
		delivery_mode	:  3,
		dest_mode	:  1,
		delivery_status	:  1,
		polarity	:  1,
		__reserved_1	:  1,
		trigger		:  1,
		mask		:  1,
		__reserved_2	: 15,
		dest		: 32;
};
#endif

static struct xpc_heartbeat_uv *xpc_heartbeat_uv;

#define XPC_ACTIVATE_MSG_SIZE_UV	(1 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_ACTIVATE_IRQ_NAME		"xpc_activate"

#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME		"xpc_notify"

static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
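
/*
 * Initialize the uv-specific fields of every partition entry; each partition
 * starts out inactive with its flags lock and cached-descriptor mutex ready
 * for use.
 */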
static int
xpc_setup_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex);
		spin_lock_init(&part_uv->flags_lock);
		part_uv->remote_act_state = XPC_P_AS_INACTIVE;
	}
	return 0;
}

static void
xpc_teardown_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;
	unsigned long irq_flags;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		if (part_uv->cached_activate_gru_mq_desc != NULL) {
			mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			kfree(part_uv->cached_activate_gru_mq_desc);
			part_uv->cached_activate_gru_mq_desc = NULL;
			mutex_unlock(&part_uv->
				     cached_activate_gru_mq_desc_mutex);
		}
	}
}
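
/*
 * Bind a GRU message queue to an interrupt.  On x86_64 this goes through
 * uv_setup_irq(); on ia64 the vector is fixed (SGI_XPC_ACTIVATE or
 * SGI_XPC_NOTIFY) and the MMR is programmed directly.
 */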
static int
xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
{
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
			       UV_AFFINITY_CPU);
	if (mq->irq < 0) {
		dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
			-mq->irq);
		return mq->irq;
	}

	mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_ACTIVATE;
	else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_NOTIFY;
	else
		return -EINVAL;

	mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
#else
	#error not a supported configuration
#endif

	return 0;
}

static void
xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
{
#if defined CONFIG_X86_64
	uv_teardown_irq(mq->irq);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode;
	unsigned long mmr_value;

	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
	mmr_value = 1UL << 16;

	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
#else
	#error not a supported configuration
#endif
}

static int
xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

	ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
				    mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
			ret);
		return -EBUSY;
	}
#elif defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
					 mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
			"ret=%d\n", ret);
		return ret;
	}
#else
	#error not a supported configuration
#endif

	mq->watchlist_num = ret;
	return 0;
}

static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != SALRET_OK);
#else
	#error not a supported configuration
#endif
}
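
/*
 * Create a GRU message queue: allocate node-local pages for it, put it on a
 * BIOS/SAL watchlist so writes to it raise an interrupt, wire up the irq
 * handler, and open the memory window so other partitions may write to it.
 * Errors unwind in reverse order through the out_* labels.
 */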
static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
		     irq_handler_t irq_handler)
{
	enum xp_retval xp_ret;
	int ret;
	int nid;
	int nasid;
	int pg_order;
	struct page *page;
	struct xpc_gru_mq_uv *mq;
	struct uv_IO_APIC_route_entry *mmr_value;

	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
	if (mq == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a xpc_gru_mq_uv structure\n");
		ret = -ENOMEM;
		goto out_0;
	}

	mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
				  GFP_KERNEL);
	if (mq->gru_mq_desc == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a gru_message_queue_desc structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	pg_order = get_order(mq_size);
	mq->order = pg_order + PAGE_SHIFT;
	mq_size = 1UL << mq->order;

	mq->mmr_blade = uv_cpu_to_blade_id(cpu);

	nid = cpu_to_node(cpu);
	page = alloc_pages_exact_node(nid,
				      GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				      pg_order);
	if (page == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
		ret = -ENOMEM;
		goto out_2;
	}
	mq->address = page_address(page);

	/* enable generation of irq when GRU mq operation occurs to this mq */
	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
	if (ret != 0)
		goto out_3;

	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
	if (ret != 0)
		goto out_4;

	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			mq->irq, -ret);
		goto out_5;
	}

	nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));

	mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
	ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
				       nasid, mmr_value->vector,
				       mmr_value->dest);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		ret = -EINVAL;
		goto out_6;
	}

	/* allow other partitions to access this GRU mq */
	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
	if (xp_ret != xpSuccess) {
		ret = -EACCES;
		goto out_6;
	}

	return mq;

	/* something went wrong */
out_6:
	free_irq(mq->irq, NULL);
out_5:
	xpc_release_gru_mq_irq_uv(mq);
out_4:
	xpc_gru_mq_watchlist_free_uv(mq);
out_3:
	free_pages((unsigned long)mq->address, pg_order);
out_2:
	kfree(mq->gru_mq_desc);
out_1:
	kfree(mq);
out_0:
	return ERR_PTR(ret);
}

static void
xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
	unsigned int mq_size;
	int pg_order;
	int ret;

	/* disallow other partitions to access GRU mq */
	mq_size = 1UL << mq->order;
	ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
	BUG_ON(ret != xpSuccess);

	/* unregister irq handler and release mq irq/vector mapping */
	free_irq(mq->irq, NULL);
	xpc_release_gru_mq_irq_uv(mq);

	/* disable generation of irq when GRU mq op occurs to this mq */
	xpc_gru_mq_watchlist_free_uv(mq);

	pg_order = mq->order - PAGE_SHIFT;
	free_pages((unsigned long)mq->address, pg_order);

	kfree(mq);
}
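
/*
 * Send one message into a GRU message queue, retrying indefinitely while the
 * queue is full (with a short sleep) or congested, and giving up only on an
 * unexpected GRU error.
 */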
static enum xp_retval
xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg,
		 size_t msg_size)
{
	enum xp_retval xp_ret;
	int ret;

	while (1) {
		ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);
		if (ret == MQE_OK) {
			xp_ret = xpSuccess;
			break;
		}

		if (ret == MQE_QUEUE_FULL) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_QUEUE_FULL\n");
			/* !!! handle QLimit reached; delay & try again */
			/* ??? Do we add a limit to the number of retries? */
			(void)msleep_interruptible(10);
		} else if (ret == MQE_CONGESTION) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_CONGESTION\n");
			/* !!! handle LB Overflow; simply try again */
			/* ??? Do we add a limit to the number of retries? */
		} else {
			/* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
			dev_err(xpc_chan, "gru_send_message_gpa() returned "
				"error=%d\n", ret);
			xp_ret = xpGruSendMqError;
			break;
		}
	}
	return xp_ret;
}

static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	u8 act_state_req;

	DBUG_ON(xpc_activate_IRQ_rcvd == 0);

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part = &xpc_partitions[partid];

		if (part->sn.uv.act_state_req == 0)
			continue;

		xpc_activate_IRQ_rcvd--;
		BUG_ON(xpc_activate_IRQ_rcvd < 0);

		act_state_req = part->sn.uv.act_state_req;
		part->sn.uv.act_state_req = 0;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else if (part->act_state == XPC_P_AS_DEACTIVATING)
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
			XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);

		} else {
			BUG();
		}

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (xpc_activate_IRQ_rcvd == 0)
			break;
	}
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
}
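
/*
 * Act on a single message pulled from the activate message queue: update the
 * cached remote act_state, then dispatch on the message type to queue an
 * activate/deactivate request or to set channel-control flags for the channel
 * manager.  An unknown type causes the remote partition to be deactivated.
 */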
static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
			      struct xpc_activate_mq_msghdr_uv *msg_hdr,
			      int *wakeup_hb_checker)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_openclose_args *args;

	part_uv->remote_act_state = msg_hdr->act_state;

	switch (msg_hdr->type) {
	case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
		/* syncing of remote_act_state was just done above */
		break;

	case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_activate_req_uv *msg;

		/*
		 * ??? Do we deal here with ts_jiffies being different
		 * ??? if act_state != XPC_P_AS_INACTIVE instead of
		 * ??? below?
		 */
		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_activate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
		part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
		part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
		part_uv->heartbeat_gpa = msg->heartbeat_gpa;

		if (msg->activate_gru_mq_desc_gpa !=
		    part_uv->activate_gru_mq_desc_gpa) {
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			part_uv->activate_gru_mq_desc_gpa =
			    msg->activate_gru_mq_desc_gpa;
		}
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_deactivate_req_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_deactivate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = msg->reason;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closerequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->reason = msg->reason;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_closereply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closereply_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openrequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->entry_size = msg->entry_size;
		args->local_nentries = msg->local_nentries;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_openreply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->remote_nentries = msg->remote_nentries;
		args->local_nentries = msg->local_nentries;
		args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
		struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_opencomplete_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	default:
		dev_err(xpc_part, "received unknown activate_mq msg type=%d "
			"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadMsgType;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}

	if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
	    part->remote_rp_ts_jiffies != 0) {
		/*
		 * ??? Does what we do here need to be sensitive to
		 * ??? act_state or remote_act_state?
		 */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
	}
}

static irqreturn_t
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr;
	short partid;
	struct xpc_partition *part;
	int wakeup_hb_checker = 0;
	int part_referenced;

	while (1) {
		msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc);
		if (msg_hdr == NULL)
			break;

		partid = msg_hdr->partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
				"received invalid partid=0x%x in message\n",
				partid);
		} else {
			part = &xpc_partitions[partid];

			part_referenced = xpc_part_ref(part);
			xpc_handle_activate_mq_msg_uv(part, msg_hdr,
						      &wakeup_hb_checker);
			if (part_referenced)
				xpc_part_deref(part);
		}

		gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr);
	}

	if (wakeup_hb_checker)
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	return IRQ_HANDLED;
}

static enum xp_retval
xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
				unsigned long gru_mq_desc_gpa)
{
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
			       sizeof(struct gru_message_queue_desc));
	if (ret == xpSuccess)
		gru_mq_desc->mq = NULL;

	return ret;
}
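
/*
 * Send a message to a remote partition's activate message queue.  The remote
 * queue descriptor is fetched and cached on first use (or whenever the cached
 * copy has been invalidated), and a failed send is retried once the
 * descriptor has been re-cached.
 */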
static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
			 int msg_type)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct gru_message_queue_desc *gru_mq_desc;
	unsigned long irq_flags;
	enum xp_retval ret;

	DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);

	msg_hdr->type = msg_type;
	msg_hdr->partid = xp_partition_id;
	msg_hdr->act_state = part->act_state;
	msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;

	mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
again:
	if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
		gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
		if (gru_mq_desc == NULL) {
			gru_mq_desc = kmalloc(sizeof(struct
					      gru_message_queue_desc),
					      GFP_KERNEL);
			if (gru_mq_desc == NULL) {
				ret = xpNoMemory;
				goto done;
			}
			part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
		}

		ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
						      part_uv->
						      activate_gru_mq_desc_gpa);
		if (ret != xpSuccess)
			goto done;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
	}

	/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
	ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
			       msg_size);
	if (ret != xpSuccess) {
		smp_rmb();	/* ensure a fresh copy of part_uv->flags */
		if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
			goto again;
	}
done:
	mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
	return ret;
}

static void
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
			      size_t msg_size, int msg_type)
{
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess))
		XPC_DEACTIVATE_PARTITION(part, ret);
}

static void
xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
			    void *msg, size_t msg_size, int msg_type)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess)) {
		if (irq_flags != NULL)
			spin_unlock_irqrestore(&ch->lock, *irq_flags);

		XPC_DEACTIVATE_PARTITION(part, ret);

		if (irq_flags != NULL)
			spin_lock_irqsave(&ch->lock, *irq_flags);
	}
}

static void
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;

	/*
	 * !!! Make our side think that the remote partition sent an activate
	 * !!! mq message our way by doing what the activate IRQ handler would
	 * !!! do had one really been sent.
	 */
	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	if (part_uv->act_state_req == 0)
		xpc_activate_IRQ_rcvd++;
	part_uv->act_state_req = act_state_req;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
}

static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
				  size_t *len)
{
	s64 status;
	enum xp_retval ret;

#if defined CONFIG_X86_64
	status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
					  (u64 *)len);
	if (status == BIOS_STATUS_SUCCESS)
		ret = xpSuccess;
	else if (status == BIOS_STATUS_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpBiosError;

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
	if (status == SALRET_OK)
		ret = xpSuccess;
	else if (status == SALRET_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpSalError;

#else
	#error not a supported configuration
#endif

	return ret;
}

static int
xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
{
	xpc_heartbeat_uv =
	    &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat;
	rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
	rp->sn.uv.activate_gru_mq_desc_gpa =
	    uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
	return 0;
}

static void
xpc_allow_hb_uv(short partid)
{
}

static void
xpc_disallow_hb_uv(short partid)
{
}

static void
xpc_disallow_all_hbs_uv(void)
{
}

static void
xpc_increment_heartbeat_uv(void)
{
	xpc_heartbeat_uv->value++;
}

static void
xpc_offline_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 1;
}

static void
xpc_online_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_init_uv(void)
{
	xpc_heartbeat_uv->value = 1;
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_exit_uv(void)
{
	xpc_offline_heartbeat_uv();
}
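
/*
 * Pull the remote partition's heartbeat structure across and compare it with
 * the last value seen; an unchanged value while not marked offline means the
 * heartbeat has been lost.
 */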
static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat),
			       part_uv->heartbeat_gpa,
			       sizeof(struct xpc_heartbeat_uv));
	if (ret != xpSuccess)
		return ret;

	if (part_uv->cached_heartbeat.value == part->last_heartbeat &&
	    !part_uv->cached_heartbeat.offline) {

		ret = xpNoHeartbeat;
	} else {
		part->last_heartbeat = part_uv->cached_heartbeat.value;
	}

	return ret;
}

static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
				    unsigned long remote_rp_gpa, int nasid)
{
	short partid = remote_rp->SAL_partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_activate_mq_msg_activate_req_uv msg;

	part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
	part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
	part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa;
	part->sn.uv.activate_gru_mq_desc_gpa =
	    remote_rp->sn.uv.activate_gru_mq_desc_gpa;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
		msg.rp_gpa = uv_gpa(xpc_rsvd_page);
		msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa;
		msg.activate_gru_mq_desc_gpa =
		    xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					   XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
	}

	if (part->act_state == XPC_P_AS_INACTIVE)
		xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_reactivation_uv(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_deactivation_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_deactivate_req_uv msg;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
	    part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {

		msg.reason = part->reason;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					 XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
	}
}

static void
xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}
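
/*
 * Simple spinlock-protected singly linked FIFO used for the per-channel
 * free-slot and received-message lists.
 */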
static void
xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
{
	head->first = NULL;
	head->last = NULL;
	spin_lock_init(&head->lock);
	head->n_entries = 0;
}

static void *
xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
{
	unsigned long irq_flags;
	struct xpc_fifo_entry_uv *first;

	spin_lock_irqsave(&head->lock, irq_flags);
	first = head->first;
	if (head->first != NULL) {
		head->first = first->next;
		if (head->first == NULL)
			head->last = NULL;

		head->n_entries--;
		BUG_ON(head->n_entries < 0);

		first->next = NULL;
	}
	spin_unlock_irqrestore(&head->lock, irq_flags);
	return first;
}

static void
xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
		      struct xpc_fifo_entry_uv *last)
{
	unsigned long irq_flags;

	last->next = NULL;
	spin_lock_irqsave(&head->lock, irq_flags);
	if (head->last != NULL)
		head->last->next = last;
	else
		head->first = last;
	head->last = last;
	head->n_entries++;
	spin_unlock_irqrestore(&head->lock, irq_flags);
}

static int
xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
{
	return head->n_entries;
}

/*
 * Setup the channel structures that are uv specific.
 */
static enum xp_retval
xpc_setup_ch_structures_uv(struct xpc_partition *part)
{
	struct xpc_channel_uv *ch_uv;
	int ch_number;

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch_uv = &part->channels[ch_number].sn.uv;

		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
	}

	return xpSuccess;
}

/*
 * Teardown the channel structures that are uv specific.
 */
static void
xpc_teardown_ch_structures_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}

static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	/*
	 * We send a sync msg to get the remote partition's remote_act_state
	 * updated to our current act_state which at this point should
	 * be XPC_P_AS_ACTIVATING.
	 */
	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);

	while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
		 (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}

static u64
xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
	return chctl.all_flags;
}
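
/*
 * Allocate the send (and receive) message slot arrays for a channel.  If an
 * allocation fails, retry with progressively fewer entries and shrink the
 * channel's nentries to match what was actually obtained.
 */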
static enum xp_retval
xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_send_msg_slot_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
		nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
		ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->send_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = &ch_uv->send_msg_slots[entry];

			msg_slot->msg_slot_number = entry;
			xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
					      &msg_slot->next);
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries)
			ch->local_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		return xpSuccess;
	}

	return xpNoMemory;
}

static enum xp_retval
xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
		nbytes = nentries * ch->entry_size;
		ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->recv_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = ch_uv->recv_msg_slots +
			    entry * ch->entry_size;

			msg_slot->hdr.msg_slot_number = entry;
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries)
			ch->remote_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		return xpSuccess;
	}

	return xpNoMemory;
}

/*
 * Allocate msg_slots associated with the channel.
 */
static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
	enum xp_retval ret;
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch->flags & XPC_C_SETUP);

	ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct
						   gru_message_queue_desc),
						   GFP_KERNEL);
	if (ch_uv->cached_notify_gru_mq_desc == NULL)
		return xpNoMemory;

	ret = xpc_allocate_send_msg_slot_uv(ch);
	if (ret == xpSuccess) {

		ret = xpc_allocate_recv_msg_slot_uv(ch);
		if (ret != xpSuccess) {
			kfree(ch_uv->send_msg_slots);
			xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		}
	}
	return ret;
}

/*
 * Free up msg_slots and clear other stuff that were setup for the specified
 * channel.
 */
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(!spin_is_locked(&ch->lock));

	kfree(ch_uv->cached_notify_gru_mq_desc);
	ch_uv->cached_notify_gru_mq_desc = NULL;

	if (ch->flags & XPC_C_SETUP) {
		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		kfree(ch_uv->send_msg_slots);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
		kfree(ch_uv->recv_msg_slots);
	}
}

static void
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closerequest_uv msg;

	msg.ch_number = ch->number;
	msg.reason = ch->reason;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
}

static void
xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closereply_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
}

static void
xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openrequest_uv msg;

	msg.ch_number = ch->number;
	msg.entry_size = ch->entry_size;
	msg.local_nentries = ch->local_nentries;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
}

static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openreply_uv msg;

	msg.ch_number = ch->number;
	msg.local_nentries = ch->local_nentries;
	msg.remote_nentries = ch->remote_nentries;
	msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}

static void
xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_opencomplete_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV);
}

static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	xpc_wakeup_channel_mgr(part);
}

static enum xp_retval
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
			       unsigned long gru_mq_desc_gpa)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
	return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
					       gru_mq_desc_gpa);
}

static void
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
}

static void
xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
}

static void
xpc_assume_partition_disengaged_uv(short partid)
{
	struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
	unsigned long irq_flags;

	spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
	part_uv->flags &= ~XPC_P_ENGAGED_UV;
	spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}

static int
xpc_partition_engaged_uv(short partid)
{
	return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
}

static int
xpc_any_partition_engaged_uv(void)
{
	struct xpc_partition_uv *part_uv;
	short partid;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;
		if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
			return 1;
	}
	return 0;
}

static enum xp_retval
xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
			 struct xpc_send_msg_slot_uv **address_of_msg_slot)
{
	enum xp_retval ret;
	struct xpc_send_msg_slot_uv *msg_slot;
	struct xpc_fifo_entry_uv *entry;

	while (1) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
		if (entry != NULL)
			break;

		if (flags & XPC_NOWAIT)
			return xpNoWait;

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpInterrupted && ret != xpTimeout)
			return ret;
	}

	msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
	*address_of_msg_slot = msg_slot;
	return xpSuccess;
}

static void
xpc_free_msg_slot_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot)
{
	xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);

	/* wakeup anyone waiting for a free msg slot */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);
}

static void
xpc_notify_sender_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot,
		     enum xp_retval reason)
{
	xpc_notify_func func = msg_slot->func;

	if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {

		atomic_dec(&ch->n_to_notify);

		dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);

		func(reason, ch->partid, ch->number, msg_slot->key);

		dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);
	}
}

static void
xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry = msg->hdr.msg_slot_number % ch->local_nentries;

	msg_slot = &ch->sn.uv.send_msg_slots[entry];

	BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
	msg_slot->msg_slot_number += ch->local_nentries;

	if (msg_slot->func != NULL)
		xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);

	xpc_free_msg_slot_uv(ch, msg_slot);
}
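
/*
 * Handle one message arriving on the notify message queue: a zero-size
 * message is an ACK for a message we sent earlier; otherwise the payload is
 * copied into the channel's receive slot and queued for delivery by a
 * kthread.
 */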
static void
xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_channel *ch;
	struct xpc_channel_uv *ch_uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int ch_number = msg->hdr.ch_number;

	if (unlikely(ch_number >= part->nchannels)) {
		dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
			"channel number=0x%x in message from partid=%d\n",
			ch_number, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadChannelNumber;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		wake_up_interruptible(&xpc_activate_IRQ_wq);
		return;
	}

	ch = &part->channels[ch_number];
	xpc_msgqueue_ref(ch);

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/* see if we're really dealing with an ACK for a previously sent msg */
	if (msg->hdr.size == 0) {
		xpc_handle_notify_mq_ack_uv(ch, msg);
		xpc_msgqueue_deref(ch);
		return;
	}

	/* we're dealing with a normal message sent via the notify_mq */
	ch_uv = &ch->sn.uv;

	msg_slot = ch_uv->recv_msg_slots +
	    (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;

	BUG_ON(msg_slot->hdr.size != 0);

	memcpy(msg_slot, msg, msg->hdr.size);

	xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);

	if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
		/*
		 * If there is an existing idle kthread get it to deliver
		 * the payload, otherwise we'll have to get the channel mgr
		 * for this partition to create a kthread to do the delivery.
		 */
		if (atomic_read(&ch->kthreads_idle) > 0)
			wake_up_nr(&ch->idle_wq, 1);
		else
			xpc_send_chctl_local_msgrequest_uv(part, ch->number);
	}
	xpc_msgqueue_deref(ch);
}

static irqreturn_t
xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_notify_mq_msg_uv *msg;
	short partid;
	struct xpc_partition *part;

	while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) !=
	       NULL) {

		partid = msg->hdr.partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
				"invalid partid=0x%x in message\n", partid);
		} else {
			part = &xpc_partitions[partid];

			if (xpc_part_ref(part)) {
				xpc_handle_notify_mq_msg_uv(part, msg);
				xpc_part_deref(part);
			}
		}

		gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg);
	}

	return IRQ_HANDLED;
}

static int
xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
{
	return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
}

static void
xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	int ndeliverable_payloads;

	xpc_msgqueue_ref(ch);

	ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);

	if (ndeliverable_payloads > 0 &&
	    (ch->flags & XPC_C_CONNECTED) &&
	    (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {

		xpc_activate_kthreads(ch, ndeliverable_payloads);
	}

	xpc_msgqueue_deref(ch);
}
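
/*
 * Send a payload to the remote partition over the notify message queue.  A
 * send slot is reserved first; if the caller supplied a notify function it is
 * recorded so the sender can be called back when the message is delivered (or
 * the channel disconnects).  A send failure deactivates the partition.
 */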
static enum xp_retval
xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
		    u16 payload_size, u8 notify_type, xpc_notify_func func,
		    void *key)
{
	enum xp_retval ret = xpSuccess;
	struct xpc_send_msg_slot_uv *msg_slot = NULL;
	struct xpc_notify_mq_msg_uv *msg;
	u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
	size_t msg_size;

	DBUG_ON(notify_type != XPC_N_CALL);

	msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
	if (msg_size > ch->entry_size)
		return xpPayloadTooBig;

	xpc_msgqueue_ref(ch);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		goto out_1;
	}
	if (!(ch->flags & XPC_C_CONNECTED)) {
		ret = xpNotConnected;
		goto out_1;
	}

	ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
	if (ret != xpSuccess)
		goto out_1;

	if (func != NULL) {
		atomic_inc(&ch->n_to_notify);

		msg_slot->key = key;
		smp_wmb(); /* a non-NULL func must hit memory after the key */
		msg_slot->func = func;

		if (ch->flags & XPC_C_DISCONNECTING) {
			ret = ch->reason;
			goto out_2;
		}
	}

	msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
	msg->hdr.partid = xp_partition_id;
	msg->hdr.ch_number = ch->number;
	msg->hdr.size = msg_size;
	msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
	memcpy(&msg->payload, payload, payload_size);

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       msg_size);
	if (ret == xpSuccess)
		goto out_1;

	XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
out_2:
	if (func != NULL) {
		/*
		 * Try to NULL the msg_slot's func field. If we fail, then
		 * xpc_notify_senders_of_disconnect_uv() beat us to it, in which
		 * case we need to pretend we succeeded to send the message
		 * since the user will get a callout for the disconnect error
		 * by xpc_notify_senders_of_disconnect_uv(), and returning an
		 * error here as well would only confuse them. Additionally,
		 * since in this case the channel is being disconnected we
		 * don't need to put the msg_slot back on the free list.
		 */
		if (cmpxchg(&msg_slot->func, func, NULL) != func) {
			ret = xpSuccess;
			goto out_1;
		}

		msg_slot->key = NULL;
		atomic_dec(&ch->n_to_notify);
	}
	xpc_free_msg_slot_uv(ch, msg_slot);
out_1:
	xpc_msgqueue_deref(ch);
	return ret;
}

/*
 * Tell the callers of xpc_send_notify() that the status of their payloads
 * is unknown because the channel is now disconnecting.
 *
 * We don't worry about putting these msg_slots on the free list since the
 * msg_slots themselves are about to be kfree'd.
 */
static void
xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry;

	DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));

	for (entry = 0; entry < ch->local_nentries; entry++) {

		if (atomic_read(&ch->n_to_notify) == 0)
			break;

		msg_slot = &ch->sn.uv.send_msg_slots[entry];
		if (msg_slot->func != NULL)
			xpc_notify_sender_uv(ch, msg_slot, ch->reason);
	}
}

/*
 * Get the next deliverable message's payload.
 */
static void *
xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
{
	struct xpc_fifo_entry_uv *entry;
	struct xpc_notify_mq_msg_uv *msg;
	void *payload = NULL;

	if (!(ch->flags & XPC_C_DISCONNECTING)) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
		if (entry != NULL) {
			msg = container_of(entry, struct xpc_notify_mq_msg_uv,
					   hdr.u.next);
			payload = &msg->payload;
		}
	}
	return payload;
}

static void
xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
{
	struct xpc_notify_mq_msg_uv *msg;
	enum xp_retval ret;

	msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);

	/* return an ACK to the sender of this message */
	msg->hdr.partid = xp_partition_id;
	msg->hdr.size = 0;	/* size of zero indicates this is an ACK */

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       sizeof(struct xpc_notify_mq_msghdr_uv));
	if (ret != xpSuccess)
		XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
}

static struct xpc_arch_operations xpc_arch_ops_uv = {
	.setup_partitions = xpc_setup_partitions_uv,
	.teardown_partitions = xpc_teardown_partitions_uv,
	.process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
	.get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv,
	.setup_rsvd_page = xpc_setup_rsvd_page_uv,

	.allow_hb = xpc_allow_hb_uv,
	.disallow_hb = xpc_disallow_hb_uv,
	.disallow_all_hbs = xpc_disallow_all_hbs_uv,
	.increment_heartbeat = xpc_increment_heartbeat_uv,
	.offline_heartbeat = xpc_offline_heartbeat_uv,
	.online_heartbeat = xpc_online_heartbeat_uv,
	.heartbeat_init = xpc_heartbeat_init_uv,
	.heartbeat_exit = xpc_heartbeat_exit_uv,
	.get_remote_heartbeat = xpc_get_remote_heartbeat_uv,

	.request_partition_activation =
		xpc_request_partition_activation_uv,
	.request_partition_reactivation =
		xpc_request_partition_reactivation_uv,
	.request_partition_deactivation =
		xpc_request_partition_deactivation_uv,
	.cancel_partition_deactivation_request =
		xpc_cancel_partition_deactivation_request_uv,

	.setup_ch_structures = xpc_setup_ch_structures_uv,
	.teardown_ch_structures = xpc_teardown_ch_structures_uv,

	.make_first_contact = xpc_make_first_contact_uv,

	.get_chctl_all_flags = xpc_get_chctl_all_flags_uv,
	.send_chctl_closerequest = xpc_send_chctl_closerequest_uv,
	.send_chctl_closereply = xpc_send_chctl_closereply_uv,
	.send_chctl_openrequest = xpc_send_chctl_openrequest_uv,
	.send_chctl_openreply = xpc_send_chctl_openreply_uv,
	.send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv,
	.process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv,

	.save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv,

	.setup_msg_structures = xpc_setup_msg_structures_uv,
	.teardown_msg_structures = xpc_teardown_msg_structures_uv,

	.indicate_partition_engaged = xpc_indicate_partition_engaged_uv,
	.indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv,
	.assume_partition_disengaged = xpc_assume_partition_disengaged_uv,
	.partition_engaged = xpc_partition_engaged_uv,
	.any_partition_engaged = xpc_any_partition_engaged_uv,

	.n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv,
	.send_payload = xpc_send_payload_uv,
	.get_deliverable_payload = xpc_get_deliverable_payload_uv,
	.received_payload = xpc_received_payload_uv,
	.notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
};
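
/*
 * Install the uv arch operations and create the activate and notify GRU
 * message queues.  Fails early if the notify message header would not fit in
 * the space XPC reserves for message headers.
 */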
int
xpc_init_uv(void)
{
	xpc_arch_ops = xpc_arch_ops_uv;

	if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
		dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
			XPC_MSG_HDR_MAX_SIZE);
		return -E2BIG;
	}

	xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
						  XPC_ACTIVATE_IRQ_NAME,
						  xpc_handle_activate_IRQ_uv);
	if (IS_ERR(xpc_activate_mq_uv))
		return PTR_ERR(xpc_activate_mq_uv);

	xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
						XPC_NOTIFY_IRQ_NAME,
						xpc_handle_notify_IRQ_uv);
	if (IS_ERR(xpc_notify_mq_uv)) {
		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
		return PTR_ERR(xpc_notify_mq_uv);
	}

	return 0;
}

void
xpc_exit_uv(void)
{
	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}