xpc_uv.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) uv-based functions.
 *
 *     Architecture specific implementation of common functions.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
#include <asm/uv/uv_irq.h>
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#endif
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
struct uv_IO_APIC_route_entry {
	__u64	vector		:  8,
		delivery_mode	:  3,
		dest_mode	:  1,
		delivery_status	:  1,
		polarity	:  1,
		__reserved_1	:  1,
		trigger		:  1,
		mask		:  1,
		__reserved_2	: 15,
		dest		: 32;
};
#endif

static atomic64_t xpc_heartbeat_uv;
static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);

#define XPC_ACTIVATE_MSG_SIZE_UV	(1 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_ACTIVATE_IRQ_NAME		"xpc_activate"

#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME		"xpc_notify"

static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
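
/*
 * Initialize the uv-specific part of each xpc_partitions[] entry: the
 * cached activate GRU mq descriptor mutex, the flags lock, and the
 * remote partition's activation state (assumed inactive to start).
 */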
static int
xpc_setup_partitions_sn_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex);
		spin_lock_init(&part_uv->flags_lock);
		part_uv->remote_act_state = XPC_P_AS_INACTIVE;
	}
	return 0;
}

static void
xpc_teardown_partitions_sn_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;
	unsigned long irq_flags;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		if (part_uv->cached_activate_gru_mq_desc != NULL) {
			mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			kfree(part_uv->cached_activate_gru_mq_desc);
			part_uv->cached_activate_gru_mq_desc = NULL;
			mutex_unlock(&part_uv->
				     cached_activate_gru_mq_desc_mutex);
		}
	}
}
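
/*
 * Arrange for the GRU message queue's arrival MMR to raise an interrupt
 * on the given cpu.  On x86_64 this goes through uv_setup_irq(); on ia64
 * the two XPC vectors (activate/notify) are fixed SGI vectors.
 */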
static int
xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
{
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset);
	if (mq->irq < 0) {
		dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
			-mq->irq);
		return mq->irq;
	}

	mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_ACTIVATE;
	else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_NOTIFY;
	else
		return -EINVAL;

	mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
#else
#error not a supported configuration
#endif

	return 0;
}

static void
xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
{
#if defined CONFIG_X86_64
	uv_teardown_irq(mq->irq, mq->mmr_blade, mq->mmr_offset);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode;
	unsigned long mmr_value;

	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
	mmr_value = 1UL << 16;

	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
#else
#error not a supported configuration
#endif
}

static int
xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address),
					 mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
			"ret=%d\n", ret);
		return ret;
	}
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	ret = sn_mq_watchlist_alloc(mq->mmr_blade, (void *)uv_gpa(mq->address),
				    mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
			ret);
		return -EBUSY;
	}
#else
#error not a supported configuration
#endif

	mq->watchlist_num = ret;
	return 0;
}

static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
	BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
	BUG_ON(ret != SALRET_OK);
#else
#error not a supported configuration
#endif
}
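
/*
 * Allocate and initialize a GRU message queue: node-local pages for the
 * queue itself, a BIOS/SAL watchlist entry so an interrupt is raised when
 * a message arrives, the irq handler, and the GRU descriptor.  The queue's
 * memory protections are then opened up so other partitions can write to
 * it.
 */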
static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
		     irq_handler_t irq_handler)
{
	enum xp_retval xp_ret;
	int ret;
	int nid;
	int pg_order;
	struct page *page;
	struct xpc_gru_mq_uv *mq;
	struct uv_IO_APIC_route_entry *mmr_value;

	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
	if (mq == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a xpc_gru_mq_uv structure\n");
		ret = -ENOMEM;
		goto out_0;
	}

	mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
				  GFP_KERNEL);
	if (mq->gru_mq_desc == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a gru_message_queue_desc structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	pg_order = get_order(mq_size);
	mq->order = pg_order + PAGE_SHIFT;
	mq_size = 1UL << mq->order;

	mq->mmr_blade = uv_cpu_to_blade_id(cpu);

	nid = cpu_to_node(cpu);
	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				pg_order);
	if (page == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
		ret = -ENOMEM;
		goto out_2;
	}
	mq->address = page_address(page);

	/* enable generation of irq when GRU mq operation occurs to this mq */
	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
	if (ret != 0)
		goto out_3;

	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
	if (ret != 0)
		goto out_4;

	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			mq->irq, -ret);
		goto out_5;
	}

	mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
	ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
				       nid, mmr_value->vector, mmr_value->dest);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		ret = -EINVAL;
		goto out_6;
	}

	/* allow other partitions to access this GRU mq */
	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
	if (xp_ret != xpSuccess) {
		ret = -EACCES;
		goto out_6;
	}

	return mq;

	/* something went wrong */
out_6:
	free_irq(mq->irq, NULL);
out_5:
	xpc_release_gru_mq_irq_uv(mq);
out_4:
	xpc_gru_mq_watchlist_free_uv(mq);
out_3:
	free_pages((unsigned long)mq->address, pg_order);
out_2:
	kfree(mq->gru_mq_desc);
out_1:
	kfree(mq);
out_0:
	return ERR_PTR(ret);
}

static void
xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
	unsigned int mq_size;
	int pg_order;
	int ret;

	/* disallow other partitions to access GRU mq */
	mq_size = 1UL << mq->order;
	ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
	BUG_ON(ret != xpSuccess);

	/* unregister irq handler and release mq irq/vector mapping */
	free_irq(mq->irq, NULL);
	xpc_release_gru_mq_irq_uv(mq);

	/* disable generation of irq when GRU mq op occurs to this mq */
	xpc_gru_mq_watchlist_free_uv(mq);

	pg_order = mq->order - PAGE_SHIFT;
	free_pages((unsigned long)mq->address, pg_order);

	kfree(mq);
}
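
/*
 * Send a message into a remote partition's GRU message queue, retrying
 * when the queue is full (after a short sleep) or the link is congested;
 * any other GRU error fails the send.
 */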
static enum xp_retval
xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg,
		 size_t msg_size)
{
	enum xp_retval xp_ret;
	int ret;

	while (1) {
		ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);
		if (ret == MQE_OK) {
			xp_ret = xpSuccess;
			break;
		}

		if (ret == MQE_QUEUE_FULL) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_QUEUE_FULL\n");
			/* !!! handle QLimit reached; delay & try again */
			/* ??? Do we add a limit to the number of retries? */
			(void)msleep_interruptible(10);
		} else if (ret == MQE_CONGESTION) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_CONGESTION\n");
			/* !!! handle LB Overflow; simply try again */
			/* ??? Do we add a limit to the number of retries? */
		} else {
			/* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
			dev_err(xpc_chan, "gru_send_message_gpa() returned "
				"error=%d\n", ret);
			xp_ret = xpGruSendMqError;
			break;
		}
	}
	return xp_ret;
}
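
/*
 * Act on the per-partition activation state change requests (activate,
 * reactivate, deactivate) queued by the activate IRQ handler, consuming
 * xpc_activate_IRQ_rcvd as we go.
 */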
static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	u8 act_state_req;

	DBUG_ON(xpc_activate_IRQ_rcvd == 0);

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part = &xpc_partitions[partid];

		if (part->sn.uv.act_state_req == 0)
			continue;

		xpc_activate_IRQ_rcvd--;
		BUG_ON(xpc_activate_IRQ_rcvd < 0);

		act_state_req = part->sn.uv.act_state_req;
		part->sn.uv.act_state_req = 0;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else if (part->act_state == XPC_P_AS_DEACTIVATING)
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
			XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);

		} else {
			BUG();
		}

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (xpc_activate_IRQ_rcvd == 0)
			break;
	}
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
}
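
/*
 * Handle a single message pulled off the activate message queue:
 * heartbeat updates, activate/deactivate requests, channel control
 * (open/close) requests and replies, and engaged/disengaged marks.
 * *wakeup_hb_checker is bumped whenever the heartbeat checker needs to
 * process a newly queued act_state_req.
 */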
static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
			      struct xpc_activate_mq_msghdr_uv *msg_hdr,
			      int *wakeup_hb_checker)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_openclose_args *args;

	part_uv->remote_act_state = msg_hdr->act_state;

	switch (msg_hdr->type) {
	case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
		/* syncing of remote_act_state was just done above */
		break;

	case XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV: {
		struct xpc_activate_mq_msg_heartbeat_req_uv *msg;

		msg = container_of(msg_hdr,
				   struct xpc_activate_mq_msg_heartbeat_req_uv,
				   hdr);
		part_uv->heartbeat = msg->heartbeat;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV: {
		struct xpc_activate_mq_msg_heartbeat_req_uv *msg;

		msg = container_of(msg_hdr,
				   struct xpc_activate_mq_msg_heartbeat_req_uv,
				   hdr);
		part_uv->heartbeat = msg->heartbeat;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_HEARTBEAT_OFFLINE_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV: {
		struct xpc_activate_mq_msg_heartbeat_req_uv *msg;

		msg = container_of(msg_hdr,
				   struct xpc_activate_mq_msg_heartbeat_req_uv,
				   hdr);
		part_uv->heartbeat = msg->heartbeat;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_HEARTBEAT_OFFLINE_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_activate_req_uv *msg;

		/*
		 * ??? Do we deal here with ts_jiffies being different
		 * ??? if act_state != XPC_P_AS_INACTIVE instead of
		 * ??? below?
		 */
		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_activate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
		part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
		part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;

		if (msg->activate_gru_mq_desc_gpa !=
		    part_uv->activate_gru_mq_desc_gpa) {
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			part_uv->activate_gru_mq_desc_gpa =
			    msg->activate_gru_mq_desc_gpa;
		}
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_deactivate_req_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_deactivate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = msg->reason;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closerequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->reason = msg->reason;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_closereply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closereply_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openrequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->entry_size = msg->entry_size;
		args->local_nentries = msg->local_nentries;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_openreply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->remote_nentries = msg->remote_nentries;
		args->local_nentries = msg->local_nentries;
		args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	default:
		dev_err(xpc_part, "received unknown activate_mq msg type=%d "
			"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadMsgType;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}

	if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
	    part->remote_rp_ts_jiffies != 0) {
		/*
		 * ??? Does what we do here need to be sensitive to
		 * ??? act_state or remote_act_state?
		 */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
	}
}

static irqreturn_t
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr;
	short partid;
	struct xpc_partition *part;
	int wakeup_hb_checker = 0;
	int part_referenced;

	while (1) {
		msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc);
		if (msg_hdr == NULL)
			break;

		partid = msg_hdr->partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
				"received invalid partid=0x%x in message\n",
				partid);
		} else {
			part = &xpc_partitions[partid];

			part_referenced = xpc_part_ref(part);
			xpc_handle_activate_mq_msg_uv(part, msg_hdr,
						      &wakeup_hb_checker);
			if (part_referenced)
				xpc_part_deref(part);
		}

		gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr);
	}

	if (wakeup_hb_checker)
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	return IRQ_HANDLED;
}

static enum xp_retval
xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
				unsigned long gru_mq_desc_gpa)
{
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
			       sizeof(struct gru_message_queue_desc));
	if (ret == xpSuccess)
		gru_mq_desc->mq = NULL;

	return ret;
}
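
/*
 * Build an activate message header and send the message to a remote
 * partition's activate message queue.  The remote queue's GRU descriptor
 * is fetched and cached on first use, and re-fetched if it has since been
 * invalidated (e.g. because the remote partition rebuilt its queue).
 */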
static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
			 int msg_type)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct gru_message_queue_desc *gru_mq_desc;
	unsigned long irq_flags;
	enum xp_retval ret;

	DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);

	msg_hdr->type = msg_type;
	msg_hdr->partid = xp_partition_id;
	msg_hdr->act_state = part->act_state;
	msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;

	mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
again:
	if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
		gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
		if (gru_mq_desc == NULL) {
			gru_mq_desc = kmalloc(sizeof(struct
					      gru_message_queue_desc),
					      GFP_KERNEL);
			if (gru_mq_desc == NULL) {
				ret = xpNoMemory;
				goto done;
			}
			part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
		}

		ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
						      part_uv->
						      activate_gru_mq_desc_gpa);
		if (ret != xpSuccess)
			goto done;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
	}

	/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
	ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
			       msg_size);
	if (ret != xpSuccess) {
		smp_rmb();	/* ensure a fresh copy of part_uv->flags */
		if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
			goto again;
	}
done:
	mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
	return ret;
}

static void
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
			      size_t msg_size, int msg_type)
{
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess))
		XPC_DEACTIVATE_PARTITION(part, ret);
}

static void
xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
			    void *msg, size_t msg_size, int msg_type)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess)) {
		if (irq_flags != NULL)
			spin_unlock_irqrestore(&ch->lock, *irq_flags);

		XPC_DEACTIVATE_PARTITION(part, ret);

		if (irq_flags != NULL)
			spin_lock_irqsave(&ch->lock, *irq_flags);
	}
}

static void
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;

	/*
	 * !!! Make our side think that the remote partition sent an activate
	 * !!! message our way by doing what the activate IRQ handler would
	 * !!! do had one really been sent.
	 */
	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	if (part_uv->act_state_req == 0)
		xpc_activate_IRQ_rcvd++;
	part_uv->act_state_req = act_state_req;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
}

static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
				  size_t *len)
{
	s64 status;
	enum xp_retval ret;

#if defined CONFIG_X86_64
	status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
					  (u64 *)len);
	if (status == BIOS_STATUS_SUCCESS)
		ret = xpSuccess;
	else if (status == BIOS_STATUS_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpBiosError;

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
	if (status == SALRET_OK)
		ret = xpSuccess;
	else if (status == SALRET_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpSalError;

#else
#error not a supported configuration
#endif

	return ret;
}

static int
xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp)
{
	rp->sn.activate_gru_mq_desc_gpa =
	    uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
	return 0;
}

static void
xpc_send_heartbeat_uv(int msg_type)
{
	short partid;
	struct xpc_partition *part;
	struct xpc_activate_mq_msg_heartbeat_req_uv msg;

	/*
	 * !!! On uv we're broadcasting a heartbeat message every 5 seconds.
	 * !!! Whereas on sn2 we're bte_copy'ng the heartbeat info every 20
	 * !!! seconds. This is an increase in numalink traffic.
	 * ??? Is this good?
	 */

	msg.heartbeat = atomic64_inc_return(&xpc_heartbeat_uv);

	partid = find_first_bit(xpc_heartbeating_to_mask_uv,
				XP_MAX_NPARTITIONS_UV);

	while (partid < XP_MAX_NPARTITIONS_UV) {
		part = &xpc_partitions[partid];

		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					      msg_type);

		partid = find_next_bit(xpc_heartbeating_to_mask_uv,
				       XP_MAX_NPARTITIONS_UV, partid + 1);
	}
}

static void
xpc_increment_heartbeat_uv(void)
{
	xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV);
}

static void
xpc_offline_heartbeat_uv(void)
{
	xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV);
}

static void
xpc_online_heartbeat_uv(void)
{
	xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV);
}

static void
xpc_heartbeat_init_uv(void)
{
	atomic64_set(&xpc_heartbeat_uv, 0);
	bitmap_zero(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);
	xpc_heartbeating_to_mask = &xpc_heartbeating_to_mask_uv[0];
}

static void
xpc_heartbeat_exit_uv(void)
{
	xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV);
}

static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	enum xp_retval ret = xpNoHeartbeat;

	if (part_uv->remote_act_state != XPC_P_AS_INACTIVE &&
	    part_uv->remote_act_state != XPC_P_AS_DEACTIVATING) {

		if (part_uv->heartbeat != part->last_heartbeat ||
		    (part_uv->flags & XPC_P_HEARTBEAT_OFFLINE_UV)) {

			part->last_heartbeat = part_uv->heartbeat;
			ret = xpSuccess;
		}
	}
	return ret;
}

static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
				    unsigned long remote_rp_gpa, int nasid)
{
	short partid = remote_rp->SAL_partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_activate_mq_msg_activate_req_uv msg;

	part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
	part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
	part->sn.uv.activate_gru_mq_desc_gpa =
	    remote_rp->sn.activate_gru_mq_desc_gpa;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
		msg.rp_gpa = uv_gpa(xpc_rsvd_page);
		msg.activate_gru_mq_desc_gpa =
		    xpc_rsvd_page->sn.activate_gru_mq_desc_gpa;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
	}

	if (part->act_state == XPC_P_AS_INACTIVE)
		xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_reactivation_uv(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_deactivation_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_deactivate_req_uv msg;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
	    part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {

		msg.reason = part->reason;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
	}
}

static void
xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}
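
/*
 * Simple spinlock-protected singly-linked FIFO used for the per-channel
 * free send-msg-slot list and the deliverable received-message list.
 */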
static void
xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
{
	head->first = NULL;
	head->last = NULL;
	spin_lock_init(&head->lock);
	head->n_entries = 0;
}

static void *
xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
{
	unsigned long irq_flags;
	struct xpc_fifo_entry_uv *first;

	spin_lock_irqsave(&head->lock, irq_flags);
	first = head->first;
	if (head->first != NULL) {
		head->first = first->next;
		if (head->first == NULL)
			head->last = NULL;

		head->n_entries--;
		BUG_ON(head->n_entries < 0);

		first->next = NULL;
	}
	spin_unlock_irqrestore(&head->lock, irq_flags);
	return first;
}

static void
xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
		      struct xpc_fifo_entry_uv *last)
{
	unsigned long irq_flags;

	last->next = NULL;
	spin_lock_irqsave(&head->lock, irq_flags);
	if (head->last != NULL)
		head->last->next = last;
	else
		head->first = last;
	head->last = last;
	head->n_entries++;
	spin_unlock_irqrestore(&head->lock, irq_flags);
}

static int
xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
{
	return head->n_entries;
}

/*
 * Setup the channel structures that are uv specific.
 */
static enum xp_retval
xpc_setup_ch_structures_sn_uv(struct xpc_partition *part)
{
	struct xpc_channel_uv *ch_uv;
	int ch_number;

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch_uv = &part->channels[ch_number].sn.uv;

		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
	}

	return xpSuccess;
}

/*
 * Teardown the channel structures that are uv specific.
 */
static void
xpc_teardown_ch_structures_sn_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}

static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	/*
	 * We send a sync msg to get the remote partition's remote_act_state
	 * updated to our current act_state which at this point should
	 * be XPC_P_AS_ACTIVATING.
	 */
	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);

	while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) {

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}

static u64
xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
	return chctl.all_flags;
}

static enum xp_retval
xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_send_msg_slot_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
		nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
		ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->send_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = &ch_uv->send_msg_slots[entry];

			msg_slot->msg_slot_number = entry;
			xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
					      &msg_slot->next);
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries)
			ch->local_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

static enum xp_retval
xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
		nbytes = nentries * ch->entry_size;
		ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->recv_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = ch_uv->recv_msg_slots +
			    entry * ch->entry_size;

			msg_slot->hdr.msg_slot_number = entry;
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries)
			ch->remote_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

/*
 * Allocate msg_slots associated with the channel.
 */
static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
	enum xp_retval ret;
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch->flags & XPC_C_SETUP);

	ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct
						   gru_message_queue_desc),
						   GFP_KERNEL);
	if (ch_uv->cached_notify_gru_mq_desc == NULL)
		return xpNoMemory;

	ret = xpc_allocate_send_msg_slot_uv(ch);
	if (ret == xpSuccess) {

		ret = xpc_allocate_recv_msg_slot_uv(ch);
		if (ret != xpSuccess) {
			kfree(ch_uv->send_msg_slots);
			xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		}
	}
	return ret;
}

/*
 * Free up msg_slots and clear other stuff that was set up for the
 * specified channel.
 */
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(!spin_is_locked(&ch->lock));

	kfree(ch_uv->cached_notify_gru_mq_desc);
	ch_uv->cached_notify_gru_mq_desc = NULL;

	if (ch->flags & XPC_C_SETUP) {
		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		kfree(ch_uv->send_msg_slots);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
		kfree(ch_uv->recv_msg_slots);
	}
}

static void
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closerequest_uv msg;

	msg.ch_number = ch->number;
	msg.reason = ch->reason;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
}

static void
xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closereply_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
}

static void
xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openrequest_uv msg;

	msg.ch_number = ch->number;
	msg.entry_size = ch->entry_size;
	msg.local_nentries = ch->local_nentries;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
}

static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openreply_uv msg;

	msg.ch_number = ch->number;
	msg.local_nentries = ch->local_nentries;
	msg.remote_nentries = ch->remote_nentries;
	msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}

static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	xpc_wakeup_channel_mgr(part);
}

static enum xp_retval
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
			       unsigned long gru_mq_desc_gpa)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
	return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
					       gru_mq_desc_gpa);
}

static void
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
}

static void
xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
}

static void
xpc_assume_partition_disengaged_uv(short partid)
{
	struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
	unsigned long irq_flags;

	spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
	part_uv->flags &= ~XPC_P_ENGAGED_UV;
	spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}

static int
xpc_partition_engaged_uv(short partid)
{
	return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
}

static int
xpc_any_partition_engaged_uv(void)
{
	struct xpc_partition_uv *part_uv;
	short partid;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
			return 1;
	}
	return 0;
}
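
/*
 * Grab a free send msg_slot for the channel, waiting for one to become
 * available unless the caller specified XPC_NOWAIT.
 */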
static enum xp_retval
xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
			 struct xpc_send_msg_slot_uv **address_of_msg_slot)
{
	enum xp_retval ret;
	struct xpc_send_msg_slot_uv *msg_slot;
	struct xpc_fifo_entry_uv *entry;

	while (1) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
		if (entry != NULL)
			break;

		if (flags & XPC_NOWAIT)
			return xpNoWait;

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpInterrupted && ret != xpTimeout)
			return ret;
	}

	msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
	*address_of_msg_slot = msg_slot;
	return xpSuccess;
}

static void
xpc_free_msg_slot_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot)
{
	xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);

	/* wakeup anyone waiting for a free msg slot */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);
}

static void
xpc_notify_sender_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot,
		     enum xp_retval reason)
{
	xpc_notify_func func = msg_slot->func;

	if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {

		atomic_dec(&ch->n_to_notify);

		dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);

		func(reason, ch->partid, ch->number, msg_slot->key);

		dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);
	}
}

static void
xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry = msg->hdr.msg_slot_number % ch->local_nentries;

	msg_slot = &ch->sn.uv.send_msg_slots[entry];

	BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
	msg_slot->msg_slot_number += ch->local_nentries;

	if (msg_slot->func != NULL)
		xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);

	xpc_free_msg_slot_uv(ch, msg_slot);
}
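
/*
 * Handle one message arriving on the notify message queue: either an ACK
 * for a message we previously sent (hdr.size == 0) or a new payload that
 * gets copied into the channel's recv_msg_slots and queued for delivery.
 */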
static void
xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_channel *ch;
	struct xpc_channel_uv *ch_uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int ch_number = msg->hdr.ch_number;

	if (unlikely(ch_number >= part->nchannels)) {
		dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
			"channel number=0x%x in message from partid=%d\n",
			ch_number, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadChannelNumber;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		wake_up_interruptible(&xpc_activate_IRQ_wq);
		return;
	}

	ch = &part->channels[ch_number];
	xpc_msgqueue_ref(ch);

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/* see if we're really dealing with an ACK for a previously sent msg */
	if (msg->hdr.size == 0) {
		xpc_handle_notify_mq_ack_uv(ch, msg);
		xpc_msgqueue_deref(ch);
		return;
	}

	/* we're dealing with a normal message sent via the notify_mq */
	ch_uv = &ch->sn.uv;

	msg_slot = ch_uv->recv_msg_slots +
	    (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;

	BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number);
	BUG_ON(msg_slot->hdr.size != 0);

	memcpy(msg_slot, msg, msg->hdr.size);

	xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);

	if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
		/*
		 * If there is an existing idle kthread get it to deliver
		 * the payload, otherwise we'll have to get the channel mgr
		 * for this partition to create a kthread to do the delivery.
		 */
		if (atomic_read(&ch->kthreads_idle) > 0)
			wake_up_nr(&ch->idle_wq, 1);
		else
			xpc_send_chctl_local_msgrequest_uv(part, ch->number);
	}
	xpc_msgqueue_deref(ch);
}

static irqreturn_t
xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_notify_mq_msg_uv *msg;
	short partid;
	struct xpc_partition *part;

	while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) !=
	       NULL) {

		partid = msg->hdr.partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
				"invalid partid=0x%x in message\n", partid);
		} else {
			part = &xpc_partitions[partid];

			if (xpc_part_ref(part)) {
				xpc_handle_notify_mq_msg_uv(part, msg);
				xpc_part_deref(part);
			}
		}

		gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg);
	}

	return IRQ_HANDLED;
}

static int
xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
{
	return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
}

static void
xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	int ndeliverable_payloads;

	xpc_msgqueue_ref(ch);

	ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);

	if (ndeliverable_payloads > 0 &&
	    (ch->flags & XPC_C_CONNECTED) &&
	    (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {

		xpc_activate_kthreads(ch, ndeliverable_payloads);
	}

	xpc_msgqueue_deref(ch);
}
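
/*
 * Copy the caller's payload into a notify mq message and send it to the
 * remote partition, reserving a send msg_slot for the in-flight message.
 * If a notify func was supplied it is recorded in the msg_slot and will
 * be called when the remote side ACKs the message (or when the channel
 * disconnects).
 */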
static enum xp_retval
xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
		    u16 payload_size, u8 notify_type, xpc_notify_func func,
		    void *key)
{
	enum xp_retval ret = xpSuccess;
	struct xpc_send_msg_slot_uv *msg_slot = NULL;
	struct xpc_notify_mq_msg_uv *msg;
	u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
	size_t msg_size;

	DBUG_ON(notify_type != XPC_N_CALL);

	msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
	if (msg_size > ch->entry_size)
		return xpPayloadTooBig;

	xpc_msgqueue_ref(ch);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		goto out_1;
	}
	if (!(ch->flags & XPC_C_CONNECTED)) {
		ret = xpNotConnected;
		goto out_1;
	}

	ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
	if (ret != xpSuccess)
		goto out_1;

	if (func != NULL) {
		atomic_inc(&ch->n_to_notify);

		msg_slot->key = key;
		smp_wmb(); /* a non-NULL func must hit memory after the key */
		msg_slot->func = func;

		if (ch->flags & XPC_C_DISCONNECTING) {
			ret = ch->reason;
			goto out_2;
		}
	}

	msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
	msg->hdr.partid = xp_partition_id;
	msg->hdr.ch_number = ch->number;
	msg->hdr.size = msg_size;
	msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
	memcpy(&msg->payload, payload, payload_size);

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       msg_size);
	if (ret == xpSuccess)
		goto out_1;

	XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);

out_2:
	if (func != NULL) {
		/*
		 * Try to NULL the msg_slot's func field. If we fail, then
		 * xpc_notify_senders_of_disconnect_uv() beat us to it, in
		 * which case we need to pretend we succeeded to send the
		 * message since the user will get a callout for the
		 * disconnect error by xpc_notify_senders_of_disconnect_uv(),
		 * and returning an error here as well would confuse them.
		 * Additionally, since in this case the channel is being
		 * disconnected we don't need to put the msg_slot back on the
		 * free list.
		 */
		if (cmpxchg(&msg_slot->func, func, NULL) != func) {
			ret = xpSuccess;
			goto out_1;
		}

		msg_slot->key = NULL;
		atomic_dec(&ch->n_to_notify);
	}
	xpc_free_msg_slot_uv(ch, msg_slot);

out_1:
	xpc_msgqueue_deref(ch);
	return ret;
}

/*
 * Tell the callers of xpc_send_notify() that the status of their payloads
 * is unknown because the channel is now disconnecting.
 *
 * We don't worry about putting these msg_slots on the free list since the
 * msg_slots themselves are about to be kfree'd.
 */
static void
xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry;

	DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));

	for (entry = 0; entry < ch->local_nentries; entry++) {

		if (atomic_read(&ch->n_to_notify) == 0)
			break;

		msg_slot = &ch->sn.uv.send_msg_slots[entry];
		if (msg_slot->func != NULL)
			xpc_notify_sender_uv(ch, msg_slot, ch->reason);
	}
}

/*
 * Get the next deliverable message's payload.
 */
static void *
xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
{
	struct xpc_fifo_entry_uv *entry;
	struct xpc_notify_mq_msg_uv *msg;
	void *payload = NULL;

	if (!(ch->flags & XPC_C_DISCONNECTING)) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
		if (entry != NULL) {
			msg = container_of(entry, struct xpc_notify_mq_msg_uv,
					   hdr.u.next);
			payload = &msg->payload;
		}
	}
	return payload;
}

static void
xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
{
	struct xpc_notify_mq_msg_uv *msg;
	enum xp_retval ret;

	msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);

	/* return an ACK to the sender of this message */

	msg->hdr.partid = xp_partition_id;
	msg->hdr.size = 0;	/* size of zero indicates this is an ACK */

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       sizeof(struct xpc_notify_mq_msghdr_uv));
	if (ret != xpSuccess)
		XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);

	msg->hdr.msg_slot_number += ch->remote_nentries;
}
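
/*
 * Install the uv-specific callouts used by the common XPC code and create
 * the activate and notify GRU message queues.
 */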
int
xpc_init_uv(void)
{
	xpc_setup_partitions_sn = xpc_setup_partitions_sn_uv;
	xpc_teardown_partitions_sn = xpc_teardown_partitions_sn_uv;
	xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv;
	xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv;
	xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_uv;
	xpc_increment_heartbeat = xpc_increment_heartbeat_uv;
	xpc_offline_heartbeat = xpc_offline_heartbeat_uv;
	xpc_online_heartbeat = xpc_online_heartbeat_uv;
	xpc_heartbeat_init = xpc_heartbeat_init_uv;
	xpc_heartbeat_exit = xpc_heartbeat_exit_uv;
	xpc_get_remote_heartbeat = xpc_get_remote_heartbeat_uv;

	xpc_request_partition_activation = xpc_request_partition_activation_uv;
	xpc_request_partition_reactivation =
	    xpc_request_partition_reactivation_uv;
	xpc_request_partition_deactivation =
	    xpc_request_partition_deactivation_uv;
	xpc_cancel_partition_deactivation_request =
	    xpc_cancel_partition_deactivation_request_uv;

	xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_uv;
	xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_uv;

	xpc_make_first_contact = xpc_make_first_contact_uv;

	xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_uv;
	xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_uv;
	xpc_send_chctl_closereply = xpc_send_chctl_closereply_uv;
	xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_uv;
	xpc_send_chctl_openreply = xpc_send_chctl_openreply_uv;

	xpc_save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv;

	xpc_setup_msg_structures = xpc_setup_msg_structures_uv;
	xpc_teardown_msg_structures = xpc_teardown_msg_structures_uv;

	xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_uv;
	xpc_indicate_partition_disengaged =
	    xpc_indicate_partition_disengaged_uv;
	xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_uv;
	xpc_partition_engaged = xpc_partition_engaged_uv;
	xpc_any_partition_engaged = xpc_any_partition_engaged_uv;

	xpc_n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv;
	xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv;
	xpc_send_payload = xpc_send_payload_uv;
	xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv;
	xpc_get_deliverable_payload = xpc_get_deliverable_payload_uv;
	xpc_received_payload = xpc_received_payload_uv;

	if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
		dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
			XPC_MSG_HDR_MAX_SIZE);
		return -E2BIG;
	}

	xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
						  XPC_ACTIVATE_IRQ_NAME,
						  xpc_handle_activate_IRQ_uv);
	if (IS_ERR(xpc_activate_mq_uv))
		return PTR_ERR(xpc_activate_mq_uv);

	xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
						XPC_NOTIFY_IRQ_NAME,
						xpc_handle_notify_IRQ_uv);
	if (IS_ERR(xpc_notify_mq_uv)) {
		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
		return PTR_ERR(xpc_notify_mq_uv);
	}

	return 0;
}

void
xpc_exit_uv(void)
{
	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}