xpc_uv.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) uv-based functions.
 *
 *	Architecture specific implementation of common functions.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
#include <asm/uv/uv_irq.h>
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#endif
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
struct uv_IO_APIC_route_entry {
	__u64	vector		:  8,
		delivery_mode	:  3,
		dest_mode	:  1,
		delivery_status	:  1,
		polarity	:  1,
		__reserved_1	:  1,
		trigger		:  1,
		mask		:  1,
		__reserved_2	: 15,
		dest		: 32;
};
#endif

static struct xpc_heartbeat_uv *xpc_heartbeat_uv;

#define XPC_ACTIVATE_MSG_SIZE_UV	(1 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_ACTIVATE_IRQ_NAME		"xpc_activate"

#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME		"xpc_notify"

static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;

static int
xpc_setup_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex);
		spin_lock_init(&part_uv->flags_lock);
		part_uv->remote_act_state = XPC_P_AS_INACTIVE;
	}
	return 0;
}

static void
xpc_teardown_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;
	unsigned long irq_flags;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		if (part_uv->cached_activate_gru_mq_desc != NULL) {
			mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			kfree(part_uv->cached_activate_gru_mq_desc);
			part_uv->cached_activate_gru_mq_desc = NULL;
			mutex_unlock(&part_uv->
				     cached_activate_gru_mq_desc_mutex);
		}
	}
}

static int
xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
{
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
			       UV_AFFINITY_CPU);
	if (mq->irq < 0) {
		dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
			-mq->irq);
		return mq->irq;
	}

	mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_ACTIVATE;
	else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_NOTIFY;
	else
		return -EINVAL;

	mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
#else
#error not a supported configuration
#endif

	return 0;
}

static void
xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
{
#if defined CONFIG_X86_64
	uv_teardown_irq(mq->irq);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode;
	unsigned long mmr_value;

	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
	mmr_value = 1UL << 16;

	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
#else
#error not a supported configuration
#endif
}

static int
xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

	ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
				    mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
			ret);
		return -EBUSY;
	}
#elif defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
					 mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
			"ret=%d\n", ret);
		return ret;
	}
#else
#error not a supported configuration
#endif

	mq->watchlist_num = ret;
	return 0;
}

static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != SALRET_OK);
#else
#error not a supported configuration
#endif
}
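
/*
 * Allocate and set up a GRU message queue: allocate the bookkeeping
 * structures, get node-local pages for the queue itself, enable the
 * hardware watchlist, wire up the irq, and open the queue to other
 * partitions.  The error paths unwind each step in reverse order.
 */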
static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
		     irq_handler_t irq_handler)
{
	enum xp_retval xp_ret;
	int ret;
	int nid;
	int pg_order;
	struct page *page;
	struct xpc_gru_mq_uv *mq;
	struct uv_IO_APIC_route_entry *mmr_value;

	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
	if (mq == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a xpc_gru_mq_uv structure\n");
		ret = -ENOMEM;
		goto out_0;
	}

	mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
				  GFP_KERNEL);
	if (mq->gru_mq_desc == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a gru_message_queue_desc structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	pg_order = get_order(mq_size);
	mq->order = pg_order + PAGE_SHIFT;
	mq_size = 1UL << mq->order;

	mq->mmr_blade = uv_cpu_to_blade_id(cpu);

	nid = cpu_to_node(cpu);
	page = alloc_pages_exact_node(nid,
				      GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				      pg_order);
	if (page == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
		ret = -ENOMEM;
		goto out_2;
	}
	mq->address = page_address(page);

	/* enable generation of irq when GRU mq operation occurs to this mq */
	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
	if (ret != 0)
		goto out_3;

	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
	if (ret != 0)
		goto out_4;

	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			mq->irq, -ret);
		goto out_5;
	}

	mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
	ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
				       nid, mmr_value->vector, mmr_value->dest);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		ret = -EINVAL;
		goto out_6;
	}

	/* allow other partitions to access this GRU mq */
	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
	if (xp_ret != xpSuccess) {
		ret = -EACCES;
		goto out_6;
	}

	return mq;

	/* something went wrong */
out_6:
	free_irq(mq->irq, NULL);
out_5:
	xpc_release_gru_mq_irq_uv(mq);
out_4:
	xpc_gru_mq_watchlist_free_uv(mq);
out_3:
	free_pages((unsigned long)mq->address, pg_order);
out_2:
	kfree(mq->gru_mq_desc);
out_1:
	kfree(mq);
out_0:
	return ERR_PTR(ret);
}

static void
xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
	unsigned int mq_size;
	int pg_order;
	int ret;

	/* disallow other partitions to access GRU mq */
	mq_size = 1UL << mq->order;
	ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
	BUG_ON(ret != xpSuccess);

	/* unregister irq handler and release mq irq/vector mapping */
	free_irq(mq->irq, NULL);
	xpc_release_gru_mq_irq_uv(mq);

	/* disable generation of irq when GRU mq op occurs to this mq */
	xpc_gru_mq_watchlist_free_uv(mq);

	pg_order = mq->order - PAGE_SHIFT;
	free_pages((unsigned long)mq->address, pg_order);

	kfree(mq);
}
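
/*
 * Send a message to a GRU message queue, retrying on the two transient
 * failures (queue full, congestion from LB overflow) and giving up only
 * on a hard error.
 */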
static enum xp_retval
xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg,
		 size_t msg_size)
{
	enum xp_retval xp_ret;
	int ret;

	while (1) {
		ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);
		if (ret == MQE_OK) {
			xp_ret = xpSuccess;
			break;
		}

		if (ret == MQE_QUEUE_FULL) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_QUEUE_FULL\n");
			/* !!! handle QLimit reached; delay & try again */
			/* ??? Do we add a limit to the number of retries? */
			(void)msleep_interruptible(10);
		} else if (ret == MQE_CONGESTION) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_CONGESTION\n");
			/* !!! handle LB Overflow; simply try again */
			/* ??? Do we add a limit to the number of retries? */
		} else {
			/* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
			dev_err(xpc_chan, "gru_send_message_gpa() returned "
				"error=%d\n", ret);
			xp_ret = xpGruSendMqError;
			break;
		}
	}
	return xp_ret;
}
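
/*
 * For each partition with a pending act_state_req, clear the request and
 * carry out the requested activate/reactivate/deactivate transition.
 * Stops early once xpc_activate_IRQ_rcvd drains to zero.
 */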
static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	u8 act_state_req;

	DBUG_ON(xpc_activate_IRQ_rcvd == 0);

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part = &xpc_partitions[partid];

		if (part->sn.uv.act_state_req == 0)
			continue;

		xpc_activate_IRQ_rcvd--;
		BUG_ON(xpc_activate_IRQ_rcvd < 0);

		act_state_req = part->sn.uv.act_state_req;
		part->sn.uv.act_state_req = 0;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else if (part->act_state == XPC_P_AS_DEACTIVATING)
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
			XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);

		} else {
			BUG();
		}

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (xpc_activate_IRQ_rcvd == 0)
			break;
	}
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
}
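
/*
 * Dispatch one message that arrived on the activate message queue.  Most
 * message types record the request or set the appropriate chctl flag; an
 * unknown type gets the remote partition deactivated with xpBadMsgType.
 */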
static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
			      struct xpc_activate_mq_msghdr_uv *msg_hdr,
			      int *wakeup_hb_checker)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_openclose_args *args;

	part_uv->remote_act_state = msg_hdr->act_state;

	switch (msg_hdr->type) {
	case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
		/* syncing of remote_act_state was just done above */
		break;

	case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_activate_req_uv *msg;

		/*
		 * ??? Do we deal here with ts_jiffies being different
		 * ??? if act_state != XPC_P_AS_INACTIVE instead of
		 * ??? below?
		 */
		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_activate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
		part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
		part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
		part_uv->heartbeat_gpa = msg->heartbeat_gpa;

		if (msg->activate_gru_mq_desc_gpa !=
		    part_uv->activate_gru_mq_desc_gpa) {
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			part_uv->activate_gru_mq_desc_gpa =
			    msg->activate_gru_mq_desc_gpa;
		}
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_deactivate_req_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_deactivate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = msg->reason;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closerequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->reason = msg->reason;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_closereply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closereply_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openrequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->entry_size = msg->entry_size;
		args->local_nentries = msg->local_nentries;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_openreply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->remote_nentries = msg->remote_nentries;
		args->local_nentries = msg->local_nentries;
		args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
		struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_opencomplete_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	default:
		dev_err(xpc_part, "received unknown activate_mq msg type=%d "
			"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadMsgType;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}

	if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
	    part->remote_rp_ts_jiffies != 0) {
		/*
		 * ??? Does what we do here need to be sensitive to
		 * ??? act_state or remote_act_state?
		 */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
	}
}
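
/*
 * Interrupt handler for the activate message queue.  Pulls messages off
 * the GRU mq, dispatches each to xpc_handle_activate_mq_msg_uv(), and
 * wakes the heartbeat checker if any message requested it.
 */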
static irqreturn_t
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr;
	short partid;
	struct xpc_partition *part;
	int wakeup_hb_checker = 0;
	int part_referenced;

	while (1) {
		msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc);
		if (msg_hdr == NULL)
			break;

		partid = msg_hdr->partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
				"received invalid partid=0x%x in message\n",
				partid);
		} else {
			part = &xpc_partitions[partid];

			part_referenced = xpc_part_ref(part);
			xpc_handle_activate_mq_msg_uv(part, msg_hdr,
						      &wakeup_hb_checker);
			if (part_referenced)
				xpc_part_deref(part);
		}

		gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr);
	}

	if (wakeup_hb_checker)
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	return IRQ_HANDLED;
}

static enum xp_retval
xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
				unsigned long gru_mq_desc_gpa)
{
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
			       sizeof(struct gru_message_queue_desc));
	if (ret == xpSuccess)
		gru_mq_desc->mq = NULL;

	return ret;
}
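
/*
 * Send a message to another partition's activate message queue, first
 * caching (and re-caching, if it has gone stale) a local copy of that
 * partition's GRU mq descriptor.
 */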
static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
			 int msg_type)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct gru_message_queue_desc *gru_mq_desc;
	unsigned long irq_flags;
	enum xp_retval ret;

	DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);

	msg_hdr->type = msg_type;
	msg_hdr->partid = xp_partition_id;
	msg_hdr->act_state = part->act_state;
	msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;

	mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
again:
	if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
		gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
		if (gru_mq_desc == NULL) {
			gru_mq_desc = kmalloc(sizeof(struct
					      gru_message_queue_desc),
					      GFP_KERNEL);
			if (gru_mq_desc == NULL) {
				ret = xpNoMemory;
				goto done;
			}
			part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
		}

		ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
						      part_uv->
						      activate_gru_mq_desc_gpa);
		if (ret != xpSuccess)
			goto done;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
	}

	/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
	ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
			       msg_size);
	if (ret != xpSuccess) {
		smp_rmb();	/* ensure a fresh copy of part_uv->flags */
		if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
			goto again;
	}
done:
	mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
	return ret;
}

static void
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
			      size_t msg_size, int msg_type)
{
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess))
		XPC_DEACTIVATE_PARTITION(part, ret);
}

static void
xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
			    void *msg, size_t msg_size, int msg_type)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess)) {
		if (irq_flags != NULL)
			spin_unlock_irqrestore(&ch->lock, *irq_flags);

		XPC_DEACTIVATE_PARTITION(part, ret);

		if (irq_flags != NULL)
			spin_lock_irqsave(&ch->lock, *irq_flags);
	}
}

static void
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;

	/*
	 * !!! Make our side think that the remote partition sent an activate
	 * !!! mq message our way by doing what the activate IRQ handler would
	 * !!! do had one really been sent.
	 */
	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	if (part_uv->act_state_req == 0)
		xpc_activate_IRQ_rcvd++;
	part_uv->act_state_req = act_state_req;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
}

static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
				  size_t *len)
{
	s64 status;
	enum xp_retval ret;

#if defined CONFIG_X86_64
	status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
					  (u64 *)len);
	if (status == BIOS_STATUS_SUCCESS)
		ret = xpSuccess;
	else if (status == BIOS_STATUS_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpBiosError;

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
	if (status == SALRET_OK)
		ret = xpSuccess;
	else if (status == SALRET_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpSalError;

#else
#error not a supported configuration
#endif

	return ret;
}

static int
xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
{
	xpc_heartbeat_uv =
	    &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat;
	rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
	rp->sn.uv.activate_gru_mq_desc_gpa =
	    uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
	return 0;
}
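
/*
 * Heartbeat handling.  On UV the heartbeat lives in memory that remote
 * partitions read directly (see xpc_get_remote_heartbeat_uv()), so the
 * allow/disallow hooks have nothing to do and are empty stubs.
 */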
static void
xpc_allow_hb_uv(short partid)
{
}

static void
xpc_disallow_hb_uv(short partid)
{
}

static void
xpc_disallow_all_hbs_uv(void)
{
}

static void
xpc_increment_heartbeat_uv(void)
{
	xpc_heartbeat_uv->value++;
}

static void
xpc_offline_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 1;
}

static void
xpc_online_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_init_uv(void)
{
	xpc_heartbeat_uv->value = 1;
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_exit_uv(void)
{
	xpc_offline_heartbeat_uv();
}

static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat),
			       part_uv->heartbeat_gpa,
			       sizeof(struct xpc_heartbeat_uv));
	if (ret != xpSuccess)
		return ret;

	if (part_uv->cached_heartbeat.value == part->last_heartbeat &&
	    !part_uv->cached_heartbeat.offline) {
		ret = xpNoHeartbeat;
	} else {
		part->last_heartbeat = part_uv->cached_heartbeat.value;
	}

	return ret;
}

static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
				    unsigned long remote_rp_gpa, int nasid)
{
	short partid = remote_rp->SAL_partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_activate_mq_msg_activate_req_uv msg;

	part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
	part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
	part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa;
	part->sn.uv.activate_gru_mq_desc_gpa =
	    remote_rp->sn.uv.activate_gru_mq_desc_gpa;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
		msg.rp_gpa = uv_gpa(xpc_rsvd_page);
		msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa;
		msg.activate_gru_mq_desc_gpa =
		    xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					      XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
	}

	if (part->act_state == XPC_P_AS_INACTIVE)
		xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_reactivation_uv(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_deactivation_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_deactivate_req_uv msg;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
	    part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {
		msg.reason = part->reason;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					      XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
	}
}

static void
xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}
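
/*
 * A minimal singly-linked FIFO, protected by a spinlock, used below for
 * the free-send-slot and deliverable-message lists.
 */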
static void
xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
{
	head->first = NULL;
	head->last = NULL;
	spin_lock_init(&head->lock);
	head->n_entries = 0;
}

static void *
xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
{
	unsigned long irq_flags;
	struct xpc_fifo_entry_uv *first;

	spin_lock_irqsave(&head->lock, irq_flags);
	first = head->first;
	if (head->first != NULL) {
		head->first = first->next;
		if (head->first == NULL)
			head->last = NULL;

		head->n_entries--;
		BUG_ON(head->n_entries < 0);

		first->next = NULL;
	}
	spin_unlock_irqrestore(&head->lock, irq_flags);
	return first;
}

static void
xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
		      struct xpc_fifo_entry_uv *last)
{
	unsigned long irq_flags;

	last->next = NULL;
	spin_lock_irqsave(&head->lock, irq_flags);
	if (head->last != NULL)
		head->last->next = last;
	else
		head->first = last;
	head->last = last;
	head->n_entries++;
	spin_unlock_irqrestore(&head->lock, irq_flags);
}

static int
xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
{
	return head->n_entries;
}

/*
 * Setup the channel structures that are uv specific.
 */
static enum xp_retval
xpc_setup_ch_structures_uv(struct xpc_partition *part)
{
	struct xpc_channel_uv *ch_uv;
	int ch_number;

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch_uv = &part->channels[ch_number].sn.uv;

		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
	}

	return xpSuccess;
}

/*
 * Teardown the channel structures that are uv specific.
 */
static void
xpc_teardown_ch_structures_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}

static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	/*
	 * We send a sync msg to get the remote partition's remote_act_state
	 * updated to our current act_state which at this point should
	 * be XPC_P_AS_ACTIVATING.
	 */
	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);

	while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) {
		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}

static u64
xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
	return chctl.all_flags;
}
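
/*
 * Allocate the send- and receive-side message slot arrays for a channel.
 * If a full-size allocation fails, retry with progressively fewer entries
 * and shrink ch->local_nentries/ch->remote_nentries to match.
 */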
static enum xp_retval
xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_send_msg_slot_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
		nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
		ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->send_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = &ch_uv->send_msg_slots[entry];

			msg_slot->msg_slot_number = entry;
			xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
					      &msg_slot->next);
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries)
			ch->local_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

static enum xp_retval
xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
		nbytes = nentries * ch->entry_size;
		ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->recv_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = ch_uv->recv_msg_slots +
			    entry * ch->entry_size;

			msg_slot->hdr.msg_slot_number = entry;
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries)
			ch->remote_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

/*
 * Allocate msg_slots associated with the channel.
 */
static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
	enum xp_retval ret;
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch->flags & XPC_C_SETUP);

	ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct
						   gru_message_queue_desc),
						   GFP_KERNEL);
	if (ch_uv->cached_notify_gru_mq_desc == NULL)
		return xpNoMemory;

	ret = xpc_allocate_send_msg_slot_uv(ch);
	if (ret == xpSuccess) {

		ret = xpc_allocate_recv_msg_slot_uv(ch);
		if (ret != xpSuccess) {
			kfree(ch_uv->send_msg_slots);
			xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		}
	}
	return ret;
}

/*
 * Free up msg_slots and clear other stuff that was set up for the
 * specified channel.
 */
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(!spin_is_locked(&ch->lock));

	kfree(ch_uv->cached_notify_gru_mq_desc);
	ch_uv->cached_notify_gru_mq_desc = NULL;

	if (ch->flags & XPC_C_SETUP) {
		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		kfree(ch_uv->send_msg_slots);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
		kfree(ch_uv->recv_msg_slots);
	}
}

static void
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closerequest_uv msg;

	msg.ch_number = ch->number;
	msg.reason = ch->reason;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
}

static void
xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closereply_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
}

static void
xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openrequest_uv msg;

	msg.ch_number = ch->number;
	msg.entry_size = ch->entry_size;
	msg.local_nentries = ch->local_nentries;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
}

static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openreply_uv msg;

	msg.ch_number = ch->number;
	msg.local_nentries = ch->local_nentries;
	msg.remote_nentries = ch->remote_nentries;
	msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}

static void
xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_opencomplete_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV);
}

static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	xpc_wakeup_channel_mgr(part);
}

static enum xp_retval
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
			       unsigned long gru_mq_desc_gpa)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
	return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
					       gru_mq_desc_gpa);
}

static void
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
}

static void
xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
}

static void
xpc_assume_partition_disengaged_uv(short partid)
{
	struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
	unsigned long irq_flags;

	spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
	part_uv->flags &= ~XPC_P_ENGAGED_UV;
	spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}

static int
xpc_partition_engaged_uv(short partid)
{
	return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
}

static int
xpc_any_partition_engaged_uv(void)
{
	struct xpc_partition_uv *part_uv;
	short partid;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;
		if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
			return 1;
	}
	return 0;
}

static enum xp_retval
xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
			 struct xpc_send_msg_slot_uv **address_of_msg_slot)
{
	enum xp_retval ret;
	struct xpc_send_msg_slot_uv *msg_slot;
	struct xpc_fifo_entry_uv *entry;

	while (1) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
		if (entry != NULL)
			break;

		if (flags & XPC_NOWAIT)
			return xpNoWait;

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpInterrupted && ret != xpTimeout)
			return ret;
	}

	msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
	*address_of_msg_slot = msg_slot;
	return xpSuccess;
}

static void
xpc_free_msg_slot_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot)
{
	xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);

	/* wakeup anyone waiting for a free msg slot */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);
}

static void
xpc_notify_sender_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot,
		     enum xp_retval reason)
{
	xpc_notify_func func = msg_slot->func;

	if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {

		atomic_dec(&ch->n_to_notify);

		dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);

		func(reason, ch->partid, ch->number, msg_slot->key);

		dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);
	}
}

static void
xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry = msg->hdr.msg_slot_number % ch->local_nentries;

	msg_slot = &ch->sn.uv.send_msg_slots[entry];

	BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
	msg_slot->msg_slot_number += ch->local_nentries;

	if (msg_slot->func != NULL)
		xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);

	xpc_free_msg_slot_uv(ch, msg_slot);
}
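
/*
 * Handle a message received on the notify message queue: either an ACK
 * for a message we sent (hdr.size == 0) or a new payload to be copied
 * into the channel's receive slots and handed to a delivery kthread.
 */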
static void
xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_channel *ch;
	struct xpc_channel_uv *ch_uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int ch_number = msg->hdr.ch_number;

	if (unlikely(ch_number >= part->nchannels)) {
		dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
			"channel number=0x%x in message from partid=%d\n",
			ch_number, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadChannelNumber;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		wake_up_interruptible(&xpc_activate_IRQ_wq);
		return;
	}

	ch = &part->channels[ch_number];
	xpc_msgqueue_ref(ch);

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/* see if we're really dealing with an ACK for a previously sent msg */
	if (msg->hdr.size == 0) {
		xpc_handle_notify_mq_ack_uv(ch, msg);
		xpc_msgqueue_deref(ch);
		return;
	}

	/* we're dealing with a normal message sent via the notify_mq */
	ch_uv = &ch->sn.uv;

	msg_slot = ch_uv->recv_msg_slots +
	    (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;

	BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number);
	BUG_ON(msg_slot->hdr.size != 0);

	memcpy(msg_slot, msg, msg->hdr.size);

	xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);

	if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
		/*
		 * If there is an existing idle kthread get it to deliver
		 * the payload, otherwise we'll have to get the channel mgr
		 * for this partition to create a kthread to do the delivery.
		 */
		if (atomic_read(&ch->kthreads_idle) > 0)
			wake_up_nr(&ch->idle_wq, 1);
		else
			xpc_send_chctl_local_msgrequest_uv(part, ch->number);
	}
	xpc_msgqueue_deref(ch);
}

static irqreturn_t
xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_notify_mq_msg_uv *msg;
	short partid;
	struct xpc_partition *part;

	while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) !=
	       NULL) {

		partid = msg->hdr.partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
				"invalid partid=0x%x in message\n", partid);
		} else {
			part = &xpc_partitions[partid];

			if (xpc_part_ref(part)) {
				xpc_handle_notify_mq_msg_uv(part, msg);
				xpc_part_deref(part);
			}
		}

		gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg);
	}

	return IRQ_HANDLED;
}

static int
xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
{
	return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
}

static void
xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	int ndeliverable_payloads;

	xpc_msgqueue_ref(ch);

	ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);

	if (ndeliverable_payloads > 0 &&
	    (ch->flags & XPC_C_CONNECTED) &&
	    (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {

		xpc_activate_kthreads(ch, ndeliverable_payloads);
	}

	xpc_msgqueue_deref(ch);
}
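
/*
 * Send a payload on a channel: reserve a send slot, build the notify mq
 * message in a stack buffer, and hand it to xpc_send_gru_msg().  If the
 * send fails, the partition is deactivated and the slot is reclaimed.
 */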
static enum xp_retval
xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
		    u16 payload_size, u8 notify_type, xpc_notify_func func,
		    void *key)
{
	enum xp_retval ret = xpSuccess;
	struct xpc_send_msg_slot_uv *msg_slot = NULL;
	struct xpc_notify_mq_msg_uv *msg;
	u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
	size_t msg_size;

	DBUG_ON(notify_type != XPC_N_CALL);

	msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
	if (msg_size > ch->entry_size)
		return xpPayloadTooBig;

	xpc_msgqueue_ref(ch);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		goto out_1;
	}
	if (!(ch->flags & XPC_C_CONNECTED)) {
		ret = xpNotConnected;
		goto out_1;
	}

	ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
	if (ret != xpSuccess)
		goto out_1;

	if (func != NULL) {
		atomic_inc(&ch->n_to_notify);

		msg_slot->key = key;
		smp_wmb(); /* a non-NULL func must hit memory after the key */
		msg_slot->func = func;

		if (ch->flags & XPC_C_DISCONNECTING) {
			ret = ch->reason;
			goto out_2;
		}
	}

	msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
	msg->hdr.partid = xp_partition_id;
	msg->hdr.ch_number = ch->number;
	msg->hdr.size = msg_size;
	msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
	memcpy(&msg->payload, payload, payload_size);

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       msg_size);
	if (ret == xpSuccess)
		goto out_1;

	XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
out_2:
	if (func != NULL) {
		/*
		 * Try to NULL the msg_slot's func field. If we fail, then
		 * xpc_notify_senders_of_disconnect_uv() beat us to it, in which
		 * case we need to pretend we succeeded to send the message
		 * since the user will get a callout for the disconnect error
		 * by xpc_notify_senders_of_disconnect_uv(), and also getting
		 * an error returned here would confuse them. Additionally,
		 * since in this case the channel is being disconnected we
		 * don't need to put the msg_slot back on the free list.
		 */
		if (cmpxchg(&msg_slot->func, func, NULL) != func) {
			ret = xpSuccess;
			goto out_1;
		}

		msg_slot->key = NULL;
		atomic_dec(&ch->n_to_notify);
	}
	xpc_free_msg_slot_uv(ch, msg_slot);
out_1:
	xpc_msgqueue_deref(ch);
	return ret;
}

/*
 * Tell the callers of xpc_send_notify() that the status of their payloads
 * is unknown because the channel is now disconnecting.
 *
 * We don't worry about putting these msg_slots on the free list since the
 * msg_slots themselves are about to be kfree'd.
 */
static void
xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry;

	DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));

	for (entry = 0; entry < ch->local_nentries; entry++) {

		if (atomic_read(&ch->n_to_notify) == 0)
			break;

		msg_slot = &ch->sn.uv.send_msg_slots[entry];
		if (msg_slot->func != NULL)
			xpc_notify_sender_uv(ch, msg_slot, ch->reason);
	}
}

/*
 * Get the next deliverable message's payload.
 */
static void *
xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
{
	struct xpc_fifo_entry_uv *entry;
	struct xpc_notify_mq_msg_uv *msg;
	void *payload = NULL;

	if (!(ch->flags & XPC_C_DISCONNECTING)) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
		if (entry != NULL) {
			msg = container_of(entry, struct xpc_notify_mq_msg_uv,
					   hdr.u.next);
			payload = &msg->payload;
		}
	}

	return payload;
}

static void
xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
{
	struct xpc_notify_mq_msg_uv *msg;
	enum xp_retval ret;

	msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);

	/* return an ACK to the sender of this message */
	msg->hdr.partid = xp_partition_id;
	msg->hdr.size = 0;	/* size of zero indicates this is an ACK */

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       sizeof(struct xpc_notify_mq_msghdr_uv));
	if (ret != xpSuccess)
		XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);

	msg->hdr.msg_slot_number += ch->remote_nentries;
}
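
/*
 * The uv-specific operations table handed to the common XPC code.
 */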
static struct xpc_arch_operations xpc_arch_ops_uv = {
	.setup_partitions = xpc_setup_partitions_uv,
	.teardown_partitions = xpc_teardown_partitions_uv,
	.process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
	.get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv,
	.setup_rsvd_page = xpc_setup_rsvd_page_uv,

	.allow_hb = xpc_allow_hb_uv,
	.disallow_hb = xpc_disallow_hb_uv,
	.disallow_all_hbs = xpc_disallow_all_hbs_uv,
	.increment_heartbeat = xpc_increment_heartbeat_uv,
	.offline_heartbeat = xpc_offline_heartbeat_uv,
	.online_heartbeat = xpc_online_heartbeat_uv,
	.heartbeat_init = xpc_heartbeat_init_uv,
	.heartbeat_exit = xpc_heartbeat_exit_uv,
	.get_remote_heartbeat = xpc_get_remote_heartbeat_uv,

	.request_partition_activation =
		xpc_request_partition_activation_uv,
	.request_partition_reactivation =
		xpc_request_partition_reactivation_uv,
	.request_partition_deactivation =
		xpc_request_partition_deactivation_uv,
	.cancel_partition_deactivation_request =
		xpc_cancel_partition_deactivation_request_uv,

	.setup_ch_structures = xpc_setup_ch_structures_uv,
	.teardown_ch_structures = xpc_teardown_ch_structures_uv,

	.make_first_contact = xpc_make_first_contact_uv,

	.get_chctl_all_flags = xpc_get_chctl_all_flags_uv,
	.send_chctl_closerequest = xpc_send_chctl_closerequest_uv,
	.send_chctl_closereply = xpc_send_chctl_closereply_uv,
	.send_chctl_openrequest = xpc_send_chctl_openrequest_uv,
	.send_chctl_openreply = xpc_send_chctl_openreply_uv,
	.send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv,
	.process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv,

	.save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv,

	.setup_msg_structures = xpc_setup_msg_structures_uv,
	.teardown_msg_structures = xpc_teardown_msg_structures_uv,

	.indicate_partition_engaged = xpc_indicate_partition_engaged_uv,
	.indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv,
	.assume_partition_disengaged = xpc_assume_partition_disengaged_uv,
	.partition_engaged = xpc_partition_engaged_uv,
	.any_partition_engaged = xpc_any_partition_engaged_uv,

	.n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv,
	.send_payload = xpc_send_payload_uv,
	.get_deliverable_payload = xpc_get_deliverable_payload_uv,
	.received_payload = xpc_received_payload_uv,
	.notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
};
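
/*
 * Install the uv arch ops and create the two GRU message queues (activate
 * and notify) on cpu 0; xpc_exit_uv() tears both queues back down.
 */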
int
xpc_init_uv(void)
{
	xpc_arch_ops = xpc_arch_ops_uv;

	if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
		dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
			XPC_MSG_HDR_MAX_SIZE);
		return -E2BIG;
	}

	xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
						  XPC_ACTIVATE_IRQ_NAME,
						  xpc_handle_activate_IRQ_uv);
	if (IS_ERR(xpc_activate_mq_uv))
		return PTR_ERR(xpc_activate_mq_uv);

	xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
						XPC_NOTIFY_IRQ_NAME,
						xpc_handle_notify_IRQ_uv);
	if (IS_ERR(xpc_notify_mq_uv)) {
		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
		return PTR_ERR(xpc_notify_mq_uv);
	}

	return 0;
}

void
xpc_exit_uv(void)
{
	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}