xpc_uv.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) uv-based functions.
 *
 *	Architecture specific implementation of common functions.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <asm/uv/uv_hub.h>
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"

static atomic64_t xpc_heartbeat_uv;
static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);

#define XPC_ACTIVATE_MSG_SIZE_UV	(1 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)

#define XPC_ACTIVATE_MQ_SIZE_UV	(4 * XP_MAX_NPARTITIONS_UV * \
				 XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_NOTIFY_MQ_SIZE_UV	(4 * XP_MAX_NPARTITIONS_UV * \
				 XPC_NOTIFY_MSG_SIZE_UV)

static void *xpc_activate_mq_uv;
static void *xpc_notify_mq_uv;

static int
xpc_setup_partitions_sn_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		spin_lock_init(&part_uv->flags_lock);
		part_uv->remote_act_state = XPC_P_AS_INACTIVE;
	}
	return 0;
}

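/*
 * Allocate a GRU message queue in memory local to the specified CPU's node
 * and register an interrupt handler for it.  Returns the queue's address,
 * or NULL on failure.
 */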
static void *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpuid, unsigned int irq,
		     irq_handler_t irq_handler)
{
	int ret;
	int nid;
	int mq_order;
	struct page *page;
	void *mq;

	nid = cpu_to_node(cpuid);
	mq_order = get_order(mq_size);
	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				mq_order);
	if (page == NULL)
		return NULL;

	mq = page_address(page);
	ret = gru_create_message_queue(mq, mq_size);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		free_pages((unsigned long)mq, mq_order);
		return NULL;
	}

	/* !!! Need to do some other things to set up IRQ */

	ret = request_irq(irq, irq_handler, 0, "xpc", NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			irq, ret);
		free_pages((unsigned long)mq, mq_order);
		return NULL;
	}

	/* !!! enable generation of irq when GRU mq op occurs to this mq */

	/* ??? allow other partitions to access GRU mq? */

	return mq;
}

static void
xpc_destroy_gru_mq_uv(void *mq, unsigned int mq_size, unsigned int irq)
{
	/* ??? disallow other partitions to access GRU mq? */

	/* !!! disable generation of irq when GRU mq op occurs to this mq */

	free_irq(irq, NULL);
	free_pages((unsigned long)mq, get_order(mq_size));
}

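/*
 * Send a message to another partition's GRU message queue, retrying for as
 * long as the queue is full (QLimit reached) or the numalink is congested
 * (LB overflow).
 */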
static enum xp_retval
xpc_send_gru_msg(unsigned long mq_gpa, void *msg, size_t msg_size)
{
	enum xp_retval xp_ret;
	int ret;

	while (1) {
		ret = gru_send_message_gpa(mq_gpa, msg, msg_size);
		if (ret == MQE_OK) {
			xp_ret = xpSuccess;
			break;
		}

		if (ret == MQE_QUEUE_FULL) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_QUEUE_FULL\n");
			/* !!! handle QLimit reached; delay & try again */
			/* ??? Do we add a limit to the number of retries? */
			(void)msleep_interruptible(10);
		} else if (ret == MQE_CONGESTION) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_CONGESTION\n");
			/* !!! handle LB Overflow; simply try again */
			/* ??? Do we add a limit to the number of retries? */
		} else {
			/* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
			dev_err(xpc_chan, "gru_send_message_gpa() returned "
				"error=%d\n", ret);
			xp_ret = xpGruSendMqError;
			break;
		}
	}
	return xp_ret;
}

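/*
 * Act on the activate-state requests that the activate IRQ handler has
 * queued up, activating or deactivating the affected partitions.
 */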
static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	u8 act_state_req;

	DBUG_ON(xpc_activate_IRQ_rcvd == 0);

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part = &xpc_partitions[partid];

		if (part->sn.uv.act_state_req == 0)
			continue;

		xpc_activate_IRQ_rcvd--;
		BUG_ON(xpc_activate_IRQ_rcvd < 0);

		act_state_req = part->sn.uv.act_state_req;
		part->sn.uv.act_state_req = 0;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else if (part->act_state == XPC_P_AS_DEACTIVATING)
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
			XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);

		} else {
			BUG();
		}

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (xpc_activate_IRQ_rcvd == 0)
			break;
	}
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
}

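/*
 * Handle an activate IRQ by draining the activate GRU message queue.
 * Requests that require the heartbeat checker's attention are recorded
 * and xpc_activate_IRQ_wq is woken.
 */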
static irqreturn_t
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
{
	unsigned long irq_flags;
	struct xpc_activate_mq_msghdr_uv *msg_hdr;
	short partid;
	struct xpc_partition *part;
	struct xpc_partition_uv *part_uv;
	struct xpc_openclose_args *args;
	int wakeup_hb_checker = 0;

	while ((msg_hdr = gru_get_next_message(xpc_activate_mq_uv)) != NULL) {

		partid = msg_hdr->partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
				"invalid partid=0x%x passed in message\n",
				partid);
			gru_free_message(xpc_activate_mq_uv, msg_hdr);
			continue;
		}
		part = &xpc_partitions[partid];
		part_uv = &part->sn.uv;

		part_uv->remote_act_state = msg_hdr->act_state;

		switch (msg_hdr->type) {
		case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
			/* syncing of remote_act_state was just done above */
			break;

		case XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV: {
			struct xpc_activate_mq_msg_heartbeat_req_uv *msg;

			msg = (struct xpc_activate_mq_msg_heartbeat_req_uv *)
			    msg_hdr;
			part_uv->heartbeat = msg->heartbeat;
			break;
		}
		case XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV: {
			struct xpc_activate_mq_msg_heartbeat_req_uv *msg;

			msg = (struct xpc_activate_mq_msg_heartbeat_req_uv *)
			    msg_hdr;
			part_uv->heartbeat = msg->heartbeat;

			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags |= XPC_P_HEARTBEAT_OFFLINE_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			break;
		}
		case XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV: {
			struct xpc_activate_mq_msg_heartbeat_req_uv *msg;

			msg = (struct xpc_activate_mq_msg_heartbeat_req_uv *)
			    msg_hdr;
			part_uv->heartbeat = msg->heartbeat;

			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_HEARTBEAT_OFFLINE_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			break;
		}
		case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
			struct xpc_activate_mq_msg_activate_req_uv *msg;

			/*
			 * ??? Do we deal here with ts_jiffies being different
			 * ??? if act_state != XPC_P_AS_INACTIVE instead of
			 * ??? below?
			 */
			msg = (struct xpc_activate_mq_msg_activate_req_uv *)
			    msg_hdr;

			spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock,
					  irq_flags);
			if (part_uv->act_state_req == 0)
				xpc_activate_IRQ_rcvd++;
			part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
			part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
			part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
			part_uv->remote_activate_mq_gpa = msg->activate_mq_gpa;
			spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock,
					       irq_flags);

			wakeup_hb_checker++;
			break;
		}
		case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
			struct xpc_activate_mq_msg_deactivate_req_uv *msg;

			msg = (struct xpc_activate_mq_msg_deactivate_req_uv *)
			    msg_hdr;

			spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock,
					  irq_flags);
			if (part_uv->act_state_req == 0)
				xpc_activate_IRQ_rcvd++;
			part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
			part_uv->reason = msg->reason;
			spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock,
					       irq_flags);

			wakeup_hb_checker++;
			break;
		}
		case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
			struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;

			msg = (struct xpc_activate_mq_msg_chctl_closerequest_uv
			       *)msg_hdr;
			args = &part->remote_openclose_args[msg->ch_number];
			args->reason = msg->reason;

			spin_lock_irqsave(&part->chctl_lock, irq_flags);
			part->chctl.flags[msg->ch_number] |=
			    XPC_CHCTL_CLOSEREQUEST;
			spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

			xpc_wakeup_channel_mgr(part);
			break;
		}
		case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
			struct xpc_activate_mq_msg_chctl_closereply_uv *msg;

			msg = (struct xpc_activate_mq_msg_chctl_closereply_uv *)
			    msg_hdr;

			spin_lock_irqsave(&part->chctl_lock, irq_flags);
			part->chctl.flags[msg->ch_number] |=
			    XPC_CHCTL_CLOSEREPLY;
			spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

			xpc_wakeup_channel_mgr(part);
			break;
		}
		case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
			struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;

			msg = (struct xpc_activate_mq_msg_chctl_openrequest_uv
			       *)msg_hdr;
			args = &part->remote_openclose_args[msg->ch_number];
			args->msg_size = msg->msg_size;
			args->local_nentries = msg->local_nentries;

			spin_lock_irqsave(&part->chctl_lock, irq_flags);
			part->chctl.flags[msg->ch_number] |=
			    XPC_CHCTL_OPENREQUEST;
			spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

			xpc_wakeup_channel_mgr(part);
			break;
		}
		case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
			struct xpc_activate_mq_msg_chctl_openreply_uv *msg;

			msg = (struct xpc_activate_mq_msg_chctl_openreply_uv *)
			    msg_hdr;
			args = &part->remote_openclose_args[msg->ch_number];
			args->remote_nentries = msg->remote_nentries;
			args->local_nentries = msg->local_nentries;
			args->local_msgqueue_pa = msg->local_notify_mq_gpa;

			spin_lock_irqsave(&part->chctl_lock, irq_flags);
			part->chctl.flags[msg->ch_number] |=
			    XPC_CHCTL_OPENREPLY;
			spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

			xpc_wakeup_channel_mgr(part);
			break;
		}
		case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags |= XPC_P_ENGAGED_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			break;

		case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_ENGAGED_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			break;

		default:
			dev_err(xpc_part, "received unknown activate_mq msg "
				"type=%d from partition=%d\n", msg_hdr->type,
				partid);
		}

		if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
		    part->remote_rp_ts_jiffies != 0) {
			/*
			 * ??? Does what we do here need to be sensitive to
			 * ??? act_state or remote_act_state?
			 */
			spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock,
					  irq_flags);
			if (part_uv->act_state_req == 0)
				xpc_activate_IRQ_rcvd++;
			part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
			spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock,
					       irq_flags);
			wakeup_hb_checker++;
		}

		gru_free_message(xpc_activate_mq_uv, msg_hdr);
	}

	if (wakeup_hb_checker)
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	return IRQ_HANDLED;
}

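/*
 * Fill in an activate message's header and send it to the partition's
 * remote activate message queue.
 */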
static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
			 int msg_type)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;

	DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);

	msg_hdr->type = msg_type;
	msg_hdr->partid = XPC_PARTID(part);
	msg_hdr->act_state = part->act_state;
	msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;

	/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
	return xpc_send_gru_msg(part->sn.uv.remote_activate_mq_gpa, msg,
				msg_size);
}

static void
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
			      size_t msg_size, int msg_type)
{
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess))
		XPC_DEACTIVATE_PARTITION(part, ret);
}

static void
xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
			    void *msg, size_t msg_size, int msg_type)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess)) {
		if (irq_flags != NULL)
			spin_unlock_irqrestore(&ch->lock, *irq_flags);

		XPC_DEACTIVATE_PARTITION(part, ret);

		if (irq_flags != NULL)
			spin_lock_irqsave(&ch->lock, *irq_flags);
	}
}

static void
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;

	/*
	 * !!! Make our side think that the remote partition sent an activate
	 * !!! message our way by doing what the activate IRQ handler would
	 * !!! do had one really been sent.
	 */

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	if (part_uv->act_state_req == 0)
		xpc_activate_IRQ_rcvd++;
	part_uv->act_state_req = act_state_req;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
}

static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
				  size_t *len)
{
	/* !!! call the UV version of sn_partition_reserved_page_pa() */
	return xpUnsupported;
}

static int
xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp)
{
	rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv);
	return 0;
}

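/*
 * Broadcast a heartbeat message of the given type to every partition we
 * are currently heartbeating to.
 */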
static void
xpc_send_heartbeat_uv(int msg_type)
{
	short partid;
	struct xpc_partition *part;
	struct xpc_activate_mq_msg_heartbeat_req_uv msg;

	/*
	 * !!! On uv we're broadcasting a heartbeat message every 5 seconds.
	 * !!! Whereas on sn2 we're bte_copy'ng the heartbeat info every 20
	 * !!! seconds. This is an increase in numalink traffic.
	 * ??? Is this good?
	 */

	msg.heartbeat = atomic64_inc_return(&xpc_heartbeat_uv);

	partid = find_first_bit(xpc_heartbeating_to_mask_uv,
				XP_MAX_NPARTITIONS_UV);

	while (partid < XP_MAX_NPARTITIONS_UV) {
		part = &xpc_partitions[partid];
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					      msg_type);

		partid = find_next_bit(xpc_heartbeating_to_mask_uv,
				       XP_MAX_NPARTITIONS_UV, partid + 1);
	}
}

static void
xpc_increment_heartbeat_uv(void)
{
	xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV);
}

static void
xpc_offline_heartbeat_uv(void)
{
	xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV);
}

static void
xpc_online_heartbeat_uv(void)
{
	xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV);
}

static void
xpc_heartbeat_init_uv(void)
{
	atomic64_set(&xpc_heartbeat_uv, 0);
	bitmap_zero(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);
	xpc_heartbeating_to_mask = &xpc_heartbeating_to_mask_uv[0];
}

static void
xpc_heartbeat_exit_uv(void)
{
	xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV);
}

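/*
 * Check whether a remote partition that should be heartbeating is still
 * alive: its heartbeat must have advanced since we last looked, unless
 * it has declared its heartbeat offline.
 */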
static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	enum xp_retval ret = xpNoHeartbeat;

	if (part_uv->remote_act_state != XPC_P_AS_INACTIVE &&
	    part_uv->remote_act_state != XPC_P_AS_DEACTIVATING) {

		if (part_uv->heartbeat != part->last_heartbeat ||
		    (part_uv->flags & XPC_P_HEARTBEAT_OFFLINE_UV)) {

			part->last_heartbeat = part_uv->heartbeat;
			ret = xpSuccess;
		}
	}
	return ret;
}

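/*
 * A remote partition's reserved page has been discovered; record its
 * location and ask the remote side (and our own heartbeat checker) to
 * activate the partition.
 */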
static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
				    unsigned long remote_rp_gpa, int nasid)
{
	short partid = remote_rp->SAL_partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_activate_mq_msg_activate_req_uv msg;

	part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
	part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
	part->sn.uv.remote_activate_mq_gpa = remote_rp->sn.activate_mq_gpa;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
		msg.rp_gpa = uv_gpa(xpc_rsvd_page);
		msg.activate_mq_gpa = xpc_rsvd_page->sn.activate_mq_gpa;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
	}

	if (part->act_state == XPC_P_AS_INACTIVE)
		xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_reactivation_uv(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_deactivation_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_deactivate_req_uv msg;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
	    part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {

		msg.reason = part->reason;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
	}
}

/*
 * Setup the channel structures that are uv specific.
 */
static enum xp_retval
xpc_setup_ch_structures_sn_uv(struct xpc_partition *part)
{
	/* !!! this function needs fleshing out */
	return xpUnsupported;
}

/*
 * Teardown the channel structures that are uv specific.
 */
static void
xpc_teardown_ch_structures_sn_uv(struct xpc_partition *part)
{
	/* !!! this function needs fleshing out */
	return;
}

static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	/*
	 * We send a sync msg to get the remote partition's remote_act_state
	 * updated to our current act_state which at this point should
	 * be XPC_P_AS_ACTIVATING.
	 */
	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);

	while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) {

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}

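/* Atomically snapshot and clear the partition's channel control flags. */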
static u64
xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
	return chctl.all_flags;
}

static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
	/* !!! this function needs fleshing out */
	return xpUnsupported;
}

static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	ch_uv->remote_notify_mq_gpa = 0;

	/* !!! this function needs fleshing out */
}

static void
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closerequest_uv msg;

	msg.ch_number = ch->number;
	msg.reason = ch->reason;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
}

static void
xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closereply_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
}

static void
xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openrequest_uv msg;

	msg.ch_number = ch->number;
	msg.msg_size = ch->msg_size;
	msg.local_nentries = ch->local_nentries;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
}

static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openreply_uv msg;

	msg.ch_number = ch->number;
	msg.local_nentries = ch->local_nentries;
	msg.remote_nentries = ch->remote_nentries;
	msg.local_notify_mq_gpa = uv_gpa(xpc_notify_mq_uv);
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}

static void
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
			       unsigned long msgqueue_pa)
{
	ch->sn.uv.remote_notify_mq_gpa = msgqueue_pa;
}

static void
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
}

static void
xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
}

static void
xpc_assume_partition_disengaged_uv(short partid)
{
	struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
	unsigned long irq_flags;

	spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
	part_uv->flags &= ~XPC_P_ENGAGED_UV;
	spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}

static int
xpc_partition_engaged_uv(short partid)
{
	return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
}

static int
xpc_any_partition_engaged_uv(void)
{
	struct xpc_partition_uv *part_uv;
	short partid;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;
		if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
			return 1;
	}
	return 0;
}

static struct xpc_msg *
xpc_get_deliverable_msg_uv(struct xpc_channel *ch)
{
	/* !!! this function needs fleshing out */
	return NULL;
}

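/*
 * Install the uv-specific implementations of the XPC interface functions
 * and create the activate GRU message queue.
 */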
int
xpc_init_uv(void)
{
	xpc_setup_partitions_sn = xpc_setup_partitions_sn_uv;
	xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv;
	xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv;
	xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_uv;
	xpc_increment_heartbeat = xpc_increment_heartbeat_uv;
	xpc_offline_heartbeat = xpc_offline_heartbeat_uv;
	xpc_online_heartbeat = xpc_online_heartbeat_uv;
	xpc_heartbeat_init = xpc_heartbeat_init_uv;
	xpc_heartbeat_exit = xpc_heartbeat_exit_uv;
	xpc_get_remote_heartbeat = xpc_get_remote_heartbeat_uv;

	xpc_request_partition_activation = xpc_request_partition_activation_uv;
	xpc_request_partition_reactivation =
	    xpc_request_partition_reactivation_uv;
	xpc_request_partition_deactivation =
	    xpc_request_partition_deactivation_uv;

	xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_uv;
	xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_uv;

	xpc_make_first_contact = xpc_make_first_contact_uv;

	xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_uv;
	xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_uv;
	xpc_send_chctl_closereply = xpc_send_chctl_closereply_uv;
	xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_uv;
	xpc_send_chctl_openreply = xpc_send_chctl_openreply_uv;

	xpc_save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv;

	xpc_setup_msg_structures = xpc_setup_msg_structures_uv;
	xpc_teardown_msg_structures = xpc_teardown_msg_structures_uv;

	xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_uv;
	xpc_indicate_partition_disengaged =
	    xpc_indicate_partition_disengaged_uv;
	xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_uv;
	xpc_partition_engaged = xpc_partition_engaged_uv;
	xpc_any_partition_engaged = xpc_any_partition_engaged_uv;

	xpc_get_deliverable_msg = xpc_get_deliverable_msg_uv;

	/* ??? The cpuid argument's value is 0, is that what we want? */
	/* !!! The irq argument's value isn't correct. */
	xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, 0,
						  xpc_handle_activate_IRQ_uv);
	if (xpc_activate_mq_uv == NULL)
		return -ENOMEM;

	return 0;
}

void
xpc_exit_uv(void)
{
	/* !!! The irq argument's value isn't correct. */
	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, XPC_ACTIVATE_MQ_SIZE_UV, 0);
}