xpc_partition.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) partition support.
 *
 *	This is the part of XPC that detects the presence/absence of
 *	other partitions. It provides a heartbeat and monitors the
 *	heartbeats of other partitions.
 *
 */

#include <linux/kernel.h>
#include <linux/sysctl.h>
#include <linux/cache.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>
#include <asm/uncached.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/addrs.h>
#include "xpc.h"

/* XPC is exiting flag */
int xpc_exiting;

/* SH_IPI_ACCESS shub register value on startup */
static u64 xpc_sh1_IPI_access;
static u64 xpc_sh2_IPI_access0;
static u64 xpc_sh2_IPI_access1;
static u64 xpc_sh2_IPI_access2;
static u64 xpc_sh2_IPI_access3;

/* original protection values for each node */
u64 xpc_prot_vec[MAX_NUMNODES];

/* this partition's reserved page pointers */
struct xpc_rsvd_page *xpc_rsvd_page;
static u64 *xpc_part_nasids;
static u64 *xpc_mach_nasids;
struct xpc_vars *xpc_vars;
struct xpc_vars_part *xpc_vars_part;

static int xp_nasid_mask_bytes;	/* actual size in bytes of nasid mask */
static int xp_nasid_mask_words;	/* actual size in words of nasid mask */

struct xpc_partition *xpc_partitions;

/*
 * Generic buffer used to store a local copy of portions of a remote
 * partition's reserved page (either its header and part_nasids mask,
 * or its vars).
 */
char *xpc_remote_copy_buffer;
void *xpc_remote_copy_buffer_base;

/*
 * Guarantee that the kmalloc'd memory is cacheline aligned.
 */
void *
xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
	/* see if kmalloc will give us cacheline aligned memory by default */
	*base = kmalloc(size, flags);
	if (*base == NULL)
		return NULL;

	if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
		return *base;

	kfree(*base);

	/* nope, we'll have to do it ourselves */
	*base = kmalloc(size + L1_CACHE_BYTES, flags);
	if (*base == NULL)
		return NULL;

	return (void *)L1_CACHE_ALIGN((u64)*base);
}
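
/*
 * Illustrative sketch (editorial, not part of the original driver): callers
 * of xpc_kmalloc_cacheline_aligned() must keep the 'base' cookie and pass
 * that, not the aligned pointer, to kfree() -- see xpc_discovery() below,
 * which frees remote_rp_base rather than remote_rp.  Variable names here are
 * hypothetical:
 *
 *	void *base;
 *	struct xpc_rsvd_page *rp;
 *
 *	rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE, GFP_KERNEL,
 *					   &base);
 *	if (rp != NULL) {
 *		... use the cacheline aligned rp ...
 *		kfree(base);		(free via the cookie, not rp)
 *	}
 */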

/*
 * Given a nasid, get the physical address of the partition's reserved page
 * for that nasid. This function returns 0 on any error.
 */
static u64
xpc_get_rsvd_page_pa(int nasid)
{
	enum xp_retval ret;
	s64 status;
	u64 cookie = 0;
	u64 rp_pa = nasid;	/* seed with nasid */
	u64 len = 0;
	u64 buf = buf;	/* self-assignment quiets a "may be used
			 * uninitialized" compiler warning */
	u64 buf_len = 0;
	void *buf_base = NULL;

	while (1) {

		status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa,
						       &len);

		dev_dbg(xpc_part, "SAL returned with status=%li, cookie="
			"0x%016lx, address=0x%016lx, len=0x%016lx\n",
			status, cookie, rp_pa, len);

		if (status != SALRET_MORE_PASSES)
			break;

		/* >>> L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */
		if (L1_CACHE_ALIGN(len) > buf_len) {
			kfree(buf_base);
			buf_len = L1_CACHE_ALIGN(len);
			buf = (u64)xpc_kmalloc_cacheline_aligned(buf_len,
								 GFP_KERNEL,
								 &buf_base);
			if (buf_base == NULL) {
				dev_err(xpc_part, "unable to kmalloc "
					"len=0x%016lx\n", buf_len);
				status = SALRET_ERROR;
				break;
			}
		}

		ret = xp_remote_memcpy((void *)buf, (void *)rp_pa, buf_len);
		if (ret != xpSuccess) {
			dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret);
			status = SALRET_ERROR;
			break;
		}
	}

	kfree(buf_base);

	if (status != SALRET_OK)
		rp_pa = 0;

	dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
	return rp_pa;
}

/*
 * Fill the partition reserved page with the information needed by
 * other partitions to discover we are alive and establish initial
 * communications.
 */
struct xpc_rsvd_page *
xpc_rsvd_page_init(void)
{
	struct xpc_rsvd_page *rp;
	AMO_t *amos_page;
	u64 rp_pa, nasid_array = 0;
	int i, ret;

	/* get the local reserved page's address */

	preempt_disable();
	rp_pa = xpc_get_rsvd_page_pa(cpuid_to_nasid(smp_processor_id()));
	preempt_enable();
	if (rp_pa == 0) {
		dev_err(xpc_part, "SAL failed to locate the reserved page\n");
		return NULL;
	}
	rp = (struct xpc_rsvd_page *)__va(rp_pa);

	if (rp->partid != sn_partition_id) {
		dev_err(xpc_part, "the reserved page's partid of %d should be "
			"%d\n", rp->partid, sn_partition_id);
		return NULL;
	}

	rp->version = XPC_RP_VERSION;

	/* establish the actual sizes of the nasid masks */
	if (rp->SAL_version == 1) {
		/* SAL_version 1 didn't set the nasids_size field */
		rp->nasids_size = 128;
	}
	xp_nasid_mask_bytes = rp->nasids_size;
	xp_nasid_mask_words = xp_nasid_mask_bytes / 8;

	/* setup the pointers to the various items in the reserved page */
	xpc_part_nasids = XPC_RP_PART_NASIDS(rp);
	xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp);
	xpc_vars = XPC_RP_VARS(rp);
	xpc_vars_part = XPC_RP_VARS_PART(rp);

	/*
	 * Before clearing xpc_vars, see if a page of AMOs had been previously
	 * allocated. If not we'll need to allocate one and set permissions
	 * so that cross-partition AMOs are allowed.
	 *
	 * The allocated AMO page needs MCA reporting to remain disabled after
	 * XPC has unloaded. To make this work, we keep a copy of the pointer
	 * to this page (i.e., amos_page) in the struct xpc_vars structure,
	 * which is pointed to by the reserved page, and re-use that saved copy
	 * on subsequent loads of XPC. This AMO page is never freed, and its
	 * memory protections are never restricted.
	 */
	amos_page = xpc_vars->amos_page;
	if (amos_page == NULL) {
		amos_page = (AMO_t *)TO_AMO(uncached_alloc_page(0, 1));
		if (amos_page == NULL) {
			dev_err(xpc_part, "can't allocate page of AMOs\n");
			return NULL;
		}

		/*
		 * Open up AMO-R/W to cpu. This is done for Shub 1.1 systems
		 * when xpc_allow_IPI_ops() is called via xpc_hb_init().
		 */
		if (!enable_shub_wars_1_1()) {
			ret = sn_change_memprotect(ia64_tpa((u64)amos_page),
						   PAGE_SIZE,
						   SN_MEMPROT_ACCESS_CLASS_1,
						   &nasid_array);
			if (ret != 0) {
				dev_err(xpc_part, "can't change memory "
					"protections\n");
				uncached_free_page(__IA64_UNCACHED_OFFSET |
						   TO_PHYS((u64)amos_page), 1);
				return NULL;
			}
		}
	} else if (!IS_AMO_ADDRESS((u64)amos_page)) {
		/*
		 * EFI's XPBOOT can also set amos_page in the reserved page,
		 * but it happens to leave it as an uncached physical address
		 * and we need it to be an uncached virtual, so we'll have to
		 * convert it.
		 */
		if (!IS_AMO_PHYS_ADDRESS((u64)amos_page)) {
			dev_err(xpc_part, "previously used amos_page address "
				"is bad = 0x%p\n", (void *)amos_page);
			return NULL;
		}
		amos_page = (AMO_t *)TO_AMO((u64)amos_page);
	}

	/* clear xpc_vars */
	memset(xpc_vars, 0, sizeof(struct xpc_vars));

	xpc_vars->version = XPC_V_VERSION;
	xpc_vars->act_nasid = cpuid_to_nasid(0);
	xpc_vars->act_phys_cpuid = cpu_physical_id(0);
	xpc_vars->vars_part_pa = __pa(xpc_vars_part);
	xpc_vars->amos_page_pa = ia64_tpa((u64)amos_page);
	xpc_vars->amos_page = amos_page;	/* save for next load of XPC */

	/* clear xpc_vars_part */
	memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part) *
	       xp_max_npartitions);

	/* initialize the activate IRQ related AMO variables */
	for (i = 0; i < xp_nasid_mask_words; i++)
		(void)xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i);

	/* initialize the engaged remote partitions related AMO variables */
	(void)xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
	(void)xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);

	/* timestamp of when reserved page was setup by XPC */
	rp->stamp = CURRENT_TIME;

	/*
	 * This signifies to the remote partition that our reserved
	 * page is initialized.
	 */
	rp->vars_pa = __pa(xpc_vars);

	return rp;
}
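
/*
 * Note (editorial, not in the original source): writing rp->vars_pa above is
 * what publishes the reserved page to remote partitions -- xpc_get_remote_vars()
 * below treats a vars_pa of 0 as "XPC variables not yet set up" and returns
 * xpVarsNotSet, which is why the field is filled in only after everything
 * else in the page has been initialized.
 */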

/*
 * Change protections to allow IPI operations (and AMO operations on
 * Shub 1.1 systems).
 */
void
xpc_allow_IPI_ops(void)
{
	int node;
	int nasid;

	/* >>> Change SH_IPI_ACCESS code to use SAL call once it is available */

	if (is_shub2()) {
		xpc_sh2_IPI_access0 =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
		xpc_sh2_IPI_access1 =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
		xpc_sh2_IPI_access2 =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
		xpc_sh2_IPI_access3 =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));

		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
			      -1UL);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
			      -1UL);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
			      -1UL);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
			      -1UL);
		}

	} else {
		xpc_sh1_IPI_access =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS));

		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
			      -1UL);

			/*
			 * Since the BIST collides with memory operations on
			 * SHUB 1.1 sn_change_memprotect() cannot be used.
			 */
			if (enable_shub_wars_1_1()) {
				/* open up everything */
				xpc_prot_vec[node] = (u64)HUB_L((u64 *)
						GLOBAL_MMR_ADDR(nasid,
						SH1_MD_DQLP_MMR_DIR_PRIVEC0));
				HUB_S((u64 *)
				      GLOBAL_MMR_ADDR(nasid,
						SH1_MD_DQLP_MMR_DIR_PRIVEC0),
				      -1UL);
				HUB_S((u64 *)
				      GLOBAL_MMR_ADDR(nasid,
						SH1_MD_DQRP_MMR_DIR_PRIVEC0),
				      -1UL);
			}
		}
	}
}

/*
 * Restrict protections to disallow IPI operations (and AMO operations on
 * Shub 1.1 systems).
 */
void
xpc_restrict_IPI_ops(void)
{
	int node;
	int nasid;

	/* >>> Change SH_IPI_ACCESS code to use SAL call once it is available */

	if (is_shub2()) {

		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
			      xpc_sh2_IPI_access0);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
			      xpc_sh2_IPI_access1);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
			      xpc_sh2_IPI_access2);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
			      xpc_sh2_IPI_access3);
		}

	} else {

		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
			      xpc_sh1_IPI_access);

			if (enable_shub_wars_1_1()) {
				HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
						SH1_MD_DQLP_MMR_DIR_PRIVEC0),
				      xpc_prot_vec[node]);
				HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
						SH1_MD_DQRP_MMR_DIR_PRIVEC0),
				      xpc_prot_vec[node]);
			}
		}
	}
}

/*
 * At periodic intervals, scan through all active partitions and ensure
 * their heartbeat is still active. If not, the partition is deactivated.
 */
void
xpc_check_remote_hb(void)
{
	struct xpc_vars *remote_vars;
	struct xpc_partition *part;
	short partid;
	enum xp_retval ret;

	remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer;

	for (partid = 0; partid < xp_max_npartitions; partid++) {

		if (xpc_exiting)
			break;

		if (partid == sn_partition_id)
			continue;

		part = &xpc_partitions[partid];

		if (part->act_state == XPC_P_INACTIVE ||
		    part->act_state == XPC_P_DEACTIVATING) {
			continue;
		}

		/* pull the remote_hb cache line */
		ret = xp_remote_memcpy(remote_vars,
				       (void *)part->remote_vars_pa,
				       XPC_RP_VARS_SIZE);
		if (ret != xpSuccess) {
			XPC_DEACTIVATE_PARTITION(part, ret);
			continue;
		}

		dev_dbg(xpc_part, "partid = %d, heartbeat = %ld, last_heartbeat"
			" = %ld, heartbeat_offline = %ld, HB_mask = 0x%lx\n",
			partid, remote_vars->heartbeat, part->last_heartbeat,
			remote_vars->heartbeat_offline,
			remote_vars->heartbeating_to_mask);

		if (((remote_vars->heartbeat == part->last_heartbeat) &&
		     (remote_vars->heartbeat_offline == 0)) ||
		    !xpc_hb_allowed(sn_partition_id, remote_vars)) {

			XPC_DEACTIVATE_PARTITION(part, xpNoHeartbeat);
			continue;
		}

		part->last_heartbeat = remote_vars->heartbeat;
	}
}
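
/*
 * Note (editorial, not in the original source): the check above deactivates a
 * partition when its heartbeat counter has not advanced since the previous
 * scan while it has not declared itself offline (heartbeat_offline == 0), or
 * when it is no longer heartbeating to this partition at all.
 */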

/*
 * Get a copy of a portion of the remote partition's rsvd page.
 *
 * remote_rp points to a buffer that is cacheline aligned for BTE copies and
 * is large enough to contain a copy of their reserved page header and
 * part_nasids mask.
 */
static enum xp_retval
xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
		  struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa)
{
	int i;
	enum xp_retval ret;

	/* get the reserved page's physical address */

	*remote_rp_pa = xpc_get_rsvd_page_pa(nasid);
	if (*remote_rp_pa == 0)
		return xpNoRsvdPageAddr;

	/* pull over the reserved page header and part_nasids mask */
	ret = xp_remote_memcpy(remote_rp, (void *)*remote_rp_pa,
			       XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes);
	if (ret != xpSuccess)
		return ret;

	if (discovered_nasids != NULL) {
		u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp);

		for (i = 0; i < xp_nasid_mask_words; i++)
			discovered_nasids[i] |= remote_part_nasids[i];
	}

	/* check that the partid is for another partition */

	if (remote_rp->partid < 0 || remote_rp->partid >= xp_max_npartitions)
		return xpInvalidPartid;

	if (remote_rp->partid == sn_partition_id)
		return xpLocalPartid;

	if (XPC_VERSION_MAJOR(remote_rp->version) !=
	    XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
		return xpBadVersion;
	}

	return xpSuccess;
}

/*
 * Get a copy of the remote partition's XPC variables from the reserved page.
 *
 * remote_vars points to a buffer that is cacheline aligned for BTE copies and
 * assumed to be of size XPC_RP_VARS_SIZE.
 */
static enum xp_retval
xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
{
	enum xp_retval ret;

	if (remote_vars_pa == 0)
		return xpVarsNotSet;

	/* pull over the cross partition variables */
	ret = xp_remote_memcpy(remote_vars, (void *)remote_vars_pa,
			       XPC_RP_VARS_SIZE);
	if (ret != xpSuccess)
		return ret;

	if (XPC_VERSION_MAJOR(remote_vars->version) !=
	    XPC_VERSION_MAJOR(XPC_V_VERSION)) {
		return xpBadVersion;
	}

	return xpSuccess;
}

/*
 * Update the remote partition's info.
 */
static void
xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version,
			  struct timespec *remote_rp_stamp, u64 remote_rp_pa,
			  u64 remote_vars_pa, struct xpc_vars *remote_vars)
{
	part->remote_rp_version = remote_rp_version;
	dev_dbg(xpc_part, " remote_rp_version = 0x%016x\n",
		part->remote_rp_version);

	part->remote_rp_stamp = *remote_rp_stamp;
	dev_dbg(xpc_part, " remote_rp_stamp (tv_sec = 0x%lx tv_nsec = 0x%lx\n",
		part->remote_rp_stamp.tv_sec, part->remote_rp_stamp.tv_nsec);

	part->remote_rp_pa = remote_rp_pa;
	dev_dbg(xpc_part, " remote_rp_pa = 0x%016lx\n", part->remote_rp_pa);

	part->remote_vars_pa = remote_vars_pa;
	dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n",
		part->remote_vars_pa);

	part->last_heartbeat = remote_vars->heartbeat;
	dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n",
		part->last_heartbeat);

	part->remote_vars_part_pa = remote_vars->vars_part_pa;
	dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n",
		part->remote_vars_part_pa);

	part->remote_act_nasid = remote_vars->act_nasid;
	dev_dbg(xpc_part, " remote_act_nasid = 0x%x\n",
		part->remote_act_nasid);

	part->remote_act_phys_cpuid = remote_vars->act_phys_cpuid;
	dev_dbg(xpc_part, " remote_act_phys_cpuid = 0x%x\n",
		part->remote_act_phys_cpuid);

	part->remote_amos_page_pa = remote_vars->amos_page_pa;
	dev_dbg(xpc_part, " remote_amos_page_pa = 0x%lx\n",
		part->remote_amos_page_pa);

	part->remote_vars_version = remote_vars->version;
	dev_dbg(xpc_part, " remote_vars_version = 0x%x\n",
		part->remote_vars_version);
}

/*
 * Prior code has determined the nasid which generated an IPI. Inspect
 * that nasid to determine if its partition needs to be activated or
 * deactivated.
 *
 * A partition is considered "awaiting activation" if our partition
 * flags indicate it is not active and it has a heartbeat. A
 * partition is considered "awaiting deactivation" if our partition
 * flags indicate it is active but it has no heartbeat or it is not
 * sending its heartbeat to us.
 *
 * To determine the heartbeat, the remote nasid must have a properly
 * initialized reserved page.
 */
static void
xpc_identify_act_IRQ_req(int nasid)
{
	struct xpc_rsvd_page *remote_rp;
	struct xpc_vars *remote_vars;
	u64 remote_rp_pa;
	u64 remote_vars_pa;
	int remote_rp_version;
	int reactivate = 0;
	int stamp_diff;
	struct timespec remote_rp_stamp = { 0, 0 };
	short partid;
	struct xpc_partition *part;
	enum xp_retval ret;

	/* pull over the reserved page structure */

	remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer;

	ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
	if (ret != xpSuccess) {
		dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
			 "which sent interrupt, reason=%d\n", nasid, ret);
		return;
	}

	remote_vars_pa = remote_rp->vars_pa;
	remote_rp_version = remote_rp->version;
	if (XPC_SUPPORTS_RP_STAMP(remote_rp_version))
		remote_rp_stamp = remote_rp->stamp;

	partid = remote_rp->partid;
	part = &xpc_partitions[partid];

	/* pull over the cross partition variables */

	remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer;

	ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
	if (ret != xpSuccess) {

		dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
			 "which sent interrupt, reason=%d\n", nasid, ret);

		XPC_DEACTIVATE_PARTITION(part, ret);
		return;
	}

	part->act_IRQ_rcvd++;

	dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
		"%ld:0x%lx\n", (int)nasid, (int)partid, part->act_IRQ_rcvd,
		remote_vars->heartbeat, remote_vars->heartbeating_to_mask);

	if (xpc_partition_disengaged(part) &&
	    part->act_state == XPC_P_INACTIVE) {

		xpc_update_partition_info(part, remote_rp_version,
					  &remote_rp_stamp, remote_rp_pa,
					  remote_vars_pa, remote_vars);

		if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
			if (xpc_partition_disengage_requested(1UL << partid)) {
				/*
				 * Other side is waiting on us to disengage,
				 * even though we already have.
				 */
				return;
			}
		} else {
			/* other side doesn't support disengage requests */
			xpc_clear_partition_disengage_request(1UL << partid);
		}

		xpc_activate_partition(part);
		return;
	}

	DBUG_ON(part->remote_rp_version == 0);
	DBUG_ON(part->remote_vars_version == 0);

	if (!XPC_SUPPORTS_RP_STAMP(part->remote_rp_version)) {
		DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(part->
						       remote_vars_version));

		if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
			DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
							       version));

			/* see if the other side rebooted */
			if (part->remote_amos_page_pa ==
			    remote_vars->amos_page_pa &&
			    xpc_hb_allowed(sn_partition_id, remote_vars)) {
				/* doesn't look that way, so ignore the IPI */
				return;
			}
		}

		/*
		 * Other side rebooted and previous XPC didn't support the
		 * disengage request, so we don't need to do anything special.
		 */
		xpc_update_partition_info(part, remote_rp_version,
					  &remote_rp_stamp, remote_rp_pa,
					  remote_vars_pa, remote_vars);
		part->reactivate_nasid = nasid;
		XPC_DEACTIVATE_PARTITION(part, xpReactivating);
		return;
	}

	DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version));

	if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
		DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version));

		/*
		 * Other side rebooted and previous XPC did support the
		 * disengage request, but the new one doesn't.
		 */
		xpc_clear_partition_engaged(1UL << partid);
		xpc_clear_partition_disengage_request(1UL << partid);

		xpc_update_partition_info(part, remote_rp_version,
					  &remote_rp_stamp, remote_rp_pa,
					  remote_vars_pa, remote_vars);
		reactivate = 1;

	} else {
		DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version));

		stamp_diff = xpc_compare_stamps(&part->remote_rp_stamp,
						&remote_rp_stamp);
		if (stamp_diff != 0) {
			DBUG_ON(stamp_diff >= 0);

			/*
			 * Other side rebooted and the previous XPC did support
			 * the disengage request, as does the new one.
			 */
			DBUG_ON(xpc_partition_engaged(1UL << partid));
			DBUG_ON(xpc_partition_disengage_requested(1UL <<
								  partid));

			xpc_update_partition_info(part, remote_rp_version,
						  &remote_rp_stamp,
						  remote_rp_pa, remote_vars_pa,
						  remote_vars);
			reactivate = 1;
		}
	}

	if (part->disengage_request_timeout > 0 &&
	    !xpc_partition_disengaged(part)) {
		/* still waiting on other side to disengage from us */
		return;
	}

	if (reactivate) {
		part->reactivate_nasid = nasid;
		XPC_DEACTIVATE_PARTITION(part, xpReactivating);

	} else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) &&
		   xpc_partition_disengage_requested(1UL << partid)) {
		XPC_DEACTIVATE_PARTITION(part, xpOtherGoingDown);
	}
}

/*
 * Loop through the activation AMO variables and process any bits
 * which are set. Each bit indicates a nasid sending a partition
 * activation or deactivation request.
 *
 * Return #of IRQs detected.
 */
int
xpc_identify_act_IRQ_sender(void)
{
	int word, bit;
	u64 nasid_mask;
	u64 nasid;	/* remote nasid */
	int n_IRQs_detected = 0;
	AMO_t *act_amos;

	act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS;

	/* scan through act AMO variable looking for non-zero entries */
	for (word = 0; word < xp_nasid_mask_words; word++) {

		if (xpc_exiting)
			break;

		nasid_mask = xpc_IPI_receive(&act_amos[word]);
		if (nasid_mask == 0) {
			/* no IRQs from nasids in this variable */
			continue;
		}

		dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word,
			nasid_mask);

		/*
		 * If this nasid has been added to the machine since
		 * our partition was reset, this will retain the
		 * remote nasid in our reserved page's machine mask.
		 * This is used in the event of module reload.
		 */
		xpc_mach_nasids[word] |= nasid_mask;

		/* locate the nasid(s) which sent interrupts */

		for (bit = 0; bit < (8 * sizeof(u64)); bit++) {
			if (nasid_mask & (1UL << bit)) {
				n_IRQs_detected++;
				nasid = XPC_NASID_FROM_W_B(word, bit);
				dev_dbg(xpc_part, "interrupt from nasid %ld\n",
					nasid);
				xpc_identify_act_IRQ_req(nasid);
			}
		}
	}
	return n_IRQs_detected;
}

/*
 * See if the other side has responded to a partition disengage request
 * from us.
 */
int
xpc_partition_disengaged(struct xpc_partition *part)
{
	short partid = XPC_PARTID(part);
	int disengaged;

	disengaged = (xpc_partition_engaged(1UL << partid) == 0);
	if (part->disengage_request_timeout) {
		if (!disengaged) {
			if (time_before(jiffies,
					part->disengage_request_timeout)) {
				/* timelimit hasn't been reached yet */
				return 0;
			}

			/*
			 * Other side hasn't responded to our disengage
			 * request in a timely fashion, so assume it's dead.
			 */

			dev_info(xpc_part, "disengage from remote partition %d "
				 "timed out\n", partid);
			xpc_disengage_request_timedout = 1;
			xpc_clear_partition_engaged(1UL << partid);
			disengaged = 1;
		}
		part->disengage_request_timeout = 0;

		/* cancel the timer function, provided it's not us */
		if (!in_interrupt()) {
			del_singleshot_timer_sync(&part->
						  disengage_request_timer);
		}

		DBUG_ON(part->act_state != XPC_P_DEACTIVATING &&
			part->act_state != XPC_P_INACTIVE);
		if (part->act_state != XPC_P_INACTIVE)
			xpc_wakeup_channel_mgr(part);

		if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version))
			xpc_cancel_partition_disengage_request(part);
	}
	return disengaged;
}

/*
 * Mark specified partition as active.
 */
enum xp_retval
xpc_mark_partition_active(struct xpc_partition *part)
{
	unsigned long irq_flags;
	enum xp_retval ret;

	dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));

	spin_lock_irqsave(&part->act_lock, irq_flags);
	if (part->act_state == XPC_P_ACTIVATING) {
		part->act_state = XPC_P_ACTIVE;
		ret = xpSuccess;
	} else {
		DBUG_ON(part->reason == xpSuccess);
		ret = part->reason;
	}
	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	return ret;
}

/*
 * Notify XPC that the partition is down.
 */
void
xpc_deactivate_partition(const int line, struct xpc_partition *part,
			 enum xp_retval reason)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->act_lock, irq_flags);

	if (part->act_state == XPC_P_INACTIVE) {
		XPC_SET_REASON(part, reason, line);
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		if (reason == xpReactivating) {
			/* we interrupt ourselves to reactivate partition */
			xpc_IPI_send_reactivate(part);
		}
		return;
	}
	if (part->act_state == XPC_P_DEACTIVATING) {
		if ((part->reason == xpUnloading && reason != xpUnloading) ||
		    reason == xpReactivating) {
			XPC_SET_REASON(part, reason, line);
		}
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		return;
	}

	part->act_state = XPC_P_DEACTIVATING;
	XPC_SET_REASON(part, reason, line);

	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
		xpc_request_partition_disengage(part);
		xpc_IPI_send_disengage(part);

		/* set a timelimit on the disengage request */
		part->disengage_request_timeout = jiffies +
		    (xpc_disengage_request_timelimit * HZ);
		part->disengage_request_timer.expires =
		    part->disengage_request_timeout;
		add_timer(&part->disengage_request_timer);
	}

	dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n",
		XPC_PARTID(part), reason);

	xpc_partition_going_down(part, reason);
}

/*
 * Mark specified partition as inactive.
 */
void
xpc_mark_partition_inactive(struct xpc_partition *part)
{
	unsigned long irq_flags;

	dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
		XPC_PARTID(part));

	spin_lock_irqsave(&part->act_lock, irq_flags);
	part->act_state = XPC_P_INACTIVE;
	spin_unlock_irqrestore(&part->act_lock, irq_flags);
	part->remote_rp_pa = 0;
}

/*
 * SAL has provided a partition and machine mask. The partition mask
 * contains a bit for each even nasid in our partition. The machine
 * mask contains a bit for each even nasid in the entire machine.
 *
 * Using those two bit arrays, we can determine which nasids are
 * known in the machine. Each should also have a reserved page
 * initialized if they are available for partitioning.
 */
void
xpc_discovery(void)
{
	void *remote_rp_base;
	struct xpc_rsvd_page *remote_rp;
	struct xpc_vars *remote_vars;
	u64 remote_rp_pa;
	u64 remote_vars_pa;
	int region;
	int region_size;
	int max_regions;
	int nasid;
	struct xpc_rsvd_page *rp;
	short partid;
	struct xpc_partition *part;
	u64 *discovered_nasids;
	enum xp_retval ret;

	remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
						  xp_nasid_mask_bytes,
						  GFP_KERNEL, &remote_rp_base);
	if (remote_rp == NULL)
		return;

	remote_vars = (struct xpc_vars *)remote_rp;

	discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words,
				    GFP_KERNEL);
	if (discovered_nasids == NULL) {
		kfree(remote_rp_base);
		return;
	}

	rp = (struct xpc_rsvd_page *)xpc_rsvd_page;

	/*
	 * The term 'region' in this context refers to the minimum number of
	 * nodes that can comprise an access protection grouping. The access
	 * protection is in regards to memory, IOI and IPI.
	 */
	max_regions = 64;
	region_size = sn_region_size;

	switch (region_size) {
	case 128:
		max_regions *= 2;
		/* fall through */
	case 64:
		max_regions *= 2;
		/* fall through */
	case 32:
		max_regions *= 2;
		region_size = 16;
		DBUG_ON(!is_shub2());
	}
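
	/*
	 * Worked example (editorial, not in the original source): with
	 * sn_region_size == 64 the switch falls through the case 64 and
	 * case 32 arms, so max_regions becomes 64 * 2 * 2 = 256 and
	 * region_size is reduced to 16; the loops below then probe even
	 * nasids 0 through (256 * 16 * 2) - 2.
	 */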

	for (region = 0; region < max_regions; region++) {

		if (xpc_exiting)
			break;

		dev_dbg(xpc_part, "searching region %d\n", region);

		for (nasid = (region * region_size * 2);
		     nasid < ((region + 1) * region_size * 2); nasid += 2) {

			if (xpc_exiting)
				break;

			dev_dbg(xpc_part, "checking nasid %d\n", nasid);

			if (XPC_NASID_IN_ARRAY(nasid, xpc_part_nasids)) {
				dev_dbg(xpc_part, "PROM indicates Nasid %d is "
					"part of the local partition; skipping "
					"region\n", nasid);
				break;
			}

			if (!(XPC_NASID_IN_ARRAY(nasid, xpc_mach_nasids))) {
				dev_dbg(xpc_part, "PROM indicates Nasid %d was "
					"not on Numa-Link network at reset\n",
					nasid);
				continue;
			}

			if (XPC_NASID_IN_ARRAY(nasid, discovered_nasids)) {
				dev_dbg(xpc_part, "Nasid %d is part of a "
					"partition which was previously "
					"discovered\n", nasid);
				continue;
			}

			/* pull over the reserved page structure */

			ret = xpc_get_remote_rp(nasid, discovered_nasids,
						remote_rp, &remote_rp_pa);
			if (ret != xpSuccess) {
				dev_dbg(xpc_part, "unable to get reserved page "
					"from nasid %d, reason=%d\n", nasid,
					ret);

				if (ret == xpLocalPartid)
					break;

				continue;
			}

			remote_vars_pa = remote_rp->vars_pa;

			partid = remote_rp->partid;
			part = &xpc_partitions[partid];

			/* pull over the cross partition variables */

			ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
			if (ret != xpSuccess) {
				dev_dbg(xpc_part, "unable to get XPC variables "
					"from nasid %d, reason=%d\n", nasid,
					ret);

				XPC_DEACTIVATE_PARTITION(part, ret);
				continue;
			}

			if (part->act_state != XPC_P_INACTIVE) {
				dev_dbg(xpc_part, "partition %d on nasid %d is "
					"already activating\n", partid, nasid);
				break;
			}

			/*
			 * Register the remote partition's AMOs with SAL so it
			 * can handle and cleanup errors within that address
			 * range should the remote partition go down. We don't
			 * unregister this range because it is difficult to
			 * tell when outstanding writes to the remote partition
			 * are finished and thus when it is safe to unregister.
			 * This should not result in wasted space in the SAL
			 * xp_addr_region table because we should get the same
			 * page for remote_act_amos_pa after module reloads and
			 * system reboots.
			 */
			if (sn_register_xp_addr_region
			    (remote_vars->amos_page_pa, PAGE_SIZE, 1) < 0) {
				dev_dbg(xpc_part,
					"partition %d failed to "
					"register xp_addr region 0x%016lx\n",
					partid, remote_vars->amos_page_pa);

				XPC_SET_REASON(part, xpPhysAddrRegFailed,
					       __LINE__);
				break;
			}

			/*
			 * The remote nasid is valid and available.
			 * Send an interrupt to that nasid to notify
			 * it that we are ready to begin activation.
			 */
			dev_dbg(xpc_part, "sending an interrupt to AMO 0x%lx, "
				"nasid %d, phys_cpuid 0x%x\n",
				remote_vars->amos_page_pa,
				remote_vars->act_nasid,
				remote_vars->act_phys_cpuid);

			if (XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
							   version)) {
				part->remote_amos_page_pa =
				    remote_vars->amos_page_pa;
				xpc_mark_partition_disengaged(part);
				xpc_cancel_partition_disengage_request(part);
			}
			xpc_IPI_send_activate(remote_vars);
		}
	}

	kfree(discovered_nasids);
	kfree(remote_rp_base);
}

/*
 * Given a partid, get the nasids owned by that partition from the
 * remote partition's reserved page.
 */
enum xp_retval
xpc_initiate_partid_to_nasids(short partid, void *nasid_mask)
{
	struct xpc_partition *part;
	u64 part_nasid_pa;

	part = &xpc_partitions[partid];
	if (part->remote_rp_pa == 0)
		return xpPartitionDown;

	memset(nasid_mask, 0, XP_NASID_MASK_BYTES);

	part_nasid_pa = (u64)XPC_RP_PART_NASIDS(part->remote_rp_pa);

	return xp_remote_memcpy(nasid_mask, (void *)part_nasid_pa,
				xp_nasid_mask_bytes);
}
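
/*
 * Illustrative sketch (editorial, not part of the original driver): a caller
 * of xpc_initiate_partid_to_nasids() is expected to supply a nasid_mask
 * buffer of at least XP_NASID_MASK_BYTES, since the mask is cleared to that
 * size before the remote copy.  Variable names here are hypothetical:
 *
 *	u64 nasids[XP_NASID_MASK_BYTES / sizeof(u64)];
 *	enum xp_retval ret;
 *
 *	ret = xpc_initiate_partid_to_nasids(partid, nasids);
 *	if (ret != xpSuccess)
 *		... the partition is down or the remote copy failed ...
 */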