  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
  7. */
  8. /*
  9. * Cross Partition Communication (XPC) partition support.
  10. *
  11. * This is the part of XPC that detects the presence/absence of
  12. * other partitions. It provides a heartbeat and monitors the
  13. * heartbeats of other partitions.
  14. *
  15. */
#include <linux/kernel.h>
#include <linux/sysctl.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>
#include <asm/uncached.h>
#include <asm/sn/bte.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/addrs.h>
#include <asm/sn/xpc.h>
  28. /* XPC is exiting flag */
  29. int xpc_exiting;
  30. /* SH_IPI_ACCESS shub register value on startup */
  31. static u64 xpc_sh1_IPI_access;
  32. static u64 xpc_sh2_IPI_access0;
  33. static u64 xpc_sh2_IPI_access1;
  34. static u64 xpc_sh2_IPI_access2;
  35. static u64 xpc_sh2_IPI_access3;
  36. /* original protection values for each node */
  37. u64 xpc_prot_vec[MAX_NUMNODES];
  38. /* this partition's reserved page pointers */
  39. struct xpc_rsvd_page *xpc_rsvd_page;
  40. static u64 *xpc_part_nasids;
  41. static u64 *xpc_mach_nasids;
  42. struct xpc_vars *xpc_vars;
  43. struct xpc_vars_part *xpc_vars_part;
  44. static int xp_nasid_mask_bytes; /* actual size in bytes of nasid mask */
  45. static int xp_nasid_mask_words; /* actual size in words of nasid mask */
  46. /*
  47. * For performance reasons, each entry of xpc_partitions[] is cacheline
  48. * aligned. And xpc_partitions[] is padded with an additional entry at the
  49. * end so that the last legitimate entry doesn't share its cacheline with
  50. * another variable.
  51. */
  52. struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
  53. /*
  54. * Generic buffer used to store a local copy of portions of a remote
  55. * partition's reserved page (either its header and part_nasids mask,
  56. * or its vars).
  57. *
  58. * xpc_discovery runs only once and is a seperate thread that is
  59. * very likely going to be processing in parallel with receiving
  60. * interrupts.
  61. */
  62. char ____cacheline_aligned xpc_remote_copy_buffer[XPC_RP_HEADER_SIZE +
  63. XP_NASID_MASK_BYTES];
  64. /*
  65. * Guarantee that the kmalloc'd memory is cacheline aligned.
  66. */
  67. static void *
  68. xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
  69. {
  70. /* see if kmalloc will give us cachline aligned memory by default */
  71. *base = kmalloc(size, flags);
  72. if (*base == NULL) {
  73. return NULL;
  74. }
  75. if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
  76. return *base;
  77. }
  78. kfree(*base);
  79. /* nope, we'll have to do it ourselves */
  80. *base = kmalloc(size + L1_CACHE_BYTES, flags);
  81. if (*base == NULL) {
  82. return NULL;
  83. }
  84. return (void *) L1_CACHE_ALIGN((u64) *base);
  85. }
  86. /*
  87. * Given a nasid, get the physical address of the partition's reserved page
  88. * for that nasid. This function returns 0 on any error.
  89. */
  90. static u64
  91. xpc_get_rsvd_page_pa(int nasid)
  92. {
  93. bte_result_t bte_res;
  94. s64 status;
  95. u64 cookie = 0;
  96. u64 rp_pa = nasid; /* seed with nasid */
  97. u64 len = 0;
  98. u64 buf = buf;
  99. u64 buf_len = 0;
  100. void *buf_base = NULL;
  101. while (1) {
  102. status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa,
  103. &len);
  104. dev_dbg(xpc_part, "SAL returned with status=%li, cookie="
  105. "0x%016lx, address=0x%016lx, len=0x%016lx\n",
  106. status, cookie, rp_pa, len);
  107. if (status != SALRET_MORE_PASSES) {
  108. break;
  109. }
  110. if (L1_CACHE_ALIGN(len) > buf_len) {
  111. kfree(buf_base);
  112. buf_len = L1_CACHE_ALIGN(len);
  113. buf = (u64) xpc_kmalloc_cacheline_aligned(buf_len,
  114. GFP_KERNEL, &buf_base);
  115. if (buf_base == NULL) {
  116. dev_err(xpc_part, "unable to kmalloc "
  117. "len=0x%016lx\n", buf_len);
  118. status = SALRET_ERROR;
  119. break;
  120. }
  121. }
  122. bte_res = xp_bte_copy(rp_pa, ia64_tpa(buf), buf_len,
  123. (BTE_NOTIFY | BTE_WACQUIRE), NULL);
  124. if (bte_res != BTE_SUCCESS) {
  125. dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
  126. status = SALRET_ERROR;
  127. break;
  128. }
  129. }
  130. kfree(buf_base);
  131. if (status != SALRET_OK) {
  132. rp_pa = 0;
  133. }
  134. dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
  135. return rp_pa;
  136. }
/*
 * Fill the partition reserved page with the information needed by
 * other partitions to discover we are alive and establish initial
 * communications.
 *
 * Returns a pointer to this partition's reserved page, or NULL if it
 * cannot be located, fails its sanity check, or the AMO page cannot be
 * set up.
 */
struct xpc_rsvd_page *
xpc_rsvd_page_init(void)
{
	struct xpc_rsvd_page *rp;
	AMO_t *amos_page;
	u64 rp_pa, nasid_array = 0;
	int i, ret;

	/* get the local reserved page's address (pin the cpu so the
	 * cpuid->nasid mapping can't change under us) */

	preempt_disable();
	rp_pa = xpc_get_rsvd_page_pa(cpuid_to_nasid(smp_processor_id()));
	preempt_enable();
	if (rp_pa == 0) {
		dev_err(xpc_part, "SAL failed to locate the reserved page\n");
		return NULL;
	}
	rp = (struct xpc_rsvd_page *) __va(rp_pa);

	/* sanity check: the page SAL handed us must be for our partition */
	if (rp->partid != sn_partition_id) {
		dev_err(xpc_part, "the reserved page's partid of %d should be "
			"%d\n", rp->partid, sn_partition_id);
		return NULL;
	}

	rp->version = XPC_RP_VERSION;

	/* establish the actual sizes of the nasid masks */
	if (rp->SAL_version == 1) {
		/* SAL_version 1 didn't set the nasids_size field */
		rp->nasids_size = 128;
	}
	xp_nasid_mask_bytes = rp->nasids_size;
	xp_nasid_mask_words = xp_nasid_mask_bytes / 8;

	/* setup the pointers to the various items in the reserved page */
	xpc_part_nasids = XPC_RP_PART_NASIDS(rp);
	xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp);
	xpc_vars = XPC_RP_VARS(rp);
	xpc_vars_part = XPC_RP_VARS_PART(rp);

	/*
	 * Before clearing xpc_vars, see if a page of AMOs had been previously
	 * allocated. If not we'll need to allocate one and set permissions
	 * so that cross-partition AMOs are allowed.
	 *
	 * The allocated AMO page needs MCA reporting to remain disabled after
	 * XPC has unloaded. To make this work, we keep a copy of the pointer
	 * to this page (i.e., amos_page) in the struct xpc_vars structure,
	 * which is pointed to by the reserved page, and re-use that saved copy
	 * on subsequent loads of XPC. This AMO page is never freed, and its
	 * memory protections are never restricted.
	 */
	if ((amos_page = xpc_vars->amos_page) == NULL) {
		amos_page = (AMO_t *) TO_AMO(uncached_alloc_page(0));
		if (amos_page == NULL) {
			dev_err(xpc_part, "can't allocate page of AMOs\n");
			return NULL;
		}

		/*
		 * Open up AMO-R/W to cpu. This is done for Shub 1.1 systems
		 * when xpc_allow_IPI_ops() is called via xpc_hb_init().
		 */
		if (!enable_shub_wars_1_1()) {
			ret = sn_change_memprotect(ia64_tpa((u64) amos_page),
					PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1,
					&nasid_array);
			if (ret != 0) {
				dev_err(xpc_part, "can't change memory "
					"protections\n");
				/* undo the allocation before bailing out */
				uncached_free_page(__IA64_UNCACHED_OFFSET |
						   TO_PHYS((u64) amos_page));
				return NULL;
			}
		}
	} else if (!IS_AMO_ADDRESS((u64) amos_page)) {
		/*
		 * EFI's XPBOOT can also set amos_page in the reserved page,
		 * but it happens to leave it as an uncached physical address
		 * and we need it to be an uncached virtual, so we'll have to
		 * convert it.
		 */
		if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) {
			dev_err(xpc_part, "previously used amos_page address "
				"is bad = 0x%p\n", (void *) amos_page);
			return NULL;
		}
		amos_page = (AMO_t *) TO_AMO((u64) amos_page);
	}

	/* clear xpc_vars */
	memset(xpc_vars, 0, sizeof(struct xpc_vars));

	xpc_vars->version = XPC_V_VERSION;
	xpc_vars->act_nasid = cpuid_to_nasid(0);
	xpc_vars->act_phys_cpuid = cpu_physical_id(0);
	xpc_vars->vars_part_pa = __pa(xpc_vars_part);
	xpc_vars->amos_page_pa = ia64_tpa((u64) amos_page);
	xpc_vars->amos_page = amos_page;	/* save for next load of XPC */

	/* clear xpc_vars_part */
	memset((u64 *) xpc_vars_part, 0, sizeof(struct xpc_vars_part) *
					XP_MAX_PARTITIONS);

	/* initialize the activate IRQ related AMO variables */
	for (i = 0; i < xp_nasid_mask_words; i++) {
		(void) xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i);
	}

	/* initialize the engaged remote partitions related AMO variables */
	(void) xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
	(void) xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);

	/* timestamp of when reserved page was setup by XPC */
	rp->stamp = CURRENT_TIME;

	/*
	 * This signifies to the remote partition that our reserved
	 * page is initialized.  (Must be the last field written so
	 * remote readers never see a partially-initialized page.)
	 */
	rp->vars_pa = __pa(xpc_vars);

	return rp;
}
/*
 * Change protections to allow IPI operations (and AMO operations on
 * Shub 1.1 systems).
 *
 * The current SH_IPI_ACCESS register value(s) are first saved in the
 * file-scope xpc_sh1_IPI_access / xpc_sh2_IPI_access* variables so that
 * xpc_restrict_IPI_ops() can restore them later; then every online
 * node's register is opened wide (-1UL = allow all).
 */
void
xpc_allow_IPI_ops(void)
{
	int node;
	int nasid;

	// >>> Change SH_IPI_ACCESS code to use SAL call once it is available.

	if (is_shub2()) {
		/* Shub 2 spreads IPI access control across four MMRs */
		xpc_sh2_IPI_access0 =
			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
		xpc_sh2_IPI_access1 =
			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
		xpc_sh2_IPI_access2 =
			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
		xpc_sh2_IPI_access3 =
			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));

		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
								-1UL);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
								-1UL);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
								-1UL);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
								-1UL);
		}

	} else {
		/* Shub 1 has a single IPI access register */
		xpc_sh1_IPI_access =
			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_IPI_ACCESS));

		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
								-1UL);

			/*
			 * Since the BIST collides with memory operations on
			 * SHUB 1.1 sn_change_memprotect() cannot be used.
			 */
			if (enable_shub_wars_1_1()) {
				/* open up everything */
				/* save per-node protection vector for restore */
				xpc_prot_vec[node] = (u64) HUB_L((u64 *)
						GLOBAL_MMR_ADDR(nasid,
						SH1_MD_DQLP_MMR_DIR_PRIVEC0));
				HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
						SH1_MD_DQLP_MMR_DIR_PRIVEC0),
								-1UL);
				HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
						SH1_MD_DQRP_MMR_DIR_PRIVEC0),
								-1UL);
			}
		}
	}
}
/*
 * Restrict protections to disallow IPI operations (and AMO operations on
 * Shub 1.1 systems).
 *
 * Restores, on every online node, the register values that
 * xpc_allow_IPI_ops() saved before opening access up.
 */
void
xpc_restrict_IPI_ops(void)
{
	int node;
	int nasid;

	// >>> Change SH_IPI_ACCESS code to use SAL call once it is available.

	if (is_shub2()) {
		/* write back the four saved Shub 2 access registers */
		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
					xpc_sh2_IPI_access0);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
					xpc_sh2_IPI_access1);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
					xpc_sh2_IPI_access2);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
					xpc_sh2_IPI_access3);
		}

	} else {
		/* write back the single saved Shub 1 access register */
		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
					xpc_sh1_IPI_access);

			if (enable_shub_wars_1_1()) {
				/* restore the per-node protection vectors */
				HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
						SH1_MD_DQLP_MMR_DIR_PRIVEC0),
						xpc_prot_vec[node]);
				HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
						SH1_MD_DQRP_MMR_DIR_PRIVEC0),
						xpc_prot_vec[node]);
			}
		}
	}
}
/*
 * At periodic intervals, scan through all active partitions and ensure
 * their heartbeat is still active. If not, the partition is deactivated.
 */
void
xpc_check_remote_hb(void)
{
	struct xpc_vars *remote_vars;
	struct xpc_partition *part;
	partid_t partid;
	bte_result_t bres;

	/* reuse the shared bounce buffer to hold each remote's vars */
	remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;

	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {

		if (xpc_exiting) {
			break;	/* XPC is shutting down; stop scanning */
		}

		if (partid == sn_partition_id) {
			continue;	/* never check our own heartbeat */
		}

		part = &xpc_partitions[partid];

		/* only partitions that are (or are becoming) active matter */
		if (part->act_state == XPC_P_INACTIVE ||
				part->act_state == XPC_P_DEACTIVATING) {
			continue;
		}

		/* pull the remote_hb cache line */
		bres = xp_bte_copy(part->remote_vars_pa,
					ia64_tpa((u64) remote_vars),
					XPC_RP_VARS_SIZE,
					(BTE_NOTIFY | BTE_WACQUIRE), NULL);
		if (bres != BTE_SUCCESS) {
			/* can't even read its vars; deactivate it */
			XPC_DEACTIVATE_PARTITION(part,
						xpc_map_bte_errors(bres));
			continue;
		}

		dev_dbg(xpc_part, "partid = %d, heartbeat = %ld, last_heartbeat"
			" = %ld, heartbeat_offline = %ld, HB_mask = 0x%lx\n",
			partid, remote_vars->heartbeat, part->last_heartbeat,
			remote_vars->heartbeat_offline,
			remote_vars->heartbeating_to_mask);

		/*
		 * Declare it dead if the heartbeat count hasn't advanced
		 * since the last scan (and it isn't deliberately marked
		 * offline), or if it is no longer heartbeating to us.
		 */
		if (((remote_vars->heartbeat == part->last_heartbeat) &&
			(remote_vars->heartbeat_offline == 0)) ||
			     !xpc_hb_allowed(sn_partition_id, remote_vars)) {

			XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat);
			continue;
		}

		part->last_heartbeat = remote_vars->heartbeat;
	}
}
  393. /*
  394. * Get a copy of a portion of the remote partition's rsvd page.
  395. *
  396. * remote_rp points to a buffer that is cacheline aligned for BTE copies and
  397. * is large enough to contain a copy of their reserved page header and
  398. * part_nasids mask.
  399. */
  400. static enum xpc_retval
  401. xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
  402. struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa)
  403. {
  404. int bres, i;
  405. /* get the reserved page's physical address */
  406. *remote_rp_pa = xpc_get_rsvd_page_pa(nasid);
  407. if (*remote_rp_pa == 0) {
  408. return xpcNoRsvdPageAddr;
  409. }
  410. /* pull over the reserved page header and part_nasids mask */
  411. bres = xp_bte_copy(*remote_rp_pa, ia64_tpa((u64) remote_rp),
  412. XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes,
  413. (BTE_NOTIFY | BTE_WACQUIRE), NULL);
  414. if (bres != BTE_SUCCESS) {
  415. return xpc_map_bte_errors(bres);
  416. }
  417. if (discovered_nasids != NULL) {
  418. u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp);
  419. for (i = 0; i < xp_nasid_mask_words; i++) {
  420. discovered_nasids[i] |= remote_part_nasids[i];
  421. }
  422. }
  423. /* check that the partid is for another partition */
  424. if (remote_rp->partid < 1 ||
  425. remote_rp->partid > (XP_MAX_PARTITIONS - 1)) {
  426. return xpcInvalidPartid;
  427. }
  428. if (remote_rp->partid == sn_partition_id) {
  429. return xpcLocalPartid;
  430. }
  431. if (XPC_VERSION_MAJOR(remote_rp->version) !=
  432. XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
  433. return xpcBadVersion;
  434. }
  435. return xpcSuccess;
  436. }
  437. /*
  438. * Get a copy of the remote partition's XPC variables from the reserved page.
  439. *
  440. * remote_vars points to a buffer that is cacheline aligned for BTE copies and
  441. * assumed to be of size XPC_RP_VARS_SIZE.
  442. */
  443. static enum xpc_retval
  444. xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
  445. {
  446. int bres;
  447. if (remote_vars_pa == 0) {
  448. return xpcVarsNotSet;
  449. }
  450. /* pull over the cross partition variables */
  451. bres = xp_bte_copy(remote_vars_pa, ia64_tpa((u64) remote_vars),
  452. XPC_RP_VARS_SIZE,
  453. (BTE_NOTIFY | BTE_WACQUIRE), NULL);
  454. if (bres != BTE_SUCCESS) {
  455. return xpc_map_bte_errors(bres);
  456. }
  457. if (XPC_VERSION_MAJOR(remote_vars->version) !=
  458. XPC_VERSION_MAJOR(XPC_V_VERSION)) {
  459. return xpcBadVersion;
  460. }
  461. return xpcSuccess;
  462. }
  463. /*
  464. * Update the remote partition's info.
  465. */
  466. static void
  467. xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version,
  468. struct timespec *remote_rp_stamp, u64 remote_rp_pa,
  469. u64 remote_vars_pa, struct xpc_vars *remote_vars)
  470. {
  471. part->remote_rp_version = remote_rp_version;
  472. dev_dbg(xpc_part, " remote_rp_version = 0x%016lx\n",
  473. part->remote_rp_version);
  474. part->remote_rp_stamp = *remote_rp_stamp;
  475. dev_dbg(xpc_part, " remote_rp_stamp (tv_sec = 0x%lx tv_nsec = 0x%lx\n",
  476. part->remote_rp_stamp.tv_sec, part->remote_rp_stamp.tv_nsec);
  477. part->remote_rp_pa = remote_rp_pa;
  478. dev_dbg(xpc_part, " remote_rp_pa = 0x%016lx\n", part->remote_rp_pa);
  479. part->remote_vars_pa = remote_vars_pa;
  480. dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n",
  481. part->remote_vars_pa);
  482. part->last_heartbeat = remote_vars->heartbeat;
  483. dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n",
  484. part->last_heartbeat);
  485. part->remote_vars_part_pa = remote_vars->vars_part_pa;
  486. dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n",
  487. part->remote_vars_part_pa);
  488. part->remote_act_nasid = remote_vars->act_nasid;
  489. dev_dbg(xpc_part, " remote_act_nasid = 0x%x\n",
  490. part->remote_act_nasid);
  491. part->remote_act_phys_cpuid = remote_vars->act_phys_cpuid;
  492. dev_dbg(xpc_part, " remote_act_phys_cpuid = 0x%x\n",
  493. part->remote_act_phys_cpuid);
  494. part->remote_amos_page_pa = remote_vars->amos_page_pa;
  495. dev_dbg(xpc_part, " remote_amos_page_pa = 0x%lx\n",
  496. part->remote_amos_page_pa);
  497. part->remote_vars_version = remote_vars->version;
  498. dev_dbg(xpc_part, " remote_vars_version = 0x%x\n",
  499. part->remote_vars_version);
  500. }
/*
 * Prior code has determined the nasid which generated an IPI. Inspect
 * that nasid to determine if its partition needs to be activated or
 * deactivated.
 *
 * A partition is consider "awaiting activation" if our partition
 * flags indicate it is not active and it has a heartbeat. A
 * partition is considered "awaiting deactivation" if our partition
 * flags indicate it is active but it has no heartbeat or it is not
 * sending its heartbeat to us.
 *
 * To determine the heartbeat, the remote nasid must have a properly
 * initialized reserved page.
 */
static void
xpc_identify_act_IRQ_req(int nasid)
{
	struct xpc_rsvd_page *remote_rp;
	struct xpc_vars *remote_vars;
	u64 remote_rp_pa;
	u64 remote_vars_pa;
	int remote_rp_version;
	int reactivate = 0;
	int stamp_diff;
	struct timespec remote_rp_stamp = { 0, 0 };
	partid_t partid;
	struct xpc_partition *part;
	enum xpc_retval ret;

	/* pull over the reserved page structure */

	remote_rp = (struct xpc_rsvd_page *) xpc_remote_copy_buffer;

	ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
	if (ret != xpcSuccess) {
		dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
			"which sent interrupt, reason=%d\n", nasid, ret);
		return;
	}

	remote_vars_pa = remote_rp->vars_pa;
	remote_rp_version = remote_rp->version;
	if (XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
		remote_rp_stamp = remote_rp->stamp;
	}
	partid = remote_rp->partid;
	part = &xpc_partitions[partid];

	/* pull over the cross partition variables */
	/* NOTE: this reuses (and overwrites) the same buffer as remote_rp;
	 * everything needed from remote_rp was copied out above */

	remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;

	ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
	if (ret != xpcSuccess) {

		dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
			"which sent interrupt, reason=%d\n", nasid, ret);

		XPC_DEACTIVATE_PARTITION(part, ret);
		return;
	}

	part->act_IRQ_rcvd++;

	dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
		"%ld:0x%lx\n", (int) nasid, (int) partid, part->act_IRQ_rcvd,
		remote_vars->heartbeat, remote_vars->heartbeating_to_mask);

	/* simple case: partition is fully inactive, so just activate it */
	if (xpc_partition_disengaged(part) &&
				part->act_state == XPC_P_INACTIVE) {

		xpc_update_partition_info(part, remote_rp_version,
					&remote_rp_stamp, remote_rp_pa,
					remote_vars_pa, remote_vars);

		if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
			if (xpc_partition_disengage_requested(1UL << partid)) {
				/*
				 * Other side is waiting on us to disengage,
				 * even though we already have.
				 */
				return;
			}
		} else {
			/* other side doesn't support disengage requests */
			xpc_clear_partition_disengage_request(1UL << partid);
		}

		xpc_activate_partition(part);
		return;
	}

	/* from here on we're dealing with a possible remote reboot */

	DBUG_ON(part->remote_rp_version == 0);
	DBUG_ON(part->remote_vars_version == 0);

	if (!XPC_SUPPORTS_RP_STAMP(part->remote_rp_version)) {
		DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(part->
							remote_vars_version));

		if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
			DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
								version));

			/* see if the other side rebooted */
			if (part->remote_amos_page_pa ==
				remote_vars->amos_page_pa &&
					xpc_hb_allowed(sn_partition_id,
								remote_vars)) {
				/* doesn't look that way, so ignore the IPI */
				return;
			}
		}

		/*
		 * Other side rebooted and previous XPC didn't support the
		 * disengage request, so we don't need to do anything special.
		 */

		xpc_update_partition_info(part, remote_rp_version,
						&remote_rp_stamp, remote_rp_pa,
						remote_vars_pa, remote_vars);
		part->reactivate_nasid = nasid;
		XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
		return;
	}

	DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version));

	if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
		DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version));

		/*
		 * Other side rebooted and previous XPC did support the
		 * disengage request, but the new one doesn't.
		 */

		xpc_clear_partition_engaged(1UL << partid);
		xpc_clear_partition_disengage_request(1UL << partid);

		xpc_update_partition_info(part, remote_rp_version,
						&remote_rp_stamp, remote_rp_pa,
						remote_vars_pa, remote_vars);
		reactivate = 1;

	} else {
		DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version));

		/* a newer rp stamp than we have recorded means a reboot */
		stamp_diff = xpc_compare_stamps(&part->remote_rp_stamp,
							&remote_rp_stamp);
		if (stamp_diff != 0) {
			DBUG_ON(stamp_diff >= 0);

			/*
			 * Other side rebooted and the previous XPC did support
			 * the disengage request, as does the new one.
			 */

			DBUG_ON(xpc_partition_engaged(1UL << partid));
			DBUG_ON(xpc_partition_disengage_requested(1UL <<
								partid));

			xpc_update_partition_info(part, remote_rp_version,
						&remote_rp_stamp, remote_rp_pa,
						remote_vars_pa, remote_vars);
			reactivate = 1;
		}
	}

	if (part->disengage_request_timeout > 0 &&
					!xpc_partition_disengaged(part)) {
		/* still waiting on other side to disengage from us */
		return;
	}

	if (reactivate) {
		part->reactivate_nasid = nasid;
		XPC_DEACTIVATE_PARTITION(part, xpcReactivating);

	} else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) &&
			xpc_partition_disengage_requested(1UL << partid)) {
		XPC_DEACTIVATE_PARTITION(part, xpcOtherGoingDown);
	}
}
  650. /*
  651. * Loop through the activation AMO variables and process any bits
  652. * which are set. Each bit indicates a nasid sending a partition
  653. * activation or deactivation request.
  654. *
  655. * Return #of IRQs detected.
  656. */
  657. int
  658. xpc_identify_act_IRQ_sender(void)
  659. {
  660. int word, bit;
  661. u64 nasid_mask;
  662. u64 nasid; /* remote nasid */
  663. int n_IRQs_detected = 0;
  664. AMO_t *act_amos;
  665. act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS;
  666. /* scan through act AMO variable looking for non-zero entries */
  667. for (word = 0; word < xp_nasid_mask_words; word++) {
  668. if (xpc_exiting) {
  669. break;
  670. }
  671. nasid_mask = xpc_IPI_receive(&act_amos[word]);
  672. if (nasid_mask == 0) {
  673. /* no IRQs from nasids in this variable */
  674. continue;
  675. }
  676. dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word,
  677. nasid_mask);
  678. /*
  679. * If this nasid has been added to the machine since
  680. * our partition was reset, this will retain the
  681. * remote nasid in our reserved pages machine mask.
  682. * This is used in the event of module reload.
  683. */
  684. xpc_mach_nasids[word] |= nasid_mask;
  685. /* locate the nasid(s) which sent interrupts */
  686. for (bit = 0; bit < (8 * sizeof(u64)); bit++) {
  687. if (nasid_mask & (1UL << bit)) {
  688. n_IRQs_detected++;
  689. nasid = XPC_NASID_FROM_W_B(word, bit);
  690. dev_dbg(xpc_part, "interrupt from nasid %ld\n",
  691. nasid);
  692. xpc_identify_act_IRQ_req(nasid);
  693. }
  694. }
  695. }
  696. return n_IRQs_detected;
  697. }
  698. /*
  699. * See if the other side has responded to a partition disengage request
  700. * from us.
  701. */
  702. int
  703. xpc_partition_disengaged(struct xpc_partition *part)
  704. {
  705. partid_t partid = XPC_PARTID(part);
  706. int disengaged;
  707. disengaged = (xpc_partition_engaged(1UL << partid) == 0);
  708. if (part->disengage_request_timeout) {
  709. if (!disengaged) {
  710. if (jiffies < part->disengage_request_timeout) {
  711. /* timelimit hasn't been reached yet */
  712. return 0;
  713. }
  714. /*
  715. * Other side hasn't responded to our disengage
  716. * request in a timely fashion, so assume it's dead.
  717. */
  718. dev_info(xpc_part, "disengage from remote partition %d "
  719. "timed out\n", partid);
  720. xpc_disengage_request_timedout = 1;
  721. xpc_clear_partition_engaged(1UL << partid);
  722. disengaged = 1;
  723. }
  724. part->disengage_request_timeout = 0;
  725. /* cancel the timer function, provided it's not us */
  726. if (!in_interrupt()) {
  727. del_singleshot_timer_sync(&part->
  728. disengage_request_timer);
  729. }
  730. DBUG_ON(part->act_state != XPC_P_DEACTIVATING &&
  731. part->act_state != XPC_P_INACTIVE);
  732. if (part->act_state != XPC_P_INACTIVE) {
  733. xpc_wakeup_channel_mgr(part);
  734. }
  735. if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
  736. xpc_cancel_partition_disengage_request(part);
  737. }
  738. }
  739. return disengaged;
  740. }
  741. /*
  742. * Mark specified partition as active.
  743. */
  744. enum xpc_retval
  745. xpc_mark_partition_active(struct xpc_partition *part)
  746. {
  747. unsigned long irq_flags;
  748. enum xpc_retval ret;
  749. dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));
  750. spin_lock_irqsave(&part->act_lock, irq_flags);
  751. if (part->act_state == XPC_P_ACTIVATING) {
  752. part->act_state = XPC_P_ACTIVE;
  753. ret = xpcSuccess;
  754. } else {
  755. DBUG_ON(part->reason == xpcSuccess);
  756. ret = part->reason;
  757. }
  758. spin_unlock_irqrestore(&part->act_lock, irq_flags);
  759. return ret;
  760. }
/*
 * Notify XPC that the partition is down.
 *
 * @line is the caller's __LINE__, recorded via XPC_SET_REASON so the
 * origin of the deactivation can be identified later.
 */
void
xpc_deactivate_partition(const int line, struct xpc_partition *part,
				enum xpc_retval reason)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->act_lock, irq_flags);

	if (part->act_state == XPC_P_INACTIVE) {
		/* already down; just record why we were asked again */
		XPC_SET_REASON(part, reason, line);
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		if (reason == xpcReactivating) {
			/* we interrupt ourselves to reactivate partition */
			xpc_IPI_send_reactivate(part);
		}
		return;
	}

	if (part->act_state == XPC_P_DEACTIVATING) {
		/*
		 * Already on the way down. Only override the recorded
		 * reason if the new one is more specific than xpcUnloading
		 * or is a reactivation request.
		 */
		if ((part->reason == xpcUnloading && reason != xpcUnloading) ||
					reason == xpcReactivating) {
			XPC_SET_REASON(part, reason, line);
		}
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		return;
	}

	part->act_state = XPC_P_DEACTIVATING;
	XPC_SET_REASON(part, reason, line);

	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
		/* ask the remote side to disengage and notify it via IPI */
		xpc_request_partition_disengage(part);
		xpc_IPI_send_disengage(part);

		/* set a timelimit on the disengage request */
		part->disengage_request_timeout = jiffies +
					(xpc_disengage_request_timelimit * HZ);
		part->disengage_request_timer.expires =
					part->disengage_request_timeout;
		add_timer(&part->disengage_request_timer);
	}

	dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n",
		XPC_PARTID(part), reason);

	xpc_partition_going_down(part, reason);
}
  804. /*
  805. * Mark specified partition as inactive.
  806. */
  807. void
  808. xpc_mark_partition_inactive(struct xpc_partition *part)
  809. {
  810. unsigned long irq_flags;
  811. dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
  812. XPC_PARTID(part));
  813. spin_lock_irqsave(&part->act_lock, irq_flags);
  814. part->act_state = XPC_P_INACTIVE;
  815. spin_unlock_irqrestore(&part->act_lock, irq_flags);
  816. part->remote_rp_pa = 0;
  817. }
/*
 * SAL has provided a partition and machine mask.  The partition mask
 * contains a bit for each even nasid in our partition.  The machine
 * mask contains a bit for each even nasid in the entire machine.
 *
 * Using those two bit arrays, we can determine which nasids are
 * known in the machine.  Each should also have a reserved page
 * initialized if they are available for partitioning.
 */
void
xpc_discovery(void)
{
	void *remote_rp_base;
	struct xpc_rsvd_page *remote_rp;
	struct xpc_vars *remote_vars;
	u64 remote_rp_pa;
	u64 remote_vars_pa;
	int region;
	int region_size;
	int max_regions;
	int nasid;
	struct xpc_rsvd_page *rp;
	partid_t partid;
	struct xpc_partition *part;
	u64 *discovered_nasids;
	enum xpc_retval ret;

	remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
						xp_nasid_mask_bytes,
						GFP_KERNEL, &remote_rp_base);
	if (remote_rp == NULL) {
		return;
	}
	/*
	 * The same buffer is reused to pull over the remote vars; this
	 * assumes struct xpc_vars fits in the allocation above —
	 * NOTE(review): confirm against the XPC_RP_HEADER_SIZE definition.
	 */
	remote_vars = (struct xpc_vars *) remote_rp;

	/* one bit per even nasid already claimed by a found partition */
	discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words,
							GFP_KERNEL);
	if (discovered_nasids == NULL) {
		kfree(remote_rp_base);
		return;
	}

	rp = (struct xpc_rsvd_page *) xpc_rsvd_page;

	/*
	 * The term 'region' in this context refers to the minimum number of
	 * nodes that can comprise an access protection grouping. The access
	 * protection is in regards to memory, IOI and IPI.
	 */
	max_regions = 64;
	region_size = sn_region_size;

	/*
	 * Intentional fallthrough: each larger region size doubles the
	 * region count relative to the one below it.  No default case:
	 * a region_size of 16 leaves max_regions at 64 as-is.
	 */
	switch (region_size) {
	case 128:
		max_regions *= 2;
		/* fallthrough */
	case 64:
		max_regions *= 2;
		/* fallthrough */
	case 32:
		max_regions *= 2;
		region_size = 16;
		DBUG_ON(!is_shub2());
	}

	for (region = 0; region < max_regions; region++) {

		/* volatile cast forces a fresh read of the exit flag */
		if ((volatile int) xpc_exiting) {
			break;
		}

		dev_dbg(xpc_part, "searching region %d\n", region);

		/* nasids come in even numbers, hence the stride of 2 */
		for (nasid = (region * region_size * 2);
		     nasid < ((region + 1) * region_size * 2);
		     nasid += 2) {

			if ((volatile int) xpc_exiting) {
				break;
			}

			dev_dbg(xpc_part, "checking nasid %d\n", nasid);

			if (XPC_NASID_IN_ARRAY(nasid, xpc_part_nasids)) {
				dev_dbg(xpc_part, "PROM indicates Nasid %d is "
					"part of the local partition; skipping "
					"region\n", nasid);
				break;
			}

			if (!(XPC_NASID_IN_ARRAY(nasid, xpc_mach_nasids))) {
				dev_dbg(xpc_part, "PROM indicates Nasid %d was "
					"not on Numa-Link network at reset\n",
					nasid);
				continue;
			}

			if (XPC_NASID_IN_ARRAY(nasid, discovered_nasids)) {
				dev_dbg(xpc_part, "Nasid %d is part of a "
					"partition which was previously "
					"discovered\n", nasid);
				continue;
			}

			/* pull over the reserved page structure */

			ret = xpc_get_remote_rp(nasid, discovered_nasids,
					      remote_rp, &remote_rp_pa);
			if (ret != xpcSuccess) {
				dev_dbg(xpc_part, "unable to get reserved page "
					"from nasid %d, reason=%d\n", nasid,
					ret);

				if (ret == xpcLocalPartid) {
					break;
				}
				continue;
			}

			remote_vars_pa = remote_rp->vars_pa;

			partid = remote_rp->partid;
			part = &xpc_partitions[partid];

			/* pull over the cross partition variables */

			ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
			if (ret != xpcSuccess) {
				dev_dbg(xpc_part, "unable to get XPC variables "
					"from nasid %d, reason=%d\n", nasid,
					ret);

				XPC_DEACTIVATE_PARTITION(part, ret);
				continue;
			}

			if (part->act_state != XPC_P_INACTIVE) {
				dev_dbg(xpc_part, "partition %d on nasid %d is "
					"already activating\n", partid, nasid);
				break;
			}

			/*
			 * Register the remote partition's AMOs with SAL so it
			 * can handle and cleanup errors within that address
			 * range should the remote partition go down. We don't
			 * unregister this range because it is difficult to
			 * tell when outstanding writes to the remote partition
			 * are finished and thus when it is thus safe to
			 * unregister. This should not result in wasted space
			 * in the SAL xp_addr_region table because we should
			 * get the same page for remote_act_amos_pa after
			 * module reloads and system reboots.
			 */
			if (sn_register_xp_addr_region(
					    remote_vars->amos_page_pa,
					    PAGE_SIZE, 1) < 0) {
				dev_dbg(xpc_part, "partition %d failed to "
					"register xp_addr region 0x%016lx\n",
					partid, remote_vars->amos_page_pa);

				XPC_SET_REASON(part, xpcPhysAddrRegFailed,
						__LINE__);
				break;
			}

			/*
			 * The remote nasid is valid and available.
			 * Send an interrupt to that nasid to notify
			 * it that we are ready to begin activation.
			 */
			dev_dbg(xpc_part, "sending an interrupt to AMO 0x%lx, "
				"nasid %d, phys_cpuid 0x%x\n",
				remote_vars->amos_page_pa,
				remote_vars->act_nasid,
				remote_vars->act_phys_cpuid);

			if (XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
								version)) {
				part->remote_amos_page_pa =
						remote_vars->amos_page_pa;
				/* start from a clean engagement state */
				xpc_mark_partition_disengaged(part);
				xpc_cancel_partition_disengage_request(part);
			}
			xpc_IPI_send_activate(remote_vars);
		}
	}

	kfree(discovered_nasids);
	kfree(remote_rp_base);
}
  979. /*
  980. * Given a partid, get the nasids owned by that partition from the
  981. * remote partition's reserved page.
  982. */
  983. enum xpc_retval
  984. xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
  985. {
  986. struct xpc_partition *part;
  987. u64 part_nasid_pa;
  988. int bte_res;
  989. part = &xpc_partitions[partid];
  990. if (part->remote_rp_pa == 0) {
  991. return xpcPartitionDown;
  992. }
  993. memset(nasid_mask, 0, XP_NASID_MASK_BYTES);
  994. part_nasid_pa = (u64) XPC_RP_PART_NASIDS(part->remote_rp_pa);
  995. bte_res = xp_bte_copy(part_nasid_pa, ia64_tpa((u64) nasid_mask),
  996. xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
  997. return xpc_map_bte_errors(bte_res);
  998. }