xpc_partition.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) partition support.
 *
 *	This is the part of XPC that detects the presence/absence of
 *	other partitions. It provides a heartbeat and monitors the
 *	heartbeats of other partitions.
 */

#include <linux/kernel.h>
#include <linux/sysctl.h>
#include <linux/cache.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>
#include <asm/uncached.h>
#include <asm/sn/bte.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/addrs.h>
#include "xpc.h"

/* XPC is exiting flag */
int xpc_exiting;

/* SH_IPI_ACCESS shub register value on startup */
static u64 xpc_sh1_IPI_access;
static u64 xpc_sh2_IPI_access0;
static u64 xpc_sh2_IPI_access1;
static u64 xpc_sh2_IPI_access2;
static u64 xpc_sh2_IPI_access3;

/* original protection values for each node */
u64 xpc_prot_vec[MAX_COMPACT_NODES];

/* this partition's reserved page */
struct xpc_rsvd_page *xpc_rsvd_page;

/* this partition's XPC variables (within the reserved page) */
struct xpc_vars *xpc_vars;
struct xpc_vars_part *xpc_vars_part;

/*
 * For performance reasons, each entry of xpc_partitions[] is cacheline
 * aligned. And xpc_partitions[] is padded with an additional entry at the
 * end so that the last legitimate entry doesn't share its cacheline with
 * another variable.
 */
struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];

/*
 * Generic buffer used to store a local copy of the remote partition's
 * reserved page or XPC variables.
 *
 * xpc_discovery runs only once and is a separate thread that is
 * very likely going to be processing in parallel with receiving
 * interrupts.
 */
char ____cacheline_aligned xpc_remote_copy_buffer[XPC_RSVD_PAGE_ALIGNED_SIZE];

/*
 * Given a nasid, get the physical address of the partition's reserved page
 * for that nasid. This function returns 0 on any error.
 */
static u64
xpc_get_rsvd_page_pa(int nasid, u64 buf, u64 buf_size)
{
	bte_result_t bte_res;
	s64 status;
	u64 cookie = 0;
	u64 rp_pa = nasid;	/* seed with nasid */
	u64 len = 0;

	while (1) {
		status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa,
								&len);

		dev_dbg(xpc_part, "SAL returned with status=%li, cookie="
			"0x%016lx, address=0x%016lx, len=0x%016lx\n",
			status, cookie, rp_pa, len);

		if (status != SALRET_MORE_PASSES) {
			break;
		}

		if (len > buf_size) {
			dev_err(xpc_part, "len (=0x%016lx) > buf_size\n", len);
			status = SALRET_ERROR;
			break;
		}

		bte_res = xp_bte_copy(rp_pa, ia64_tpa(buf), buf_size,
					(BTE_NOTIFY | BTE_WACQUIRE), NULL);
		if (bte_res != BTE_SUCCESS) {
			dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
			status = SALRET_ERROR;
			break;
		}
	}

	if (status != SALRET_OK) {
		rp_pa = 0;
	}
	dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
	return rp_pa;
}
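/*
 * Usage sketch (illustrative, not part of the original file): the SAL call
 * above is iterative (SALRET_MORE_PASSES), so a caller only needs to supply
 * a cacheline-aligned scratch buffer and check for a zero return, e.g.:
 *
 *	u64 rp_pa = xpc_get_rsvd_page_pa(cnodeid_to_nasid(0),
 *					 (u64) xpc_remote_copy_buffer,
 *					 XPC_RSVD_PAGE_ALIGNED_SIZE);
 *	if (rp_pa == 0)
 *		...		handle "reserved page not found"
 *
 * This mirrors the calls made from xpc_rsvd_page_init() and
 * xpc_get_remote_rp() below.
 */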
/*
 * Fill the partition reserved page with the information needed by
 * other partitions to discover we are alive and establish initial
 * communications.
 */
struct xpc_rsvd_page *
xpc_rsvd_page_init(void)
{
	struct xpc_rsvd_page *rp;
	AMO_t *amos_page;
	u64 rp_pa, next_cl, nasid_array = 0;
	int i, ret;

	/* get the local reserved page's address */

	rp_pa = xpc_get_rsvd_page_pa(cnodeid_to_nasid(0),
					(u64) xpc_remote_copy_buffer,
					XPC_RSVD_PAGE_ALIGNED_SIZE);
	if (rp_pa == 0) {
		dev_err(xpc_part, "SAL failed to locate the reserved page\n");
		return NULL;
	}
	rp = (struct xpc_rsvd_page *) __va(rp_pa);

	if (rp->partid != sn_partition_id) {
		dev_err(xpc_part, "the reserved page's partid of %d should be "
			"%d\n", rp->partid, sn_partition_id);
		return NULL;
	}

	rp->version = XPC_RP_VERSION;

	/*
	 * Place the XPC variables on the cache line following the
	 * reserved page structure.
	 */
	next_cl = (u64) rp + XPC_RSVD_PAGE_ALIGNED_SIZE;
	xpc_vars = (struct xpc_vars *) next_cl;

	/*
	 * Before clearing xpc_vars, see if a page of AMOs had been previously
	 * allocated. If not we'll need to allocate one and set permissions
	 * so that cross-partition AMOs are allowed.
	 *
	 * The allocated AMO page needs MCA reporting to remain disabled after
	 * XPC has unloaded. To make this work, we keep a copy of the pointer
	 * to this page (i.e., amos_page) in the struct xpc_vars structure,
	 * which is pointed to by the reserved page, and re-use that saved copy
	 * on subsequent loads of XPC. This AMO page is never freed, and its
	 * memory protections are never restricted.
	 */
	if ((amos_page = xpc_vars->amos_page) == NULL) {
		amos_page = (AMO_t *) TO_AMO(uncached_alloc_page(0));
		if (amos_page == NULL) {
			dev_err(xpc_part, "can't allocate page of AMOs\n");
			return NULL;
		}

		/*
		 * Open up AMO-R/W to cpu. This is done for Shub 1.1 systems
		 * when xpc_allow_IPI_ops() is called via xpc_hb_init().
		 */
		if (!enable_shub_wars_1_1()) {
			ret = sn_change_memprotect(ia64_tpa((u64) amos_page),
					PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1,
					&nasid_array);
			if (ret != 0) {
				dev_err(xpc_part, "can't change memory "
					"protections\n");
				uncached_free_page(__IA64_UNCACHED_OFFSET |
						   TO_PHYS((u64) amos_page));
				return NULL;
			}
		}
	} else if (!IS_AMO_ADDRESS((u64) amos_page)) {
		/*
		 * EFI's XPBOOT can also set amos_page in the reserved page,
		 * but it happens to leave it as an uncached physical address
		 * and we need it to be an uncached virtual, so we'll have to
		 * convert it.
		 */
		if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) {
			dev_err(xpc_part, "previously used amos_page address "
				"is bad = 0x%p\n", (void *) amos_page);
			return NULL;
		}
		amos_page = (AMO_t *) TO_AMO((u64) amos_page);
	}

	memset(xpc_vars, 0, sizeof(struct xpc_vars));

	/*
	 * Place the XPC per partition specific variables on the cache line
	 * following the XPC variables structure.
	 */
	next_cl += XPC_VARS_ALIGNED_SIZE;
	memset((u64 *) next_cl, 0, sizeof(struct xpc_vars_part) *
						XP_MAX_PARTITIONS);
	xpc_vars_part = (struct xpc_vars_part *) next_cl;
	xpc_vars->vars_part_pa = __pa(next_cl);

	xpc_vars->version = XPC_V_VERSION;
	xpc_vars->act_nasid = cpuid_to_nasid(0);
	xpc_vars->act_phys_cpuid = cpu_physical_id(0);
	xpc_vars->amos_page = amos_page;	/* save for next load of XPC */

	/* initialize the activate IRQ related AMO variables */
	for (i = 0; i < XP_NASID_MASK_WORDS; i++) {
		(void) xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i);
	}

	/* initialize the engaged remote partitions related AMO variables */
	(void) xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
	(void) xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);

	/* export AMO page's physical address to other partitions */
	xpc_vars->amos_page_pa = ia64_tpa((u64) xpc_vars->amos_page);

	/* timestamp of when reserved page was initialized */
	rp->stamp = CURRENT_TIME;

	/*
	 * This signifies to the remote partition that our reserved
	 * page is initialized.
	 */
	rp->vars_pa = __pa(xpc_vars);

	return rp;
}
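/*
 * Layout sketch resulting from xpc_rsvd_page_init() (derived from the
 * offset arithmetic above; the *_ALIGNED_SIZE constants are presumably
 * defined in xpc.h):
 *
 *	rp  ------------------------------>  struct xpc_rsvd_page
 *	rp + XPC_RSVD_PAGE_ALIGNED_SIZE -->  struct xpc_vars
 *	   + XPC_VARS_ALIGNED_SIZE ------->  struct xpc_vars_part[XP_MAX_PARTITIONS]
 *
 * Remote partitions locate this layout through rp->vars_pa and
 * xpc_vars->vars_part_pa; rp->vars_pa is written last, which is what tells
 * a remote partition that everything else here has been initialized.
 */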
/*
 * Change protections to allow IPI operations (and AMO operations on
 * Shub 1.1 systems).
 */
void
xpc_allow_IPI_ops(void)
{
	int node;
	int nasid;

	// >>> Change SH_IPI_ACCESS code to use SAL call once it is available.

	if (is_shub2()) {
		xpc_sh2_IPI_access0 =
			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
		xpc_sh2_IPI_access1 =
			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
		xpc_sh2_IPI_access2 =
			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
		xpc_sh2_IPI_access3 =
			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));

		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
								-1UL);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
								-1UL);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
								-1UL);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
								-1UL);
		}

	} else {
		xpc_sh1_IPI_access =
			(u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_IPI_ACCESS));

		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
								-1UL);

			/*
			 * Since the BIST collides with memory operations on
			 * SHUB 1.1, sn_change_memprotect() cannot be used.
			 */
			if (enable_shub_wars_1_1()) {
				/* open up everything */
				xpc_prot_vec[node] = (u64) HUB_L((u64 *)
						GLOBAL_MMR_ADDR(nasid,
						SH1_MD_DQLP_MMR_DIR_PRIVEC0));
				HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
						SH1_MD_DQLP_MMR_DIR_PRIVEC0),
						-1UL);
				HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
						SH1_MD_DQRP_MMR_DIR_PRIVEC0),
						-1UL);
			}
		}
	}
}
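/*
 * Note (editorial): writing -1UL into the SH_IPI_ACCESS registers (and, on
 * Shub 1.1, the MD_DQ*P_MMR_DIR_PRIVEC0 protection vectors) above sets every
 * bit in those registers, which presumably opens IPI/AMO access as widely as
 * the hardware allows. The original register values are saved first so that
 * xpc_restrict_IPI_ops() below can restore them when XPC unloads.
 */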
/*
 * Restrict protections to disallow IPI operations (and AMO operations on
 * Shub 1.1 systems).
 */
void
xpc_restrict_IPI_ops(void)
{
	int node;
	int nasid;

	// >>> Change SH_IPI_ACCESS code to use SAL call once it is available.

	if (is_shub2()) {
		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
					xpc_sh2_IPI_access0);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
					xpc_sh2_IPI_access1);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
					xpc_sh2_IPI_access2);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
					xpc_sh2_IPI_access3);
		}

	} else {
		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
					xpc_sh1_IPI_access);

			if (enable_shub_wars_1_1()) {
				HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
						SH1_MD_DQLP_MMR_DIR_PRIVEC0),
						xpc_prot_vec[node]);
				HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
						SH1_MD_DQRP_MMR_DIR_PRIVEC0),
						xpc_prot_vec[node]);
			}
		}
	}
}
/*
 * At periodic intervals, scan through all active partitions and ensure
 * their heartbeat is still active. If not, the partition is deactivated.
 */
void
xpc_check_remote_hb(void)
{
	struct xpc_vars *remote_vars;
	struct xpc_partition *part;
	partid_t partid;
	bte_result_t bres;

	remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;

	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {

		if (xpc_exiting) {
			break;
		}

		if (partid == sn_partition_id) {
			continue;
		}

		part = &xpc_partitions[partid];

		if (part->act_state == XPC_P_INACTIVE ||
				part->act_state == XPC_P_DEACTIVATING) {
			continue;
		}

		/* pull the remote_hb cache line */
		bres = xp_bte_copy(part->remote_vars_pa,
					ia64_tpa((u64) remote_vars),
					XPC_VARS_ALIGNED_SIZE,
					(BTE_NOTIFY | BTE_WACQUIRE), NULL);
		if (bres != BTE_SUCCESS) {
			XPC_DEACTIVATE_PARTITION(part,
						xpc_map_bte_errors(bres));
			continue;
		}

		dev_dbg(xpc_part, "partid = %d, heartbeat = %ld, last_heartbeat"
			" = %ld, kdb_status = %ld, HB_mask = 0x%lx\n", partid,
			remote_vars->heartbeat, part->last_heartbeat,
			remote_vars->kdb_status,
			remote_vars->heartbeating_to_mask);

		if (((remote_vars->heartbeat == part->last_heartbeat) &&
				(remote_vars->kdb_status == 0)) ||
				!xpc_hb_allowed(sn_partition_id, remote_vars)) {

			XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat);
			continue;
		}

		part->last_heartbeat = remote_vars->heartbeat;
	}
}
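/*
 * Note on the check above (restating the condition, not adding policy): a
 * remote partition is deactivated with xpcNoHeartbeat when its heartbeat
 * counter has not advanced since the previous scan while it is not sitting
 * in kdb (kdb_status == 0), or when its heartbeating_to_mask indicates it is
 * no longer heartbeating to us.
 */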
/*
 * Get a copy of the remote partition's rsvd page.
 *
 * remote_rp points to a buffer that is cacheline aligned for BTE copies and
 * assumed to be of size XPC_RSVD_PAGE_ALIGNED_SIZE.
 */
static enum xpc_retval
xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
		struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa)
{
	int bres, i;

	/* get the reserved page's physical address */

	*remote_rp_pa = xpc_get_rsvd_page_pa(nasid, (u64) remote_rp,
						XPC_RSVD_PAGE_ALIGNED_SIZE);
	if (*remote_rp_pa == 0) {
		return xpcNoRsvdPageAddr;
	}

	/* pull over the reserved page structure */

	bres = xp_bte_copy(*remote_rp_pa, ia64_tpa((u64) remote_rp),
				XPC_RSVD_PAGE_ALIGNED_SIZE,
				(BTE_NOTIFY | BTE_WACQUIRE), NULL);
	if (bres != BTE_SUCCESS) {
		return xpc_map_bte_errors(bres);
	}

	if (discovered_nasids != NULL) {
		for (i = 0; i < XP_NASID_MASK_WORDS; i++) {
			discovered_nasids[i] |= remote_rp->part_nasids[i];
		}
	}

	/* check that the partid is for another partition */

	if (remote_rp->partid < 1 ||
			remote_rp->partid > (XP_MAX_PARTITIONS - 1)) {
		return xpcInvalidPartid;
	}

	if (remote_rp->partid == sn_partition_id) {
		return xpcLocalPartid;
	}

	if (XPC_VERSION_MAJOR(remote_rp->version) !=
				XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
		return xpcBadVersion;
	}

	return xpcSuccess;
}

/*
 * Get a copy of the remote partition's XPC variables.
 *
 * remote_vars points to a buffer that is cacheline aligned for BTE copies and
 * assumed to be of size XPC_VARS_ALIGNED_SIZE.
 */
static enum xpc_retval
xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
{
	int bres;

	if (remote_vars_pa == 0) {
		return xpcVarsNotSet;
	}

	/* pull over the cross partition variables */

	bres = xp_bte_copy(remote_vars_pa, ia64_tpa((u64) remote_vars),
				XPC_VARS_ALIGNED_SIZE,
				(BTE_NOTIFY | BTE_WACQUIRE), NULL);
	if (bres != BTE_SUCCESS) {
		return xpc_map_bte_errors(bres);
	}

	if (XPC_VERSION_MAJOR(remote_vars->version) !=
				XPC_VERSION_MAJOR(XPC_V_VERSION)) {
		return xpcBadVersion;
	}

	return xpcSuccess;
}

/*
 * Update the remote partition's info.
 */
static void
xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version,
		struct timespec *remote_rp_stamp, u64 remote_rp_pa,
		u64 remote_vars_pa, struct xpc_vars *remote_vars)
{
	part->remote_rp_version = remote_rp_version;
	dev_dbg(xpc_part, " remote_rp_version = 0x%016lx\n",
		part->remote_rp_version);

	part->remote_rp_stamp = *remote_rp_stamp;
	dev_dbg(xpc_part, " remote_rp_stamp (tv_sec = 0x%lx, tv_nsec = 0x%lx)\n",
		part->remote_rp_stamp.tv_sec, part->remote_rp_stamp.tv_nsec);

	part->remote_rp_pa = remote_rp_pa;
	dev_dbg(xpc_part, " remote_rp_pa = 0x%016lx\n", part->remote_rp_pa);

	part->remote_vars_pa = remote_vars_pa;
	dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n",
		part->remote_vars_pa);

	part->last_heartbeat = remote_vars->heartbeat;
	dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n",
		part->last_heartbeat);

	part->remote_vars_part_pa = remote_vars->vars_part_pa;
	dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n",
		part->remote_vars_part_pa);

	part->remote_act_nasid = remote_vars->act_nasid;
	dev_dbg(xpc_part, " remote_act_nasid = 0x%x\n",
		part->remote_act_nasid);

	part->remote_act_phys_cpuid = remote_vars->act_phys_cpuid;
	dev_dbg(xpc_part, " remote_act_phys_cpuid = 0x%x\n",
		part->remote_act_phys_cpuid);

	part->remote_amos_page_pa = remote_vars->amos_page_pa;
	dev_dbg(xpc_part, " remote_amos_page_pa = 0x%lx\n",
		part->remote_amos_page_pa);

	part->remote_vars_version = remote_vars->version;
	dev_dbg(xpc_part, " remote_vars_version = 0x%x\n",
		part->remote_vars_version);
}
/*
 * Prior code has determined the nasid which generated an IPI. Inspect
 * that nasid to determine if its partition needs to be activated or
 * deactivated.
 *
 * A partition is considered "awaiting activation" if our partition
 * flags indicate it is not active and it has a heartbeat. A
 * partition is considered "awaiting deactivation" if our partition
 * flags indicate it is active but it has no heartbeat or it is not
 * sending its heartbeat to us.
 *
 * To determine the heartbeat, the remote nasid must have a properly
 * initialized reserved page.
 */
static void
xpc_identify_act_IRQ_req(int nasid)
{
	struct xpc_rsvd_page *remote_rp;
	struct xpc_vars *remote_vars;
	u64 remote_rp_pa;
	u64 remote_vars_pa;
	int remote_rp_version;
	int reactivate = 0;
	int stamp_diff;
	struct timespec remote_rp_stamp = { 0, 0 };
	partid_t partid;
	struct xpc_partition *part;
	enum xpc_retval ret;

	/* pull over the reserved page structure */

	remote_rp = (struct xpc_rsvd_page *) xpc_remote_copy_buffer;

	ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
	if (ret != xpcSuccess) {
		dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
			"which sent interrupt, reason=%d\n", nasid, ret);
		return;
	}

	remote_vars_pa = remote_rp->vars_pa;
	remote_rp_version = remote_rp->version;
	if (XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
		remote_rp_stamp = remote_rp->stamp;
	}
	partid = remote_rp->partid;
	part = &xpc_partitions[partid];

	/* pull over the cross partition variables */

	remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;

	ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
	if (ret != xpcSuccess) {
		dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
			"which sent interrupt, reason=%d\n", nasid, ret);
		XPC_DEACTIVATE_PARTITION(part, ret);
		return;
	}

	part->act_IRQ_rcvd++;

	dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
		"%ld:0x%lx\n", (int) nasid, (int) partid, part->act_IRQ_rcvd,
		remote_vars->heartbeat, remote_vars->heartbeating_to_mask);

	if (xpc_partition_disengaged(part) &&
				part->act_state == XPC_P_INACTIVE) {

		xpc_update_partition_info(part, remote_rp_version,
					&remote_rp_stamp, remote_rp_pa,
					remote_vars_pa, remote_vars);

		if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
			if (xpc_partition_disengage_requested(1UL << partid)) {
				/*
				 * Other side is waiting on us to disengage,
				 * even though we already have.
				 */
				return;
			}
		} else {
			/* other side doesn't support disengage requests */
			xpc_clear_partition_disengage_request(1UL << partid);
		}

		xpc_activate_partition(part);
		return;
	}

	DBUG_ON(part->remote_rp_version == 0);
	DBUG_ON(part->remote_vars_version == 0);

	if (!XPC_SUPPORTS_RP_STAMP(part->remote_rp_version)) {
		DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(part->
							remote_vars_version));

		if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
			DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
								version));

			/* see if the other side rebooted */
			if (part->remote_amos_page_pa ==
					remote_vars->amos_page_pa &&
					xpc_hb_allowed(sn_partition_id,
								remote_vars)) {
				/* doesn't look that way, so ignore the IPI */
				return;
			}
		}

		/*
		 * Other side rebooted and previous XPC didn't support the
		 * disengage request, so we don't need to do anything special.
		 */

		xpc_update_partition_info(part, remote_rp_version,
						&remote_rp_stamp, remote_rp_pa,
						remote_vars_pa, remote_vars);
		part->reactivate_nasid = nasid;
		XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
		return;
	}

	DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version));

	if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
		DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version));

		/*
		 * Other side rebooted and previous XPC did support the
		 * disengage request, but the new one doesn't.
		 */

		xpc_clear_partition_engaged(1UL << partid);
		xpc_clear_partition_disengage_request(1UL << partid);

		xpc_update_partition_info(part, remote_rp_version,
						&remote_rp_stamp, remote_rp_pa,
						remote_vars_pa, remote_vars);
		reactivate = 1;

	} else {
		DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version));

		stamp_diff = xpc_compare_stamps(&part->remote_rp_stamp,
							&remote_rp_stamp);
		if (stamp_diff != 0) {
			DBUG_ON(stamp_diff >= 0);

			/*
			 * Other side rebooted and the previous XPC did support
			 * the disengage request, as does the new one.
			 */

			DBUG_ON(xpc_partition_engaged(1UL << partid));
			DBUG_ON(xpc_partition_disengage_requested(1UL <<
								partid));

			xpc_update_partition_info(part, remote_rp_version,
						&remote_rp_stamp, remote_rp_pa,
						remote_vars_pa, remote_vars);
			reactivate = 1;
		}
	}

	if (!xpc_partition_disengaged(part)) {
		/* still waiting on other side to disengage from us */
		return;
	}

	if (reactivate) {
		part->reactivate_nasid = nasid;
		XPC_DEACTIVATE_PARTITION(part, xpcReactivating);

	} else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) &&
			xpc_partition_disengage_requested(1UL << partid)) {
		XPC_DEACTIVATE_PARTITION(part, xpcOtherGoingDown);
	}
}
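/*
 * Editorial summary of the cases handled by xpc_identify_act_IRQ_req()
 * above (a restatement of the code for the reader, not new policy):
 *
 *   - the partition is fully disengaged and inactive: record its info and
 *     activate it, unless the other side is still waiting on us to
 *     disengage;
 *   - the remote side appears to have rebooted and the previously known
 *     remote XPC did not support disengage requests: record the new info
 *     and deactivate with xpcReactivating so the partition is reactivated;
 *   - the previously known remote XPC did support disengage requests and
 *     either the new one no longer does or the reserved page stamp has
 *     changed: same reactivation path, taken only once the old instance
 *     has fully disengaged;
 *   - otherwise, if the remote side has requested that we disengage,
 *     deactivate with xpcOtherGoingDown.
 */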
/*
 * Loop through the activation AMO variables and process any bits
 * which are set. Each bit indicates a nasid sending a partition
 * activation or deactivation request.
 *
 * Return #of IRQs detected.
 */
int
xpc_identify_act_IRQ_sender(void)
{
	int word, bit;
	u64 nasid_mask;
	u64 nasid;		/* remote nasid */
	int n_IRQs_detected = 0;
	AMO_t *act_amos;
	struct xpc_rsvd_page *rp = (struct xpc_rsvd_page *) xpc_rsvd_page;

	act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS;

	/* scan through act AMO variable looking for non-zero entries */
	for (word = 0; word < XP_NASID_MASK_WORDS; word++) {

		if (xpc_exiting) {
			break;
		}

		nasid_mask = xpc_IPI_receive(&act_amos[word]);
		if (nasid_mask == 0) {
			/* no IRQs from nasids in this variable */
			continue;
		}

		dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word,
			nasid_mask);

		/*
		 * If this nasid has been added to the machine since
		 * our partition was reset, this will retain the
		 * remote nasid in our reserved page's machine mask.
		 * This is used in the event of module reload.
		 */
		rp->mach_nasids[word] |= nasid_mask;

		/* locate the nasid(s) which sent interrupts */

		for (bit = 0; bit < (8 * sizeof(u64)); bit++) {
			if (nasid_mask & (1UL << bit)) {
				n_IRQs_detected++;
				nasid = XPC_NASID_FROM_W_B(word, bit);
				dev_dbg(xpc_part, "interrupt from nasid %ld\n",
					nasid);
				xpc_identify_act_IRQ_req(nasid);
			}
		}
	}

	return n_IRQs_detected;
}
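/*
 * Note on the word/bit to nasid mapping above: each AMO word covers 64
 * nasids, and only even nasids exist on this platform (see the mask
 * description at xpc_discovery() below), so XPC_NASID_FROM_W_B() presumably
 * expands to something equivalent to ((word) * 64 + (bit)) * 2. Treat that
 * expansion as an assumption; the macro definition in xpc.h is authoritative.
 */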
/*
 * See if the other side has responded to a partition disengage request
 * from us.
 */
int
xpc_partition_disengaged(struct xpc_partition *part)
{
	partid_t partid = XPC_PARTID(part);
	int disengaged;

	disengaged = (xpc_partition_engaged(1UL << partid) == 0);
	if (part->disengage_request_timeout) {
		if (!disengaged) {
			if (jiffies < part->disengage_request_timeout) {
				/* timelimit hasn't been reached yet */
				return 0;
			}

			/*
			 * Other side hasn't responded to our disengage
			 * request in a timely fashion, so assume it's dead.
			 */

			xpc_clear_partition_engaged(1UL << partid);
			disengaged = 1;
		}
		part->disengage_request_timeout = 0;

		/* cancel the timer function, provided it's not us */
		if (!in_interrupt()) {
			del_singleshot_timer_sync(&part->
						disengage_request_timer);
		}

		DBUG_ON(part->act_state != XPC_P_DEACTIVATING &&
					part->act_state != XPC_P_INACTIVE);
		if (part->act_state != XPC_P_INACTIVE) {
			xpc_wakeup_channel_mgr(part);
		}

		if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
			xpc_cancel_partition_disengage_request(part);
		}
	}
	return disengaged;
}
/*
 * Mark specified partition as active.
 */
enum xpc_retval
xpc_mark_partition_active(struct xpc_partition *part)
{
	unsigned long irq_flags;
	enum xpc_retval ret;

	dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));

	spin_lock_irqsave(&part->act_lock, irq_flags);
	if (part->act_state == XPC_P_ACTIVATING) {
		part->act_state = XPC_P_ACTIVE;
		ret = xpcSuccess;
	} else {
		DBUG_ON(part->reason == xpcSuccess);
		ret = part->reason;
	}
	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	return ret;
}

/*
 * Notify XPC that the partition is down.
 */
void
xpc_deactivate_partition(const int line, struct xpc_partition *part,
				enum xpc_retval reason)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->act_lock, irq_flags);

	if (part->act_state == XPC_P_INACTIVE) {
		XPC_SET_REASON(part, reason, line);
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		if (reason == xpcReactivating) {
			/* we interrupt ourselves to reactivate partition */
			xpc_IPI_send_reactivate(part);
		}
		return;
	}
	if (part->act_state == XPC_P_DEACTIVATING) {
		if ((part->reason == xpcUnloading && reason != xpcUnloading) ||
					reason == xpcReactivating) {
			XPC_SET_REASON(part, reason, line);
		}
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		return;
	}

	part->act_state = XPC_P_DEACTIVATING;
	XPC_SET_REASON(part, reason, line);

	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
		xpc_request_partition_disengage(part);
		xpc_IPI_send_disengage(part);

		/* set a timelimit on the disengage request */
		part->disengage_request_timeout = jiffies +
					(xpc_disengage_request_timelimit * HZ);
		part->disengage_request_timer.expires =
					part->disengage_request_timeout;
		add_timer(&part->disengage_request_timer);
	}

	dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n",
		XPC_PARTID(part), reason);

	xpc_partition_going_down(part, reason);
}

/*
 * Mark specified partition as inactive.
 */
void
xpc_mark_partition_inactive(struct xpc_partition *part)
{
	unsigned long irq_flags;

	dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
		XPC_PARTID(part));

	spin_lock_irqsave(&part->act_lock, irq_flags);
	part->act_state = XPC_P_INACTIVE;
	spin_unlock_irqrestore(&part->act_lock, irq_flags);
	part->remote_rp_pa = 0;
}
/*
 * SAL has provided a partition and machine mask. The partition mask
 * contains a bit for each even nasid in our partition. The machine
 * mask contains a bit for each even nasid in the entire machine.
 *
 * Using those two bit arrays, we can determine which nasids are
 * known in the machine. Each should also have a reserved page
 * initialized if they are available for partitioning.
 */
void
xpc_discovery(void)
{
	void *remote_rp_base;
	struct xpc_rsvd_page *remote_rp;
	struct xpc_vars *remote_vars;
	u64 remote_rp_pa;
	u64 remote_vars_pa;
	int region;
	int max_regions;
	int nasid;
	struct xpc_rsvd_page *rp;
	partid_t partid;
	struct xpc_partition *part;
	u64 *discovered_nasids;
	enum xpc_retval ret;

	remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RSVD_PAGE_ALIGNED_SIZE,
						GFP_KERNEL, &remote_rp_base);
	if (remote_rp == NULL) {
		return;
	}
	remote_vars = (struct xpc_vars *) remote_rp;

	discovered_nasids = kmalloc(sizeof(u64) * XP_NASID_MASK_WORDS,
							GFP_KERNEL);
	if (discovered_nasids == NULL) {
		kfree(remote_rp_base);
		return;
	}
	memset(discovered_nasids, 0, sizeof(u64) * XP_NASID_MASK_WORDS);

	rp = (struct xpc_rsvd_page *) xpc_rsvd_page;

	/*
	 * The term 'region' in this context refers to the minimum number of
	 * nodes that can comprise an access protection grouping. The access
	 * protection is with regard to memory, IOI and IPI.
	 */
	//>>> move the next two #defines into either include/asm-ia64/sn/arch.h or
	//>>> include/asm-ia64/sn/addrs.h
#define SH1_MAX_REGIONS		64
#define SH2_MAX_REGIONS		256
	max_regions = is_shub2() ? SH2_MAX_REGIONS : SH1_MAX_REGIONS;

	for (region = 0; region < max_regions; region++) {

		if ((volatile int) xpc_exiting) {
			break;
		}

		dev_dbg(xpc_part, "searching region %d\n", region);

		for (nasid = (region * sn_region_size * 2);
		     nasid < ((region + 1) * sn_region_size * 2);
		     nasid += 2) {

			if ((volatile int) xpc_exiting) {
				break;
			}

			dev_dbg(xpc_part, "checking nasid %d\n", nasid);

			if (XPC_NASID_IN_ARRAY(nasid, rp->part_nasids)) {
				dev_dbg(xpc_part, "PROM indicates Nasid %d is "
					"part of the local partition; skipping "
					"region\n", nasid);
				break;
			}

			if (!(XPC_NASID_IN_ARRAY(nasid, rp->mach_nasids))) {
				dev_dbg(xpc_part, "PROM indicates Nasid %d was "
					"not on Numa-Link network at reset\n",
					nasid);
				continue;
			}

			if (XPC_NASID_IN_ARRAY(nasid, discovered_nasids)) {
				dev_dbg(xpc_part, "Nasid %d is part of a "
					"partition which was previously "
					"discovered\n", nasid);
				continue;
			}

			/* pull over the reserved page structure */

			ret = xpc_get_remote_rp(nasid, discovered_nasids,
						remote_rp, &remote_rp_pa);
			if (ret != xpcSuccess) {
				dev_dbg(xpc_part, "unable to get reserved page "
					"from nasid %d, reason=%d\n", nasid,
					ret);
				if (ret == xpcLocalPartid) {
					break;
				}
				continue;
			}

			remote_vars_pa = remote_rp->vars_pa;

			partid = remote_rp->partid;
			part = &xpc_partitions[partid];

			/* pull over the cross partition variables */

			ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
			if (ret != xpcSuccess) {
				dev_dbg(xpc_part, "unable to get XPC variables "
					"from nasid %d, reason=%d\n", nasid,
					ret);
				XPC_DEACTIVATE_PARTITION(part, ret);
				continue;
			}

			if (part->act_state != XPC_P_INACTIVE) {
				dev_dbg(xpc_part, "partition %d on nasid %d is "
					"already activating\n", partid, nasid);
				break;
			}

			/*
			 * Register the remote partition's AMOs with SAL so it
			 * can handle and cleanup errors within that address
			 * range should the remote partition go down. We don't
			 * unregister this range because it is difficult to
			 * tell when outstanding writes to the remote partition
			 * are finished and thus when it is safe to unregister.
			 * This should not result in wasted space in the SAL
			 * xp_addr_region table because we should get the same
			 * page for remote_act_amos_pa after module reloads and
			 * system reboots.
			 */
			if (sn_register_xp_addr_region(
					    remote_vars->amos_page_pa,
					    PAGE_SIZE, 1) < 0) {
				dev_dbg(xpc_part, "partition %d failed to "
					"register xp_addr region 0x%016lx\n",
					partid, remote_vars->amos_page_pa);

				XPC_SET_REASON(part, xpcPhysAddrRegFailed,
						__LINE__);
				break;
			}

			/*
			 * The remote nasid is valid and available.
			 * Send an interrupt to that nasid to notify
			 * it that we are ready to begin activation.
			 */
			dev_dbg(xpc_part, "sending an interrupt to AMO 0x%lx, "
				"nasid %d, phys_cpuid 0x%x\n",
				remote_vars->amos_page_pa,
				remote_vars->act_nasid,
				remote_vars->act_phys_cpuid);

			if (XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
								version)) {
				part->remote_amos_page_pa =
						remote_vars->amos_page_pa;
				xpc_mark_partition_disengaged(part);
				xpc_cancel_partition_disengage_request(part);
			}
			xpc_IPI_send_activate(remote_vars);
		}
	}

	kfree(discovered_nasids);
	kfree(remote_rp_base);
}
/*
 * Given a partid, get the nasids owned by that partition from the
 * remote partition's reserved page.
 */
enum xpc_retval
xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
{
	struct xpc_partition *part;
	u64 part_nasid_pa;
	int bte_res;

	part = &xpc_partitions[partid];
	if (part->remote_rp_pa == 0) {
		return xpcPartitionDown;
	}

	part_nasid_pa = part->remote_rp_pa +
		(u64) &((struct xpc_rsvd_page *) 0)->part_nasids;

	bte_res = xp_bte_copy(part_nasid_pa, ia64_tpa((u64) nasid_mask),
			L1_CACHE_ALIGN(XP_NASID_MASK_BYTES),
			(BTE_NOTIFY | BTE_WACQUIRE), NULL);

	return xpc_map_bte_errors(bte_res);
}
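/*
 * Note: the part_nasid_pa computation above is the classic offsetof() idiom
 * spelled out by hand; it is equivalent to:
 *
 *	part_nasid_pa = part->remote_rp_pa +
 *			offsetof(struct xpc_rsvd_page, part_nasids);
 */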