ip27-memory.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000, 05 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2000 by Silicon Graphics, Inc.
 * Copyright (C) 2004 by Christoph Hellwig
 *
 * On SGI IP27 the ARC memory configuration data is completely bogus but
 * alternate easier to use mechanisms are available.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/highmem.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>

#include <asm/sn/arch.h>
#include <asm/sn/hub.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/sn_private.h>

#define SLOT_PFNSHIFT		(SLOT_SHIFT - PAGE_SHIFT)
#define PFN_NASIDSHFT		(NASID_SHFT - PAGE_SHIFT)

#define SLOT_IGNORED		0xffff

static short __initdata slot_lastfilled_cache[MAX_COMPACT_NODES];
static unsigned short __initdata slot_psize_cache[MAX_COMPACT_NODES][MAX_MEM_SLOTS];
static struct bootmem_data __initdata plat_node_bdata[MAX_COMPACT_NODES];

struct node_data *__node_data[MAX_COMPACT_NODES];

EXPORT_SYMBOL(__node_data);

static int fine_mode;

static int is_fine_dirmode(void)
{
	return (((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK)
		>> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE);
}

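/*
 * Region number a node belongs to; the nasid is shifted by a different
 * amount depending on whether the system runs in fine or coarse mode.
 */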
static hubreg_t get_region(cnodeid_t cnode)
{
	if (fine_mode)
		return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_FINEREG_SHFT;
	else
		return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_COARSEREG_SHFT;
}

static hubreg_t region_mask;

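/*
 * Build a mask with one bit set for the region of every online node.
 */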
static void gen_region_mask(hubreg_t *region_mask)
{
	cnodeid_t cnode;

	(*region_mask) = 0;
	for_each_online_node(cnode) {
		(*region_mask) |= 1ULL << get_region(cnode);
	}
}

#define rou_rflag	rou_flags

static int router_distance;

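/*
 * Depth-first walk over the router boards reachable from router_a,
 * lowering router_distance to the smallest hop count at which router_b
 * is found.  rou_rflag marks routers already on the current path.
 */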
static void router_recurse(klrou_t *router_a, klrou_t *router_b, int depth)
{
	klrou_t *router;
	lboard_t *brd;
	int port;

	if (router_a->rou_rflag == 1)
		return;

	if (depth >= router_distance)
		return;

	router_a->rou_rflag = 1;

	for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
		if (router_a->rou_port[port].port_nasid == INVALID_NASID)
			continue;

		brd = (lboard_t *)NODE_OFFSET_TO_K0(
			router_a->rou_port[port].port_nasid,
			router_a->rou_port[port].port_offset);

		if (brd->brd_type == KLTYPE_ROUTER) {
			router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
			if (router == router_b) {
				if (depth < router_distance)
					router_distance = depth;
			}
			else
				router_recurse(router, router_b, depth + 1);
		}
	}

	router_a->rou_rflag = 0;
}

unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];

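/*
 * Distance between two nodes in router hops: 0 for the same node, 1 when
 * both nodes hang off the same router, otherwise the shortest path found
 * by router_recurse().
 */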
static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b)
{
	klrou_t *router, *router_a = NULL, *router_b = NULL;
	lboard_t *brd, *dest_brd;
	cnodeid_t cnode;
	nasid_t nasid;
	int port;

	/* Figure out which routers nodes in question are connected to */
	for_each_online_node(cnode) {
		nasid = COMPACT_TO_NASID_NODEID(cnode);

		if (nasid == -1) continue;

		brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
					KLTYPE_ROUTER);

		if (!brd)
			continue;

		do {
			if (brd->brd_flags & DUPLICATE_BOARD)
				continue;

			router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
			router->rou_rflag = 0;

			for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
				if (router->rou_port[port].port_nasid == INVALID_NASID)
					continue;

				dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
					router->rou_port[port].port_nasid,
					router->rou_port[port].port_offset);

				if (dest_brd->brd_type == KLTYPE_IP27) {
					if (dest_brd->brd_nasid == nasid_a)
						router_a = router;
					if (dest_brd->brd_nasid == nasid_b)
						router_b = router;
				}
			}

		} while ((brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)));
	}

	if (router_a == NULL) {
		printk("node_distance: router_a NULL\n");
		return -1;
	}
	if (router_b == NULL) {
		printk("node_distance: router_b NULL\n");
		return -1;
	}
	if (nasid_a == nasid_b)
		return 0;
	if (router_a == router_b)
		return 1;

	router_distance = 100;
	router_recurse(router_a, router_b, 2);

	return router_distance;
}

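/*
 * Fill __node_distances[][] with the distance between every pair of
 * online nodes; all remaining entries stay at -1.
 */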
static void __init init_topology_matrix(void)
{
	nasid_t nasid, nasid2;
	cnodeid_t row, col;

	for (row = 0; row < MAX_COMPACT_NODES; row++)
		for (col = 0; col < MAX_COMPACT_NODES; col++)
			__node_distances[row][col] = -1;

	for_each_online_node(row) {
		nasid = COMPACT_TO_NASID_NODEID(row);
		for_each_online_node(col) {
			nasid2 = COMPACT_TO_NASID_NODEID(col);
			__node_distances[row][col] =
				compute_node_distance(nasid, nasid2);
		}
	}
}

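/*
 * Print the node distance matrix and, for every router, the NASIDs of
 * the IP27 nodes and the routers attached to its ports.
 */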
static void __init dump_topology(void)
{
	nasid_t nasid;
	cnodeid_t cnode;
	lboard_t *brd, *dest_brd;
	int port;
	int router_num = 0;
	klrou_t *router;
	cnodeid_t row, col;

	printk("************** Topology ********************\n");

	printk(" ");
	for_each_online_node(col)
		printk("%02d ", col);
	printk("\n");
	for_each_online_node(row) {
		printk("%02d ", row);
		for_each_online_node(col)
			printk("%2d ", node_distance(row, col));
		printk("\n");
	}

	for_each_online_node(cnode) {
		nasid = COMPACT_TO_NASID_NODEID(cnode);

		if (nasid == -1) continue;

		brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
					KLTYPE_ROUTER);

		if (!brd)
			continue;

		do {
			if (brd->brd_flags & DUPLICATE_BOARD)
				continue;
			printk("Router %d:", router_num);
			router_num++;

			router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);

			for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
				if (router->rou_port[port].port_nasid == INVALID_NASID)
					continue;

				dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
					router->rou_port[port].port_nasid,
					router->rou_port[port].port_offset);

				if (dest_brd->brd_type == KLTYPE_IP27)
					printk(" %d", dest_brd->brd_nasid);
				if (dest_brd->brd_type == KLTYPE_ROUTER)
					printk(" r");
			}
			printk("\n");

		} while ( (brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)) );
	}
}

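/*
 * First pfn of a memory slot on a node, formed from the node's nasid and
 * the slot number within that node.
 */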
static pfn_t __init slot_getbasepfn(cnodeid_t cnode, int slot)
{
	nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);

	return ((pfn_t)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
}

/*
 * Return the number of pages of memory provided by the given slot
 * on the specified node.
 */
static pfn_t __init slot_getsize(cnodeid_t node, int slot)
{
	return (pfn_t) slot_psize_cache[node][slot];
}

/*
 * Return highest slot filled
 */
static int __init node_getlastslot(cnodeid_t node)
{
	return (int) slot_lastfilled_cache[node];
}

/*
 * Return the pfn of the last free page of memory on a node.
 */
static pfn_t __init node_getmaxclick(cnodeid_t node)
{
	pfn_t slot_psize;
	int slot;

	/*
	 * Start at the top slot. When we find a slot with memory in it,
	 * that's the winner.
	 */
	for (slot = (MAX_MEM_SLOTS - 1); slot >= 0; slot--) {
		if ((slot_psize = slot_getsize(node, slot))) {
			if (slot_psize == SLOT_IGNORED)
				continue;
			/* Return the basepfn + the slot size, minus 1. */
			return slot_getbasepfn(node, slot) + slot_psize - 1;
		}
	}

	/*
	 * If there's no memory on the node, return 0. This is likely
	 * to cause problems.
	 */
	return 0;
}

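/*
 * Number of pages in a memory slot, derived from the klconfig memory
 * bank information of the node's IP27 board.
 */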
static pfn_t __init slot_psize_compute(cnodeid_t node, int slot)
{
	nasid_t nasid;
	lboard_t *brd;
	klmembnk_t *banks;
	unsigned long size;

	nasid = COMPACT_TO_NASID_NODEID(node);
	/* Find the node board */
	brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
	if (!brd)
		return 0;

	/* Get the memory bank structure */
	banks = (klmembnk_t *) find_first_component(brd, KLSTRUCT_MEMBNK);
	if (!banks)
		return 0;

	/* Size in _Megabytes_ */
	size = (unsigned long)banks->membnk_bnksz[slot/4];

	/* hack for 128 dimm banks */
	if (size <= 128) {
		if (slot % 4 == 0) {
			size <<= 20;		/* size in bytes */
			return (size >> PAGE_SHIFT);
		} else
			return 0;
	} else {
		size /= 4;
		size <<= 20;
		return size >> PAGE_SHIFT;
	}
}

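/*
 * Early node setup: record the master nasid and directory mode, probe
 * the CPUs, compute the topology matrix and program each hub's region
 * present mask and CALIAS size.
 */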
static void __init mlreset(void)
{
	int i;

	master_nasid = get_nasid();
	fine_mode = is_fine_dirmode();

	/*
	 * Probe for all CPUs - this creates the cpumask and sets up the
	 * mapping tables.  We need to do this as early as possible.
	 */
#ifdef CONFIG_SMP
	cpu_node_probe();
#endif

	init_topology_matrix();
	dump_topology();

	gen_region_mask(&region_mask);

	setup_replication_mask();

	/*
	 * Set all nodes' calias sizes to 8k
	 */
	for_each_online_node(i) {
		nasid_t nasid;

		nasid = COMPACT_TO_NASID_NODEID(i);

		/*
		 * Always have node 0 in the region mask, otherwise
		 * CALIAS accesses get exceptions since the hub
		 * thinks it is a node 0 address.
		 */
		REMOTE_HUB_S(nasid, PI_REGION_PRESENT, (region_mask | 1));
#ifdef CONFIG_REPLICATE_EXHANDLERS
		REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
#else
		REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_0);
#endif

#ifdef LATER
		/*
		 * Set up all hubs to have a big window pointing at
		 * widget 0. Memory mode, widget 0, offset 0
		 */
		REMOTE_HUB_S(nasid, IIO_ITTE(SWIN0_BIGWIN),
			((HUB_PIO_MAP_TO_MEM << IIO_ITTE_IOSP_SHIFT) |
			(0 << IIO_ITTE_WIDGET_SHIFT)));
#endif
	}
}

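/*
 * Size the memory on every node: cache the page count of each slot and
 * the last filled slot, ignoring slots whose struct pages would no
 * longer fit into slot 0.
 */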
static void __init szmem(void)
{
	pfn_t slot_psize, slot0sz = 0, nodebytes;	/* Hack to detect problem configs */
	int slot, ignore;
	cnodeid_t node;

	num_physpages = 0;

	for_each_online_node(node) {
		ignore = nodebytes = 0;
		for (slot = 0; slot < MAX_MEM_SLOTS; slot++) {
			slot_psize = slot_psize_compute(node, slot);
			if (slot == 0)
				slot0sz = slot_psize;
			/*
			 * We need to refine the hack when we have replicated
			 * kernel text.
			 */
			nodebytes += (1LL << SLOT_SHIFT);
			if ((nodebytes >> PAGE_SHIFT) * (sizeof(struct page)) >
					(slot0sz << PAGE_SHIFT))
				ignore = 1;
			if (ignore && slot_psize) {
				printk("Ignoring slot %d onwards on node %d\n",
								slot, node);
				slot_psize_cache[node][slot] = SLOT_IGNORED;
				slot = MAX_MEM_SLOTS;
				continue;
			}
			num_physpages += slot_psize;
			slot_psize_cache[node][slot] =
					(unsigned short) slot_psize;
			if (slot_psize)
				slot_lastfilled_cache[node] = slot;
		}
	}
}

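/*
 * Set up bootmem for one node: put the node data structures at the first
 * free pfn of slot 0, register slot 0 with bootmem and reserve those
 * structures together with the bootmem bitmap.
 */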
static void __init node_mem_init(cnodeid_t node)
{
	pfn_t slot_firstpfn = slot_getbasepfn(node, 0);
	pfn_t slot_lastpfn = slot_firstpfn + slot_getsize(node, 0);
	pfn_t slot_freepfn = node_getfirstfree(node);
	struct pglist_data *pd;
	unsigned long bootmap_size;

	/*
	 * Allocate the node data structures on the node first.
	 */
	__node_data[node] = __va(slot_freepfn << PAGE_SHIFT);

	pd = NODE_DATA(node);
	pd->bdata = &plat_node_bdata[node];
	cpus_clear(hub_data(node)->h_cpus);

	slot_freepfn += PFN_UP(sizeof(struct pglist_data) +
			       sizeof(struct hub_data));

	bootmap_size = init_bootmem_node(NODE_DATA(node), slot_freepfn,
					slot_firstpfn, slot_lastpfn);
	free_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
			(slot_lastpfn - slot_firstpfn) << PAGE_SHIFT);
	reserve_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
		((slot_freepfn - slot_firstpfn) << PAGE_SHIFT) + bootmap_size);
}

/*
 * A node with nothing.  We use it to avoid any special casing in
 * node_to_cpumask
 */
static struct node_data null_node = {
	.hub = {
		.h_cpus = CPU_MASK_NONE
	}
};

/*
 * Currently, the intranode memory hole support assumes that each slot
 * contains at least 32 MBytes of memory.  We assume all bootmem data
 * fits on the first slot.
 */
void __init prom_meminit(void)
{
	cnodeid_t node;

	mlreset();
	szmem();

	for (node = 0; node < MAX_COMPACT_NODES; node++) {
		if (node_online(node)) {
			node_mem_init(node);
			continue;
		}
		__node_data[node] = &null_node;
	}
}

void __init prom_free_prom_memory(void)
{
	/* We got nothing to free here ...  */
}

extern unsigned long setup_zero_pages(void);

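/*
 * Build the zone layout: one ZONE_NORMAL zone per online node, spanning
 * from the base of slot 0 up to the highest populated pfn on that node.
 */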
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0, };
	unsigned node;

	pagetable_init();

	for_each_online_node(node) {
		pfn_t start_pfn = slot_getbasepfn(node, 0);
		pfn_t end_pfn = node_getmaxclick(node) + 1;

		zones_size[ZONE_NORMAL] = end_pfn - start_pfn;
		free_area_init_node(node, NODE_DATA(node),
				zones_size, start_pfn, NULL);

		if (end_pfn > max_low_pfn)
			max_low_pfn = end_pfn;
	}
}

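/*
 * Hand memory to the page allocator: free_all_bootmem_node() covers
 * slot 0 of each node, the remaining slots are freed page by page, and
 * the usual memory banner is printed.
 */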
void __init mem_init(void)
{
	unsigned long codesize, datasize, initsize, tmp;
	unsigned node;

	high_memory = (void *) __va(num_physpages << PAGE_SHIFT);

	for_each_online_node(node) {
		unsigned slot, numslots;
		struct page *end, *p;

		/*
		 * This will free up the bootmem, ie, slot 0 memory.
		 */
		totalram_pages += free_all_bootmem_node(NODE_DATA(node));

		/*
		 * We need to manually do the other slots.
		 */
		numslots = node_getlastslot(node);
		for (slot = 1; slot <= numslots; slot++) {
			p = nid_page_nr(node, slot_getbasepfn(node, slot) -
					      slot_getbasepfn(node, 0));

			/*
			 * Free valid memory in current slot.
			 */
			for (end = p + slot_getsize(node, slot); p < end; p++) {
				/* if (!page_is_ram(pgnr)) continue; */
				/* commented out until page_is_ram works */
				ClearPageReserved(p);
				init_page_count(p);
				__free_page(p);
				totalram_pages++;
			}
		}
	}

	totalram_pages -= setup_zero_pages();	/* This comes from node 0 */

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	tmp = nr_free_pages();
	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
	       "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
	       tmp << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT-10),
	       codesize >> 10,
	       (num_physpages - tmp) << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10,
	       (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
}