ip27-memory.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000, 05 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2000 by Silicon Graphics, Inc.
 * Copyright (C) 2004 by Christoph Hellwig
 *
 * On SGI IP27 the ARC memory configuration data is completely bogus but
 * alternate, easier to use mechanisms are available.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>

#include <asm/page.h>
#include <asm/sections.h>

#include <asm/sn/arch.h>
#include <asm/sn/hub.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/sn_private.h>

#define SLOT_PFNSHIFT           (SLOT_SHIFT - PAGE_SHIFT)
#define PFN_NASIDSHFT           (NASID_SHFT - PAGE_SHIFT)

#define SLOT_IGNORED            0xffff

static short __initdata slot_lastfilled_cache[MAX_COMPACT_NODES];
static unsigned short __initdata slot_psize_cache[MAX_COMPACT_NODES][MAX_MEM_SLOTS];
static struct bootmem_data __initdata plat_node_bdata[MAX_COMPACT_NODES];

struct node_data *__node_data[MAX_COMPACT_NODES];

EXPORT_SYMBOL(__node_data);

static int fine_mode;

static int is_fine_dirmode(void)
{
        return (((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK)
                >> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE);
}

static hubreg_t get_region(cnodeid_t cnode)
{
        if (fine_mode)
                return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_FINEREG_SHFT;
        else
                return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_COARSEREG_SHFT;
}
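
/*
 * Bitmask with one bit set for the hub region of every online node.
 * mlreset() later writes it, with node 0's region always included,
 * into each hub's PI_REGION_PRESENT register.
 */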
static hubreg_t region_mask;

static void gen_region_mask(hubreg_t *region_mask)
{
        cnodeid_t cnode;

        (*region_mask) = 0;
        for_each_online_node(cnode) {
                (*region_mask) |= 1ULL << get_region(cnode);
        }
}

#define rou_rflag       rou_flags

static int router_distance;
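
/*
 * Depth-first walk over the router graph starting at router_a, looking
 * for router_b.  rou_rflag marks routers on the current path so cycles
 * are not followed, and the walk is pruned once the depth reaches the
 * best distance found so far (router_distance), which is updated
 * whenever router_b is reached over a shorter path.
 */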
static void router_recurse(klrou_t *router_a, klrou_t *router_b, int depth)
{
        klrou_t *router;
        lboard_t *brd;
        int port;

        if (router_a->rou_rflag == 1)
                return;

        if (depth >= router_distance)
                return;

        router_a->rou_rflag = 1;

        for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
                if (router_a->rou_port[port].port_nasid == INVALID_NASID)
                        continue;

                brd = (lboard_t *)NODE_OFFSET_TO_K0(
                        router_a->rou_port[port].port_nasid,
                        router_a->rou_port[port].port_offset);

                if (brd->brd_type == KLTYPE_ROUTER) {
                        router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
                        if (router == router_b) {
                                if (depth < router_distance)
                                        router_distance = depth;
                        }
                        else
                                router_recurse(router, router_b, depth + 1);
                }
        }

        router_a->rou_rflag = 0;
}

unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
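
/*
 * Number of router hops between two nodes: 0 for the same node, 1 if
 * both nodes hang off the same router, otherwise the shortest path
 * found by router_recurse().  Returns -1 if either node has no
 * attached router.
 */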
static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b)
{
        klrou_t *router, *router_a = NULL, *router_b = NULL;
        lboard_t *brd, *dest_brd;
        cnodeid_t cnode;
        nasid_t nasid;
        int port;

        /* Figure out which routers nodes in question are connected to */
        for_each_online_node(cnode) {
                nasid = COMPACT_TO_NASID_NODEID(cnode);

                if (nasid == -1) continue;

                brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
                                        KLTYPE_ROUTER);
                if (!brd)
                        continue;

                do {
                        if (brd->brd_flags & DUPLICATE_BOARD)
                                continue;

                        router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
                        router->rou_rflag = 0;

                        for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
                                if (router->rou_port[port].port_nasid == INVALID_NASID)
                                        continue;

                                dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
                                        router->rou_port[port].port_nasid,
                                        router->rou_port[port].port_offset);

                                if (dest_brd->brd_type == KLTYPE_IP27) {
                                        if (dest_brd->brd_nasid == nasid_a)
                                                router_a = router;
                                        if (dest_brd->brd_nasid == nasid_b)
                                                router_b = router;
                                }
                        }

                } while ((brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)));
        }

        if (router_a == NULL) {
                printk("node_distance: router_a NULL\n");
                return -1;
        }
        if (router_b == NULL) {
                printk("node_distance: router_b NULL\n");
                return -1;
        }
        if (nasid_a == nasid_b)
                return 0;
        if (router_a == router_b)
                return 1;

        router_distance = 100;
        router_recurse(router_a, router_b, 2);

        return router_distance;
}

static void __init init_topology_matrix(void)
{
        nasid_t nasid, nasid2;
        cnodeid_t row, col;

        for (row = 0; row < MAX_COMPACT_NODES; row++)
                for (col = 0; col < MAX_COMPACT_NODES; col++)
                        __node_distances[row][col] = -1;

        for_each_online_node(row) {
                nasid = COMPACT_TO_NASID_NODEID(row);
                for_each_online_node(col) {
                        nasid2 = COMPACT_TO_NASID_NODEID(col);
                        __node_distances[row][col] =
                                compute_node_distance(nasid, nasid2);
                }
        }
}

static void __init dump_topology(void)
{
        nasid_t nasid;
        cnodeid_t cnode;
        lboard_t *brd, *dest_brd;
        int port;
        int router_num = 0;
        klrou_t *router;
        cnodeid_t row, col;

        printk("************** Topology ********************\n");

        printk(" ");
        for_each_online_node(col)
                printk("%02d ", col);
        printk("\n");
        for_each_online_node(row) {
                printk("%02d ", row);
                for_each_online_node(col)
                        printk("%2d ", node_distance(row, col));
                printk("\n");
        }

        for_each_online_node(cnode) {
                nasid = COMPACT_TO_NASID_NODEID(cnode);

                if (nasid == -1) continue;

                brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
                                        KLTYPE_ROUTER);
                if (!brd)
                        continue;

                do {
                        if (brd->brd_flags & DUPLICATE_BOARD)
                                continue;
                        printk("Router %d:", router_num);
                        router_num++;

                        router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);

                        for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
                                if (router->rou_port[port].port_nasid == INVALID_NASID)
                                        continue;

                                dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
                                        router->rou_port[port].port_nasid,
                                        router->rou_port[port].port_offset);

                                if (dest_brd->brd_type == KLTYPE_IP27)
                                        printk(" %d", dest_brd->brd_nasid);
                                if (dest_brd->brd_type == KLTYPE_ROUTER)
                                        printk(" r");
                        }
                        printk("\n");

                } while ( (brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)) );
        }
}
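
/*
 * First page frame number of the given memory slot on the given node.
 * The node's NASID forms the upper bits of the PFN and the slot index
 * sits in the bits directly below it, mirroring the physical address
 * layout.
 */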
static pfn_t __init slot_getbasepfn(cnodeid_t cnode, int slot)
{
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);

        return ((pfn_t)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
}

/*
 * Return the number of pages of memory provided by the given slot
 * on the specified node.
 */
static pfn_t __init slot_getsize(cnodeid_t node, int slot)
{
        return (pfn_t) slot_psize_cache[node][slot];
}

/*
 * Return highest slot filled
 */
static int __init node_getlastslot(cnodeid_t node)
{
        return (int) slot_lastfilled_cache[node];
}

/*
 * Return the pfn of the last free page of memory on a node.
 */
static pfn_t __init node_getmaxclick(cnodeid_t node)
{
        pfn_t slot_psize;
        int slot;

        /*
         * Start at the top slot. When we find a slot with memory in it,
         * that's the winner.
         */
        for (slot = (MAX_MEM_SLOTS - 1); slot >= 0; slot--) {
                if ((slot_psize = slot_getsize(node, slot))) {
                        if (slot_psize == SLOT_IGNORED)
                                continue;
                        /* Return the basepfn + the slot size, minus 1. */
                        return slot_getbasepfn(node, slot) + slot_psize - 1;
                }
        }

        /*
         * If there's no memory on the node, return 0. This is likely
         * to cause problems.
         */
        return 0;
}
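
/*
 * Compute the size (in pages) of a memory slot from the klconfig
 * memory bank information.  Bank sizes are reported in megabytes;
 * a bank of up to 128 MB lives entirely in the bank's first slot,
 * while a larger bank is spread evenly over its four slots.
 */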
static pfn_t __init slot_psize_compute(cnodeid_t node, int slot)
{
        nasid_t nasid;
        lboard_t *brd;
        klmembnk_t *banks;
        unsigned long size;

        nasid = COMPACT_TO_NASID_NODEID(node);
        /* Find the node board */
        brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
        if (!brd)
                return 0;

        /* Get the memory bank structure */
        banks = (klmembnk_t *) find_first_component(brd, KLSTRUCT_MEMBNK);
        if (!banks)
                return 0;

        /* Size in _Megabytes_ */
        size = (unsigned long)banks->membnk_bnksz[slot/4];

        /* hack for 128 dimm banks */
        if (size <= 128) {
                if (slot % 4 == 0) {
                        size <<= 20;            /* size in bytes */
                        return size >> PAGE_SHIFT;
                } else
                        return 0;
        } else {
                size /= 4;
                size <<= 20;
                return size >> PAGE_SHIFT;
        }
}

static void __init mlreset(void)
{
        int i;

        master_nasid = get_nasid();
        fine_mode = is_fine_dirmode();

        /*
         * Probe for all CPUs - this creates the cpumask and sets up the
         * mapping tables.  We need to do this as early as possible.
         */
#ifdef CONFIG_SMP
        cpu_node_probe();
#endif

        init_topology_matrix();
        dump_topology();

        gen_region_mask(&region_mask);

        setup_replication_mask();

        /*
         * Set all nodes' calias sizes to 8k
         */
        for_each_online_node(i) {
                nasid_t nasid;

                nasid = COMPACT_TO_NASID_NODEID(i);

                /*
                 * Always have node 0 in the region mask, otherwise
                 * CALIAS accesses get exceptions since the hub
                 * thinks it is a node 0 address.
                 */
                REMOTE_HUB_S(nasid, PI_REGION_PRESENT, (region_mask | 1));
#ifdef CONFIG_REPLICATE_EXHANDLERS
                REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
#else
                REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_0);
#endif

#ifdef LATER
                /*
                 * Set up all hubs to have a big window pointing at
                 * widget 0. Memory mode, widget 0, offset 0
                 */
                REMOTE_HUB_S(nasid, IIO_ITTE(SWIN0_BIGWIN),
                        ((HUB_PIO_MAP_TO_MEM << IIO_ITTE_IOSP_SHIFT) |
                        (0 << IIO_ITTE_WIDGET_SHIFT)));
#endif
        }
}
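
/*
 * Size the memory of every online node.  A slot is marked SLOT_IGNORED
 * (and the rest of the node skipped) once the struct page array needed
 * for the memory scanned so far would no longer fit into slot 0, since
 * all bootmem data is assumed to live in the first slot.
 */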
static void __init szmem(void)
{
        pfn_t slot_psize, slot0sz = 0, nodebytes;       /* Hack to detect problem configs */
        int slot, ignore;
        cnodeid_t node;

        num_physpages = 0;

        for_each_online_node(node) {
                ignore = nodebytes = 0;
                for (slot = 0; slot < MAX_MEM_SLOTS; slot++) {
                        slot_psize = slot_psize_compute(node, slot);
                        if (slot == 0)
                                slot0sz = slot_psize;
                        /*
                         * We need to refine the hack when we have replicated
                         * kernel text.
                         */
                        nodebytes += (1LL << SLOT_SHIFT);
                        if ((nodebytes >> PAGE_SHIFT) * (sizeof(struct page)) >
                                        (slot0sz << PAGE_SHIFT))
                                ignore = 1;
                        if (ignore && slot_psize) {
                                printk("Ignoring slot %d onwards on node %d\n",
                                                slot, node);
                                slot_psize_cache[node][slot] = SLOT_IGNORED;
                                slot = MAX_MEM_SLOTS;
                                continue;
                        }
                        num_physpages += slot_psize;
                        slot_psize_cache[node][slot] =
                                (unsigned short) slot_psize;
                        if (slot_psize)
                                slot_lastfilled_cache[node] = slot;
                }
        }
}
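
/*
 * Set up bootmem for one node: the node_data and hub_data structures
 * are placed at the node's first free pfn in slot 0, slot 0 is then
 * registered with the bootmem allocator, and the pages holding those
 * structures plus the bootmem bitmap are reserved again.
 */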
static void __init node_mem_init(cnodeid_t node)
{
        pfn_t slot_firstpfn = slot_getbasepfn(node, 0);
        pfn_t slot_lastpfn = slot_firstpfn + slot_getsize(node, 0);
        pfn_t slot_freepfn = node_getfirstfree(node);
        struct pglist_data *pd;
        unsigned long bootmap_size;

        /*
         * Allocate the node data structures on the node first.
         */
        __node_data[node] = __va(slot_freepfn << PAGE_SHIFT);

        pd = NODE_DATA(node);
        pd->bdata = &plat_node_bdata[node];
        cpus_clear(hub_data(node)->h_cpus);

        slot_freepfn += PFN_UP(sizeof(struct pglist_data) +
                               sizeof(struct hub_data));

        bootmap_size = init_bootmem_node(NODE_DATA(node), slot_freepfn,
                                        slot_firstpfn, slot_lastpfn);
        free_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
                        (slot_lastpfn - slot_firstpfn) << PAGE_SHIFT);
        reserve_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
                ((slot_freepfn - slot_firstpfn) << PAGE_SHIFT) + bootmap_size);
}

/*
 * A node with nothing.  We use it to avoid any special casing in
 * node_to_cpumask
 */
static struct node_data null_node = {
        .hub = {
                .h_cpus = CPU_MASK_NONE
        }
};

/*
 * Currently, the intranode memory hole support assumes that each slot
 * contains at least 32 MBytes of memory. We assume all bootmem data
 * fits on the first slot.
 */
void __init prom_meminit(void)
{
        cnodeid_t node;

        mlreset();
        szmem();

        for (node = 0; node < MAX_COMPACT_NODES; node++) {
                if (node_online(node)) {
                        node_mem_init(node);
                        continue;
                }
                __node_data[node] = &null_node;
        }
}

unsigned long __init prom_free_prom_memory(void)
{
        /* We got nothing to free here ... */
        return 0;
}

extern void pagetable_init(void);
extern unsigned long setup_zero_pages(void);
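
/*
 * Each node gets its own zone.  All of a node's usable memory ends up
 * in ZONE_DMA, from the base pfn of slot 0 up to the last non-ignored
 * page as reported by node_getmaxclick().
 */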
void __init paging_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
        unsigned node;

        pagetable_init();

        for_each_online_node(node) {
                pfn_t start_pfn = slot_getbasepfn(node, 0);
                pfn_t end_pfn = node_getmaxclick(node) + 1;

                zones_size[ZONE_DMA] = end_pfn - start_pfn;
                free_area_init_node(node, NODE_DATA(node),
                                zones_size, start_pfn, NULL);

                if (end_pfn > max_low_pfn)
                        max_low_pfn = end_pfn;
        }
}
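
/*
 * Release all usable memory to the page allocator: slot 0 of every
 * node comes back via free_all_bootmem_node(), the remaining slots
 * are freed page by page, and the usual boot-time memory statistics
 * are printed.
 */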
void __init mem_init(void)
{
        unsigned long codesize, datasize, initsize, tmp;
        unsigned node;

        high_memory = (void *) __va(num_physpages << PAGE_SHIFT);

        for_each_online_node(node) {
                unsigned slot, numslots;
                struct page *end, *p;

                /*
                 * This will free up the bootmem, ie, slot 0 memory.
                 */
                totalram_pages += free_all_bootmem_node(NODE_DATA(node));

                /*
                 * We need to manually do the other slots.
                 */
                numslots = node_getlastslot(node);
                for (slot = 1; slot <= numslots; slot++) {
                        p = nid_page_nr(node, slot_getbasepfn(node, slot) -
                                              slot_getbasepfn(node, 0));

                        /*
                         * Free valid memory in current slot.
                         */
                        for (end = p + slot_getsize(node, slot); p < end; p++) {
                                /* if (!page_is_ram(pgnr)) continue; */
                                /* commented out until page_is_ram works */
                                ClearPageReserved(p);
                                init_page_count(p);
                                __free_page(p);
                                totalram_pages++;
                        }
                }
        }

        totalram_pages -= setup_zero_pages();   /* This comes from node 0 */

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        tmp = nr_free_pages();
        printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
               "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
               tmp << (PAGE_SHIFT-10),
               num_physpages << (PAGE_SHIFT-10),
               codesize >> 10,
               (num_physpages - tmp) << (PAGE_SHIFT-10),
               datasize >> 10,
               initsize >> 10,
               (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
}