
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file contains NUMA specific variables and functions which can
 * be split away from DISCONTIGMEM and are used on NUMA machines with
 * contiguous memory.
 *
 *                         2002/08/07 Erich Focht <efocht@ess.nec.de>
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/node.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/mmzone.h>
#include <asm/numa.h>

/*
 * The following structures are usually initialized by ACPI or
 * similar mechanisms and describe the NUMA characteristics of the machine.
 */
int num_node_memblks;
struct node_memblk_s node_memblk[NR_NODE_MEMBLKS];
struct node_cpuid_s node_cpuid[NR_CPUS] =
	{ [0 ... NR_CPUS-1] = { .phys_id = 0, .nid = NUMA_NO_NODE } };

/*
 * This is a matrix with "distances" between nodes; they should be
 * proportional to the memory access latency ratios.
 */
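/* Flattened N x N table; node_distance(from, to) indexes it as numa_slit[from * MAX_NUMNODES + to]. */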
u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES];

/* Identify which cnode a physical address resides on */
int
paddr_to_nid(unsigned long paddr)
{
	int i;

	for (i = 0; i < num_node_memblks; i++)
		if (paddr >= node_memblk[i].start_paddr &&
		    paddr < node_memblk[i].start_paddr + node_memblk[i].size)
			break;
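
	/*
	 * A matching memblk yields its node id.  If memblks are registered
	 * but none matches, the address is unknown (-1); if no memblks are
	 * registered at all, fall back to node 0.
	 */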
	return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? -1 : 0);
}

#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA)
/*
 * Because memory may contain holes, evaluate on section limits.
 * If the section of memory exists, then return the node where the section
 * resides.  Otherwise return -1, which the generic early_pfn_to_nid()
 * treats as node 0 by default.  This is used by SPARSEMEM to allocate the
 * SPARSEMEM sectionmap on the NUMA node where the section resides.
 */
int __meminit __early_pfn_to_nid(unsigned long pfn)
{
	int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec;

	for (i = 0; i < num_node_memblks; i++) {
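		/*
		 * Convert the memblk's physical range to [ssec, esec): the
		 * start is rounded down to a section boundary and the end
		 * rounded up, so a partially filled section still matches.
		 */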
		ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT;
		esec = (node_memblk[i].start_paddr + node_memblk[i].size +
			((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT;
		if (section >= ssec && section < esec)
			return node_memblk[i].nid;
	}

	return -1;
}
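
/* Clear the node association recorded for @cpu. */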
void __cpuinit numa_clear_node(int cpu)
{
	unmap_cpu_from_node(cpu, NUMA_NO_NODE);
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * SRAT information is stored in node_memblk[], so it can be reused
 * at memory hot-add time if necessary.
 */
int memory_add_physaddr_to_nid(u64 addr)
{
	int nid = paddr_to_nid(addr);
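
	/* Addresses outside every known memblk default to node 0. */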
	if (nid < 0)
		return 0;
	return nid;
}

EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#endif