#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <asm/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

struct free_area {
        struct list_head        free_list;
        unsigned long           nr_free;
};
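/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): free_area[order] tracks blocks of 2^order contiguous
 * pages, so with the default MAX_ORDER of 11 the largest buddy block
 * spans MAX_ORDER_NR_PAGES == 1024 pages (4MB with 4KB pages).
 */
static inline unsigned long order_nr_pages(unsigned int order)
{
        return 1UL << order;    /* order 0 = 1 page, MAX_ORDER-1 = 1024 pages */
}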
struct pglist_data;

/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
        char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)      struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif
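/*
 * Illustrative use (hypothetical struct, editorial note not from this
 * header): the zero-size padding member pushes the next field onto a
 * fresh internode cacheline, so two hot locks cannot false-share:
 *
 *      struct two_locks {
 *              spinlock_t      lock_a;
 *              ZONE_PADDING(_pad_)
 *              spinlock_t      lock_b; (starts a new cacheline on SMP)
 *      };
 */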
enum zone_stat_item {
        NR_ANON_PAGES,          /* Mapped anonymous pages */
        NR_FILE_MAPPED,         /* pagecache pages mapped into pagetables.
                                   only modified from process context */
        NR_FILE_PAGES,
        NR_SLAB_RECLAIMABLE,
        NR_SLAB_UNRECLAIMABLE,
        NR_PAGETABLE,           /* used for pagetables */
        NR_FILE_DIRTY,
        NR_WRITEBACK,
        NR_UNSTABLE_NFS,        /* NFS unstable pages */
        NR_BOUNCE,
#ifdef CONFIG_NUMA
        NUMA_HIT,               /* allocated in intended node */
        NUMA_MISS,              /* allocated in non intended node */
        NUMA_FOREIGN,           /* was intended here, hit elsewhere */
        NUMA_INTERLEAVE_HIT,    /* interleaver preferred this zone */
        NUMA_LOCAL,             /* allocation from local node */
        NUMA_OTHER,             /* allocation from other node */
#endif
        NR_VM_ZONE_STAT_ITEMS };
struct per_cpu_pages {
        int count;              /* number of pages in the list */
        int high;               /* high watermark, emptying needed */
        int batch;              /* chunk size for buddy add/remove */
        struct list_head list;  /* the list of pages */
};

struct per_cpu_pageset {
        struct per_cpu_pages pcp[2];    /* 0: hot.  1: cold */
#ifdef CONFIG_SMP
        s8 stat_threshold;
        s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
} ____cacheline_aligned_in_smp;

#ifdef CONFIG_NUMA
#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
#else
#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
#endif
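/*
 * Illustrative sketch (assumption, not part of the original header):
 * zone_pcp() hides whether the pagesets are per-node pointers (NUMA)
 * or arrays embedded in struct zone.  Either way, callers index
 * pcp[0] for the hot list and pcp[1] for the cold list:
 *
 *      struct per_cpu_pages *pcp = &zone_pcp(zone, cpu)->pcp[cold != 0];
 *
 * When pcp->count rises above pcp->high, roughly pcp->batch pages are
 * returned to the buddy free lists in one go.
 */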
enum zone_type {
        /*
         * ZONE_DMA is used when there are devices that are not able
         * to do DMA to all of addressable memory (ZONE_NORMAL).  Then we
         * carve out the portion of memory that is needed for these devices.
         * The range is arch specific.
         *
         * Some examples
         *
         * Architecture                         Limit
         * ---------------------------------------------------
         * parisc, ia64, sparc                  <4G
         * s390                                 <2G
         * arm26                                <48M
         * arm                                  Various
         * alpha                                Unlimited or 0-16MB.
         * i386, x86_64 and multiple others     <16M.
         */
        ZONE_DMA,
#ifdef CONFIG_ZONE_DMA32
        /*
         * x86_64 needs two ZONE_DMAs because it supports devices that are
         * only able to do DMA to the lower 16M but also 32 bit devices that
         * can only do DMA to areas below 4G.
         */
        ZONE_DMA32,
#endif
        /*
         * Normal addressable memory is in ZONE_NORMAL.  DMA operations can be
         * performed on pages in ZONE_NORMAL if the DMA devices support
         * transfers to all addressable memory.
         */
        ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
        /*
         * A memory area that is only addressable by the kernel through
         * mapping portions into its own address space.  This is for example
         * used by i386 to allow the kernel to address the memory beyond
         * 900MB.  The kernel will set up special mappings (page
         * table entries on i386) for each page that the kernel needs to
         * access.
         */
        ZONE_HIGHMEM,
#endif
        MAX_NR_ZONES
};
/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits.  These bits
 * are used to select a priority ordered list of memory zones which
 * match the requested limits.  See gfp_zone() in include/linux/gfp.h
 */
#if !defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_HIGHMEM)
#define ZONES_SHIFT 1
#else
#define ZONES_SHIFT 2
#endif
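/*
 * Worked example (editorial note, not in the original header):
 * ZONES_SHIFT must satisfy MAX_NR_ZONES <= (1 << ZONES_SHIFT).  With
 * neither CONFIG_ZONE_DMA32 nor CONFIG_HIGHMEM there are only
 * ZONE_DMA and ZONE_NORMAL, so one bit is enough; enabling either
 * option yields three or four zones and requires two bits.
 */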
struct zone {
        /* Fields commonly accessed by the page allocator */
        unsigned long           free_pages;
        unsigned long           pages_min, pages_low, pages_high;
        /*
         * We don't know if the memory that we're going to allocate will be
         * freeable or/and it will be released eventually, so to avoid totally
         * wasting several GB of ram we must reserve some of the lower zone
         * memory (otherwise we risk running OOM on the lower zones despite
         * there being tons of freeable ram on the higher zones).  This array
         * is recalculated at runtime if the sysctl_lowmem_reserve_ratio
         * sysctl changes.
         */
        unsigned long           lowmem_reserve[MAX_NR_ZONES];
#ifdef CONFIG_NUMA
        /*
         * zone reclaim becomes active if more unmapped pages exist.
         */
        unsigned long           min_unmapped_pages;
        unsigned long           min_slab_pages;
        struct per_cpu_pageset  *pageset[NR_CPUS];
#else
        struct per_cpu_pageset  pageset[NR_CPUS];
#endif
        /*
         * free areas of different sizes
         */
        spinlock_t              lock;
#ifdef CONFIG_MEMORY_HOTPLUG
        /* see spanned/present_pages for more description */
        seqlock_t               span_seqlock;
#endif
        struct free_area        free_area[MAX_ORDER];

        ZONE_PADDING(_pad1_)
        /* Fields commonly accessed by the page reclaim scanner */
        spinlock_t              lru_lock;
        struct list_head        active_list;
        struct list_head        inactive_list;
        unsigned long           nr_scan_active;
        unsigned long           nr_scan_inactive;
        unsigned long           nr_active;
        unsigned long           nr_inactive;
        unsigned long           pages_scanned;     /* since last reclaim */
        int                     all_unreclaimable; /* All pages pinned */

        /* A count of how many reclaimers are scanning this zone */
        atomic_t                reclaim_in_progress;

        /* Zone statistics */
        atomic_long_t           vm_stat[NR_VM_ZONE_STAT_ITEMS];
        /*
         * prev_priority holds the scanning priority for this zone.  It is
         * defined as the scanning priority at which we achieved our reclaim
         * target at the previous try_to_free_pages() or balance_pgdat()
         * invocation.
         *
         * We use prev_priority as a measure of how much stress page reclaim is
         * under - it drives the swappiness decision: whether to unmap mapped
         * pages.
         *
         * temp_priority is used to remember the scanning priority at which
         * this zone was successfully refilled to free_pages == pages_high.
         *
         * Access to both these fields is quite racy even on uniprocessor.  But
         * it is expected to average out OK.
         */
        int temp_priority;
        int prev_priority;

        ZONE_PADDING(_pad2_)
        /* Rarely used or read-mostly fields */

        /*
         * wait_table                    -- the array holding the hash table
         * wait_table_hash_nr_entries    -- the size of the hash table array
         * wait_table_bits               -- wait_table_hash_nr_entries
         *                                  == (1 << wait_table_bits)
         *
         * The purpose of all these is to keep track of the people
         * waiting for a page to become available and make them
         * runnable again when possible.  The trouble is that this
         * consumes a lot of space, especially when so few things
         * wait on pages at a given time.  So instead of using
         * per-page waitqueues, we use a waitqueue hash table.
         *
         * The bucket discipline is to sleep on the same queue when
         * colliding and wake all in that wait queue when removing.
         * When something wakes, it must check to be sure its page is
         * truly available, a la thundering herd.  The cost of a
         * collision is great, but given the expected load of the
         * table, they should be so rare as to be outweighed by the
         * benefits from the saved space.
         *
         * __wait_on_page_locked() and unlock_page() in mm/filemap.c are the
         * primary users of these fields, and in mm/page_alloc.c
         * free_area_init_core() performs the initialization of them.
         */
        wait_queue_head_t       *wait_table;
        unsigned long           wait_table_hash_nr_entries;
        unsigned long           wait_table_bits;
        /*
         * Discontig memory support fields.
         */
        struct pglist_data      *zone_pgdat;
        /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
        unsigned long           zone_start_pfn;

        /*
         * zone_start_pfn, spanned_pages and present_pages are all
         * protected by span_seqlock.  It is a seqlock because it has
         * to be read outside of zone->lock, and it is done in the main
         * allocator path.  But, it is written quite infrequently.
         *
         * The lock is declared along with zone->lock because it is
         * frequently read in proximity to zone->lock.  It's good to
         * give them a chance of being in the same cacheline.
         */
        unsigned long           spanned_pages;  /* total size, including holes */
        unsigned long           present_pages;  /* amount of memory (excluding holes) */

        /*
         * rarely used fields:
         */
        char                    *name;
} ____cacheline_internodealigned_in_smp;
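/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): a per-zone statistic is read by folding the zone's global
 * atomic counter; un-flushed per-CPU deltas in vm_stat_diff can make
 * the value transiently negative, which readers clamp away.
 */
static inline unsigned long zone_stat_sketch(struct zone *zone,
                                             enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);
        return x < 0 ? 0 : x;
}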
/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go.  A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of
 * the queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12
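/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): the fraction of a queue scanned in one pass follows
 * directly from the priority, e.g. 40960 inactive pages scanned at
 * DEF_PRIORITY yields 40960 >> 12 == 10 pages per pass, doubling
 * each time reclaim retries at the next lower priority.
 */
static inline unsigned long scan_quantum(unsigned long queue_length,
                                         int priority)
{
        return queue_length >> priority;        /* 1/2^priority of the queue */
}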
/*
 * One allocation request operates on a zonelist.  A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * Right now a zonelist takes up less than a cacheline.  We never
 * modify it apart from boot-up, and only a few indices are used,
 * so despite the zonelist table being relatively big, the cache
 * footprint of this construct is very small.
 */
struct zonelist {
        struct zone *zones[MAX_NUMNODES * MAX_NR_ZONES + 1]; // NULL delimited
};
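/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): allocation walks the NULL-delimited array in order and
 * takes the first zone that can satisfy the request.
 */
static inline struct zone *first_usable_zone(struct zonelist *zonelist,
                                             unsigned long min_free)
{
        struct zone **z;

        for (z = zonelist->zones; *z != NULL; z++)
                if ((*z)->free_pages >= min_free)
                        return *z;
        return NULL;    /* goal zone and every fallback are too full */
}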
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
struct node_active_region {
        unsigned long start_pfn;
        unsigned long end_pfn;
        int nid;
};
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level grouping of memory
 * than the zones themselves.
 *
 * On NUMA machines, each NUMA node has a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
        struct zone node_zones[MAX_NR_ZONES];
        struct zonelist node_zonelists[MAX_NR_ZONES];
        int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP
        struct page *node_mem_map;
#endif
        struct bootmem_data *bdata;
#ifdef CONFIG_MEMORY_HOTPLUG
        /*
         * Must be held any time you expect node_start_pfn, node_present_pages
         * or node_spanned_pages to stay constant.  Holding this will also
         * guarantee that any pfn_valid() stays that way.
         *
         * Nests above zone->lock and zone->span_seqlock.
         */
        spinlock_t node_size_lock;
#endif
        unsigned long node_start_pfn;
        unsigned long node_present_pages; /* total number of physical pages */
        unsigned long node_spanned_pages; /* total size of physical page
                                             range, including holes */
        int node_id;
        wait_queue_head_t kswapd_wait;
        struct task_struct *kswapd;
        int kswapd_max_order;
} pg_data_t;
#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)    ((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)    pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)        pgdat_page_nr(NODE_DATA(nid),(pagenr))
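/*
 * Illustrative usage (assumption, not from the original header): the
 * page number is an offset from the node's first pfn, so a node's
 * first and last struct pages would be obtained as:
 *
 *      struct page *first = nid_page_nr(nid, 0);
 *      struct page *last  = nid_page_nr(nid, node_spanned_pages(nid) - 1);
 */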
#include <linux/memory_hotplug.h>

void __get_zone_counts(unsigned long *active, unsigned long *inactive,
                        unsigned long *free, struct pglist_data *pgdat);
void get_zone_counts(unsigned long *active, unsigned long *inactive,
                        unsigned long *free);
void build_all_zonelists(void);
void wakeup_kswapd(struct zone *zone, int order);
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                int classzone_idx, int alloc_flags);
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
                                     unsigned long size);
#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif
/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)  ((zone) - (zone)->zone_pgdat->node_zones)

static inline int populated_zone(struct zone *zone)
{
        return (!!zone->present_pages);
}
static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
        return (idx == ZONE_HIGHMEM);
#else
        return 0;
#endif
}

static inline int is_normal_idx(enum zone_type idx)
{
        return (idx == ZONE_NORMAL);
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
        return zone == zone->zone_pgdat->node_zones + ZONE_HIGHMEM;
#else
        return 0;
#endif
}

static inline int is_normal(struct zone *zone)
{
        return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}

static inline int is_dma32(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA32
        return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
#else
        return 0;
#endif
}

static inline int is_dma(struct zone *zone)
{
        return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
}
/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
struct file;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
                                        void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
                                        void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
                                        void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
                        struct file *, void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
                        struct file *, void __user *, size_t *, loff_t *);

#include <linux/topology.h>
/* Returns the number of the current Node. */
#ifndef numa_node_id
#define numa_node_id()  (cpu_to_node(raw_smp_processor_id()))
#endif
#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)          (&contig_page_data)
#define NODE_MEM_MAP(nid)       mem_map
#define MAX_NODES_SHIFT         1

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);
/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)                    \
        for (pgdat = first_online_pgdat();              \
             pgdat;                                     \
             pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)                             \
        for (zone = (first_online_pgdat())->node_zones; \
             zone;                                      \
             zone = next_zone(zone))
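/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): typical iteration declares the cursor and skips zones
 * that span no present memory.
 */
static inline unsigned long nr_free_pages_sketch(void)
{
        struct zone *zone;
        unsigned long free = 0;

        for_each_zone(zone) {
                if (!populated_zone(zone))
                        continue;       /* zone exists but holds no pages */
                free += zone->free_pages;
        }
        return free;
}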
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if BITS_PER_LONG == 32
/*
 * with 32 bit page->flags field, we reserve 9 bits for node/zone info.
 * there are 4 zones (3 bits) and this leaves 9-3=6 bits for nodes.
 */
#define FLAGS_RESERVED          9

#elif BITS_PER_LONG == 64
/*
 * with 64 bit flags field, there's plenty of room.
 */
#define FLAGS_RESERVED          32

#else
#error BITS_PER_LONG not defined
#endif
#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
        !defined(CONFIG_ARCH_POPULATES_NODE_MAP)
#define early_pfn_to_nid(pfn)   (0UL)
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)         (0)
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
#ifdef CONFIG_SPARSEMEM

/*
 * SECTIONS_SHIFT       #bits space required to store a section #
 *
 * PA_SECTION_SHIFT     physical address to/from section number
 * PFN_SECTION_SHIFT    pfn to/from section number
 */
#define SECTIONS_SHIFT          (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

#define PA_SECTION_SHIFT        (SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT       (SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS         (1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION       (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK       (~(PAGES_PER_SECTION-1))

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif
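/*
 * Worked example (editorial note, not in the original header):
 * assuming SECTION_SIZE_BITS == 27 and 4KB pages (PAGE_SHIFT == 12),
 * a section covers 128MB, PFN_SECTION_SHIFT == 15 and
 * PAGES_PER_SECTION == 32768; pfn 0x12345 then falls in section
 * pfn_to_section_nr(0x12345) == 2.
 */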
struct page;
struct mem_section {
        /*
         * This is, logically, a pointer to an array of struct
         * pages.  However, it is stored with some other magic.
         * (see sparse.c::sparse_init_one_section())
         *
         * Additionally during early boot we encode node id of
         * the location of the section here to guide allocation.
         * (see sparse.c::memory_present())
         *
         * Making it a UL at least makes someone do a cast
         * before using it wrong.
         */
        unsigned long section_mem_map;
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT       (PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT       1
#endif

#define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS        (NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK       (SECTIONS_PER_ROOT - 1)
#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
        if (!mem_section[SECTION_NR_TO_ROOT(nr)])
                return NULL;
        return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define SECTION_MARKED_PRESENT  (1UL<<0)
#define SECTION_HAS_MEM_MAP     (1UL<<1)
#define SECTION_MAP_LAST_BIT    (1UL<<2)
#define SECTION_MAP_MASK        (~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT       2

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
        unsigned long map = section->section_mem_map;
        map &= SECTION_MAP_MASK;
        return (struct page *)map;
}
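/*
 * Illustrative sketch (hypothetical; the real encoding is done by
 * sparse.c::sparse_init_one_section()): publishing a mem_map pointer
 * sets the low flag bits that __section_mem_map_addr() masks off
 * again.
 */
static inline unsigned long __sketch_encode_mem_map(struct page *mem_map)
{
        return (unsigned long)mem_map
                | SECTION_MARKED_PRESENT | SECTION_HAS_MEM_MAP;
}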
static inline int valid_section(struct mem_section *section)
{
        return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int section_has_mem_map(struct mem_section *section)
{
        return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
        return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
        return __nr_to_section(pfn_to_section_nr(pfn));
}

static inline int pfn_valid(unsigned long pfn)
{
        if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
                return 0;
        return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
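/*
 * Illustrative usage (assumption, not from the original header): with
 * SPARSEMEM, memory holes have no backing mem_map, so callers check
 * pfn_valid() before converting:
 *
 *      if (pfn_valid(pfn))
 *              page = pfn_to_page(pfn);
 */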
/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)                                         \
({                                                              \
        unsigned long __pfn_to_nid_pfn = (pfn);                 \
        page_to_nid(pfn_to_page(__pfn_to_nid_pfn));             \
})
#else
#define pfn_to_nid(pfn)         (0)
#endif

#define early_pfn_valid(pfn)    pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()   do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#endif /* CONFIG_SPARSEMEM */
#ifndef early_pfn_valid
#define early_pfn_valid(pfn)    (1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _LINUX_MMZONE_H */