/* include/linux/mmzone.h */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H
#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <generated/bounds.h>
#include <linux/atomic.h>
#include <asm/page.h>
/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
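/*
 * Illustrative note (not part of the original header): with the default
 * MAX_ORDER of 11 and 4 KiB pages, the largest buddy block is
 * MAX_ORDER_NR_PAGES = 1 << 10 = 1024 pages, i.e. 4 MiB of physically
 * contiguous memory.
 */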
/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service. That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3
enum {
        MIGRATE_UNMOVABLE,
        MIGRATE_RECLAIMABLE,
        MIGRATE_MOVABLE,
        MIGRATE_PCPTYPES,       /* the number of types on the pcp lists */
        MIGRATE_RESERVE = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
        /*
         * The MIGRATE_CMA migration type is designed to mimic the way
         * ZONE_MOVABLE works.  Only movable pages can be allocated
         * from MIGRATE_CMA pageblocks and the page allocator never
         * implicitly changes the migration type of a MIGRATE_CMA pageblock.
         *
         * The way to use it is to change the migratetype of a range of
         * pageblocks to MIGRATE_CMA, which can be done by the
         * __free_pageblock_cma() function.  What is important, though,
         * is that the range of pageblocks must be aligned to
         * MAX_ORDER_NR_PAGES should the biggest page be bigger than
         * a single pageblock.
         */
        MIGRATE_CMA,
#endif
        MIGRATE_ISOLATE,        /* can't allocate from here */
        MIGRATE_TYPES
};
#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define cma_wmark_pages(zone) zone->min_cma_pages
#else
# define is_migrate_cma(migratetype) false
# define cma_wmark_pages(zone) 0
#endif
#define for_each_migratetype_order(order, type) \
        for (order = 0; order < MAX_ORDER; order++) \
                for (type = 0; type < MIGRATE_TYPES; type++)
extern int page_group_by_mobility_disabled;
static inline int get_pageblock_migratetype(struct page *page)
{
        return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
}
struct free_area {
        struct list_head free_list[MIGRATE_TYPES];
        unsigned long nr_free;
};
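/*
 * Illustrative sketch (not part of the original header): how a caller
 * holding zone->lock might walk the per-order, per-migratetype free
 * lists using for_each_migratetype_order() above.  count_free_pages()
 * is a hypothetical helper used only for this example.
 *
 *      static unsigned long count_free_pages(struct zone *zone)
 *      {
 *              unsigned long nr = 0;
 *              unsigned int order;
 *              int type;
 *
 *              for_each_migratetype_order(order, type) {
 *                      struct free_area *area = &zone->free_area[order];
 *                      struct page *page;
 *
 *                      list_for_each_entry(page, &area->free_list[type], lru)
 *                              nr += 1UL << order;
 *              }
 *              return nr;
 *      }
 */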
struct pglist_data;
/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
        char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)      struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif
enum zone_stat_item {
        /* First 128 byte cacheline (assuming 64 bit words) */
        NR_FREE_PAGES,
        NR_LRU_BASE,
        NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
        NR_ACTIVE_ANON,         /*  "     "     "   "       "         */
        NR_INACTIVE_FILE,       /*  "     "     "   "       "         */
        NR_ACTIVE_FILE,         /*  "     "     "   "       "         */
        NR_UNEVICTABLE,         /*  "     "     "   "       "         */
        NR_MLOCK,               /* mlock()ed pages found and moved off LRU */
        NR_ANON_PAGES,          /* Mapped anonymous pages */
        NR_FILE_MAPPED,         /* pagecache pages mapped into pagetables.
                                   only modified from process context */
        NR_FILE_PAGES,
        NR_FILE_DIRTY,
        NR_WRITEBACK,
        NR_SLAB_RECLAIMABLE,
        NR_SLAB_UNRECLAIMABLE,
        NR_PAGETABLE,           /* used for pagetables */
        NR_KERNEL_STACK,
        /* Second 128 byte cacheline */
        NR_UNSTABLE_NFS,        /* NFS unstable pages */
        NR_BOUNCE,
        NR_VMSCAN_WRITE,
        NR_VMSCAN_IMMEDIATE,    /* Prioritise for reclaim when writeback ends */
        NR_WRITEBACK_TEMP,      /* Writeback using temporary buffers */
        NR_ISOLATED_ANON,       /* Temporary isolated pages from anon lru */
        NR_ISOLATED_FILE,       /* Temporary isolated pages from file lru */
        NR_SHMEM,               /* shmem pages (includes tmpfs/GEM pages) */
        NR_DIRTIED,             /* page dirtyings since bootup */
        NR_WRITTEN,             /* page writings since bootup */
#ifdef CONFIG_NUMA
        NUMA_HIT,               /* allocated in intended node */
        NUMA_MISS,              /* allocated in non-intended node */
        NUMA_FOREIGN,           /* was intended here, hit elsewhere */
        NUMA_INTERLEAVE_HIT,    /* interleaver preferred this zone */
        NUMA_LOCAL,             /* allocation from local node */
        NUMA_OTHER,             /* allocation from other node */
#endif
        NR_ANON_TRANSPARENT_HUGEPAGES,
        NR_VM_ZONE_STAT_ITEMS };
/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2
enum lru_list {
        LRU_INACTIVE_ANON = LRU_BASE,
        LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
        LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
        LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
        LRU_UNEVICTABLE,
        NR_LRU_LISTS
};
#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
static inline int is_file_lru(enum lru_list lru)
{
        return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}
static inline int is_active_lru(enum lru_list lru)
{
        return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
static inline int is_unevictable_lru(enum lru_list lru)
{
        return (lru == LRU_UNEVICTABLE);
}
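/*
 * Illustrative sketch (not part of the original header): the helpers
 * above let reclaim code branch on the kind of list rather than on the
 * individual enumerators, e.g.
 *
 *      enum lru_list lru;
 *
 *      for_each_evictable_lru(lru) {
 *              if (is_active_lru(lru))
 *                      shrink_active(lru);
 *              else
 *                      shrink_inactive(lru);
 *      }
 *
 * where shrink_active() and shrink_inactive() are hypothetical helpers
 * used only for this example.
 */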
struct zone_reclaim_stat {
        /*
         * The pageout code in vmscan.c keeps track of how many of the
         * mem/swap backed and file backed pages are referenced.
         * The higher the rotated/scanned ratio, the more valuable
         * that cache is.
         *
         * The anon LRU stats live in [0], file LRU stats in [1]
         */
        unsigned long recent_rotated[2];
        unsigned long recent_scanned[2];
};
struct lruvec {
        struct list_head lists[NR_LRU_LISTS];
        struct zone_reclaim_stat reclaim_stat;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
        struct zone *zone;
#endif
};
/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON)
#define LRU_ALL      ((1 << NR_LRU_LISTS) - 1)
/* Isolate clean file */
#define ISOLATE_CLEAN           ((__force isolate_mode_t)0x1)
/* Isolate unmapped file */
#define ISOLATE_UNMAPPED        ((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE   ((__force isolate_mode_t)0x4)
/* LRU Isolation modes. */
typedef unsigned __bitwise__ isolate_mode_t;
enum zone_watermarks {
        WMARK_MIN,
        WMARK_LOW,
        WMARK_HIGH,
        NR_WMARK
};
#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
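/*
 * Illustrative sketch (not part of the original header): a caller can
 * compare a zone's free page count against these marks, roughly the
 * test kswapd and the allocator fast path perform.  zone_page_state()
 * is declared in linux/vmstat.h, which is not included here; treating
 * it as available is an assumption of this example.
 *
 *      static bool zone_below_low_wmark(struct zone *z)
 *      {
 *              return zone_page_state(z, NR_FREE_PAGES) < low_wmark_pages(z);
 *      }
 */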
struct per_cpu_pages {
        int count;              /* number of pages in the list */
        int high;               /* high watermark, emptying needed */
        int batch;              /* chunk size for buddy add/remove */
        /* Lists of pages, one per migrate type stored on the pcp-lists */
        struct list_head lists[MIGRATE_PCPTYPES];
};
struct per_cpu_pageset {
        struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
        s8 expire;
#endif
#ifdef CONFIG_SMP
        s8 stat_threshold;
        s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};
#endif /* !__GENERATING_BOUNDS.H */
enum zone_type {
#ifdef CONFIG_ZONE_DMA
        /*
         * ZONE_DMA is used when there are devices that are not able
         * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
         * carve out the portion of memory that is needed for these devices.
         * The range is arch specific.
         *
         * Some examples
         *
         * Architecture         Limit
         * ---------------------------
         * parisc, ia64, sparc  <4G
         * s390                 <2G
         * arm                  Various
         * alpha                Unlimited or 0-16MB.
         *
         * i386, x86_64 and multiple other arches
         *                      <16M.
         */
        ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
        /*
         * x86_64 needs two ZONE_DMAs because it supports devices that are
         * only able to do DMA to the lower 16M but also 32 bit devices that
         * can only do DMA to areas below 4G.
         */
        ZONE_DMA32,
#endif
        /*
         * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
         * performed on pages in ZONE_NORMAL if the DMA devices support
         * transfers to all addressable memory.
         */
        ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
        /*
         * A memory area that is only addressable by the kernel through
         * mapping portions into its own address space. This is for example
         * used by i386 to allow the kernel to address the memory beyond
         * 900MB. The kernel will set up special mappings (page
         * table entries on i386) for each page that the kernel needs to
         * access.
         */
        ZONE_HIGHMEM,
#endif
        ZONE_MOVABLE,
        __MAX_NR_ZONES
};
#ifndef __GENERATING_BOUNDS_H
/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits.  These bits
 * are used to select a priority ordered list of memory zones which
 * match the requested limits. See gfp_zone() in include/linux/gfp.h
 */
#if MAX_NR_ZONES < 2
#define ZONES_SHIFT 0
#elif MAX_NR_ZONES <= 2
#define ZONES_SHIFT 1
#elif MAX_NR_ZONES <= 4
#define ZONES_SHIFT 2
#else
#error ZONES_SHIFT -- too many zones configured adjust calculation
#endif
struct zone {
        /* Fields commonly accessed by the page allocator */
        /* zone watermarks, access with *_wmark_pages(zone) macros */
        unsigned long watermark[NR_WMARK];
        /*
         * When free pages are below this point, additional steps are taken
         * when reading the number of free pages to avoid per-cpu counter
         * drift allowing watermarks to be breached
         */
        unsigned long percpu_drift_mark;
        /*
         * We don't know if the memory that we're going to allocate will be
         * freeable or whether it will be released eventually, so to avoid
         * totally wasting several GB of ram we must reserve some of the lower
         * zone memory (otherwise we risk running OOM on the lower zones
         * despite there being tons of freeable ram on the higher zones).
         * This array is recalculated at runtime if the
         * sysctl_lowmem_reserve_ratio sysctl changes.
         */
        unsigned long lowmem_reserve[MAX_NR_ZONES];
        /*
         * This is a per-zone reserve of pages that should not be
         * considered dirtyable memory.
         */
        unsigned long dirty_balance_reserve;
#ifdef CONFIG_NUMA
        int node;
        /*
         * zone reclaim becomes active if more unmapped pages exist.
         */
        unsigned long min_unmapped_pages;
        unsigned long min_slab_pages;
#endif
        struct per_cpu_pageset __percpu *pageset;
        /*
         * free areas of different sizes
         */
        spinlock_t lock;
        int all_unreclaimable;          /* All pages pinned */
#ifdef CONFIG_MEMORY_HOTPLUG
        /* see spanned/present_pages for more description */
        seqlock_t span_seqlock;
#endif
#ifdef CONFIG_CMA
        /*
         * CMA needs to increase watermark levels during the allocation
         * process to make sure that the system is not starved.
         */
        unsigned long min_cma_pages;
#endif
        struct free_area free_area[MAX_ORDER];
#ifndef CONFIG_SPARSEMEM
        /*
         * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
         * In SPARSEMEM, this map is stored in struct mem_section
         */
        unsigned long *pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
#ifdef CONFIG_COMPACTION
        /*
         * On compaction failure, 1<<compact_defer_shift compactions
         * are skipped before trying again. The number attempted since
         * last failure is tracked with compact_considered.
         */
        unsigned int compact_considered;
        unsigned int compact_defer_shift;
        int compact_order_failed;
#endif
        ZONE_PADDING(_pad1_)
        /* Fields commonly accessed by the page reclaim scanner */
        spinlock_t lru_lock;
        struct lruvec lruvec;
        unsigned long pages_scanned;    /* since last reclaim */
        unsigned long flags;            /* zone flags, see below */
        /* Zone statistics */
        atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
        /*
         * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
         * this zone's LRU.  Maintained by the pageout code.
         */
        unsigned int inactive_ratio;
        ZONE_PADDING(_pad2_)
        /* Rarely used or read-mostly fields */
        /*
         * wait_table           -- the array holding the hash table
         * wait_table_hash_nr_entries  -- the size of the hash table array
         * wait_table_bits      -- wait_table_size == (1 << wait_table_bits)
         *
         * The purpose of all these is to keep track of the people
         * waiting for a page to become available and make them
         * runnable again when possible. The trouble is that this
         * consumes a lot of space, especially when so few things
         * wait on pages at a given time. So instead of using
         * per-page waitqueues, we use a waitqueue hash table.
         *
         * The bucket discipline is to sleep on the same queue when
         * colliding and wake all in that wait queue when removing.
         * When something wakes, it must check to be sure its page is
         * truly available, a la thundering herd. The cost of a
         * collision is great, but given the expected load of the
         * table, they should be so rare as to be outweighed by the
         * benefits from the saved space.
         *
         * __wait_on_page_locked() and unlock_page() in mm/filemap.c are the
         * primary users of these fields, and in mm/page_alloc.c
         * free_area_init_core() performs the initialization of them.
         */
        wait_queue_head_t *wait_table;
        unsigned long wait_table_hash_nr_entries;
        unsigned long wait_table_bits;
        /*
         * Discontig memory support fields.
         */
        struct pglist_data *zone_pgdat;
        /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
        unsigned long zone_start_pfn;
        /*
         * zone_start_pfn, spanned_pages and present_pages are all
         * protected by span_seqlock.  It is a seqlock because it has
         * to be read outside of zone->lock, and it is done in the main
         * allocator path.  But, it is written quite infrequently.
         *
         * The lock is declared along with zone->lock because it is
         * frequently read in proximity to zone->lock.  It's good to
         * give them a chance of being in the same cacheline.
         */
        unsigned long spanned_pages;    /* total size, including holes */
        unsigned long present_pages;    /* amount of memory (excluding holes) */
        /*
         * rarely used fields:
         */
        const char *name;
} ____cacheline_internodealigned_in_smp;
typedef enum {
        ZONE_RECLAIM_LOCKED,            /* prevents concurrent reclaim */
        ZONE_OOM_LOCKED,                /* zone is in OOM killer zonelist */
        ZONE_CONGESTED,                 /* zone has many dirty pages backed by
                                         * a congested BDI
                                         */
} zone_flags_t;
static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
{
        set_bit(flag, &zone->flags);
}
static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
{
        return test_and_set_bit(flag, &zone->flags);
}
static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
{
        clear_bit(flag, &zone->flags);
}
static inline int zone_is_reclaim_congested(const struct zone *zone)
{
        return test_bit(ZONE_CONGESTED, &zone->flags);
}
static inline int zone_is_reclaim_locked(const struct zone *zone)
{
        return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
}
static inline int zone_is_oom_locked(const struct zone *zone)
{
        return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}
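/*
 * Illustrative sketch (not part of the original header): the flag
 * helpers above wrap atomic bitops on zone->flags, so reclaim paths can
 * serialise on a zone without taking a lock, e.g.
 *
 *      if (!zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED)) {
 *              do_zone_reclaim(zone);
 *              zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
 *      }
 *
 * where do_zone_reclaim() is a hypothetical stand-in for the real
 * reclaim work done in mm/vmscan.c.
 */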
/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12
/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
#ifdef CONFIG_NUMA
/*
 * The NUMA zonelists are doubled because we need zonelists that restrict the
 * allocations to a single node for GFP_THISNODE.
 *
 * [0]  : Zonelist with fallback
 * [1]  : No fallback (GFP_THISNODE)
 */
#define MAX_ZONELISTS 2
/*
 * We cache key information from each zonelist for smaller cache
 * footprint when scanning for free pages in get_page_from_freelist().
 *
 * 1) The BITMAP fullzones tracks which zones in a zonelist have come
 *    up short of free memory since the last time (last_fullzone_zap)
 *    we zero'd fullzones.
 * 2) The array z_to_n[] maps each zone in the zonelist to its node
 *    id, so that we can efficiently evaluate whether that node is
 *    set in the current task's mems_allowed.
 *
 * Both fullzones and z_to_n[] are one-to-one with the zonelist,
 * indexed by a zone's offset in the zonelist zones[] array.
 *
 * The get_page_from_freelist() routine does two scans.  During the
 * first scan, we skip zones whose corresponding bit in 'fullzones'
 * is set or whose corresponding node in current->mems_allowed (which
 * comes from cpusets) is not set.  During the second scan, we bypass
 * this zonelist_cache, to ensure we look methodically at each zone.
 *
 * Once per second, we zero out (zap) fullzones, forcing us to
 * reconsider nodes that might have regained more free memory.
 * The field last_full_zap is the time we last zapped fullzones.
 *
 * This mechanism reduces the amount of time we waste repeatedly
 * reexamining zones for free memory when they just came up short of
 * memory a moment ago.
 *
 * The zonelist_cache struct members logically belong in struct
 * zonelist.  However, the mempolicy zonelists constructed for
 * MPOL_BIND are intentionally variable length (and usually much
 * shorter).  A general purpose mechanism for handling structs with
 * multiple variable length members is more mechanism than we want
 * here.  We resort to some special case hackery instead.
 *
 * The MPOL_BIND zonelists don't need this zonelist_cache (in good
 * part because they are shorter), so we put the fixed length stuff
 * at the front of the zonelist struct, ending in a variable length
 * zones[], as is needed by MPOL_BIND.
 *
 * Then we put the optional zonelist cache on the end of the zonelist
 * struct.  This optional stuff is found by a 'zlcache_ptr' pointer in
 * the fixed length portion at the front of the struct.  This pointer
 * both enables us to find the zonelist cache, and in the case of
 * MPOL_BIND zonelists (which will just set the zlcache_ptr to NULL)
 * to know that the zonelist cache is not there.
 *
 * The end result is that struct zonelists come in two flavors:
 * 1) The full, fixed length version, shown below, and
 * 2) The custom zonelists for MPOL_BIND.
 * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
 *
 * Even though there may be multiple CPU cores on a node modifying
 * fullzones or last_full_zap in the same zonelist_cache at the same
 * time, we don't lock it.  This is just hint data - if it is wrong now
 * and then, the allocator will still function, perhaps a bit slower.
 */
struct zonelist_cache {
        unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];          /* zone->nid */
        DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);      /* zone full? */
        unsigned long last_full_zap;            /* when last zap'd (jiffies) */
};
#else
#define MAX_ZONELISTS 1
struct zonelist_cache;
#endif
/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
        struct zone *zone;      /* Pointer to actual zone */
        int zone_idx;           /* zone_idx(zoneref->zone) */
};
/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * If zlcache_ptr is not NULL, then it is just the address of zlcache,
 * as explained above.  If zlcache_ptr is NULL, there is no zlcache.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()      - Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()  - Return the index of the zone for an entry
 * zonelist_node_idx()  - Return the index of the node for an entry
 */
struct zonelist {
        struct zonelist_cache *zlcache_ptr;                  // NULL or &zlcache
        struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
#ifdef CONFIG_NUMA
        struct zonelist_cache zlcache;                       // optional ...
#endif
};
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
struct node_active_region {
        unsigned long start_pfn;
        unsigned long end_pfn;
        int nid;
};
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif
/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
        struct zone node_zones[MAX_NR_ZONES];
        struct zonelist node_zonelists[MAX_ZONELISTS];
        int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
        struct page *node_mem_map;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
        struct page_cgroup *node_page_cgroup;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
        struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
        /*
         * Must be held any time you expect node_start_pfn, node_present_pages
         * or node_spanned_pages to stay constant.  Holding this will also
         * guarantee that any pfn_valid() stays that way.
         *
         * Nests above zone->lock and zone->span_seqlock.
         */
        spinlock_t node_size_lock;
#endif
        unsigned long node_start_pfn;
        unsigned long node_present_pages; /* total number of physical pages */
        unsigned long node_spanned_pages; /* total size of physical page
                                             range, including holes */
        int node_id;
        wait_queue_head_t kswapd_wait;
        struct task_struct *kswapd;     /* Protected by lock_memory_hotplug() */
        int kswapd_max_order;
        enum zone_type classzone_idx;
} pg_data_t;
#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)    ((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)    pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)        pgdat_page_nr(NODE_DATA(nid),(pagenr))
#define node_start_pfn(nid)     (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) ({\
        pg_data_t *__pgdat = NODE_DATA(nid);\
        __pgdat->node_start_pfn + __pgdat->node_spanned_pages;\
})
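/*
 * Illustrative sketch (not part of the original header): node_start_pfn()
 * and node_end_pfn() bound the PFN range spanned by a node, which may
 * contain holes, so each PFN still has to be validated before use, e.g.
 *
 *      unsigned long pfn;
 *
 *      for (pfn = node_start_pfn(nid); pfn < node_end_pfn(nid); pfn++) {
 *              if (!pfn_valid(pfn))
 *                      continue;
 *              inspect_page(pfn_to_page(pfn));
 *      }
 *
 * where inspect_page() is a hypothetical helper used only for this example.
 */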
#include <linux/memory_hotplug.h>
extern struct mutex zonelists_mutex;
void build_all_zonelists(void *data);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                int classzone_idx, int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
                int classzone_idx, int alloc_flags);
enum memmap_context {
        MEMMAP_EARLY,
        MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
                                     unsigned long size,
                                     enum memmap_context context);
extern void lruvec_init(struct lruvec *lruvec, struct zone *zone);
static inline struct zone *lruvec_zone(struct lruvec *lruvec)
{
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
        return lruvec->zone;
#else
        return container_of(lruvec, struct zone, lruvec);
#endif
}
#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif
#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif
/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)          ((zone) - (zone)->zone_pgdat->node_zones)
static inline int populated_zone(struct zone *zone)
{
        return (!!zone->present_pages);
}
extern int movable_zone;
static inline int zone_movable_is_highmem(void)
{
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
        return movable_zone == ZONE_HIGHMEM;
#else
        return 0;
#endif
}
static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
        return (idx == ZONE_HIGHMEM ||
                (idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
        return 0;
#endif
}
static inline int is_normal_idx(enum zone_type idx)
{
        return (idx == ZONE_NORMAL);
}
/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
        int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
        return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
               (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
                zone_movable_is_highmem());
#else
        return 0;
#endif
}
static inline int is_normal(struct zone *zone)
{
        return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}
static inline int is_dma32(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA32
        return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
#else
        return 0;
#endif
}
static inline int is_dma(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA
        return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
#else
        return 0;
#endif
}
/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
extern int numa_zonelist_order_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16      /* string buffer size */
#ifndef CONFIG_NEED_MULTIPLE_NODES
extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)          (&contig_page_data)
#define NODE_MEM_MAP(nid)       mem_map
#else /* CONFIG_NEED_MULTIPLE_NODES */
#include <asm/mmzone.h>
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);
/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)                    \
        for (pgdat = first_online_pgdat();              \
             pgdat;                                     \
             pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)                             \
        for (zone = (first_online_pgdat())->node_zones; \
             zone;                                      \
             zone = next_zone(zone))
#define for_each_populated_zone(zone)                   \
        for (zone = (first_online_pgdat())->node_zones; \
             zone;                                      \
             zone = next_zone(zone))                    \
                if (!populated_zone(zone))              \
                        ; /* do nothing */              \
                else
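/*
 * Illustrative sketch (not part of the original header): walking every
 * populated zone in the system, roughly what the vmstat and OOM report
 * paths do.  pr_info() and zone_page_state() come from headers not
 * included here (printk.h and linux/vmstat.h), so their availability is
 * an assumption of this example.
 *
 *      struct zone *zone;
 *
 *      for_each_populated_zone(zone)
 *              pr_info("node %d zone %s: %lu free pages\n",
 *                      zone->zone_pgdat->node_id, zone->name,
 *                      zone_page_state(zone, NR_FREE_PAGES));
 */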
static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
        return zoneref->zone;
}
static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
        return zoneref->zone_idx;
}
static inline int zonelist_node_idx(struct zoneref *zoneref)
{
#ifdef CONFIG_NUMA
        /* zone_to_nid not available in this context */
        return zoneref->zone->node;
#else
        return 0;
#endif /* CONFIG_NUMA */
}
/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
struct zoneref *next_zones_zonelist(struct zoneref *z,
                                        enum zone_type highest_zoneidx,
                                        nodemask_t *nodes,
                                        struct zone **zone);
/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
                                        enum zone_type highest_zoneidx,
                                        nodemask_t *nodes,
                                        struct zone **zone)
{
        return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
                                                                zone);
}
/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
        for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \
                zone;                                                   \
                z = next_zones_zonelist(++z, highidx, nodemask, &zone))
/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
        for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
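/*
 * Illustrative sketch (not part of the original header): iterating a
 * node's fallback zonelist the way the allocator does.  node_zonelist()
 * and gfp_zone() are declared in linux/gfp.h, which is not included
 * here, so their availability is an assumption of this example.
 *
 *      struct zonelist *zonelist = node_zonelist(nid, GFP_KERNEL);
 *      enum zone_type high_zoneidx = gfp_zone(GFP_KERNEL);
 *      struct zoneref *z;
 *      struct zone *zone;
 *
 *      for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 *              if (zone_watermark_ok(zone, 0, low_wmark_pages(zone),
 *                                    zone_idx(zone), 0))
 *                      break;
 *      }
 */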
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif
#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
        !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
        return 0;
}
#endif
#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)         (0)
#endif
#ifdef CONFIG_SPARSEMEM
/*
 * SECTIONS_SHIFT       #bits space required to store a section #
 *
 * PA_SECTION_SHIFT     physical address to/from section number
 * PFN_SECTION_SHIFT    pfn to/from section number
 */
#define SECTIONS_SHIFT          (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)
#define PA_SECTION_SHIFT        (SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT       (SECTION_SIZE_BITS - PAGE_SHIFT)
#define NR_MEM_SECTIONS         (1UL << SECTIONS_SHIFT)
#define PAGES_PER_SECTION       (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK       (~(PAGES_PER_SECTION-1))
#define SECTION_BLOCKFLAGS_BITS \
        ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif
#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
#define SECTION_ALIGN_UP(pfn)   (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
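/*
 * Illustrative note (not part of the original header): on x86_64,
 * SECTION_SIZE_BITS is 27 and PAGE_SHIFT is 12, so PFN_SECTION_SHIFT
 * is 15 and each section covers PAGES_PER_SECTION = 32768 pages
 * (128 MiB).  For pfn 0x12345, pfn_to_section_nr(0x12345) is
 * 0x12345 >> 15 = 2, and SECTION_ALIGN_DOWN(0x12345) is 0x10000.
 */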
struct page;
struct page_cgroup;
struct mem_section {
        /*
         * This is, logically, a pointer to an array of struct
         * pages.  However, it is stored with some other magic.
         * (see sparse.c::sparse_init_one_section())
         *
         * Additionally during early boot we encode node id of
         * the location of the section here to guide allocation.
         * (see sparse.c::memory_present())
         *
         * Making it a UL at least makes someone do a cast
         * before using it wrong.
         */
        unsigned long section_mem_map;
        /* See declaration of similar field in struct zone */
        unsigned long *pageblock_flags;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
        /*
         * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use
         * section. (see memcontrol.h/page_cgroup.h about this.)
         */
        struct page_cgroup *page_cgroup;
        unsigned long pad;
#endif
};
#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT       (PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT       1
#endif
#define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS        DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK       (SECTIONS_PER_ROOT - 1)
#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif
static inline struct mem_section *__nr_to_section(unsigned long nr)
{
        if (!mem_section[SECTION_NR_TO_ROOT(nr)])
                return NULL;
        return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);
/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define SECTION_MARKED_PRESENT  (1UL<<0)
#define SECTION_HAS_MEM_MAP     (1UL<<1)
#define SECTION_MAP_LAST_BIT    (1UL<<2)
#define SECTION_MAP_MASK        (~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT       2
static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
        unsigned long map = section->section_mem_map;
        map &= SECTION_MAP_MASK;
        return (struct page *)map;
}
static inline int present_section(struct mem_section *section)
{
        return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}
static inline int present_section_nr(unsigned long nr)
{
        return present_section(__nr_to_section(nr));
}
static inline int valid_section(struct mem_section *section)
{
        return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}
static inline int valid_section_nr(unsigned long nr)
{
        return valid_section(__nr_to_section(nr));
}
static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
        return __nr_to_section(pfn_to_section_nr(pfn));
}
#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
        if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
                return 0;
        return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif
static inline int pfn_present(unsigned long pfn)
{
        if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
                return 0;
        return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
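/*
 * Illustrative sketch (not part of the original header): under SPARSEMEM
 * (without VMEMMAP) the pfn-to-page translation is built from the
 * helpers above; the real definition lives in asm-generic/memory_model.h,
 * so the body below is only an approximation of that scheme, for a pfn
 * already known to be valid.
 *
 *      static struct page *sparse_pfn_to_page(unsigned long pfn)
 *      {
 *              struct mem_section *sec = __pfn_to_section(pfn);
 *
 *              return __section_mem_map_addr(sec) + pfn;
 *      }
 *
 * The encoded section_mem_map is biased by the section's start pfn when
 * it is stored, which is why the full pfn can be added directly.
 */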
/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)                                                 \
({                                                                      \
        unsigned long __pfn_to_nid_pfn = (pfn);                         \
        page_to_nid(pfn_to_page(__pfn_to_nid_pfn));                     \
})
#else
#define pfn_to_nid(pfn)         (0)
#endif
#define early_pfn_valid(pfn)    pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()   do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#endif /* CONFIG_SPARSEMEM */
#ifdef CONFIG_NODES_SPAN_OTHER_NODES
bool early_pfn_in_nid(unsigned long pfn, int nid);
#else
#define early_pfn_in_nid(pfn, nid)      (1)
#endif
#ifndef early_pfn_valid
#define early_pfn_valid(pfn)    (1)
#endif
void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then we
 * need to check pfn validity within that block.  pfn_valid_within()
 * should be used in this case; we optimise this away when we have no
 * holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif
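/*
 * Illustrative sketch (not part of the original header): code that scans
 * a buddy-sized block, such as the compaction and page isolation paths,
 * guards each pfn with pfn_valid_within() after validating the block
 * start.  Given some pfn inside the block:
 *
 *      unsigned long start = pfn & ~(MAX_ORDER_NR_PAGES - 1);
 *      unsigned long i;
 *
 *      for (i = start; i < start + MAX_ORDER_NR_PAGES; i++) {
 *              if (!pfn_valid_within(i))
 *                      continue;
 *              examine_page(pfn_to_page(i));
 *      }
 *
 * where examine_page() is a hypothetical helper used only for this
 * example.
 */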
#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not. In FLATMEM, it is expected that holes always
 * have valid memmap as long as there are valid PFNs either side of the hole.
 * In SPARSEMEM, it is assumed that a valid section has a memmap for the
 * entire section.
 *
 * However, ARM, and maybe other embedded architectures in the future,
 * free the memmap backing holes to save memory on the assumption the
 * memmap is never used.  The page_zone linkages are then broken even
 * though pfn_valid() returns true.  A walker of the full memmap must
 * then do this additional check to ensure the memmap it is looking at
 * is sane by making sure the zone and PFN linkages are still valid.
 * This is expensive, but walkers of the full memmap are extremely rare.
 */
int memmap_valid_within(unsigned long pfn,
                        struct page *page, struct zone *zone);
#else
static inline int memmap_valid_within(unsigned long pfn,
                                struct page *page, struct zone *zone)
{
        return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
#endif /* !__GENERATING_BOUNDS.H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */