#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/bounds.h>
#include <asm/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

#define MIGRATE_UNMOVABLE     0
#define MIGRATE_RECLAIMABLE   1
#define MIGRATE_MOVABLE       2
#define MIGRATE_RESERVE       3
#define MIGRATE_ISOLATE       4 /* can't allocate from here */
#define MIGRATE_TYPES         5

#define for_each_migratetype_order(order, type) \
        for (order = 0; order < MAX_ORDER; order++) \
                for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

static inline int get_pageblock_migratetype(struct page *page)
{
        if (unlikely(page_group_by_mobility_disabled))
                return MIGRATE_UNMOVABLE;

        return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
}

struct free_area {
        struct list_head free_list[MIGRATE_TYPES];
        unsigned long    nr_free;
};
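
/*
 * Illustrative sketch (hypothetical helper, not part of this header): how a
 * free_area's per-migratetype lists are typically walked.  The example counts
 * the free blocks queued for one migratetype on one free_area; a real caller
 * would hold zone->lock while touching these lists.
 */
static inline unsigned long example_count_free_blocks(struct free_area *area,
                                                      int migratetype)
{
        struct list_head *pos;
        unsigned long blocks = 0;

        /* free_list[] is indexed by the MIGRATE_* values defined above */
        list_for_each(pos, &area->free_list[migratetype])
                blocks++;
        return blocks;
}
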
struct pglist_data;

/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
        char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name) struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
        /* First 128 byte cacheline (assuming 64 bit words) */
        NR_FREE_PAGES,
        NR_LRU_BASE,
        NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
        NR_ACTIVE_ANON,         /* " " " " " */
        NR_INACTIVE_FILE,       /* " " " " " */
        NR_ACTIVE_FILE,         /* " " " " " */
#ifdef CONFIG_UNEVICTABLE_LRU
        NR_UNEVICTABLE,         /* " " " " " */
        NR_MLOCK,               /* mlock()ed pages found and moved off LRU */
#else
        NR_UNEVICTABLE = NR_ACTIVE_FILE, /* avoid compiler errors in dead code */
        NR_MLOCK = NR_ACTIVE_FILE,
#endif
        NR_ANON_PAGES,          /* Mapped anonymous pages */
        NR_FILE_MAPPED,         /* pagecache pages mapped into pagetables.
                                   only modified from process context */
        NR_FILE_PAGES,
        NR_FILE_DIRTY,
        NR_WRITEBACK,
        NR_SLAB_RECLAIMABLE,
        NR_SLAB_UNRECLAIMABLE,
        NR_PAGETABLE,           /* used for pagetables */
        NR_UNSTABLE_NFS,        /* NFS unstable pages */
        NR_BOUNCE,
        NR_VMSCAN_WRITE,
        /* Second 128 byte cacheline */
        NR_WRITEBACK_TEMP,      /* Writeback using temporary buffers */
#ifdef CONFIG_NUMA
        NUMA_HIT,               /* allocated in intended node */
        NUMA_MISS,              /* allocated in non intended node */
        NUMA_FOREIGN,           /* was intended here, hit elsewhere */
        NUMA_INTERLEAVE_HIT,    /* interleaver preferred this zone */
        NUMA_LOCAL,             /* allocation from local node */
        NUMA_OTHER,             /* allocation from other node */
#endif
        NR_VM_ZONE_STAT_ITEMS };

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
        LRU_INACTIVE_ANON = LRU_BASE,
        LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
        LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
        LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
#ifdef CONFIG_UNEVICTABLE_LRU
        LRU_UNEVICTABLE,
#else
        LRU_UNEVICTABLE = LRU_ACTIVE_FILE, /* avoid compiler errors in dead code */
#endif
        NR_LRU_LISTS
};

#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)

#define for_each_evictable_lru(l) for (l = 0; l <= LRU_ACTIVE_FILE; l++)

static inline int is_file_lru(enum lru_list l)
{
        return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list l)
{
        return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);
}

static inline int is_unevictable_lru(enum lru_list l)
{
#ifdef CONFIG_UNEVICTABLE_LRU
        return (l == LRU_UNEVICTABLE);
#else
        return 0;
#endif
}
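
/*
 * Illustrative sketch (hypothetical helper, not part of this header) of the
 * arithmetic the comment above LRU_BASE describes: because the active lists
 * sit exactly LRU_ACTIVE above their inactive counterparts, and the file
 * lists sit LRU_FILE above the anon ones, the list a page belongs on can be
 * computed from two booleans instead of a switch statement.
 */
static inline enum lru_list example_lru_index(int file, int active)
{
        return LRU_BASE + (file ? LRU_FILE : 0) + (active ? LRU_ACTIVE : 0);
}
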
struct per_cpu_pages {
        int count;              /* number of pages in the list */
        int high;               /* high watermark, emptying needed */
        int batch;              /* chunk size for buddy add/remove */
        struct list_head list;  /* the list of pages */
};

struct per_cpu_pageset {
        struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
        s8 expire;
#endif
#ifdef CONFIG_SMP
        s8 stat_threshold;
        s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
} ____cacheline_aligned_in_smp;

#ifdef CONFIG_NUMA
#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
#else
#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
#endif

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
        /*
         * ZONE_DMA is used when there are devices that are not able
         * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
         * carve out the portion of memory that is needed for these devices.
         * The range is arch specific.
         *
         * Some examples
         *
         * Architecture         Limit
         * ---------------------------
         * parisc, ia64, sparc  <4G
         * s390                 <2G
         * arm                  Various
         * alpha                Unlimited or 0-16MB.
         *
         * i386, x86_64 and multiple other arches
         *                      <16M.
         */
        ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
        /*
         * x86_64 needs two ZONE_DMAs because it supports devices that are
         * only able to do DMA to the lower 16M but also 32 bit devices that
         * can only do DMA areas below 4G.
         */
        ZONE_DMA32,
#endif
        /*
         * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
         * performed on pages in ZONE_NORMAL if the DMA devices support
         * transfers to all addressable memory.
         */
        ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
        /*
         * A memory area that is only addressable by the kernel through
         * mapping portions into its own address space. This is for example
         * used by i386 to allow the kernel to address the memory beyond
         * 900MB. The kernel will set up special mappings (page
         * table entries on i386) for each page that the kernel needs to
         * access.
         */
        ZONE_HIGHMEM,
#endif
        ZONE_MOVABLE,
        __MAX_NR_ZONES
};

#ifndef __GENERATING_BOUNDS_H

/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits.  These bits
 * are used to select a priority ordered list of memory zones which
 * match the requested limits. See gfp_zone() in include/linux/gfp.h
 */
#if MAX_NR_ZONES < 2
#define ZONES_SHIFT 0
#elif MAX_NR_ZONES <= 2
#define ZONES_SHIFT 1
#elif MAX_NR_ZONES <= 4
#define ZONES_SHIFT 2
#else
#error ZONES_SHIFT -- too many zones configured adjust calculation
#endif

struct zone_reclaim_stat {
        /*
         * The pageout code in vmscan.c keeps track of how many of the
         * mem/swap backed and file backed pages are referenced.
         * The higher the rotated/scanned ratio, the more valuable
         * that cache is.
         *
         * The anon LRU stats live in [0], file LRU stats in [1]
         */
        unsigned long recent_rotated[2];
        unsigned long recent_scanned[2];
};
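
/*
 * Illustrative sketch (hypothetical helper, not part of this header): the
 * rotated/scanned ratio the comment above refers to.  vmscan.c compares the
 * anon ([0]) and file ([1]) ratios when deciding which LRU to scan harder;
 * the +1 below only avoids a division by zero in this example.
 */
static inline unsigned long example_reclaim_ratio(struct zone_reclaim_stat *stat,
                                                  int file)
{
        return (stat->recent_rotated[file] * 100) /
               (stat->recent_scanned[file] + 1);
}
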
struct zone {
        /* Fields commonly accessed by the page allocator */
        unsigned long pages_min, pages_low, pages_high;
        /*
         * We don't know if the memory that we're going to allocate will be freeable
         * or/and it will be released eventually, so to avoid totally wasting several
         * GB of ram we must reserve some of the lower zone memory (otherwise we risk
         * to run OOM on the lower zones despite there's tons of freeable ram
         * on the higher zones). This array is recalculated at runtime if the
         * sysctl_lowmem_reserve_ratio sysctl changes.
         */
        unsigned long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
        int node;
        /*
         * zone reclaim becomes active if more unmapped pages exist.
         */
        unsigned long min_unmapped_pages;
        unsigned long min_slab_pages;
        struct per_cpu_pageset *pageset[NR_CPUS];
#else
        struct per_cpu_pageset pageset[NR_CPUS];
#endif
        /*
         * free areas of different sizes
         */
        spinlock_t lock;
#ifdef CONFIG_MEMORY_HOTPLUG
        /* see spanned/present_pages for more description */
        seqlock_t span_seqlock;
#endif
        struct free_area free_area[MAX_ORDER];

#ifndef CONFIG_SPARSEMEM
        /*
         * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
         * In SPARSEMEM, this map is stored in struct mem_section
         */
        unsigned long *pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

        ZONE_PADDING(_pad1_)

        /* Fields commonly accessed by the page reclaim scanner */
        spinlock_t lru_lock;
        struct {
                struct list_head list;
                unsigned long nr_scan;
        } lru[NR_LRU_LISTS];

        struct zone_reclaim_stat reclaim_stat;

        unsigned long pages_scanned;    /* since last reclaim */
        unsigned long flags;            /* zone flags, see below */

        /* Zone statistics */
        atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

        /*
         * prev_priority holds the scanning priority for this zone.  It is
         * defined as the scanning priority at which we achieved our reclaim
         * target at the previous try_to_free_pages() or balance_pgdat()
         * invocation.
         *
         * We use prev_priority as a measure of how much stress page reclaim is
         * under - it drives the swappiness decision: whether to unmap mapped
         * pages.
         *
         * Access to this field is quite racy even on uniprocessor.  But
         * it is expected to average out OK.
         */
        int prev_priority;

        /*
         * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
         * this zone's LRU.  Maintained by the pageout code.
         */
        unsigned int inactive_ratio;

        ZONE_PADDING(_pad2_)
        /* Rarely used or read-mostly fields */

        /*
         * wait_table                   -- the array holding the hash table
         * wait_table_hash_nr_entries  -- the size of the hash table array
         * wait_table_bits              -- wait_table_size == (1 << wait_table_bits)
         *
         * The purpose of all these is to keep track of the people
         * waiting for a page to become available and make them
         * runnable again when possible. The trouble is that this
         * consumes a lot of space, especially when so few things
         * wait on pages at a given time. So instead of using
         * per-page waitqueues, we use a waitqueue hash table.
         *
         * The bucket discipline is to sleep on the same queue when
         * colliding and wake all in that wait queue when removing.
         * When something wakes, it must check to be sure its page is
         * truly available, a la thundering herd. The cost of a
         * collision is great, but given the expected load of the
         * table, they should be so rare as to be outweighed by the
         * benefits from the saved space.
         *
         * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
         * primary users of these fields, and in mm/page_alloc.c
         * free_area_init_core() performs the initialization of them.
         */
        wait_queue_head_t *wait_table;
        unsigned long wait_table_hash_nr_entries;
        unsigned long wait_table_bits;

        /*
         * Discontig memory support fields.
         */
        struct pglist_data *zone_pgdat;
        /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
        unsigned long zone_start_pfn;

        /*
         * zone_start_pfn, spanned_pages and present_pages are all
         * protected by span_seqlock.  It is a seqlock because it has
         * to be read outside of zone->lock, and it is done in the main
         * allocator path.  But, it is written quite infrequently.
         *
         * The lock is declared along with zone->lock because it is
         * frequently read in proximity to zone->lock.  It's good to
         * give them a chance of being in the same cacheline.
         */
        unsigned long spanned_pages;    /* total size, including holes */
        unsigned long present_pages;    /* amount of memory (excluding holes) */

        /*
         * rarely used fields:
         */
        const char *name;
} ____cacheline_internodealigned_in_smp;
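
/*
 * Illustrative sketch (hypothetical helper, not part of this header): how the
 * pages_min/pages_low/pages_high watermarks are read against the zone's free
 * page count.  The real check is zone_watermark_ok(), declared further below
 * and implemented in mm/page_alloc.c, which additionally accounts for
 * lowmem_reserve[] and the allocation order; kswapd is woken when a zone
 * drops below pages_low and goes back to sleep above pages_high.
 */
static inline int example_zone_below_low_watermark(struct zone *zone)
{
        unsigned long free = atomic_long_read(&zone->vm_stat[NR_FREE_PAGES]);

        return free < zone->pages_low;
}
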
typedef enum {
        ZONE_ALL_UNRECLAIMABLE,         /* all pages pinned */
        ZONE_RECLAIM_LOCKED,            /* prevents concurrent reclaim */
        ZONE_OOM_LOCKED,                /* zone is in OOM killer zonelist */
} zone_flags_t;

static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
{
        set_bit(flag, &zone->flags);
}

static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
{
        return test_and_set_bit(flag, &zone->flags);
}

static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
{
        clear_bit(flag, &zone->flags);
}

static inline int zone_is_all_unreclaimable(const struct zone *zone)
{
        return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags);
}

static inline int zone_is_reclaim_locked(const struct zone *zone)
{
        return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
}

static inline int zone_is_oom_locked(const struct zone *zone)
{
        return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}
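
/*
 * Illustrative sketch (hypothetical helper, not part of this header): the
 * flag helpers above are used as cheap per-zone locks.  Zone reclaim in
 * mm/vmscan.c, for example, only proceeds if it wins the ZONE_RECLAIM_LOCKED
 * bit, so concurrent tasks do not hammer the same zone.
 */
static inline int example_try_zone_reclaim(struct zone *zone)
{
        if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
                return 0;       /* somebody else is already reclaiming here */

        /* ... reclaim work would run here ... */

        zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
        return 1;
}
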
/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

#ifdef CONFIG_NUMA

/*
 * The NUMA zonelists are doubled because we need zonelists that restrict the
 * allocations to a single node for GFP_THISNODE.
 *
 * [0] : Zonelist with fallback
 * [1] : No fallback (GFP_THISNODE)
 */
#define MAX_ZONELISTS 2

/*
 * We cache key information from each zonelist for smaller cache
 * footprint when scanning for free pages in get_page_from_freelist().
 *
 * 1) The BITMAP fullzones tracks which zones in a zonelist have come
 *    up short of free memory since the last time (last_fullzone_zap)
 *    we zero'd fullzones.
 * 2) The array z_to_n[] maps each zone in the zonelist to its node
 *    id, so that we can efficiently evaluate whether that node is
 *    set in the current task's mems_allowed.
 *
 * Both fullzones and z_to_n[] are one-to-one with the zonelist,
 * indexed by a zone's offset in the zonelist zones[] array.
 *
 * The get_page_from_freelist() routine does two scans.  During the
 * first scan, we skip zones whose corresponding bit in 'fullzones'
 * is set or whose corresponding node in current->mems_allowed (which
 * comes from cpusets) is not set.  During the second scan, we bypass
 * this zonelist_cache, to ensure we look methodically at each zone.
 *
 * Once per second, we zero out (zap) fullzones, forcing us to
 * reconsider nodes that might have regained more free memory.
 * The field last_full_zap is the time we last zapped fullzones.
 *
 * This mechanism reduces the amount of time we waste repeatedly
 * reexamining zones for free memory when they just came up low on
 * memory momentarily ago.
 *
 * The zonelist_cache struct members logically belong in struct
 * zonelist.  However, the mempolicy zonelists constructed for
 * MPOL_BIND are intentionally variable length (and usually much
 * shorter).  A general purpose mechanism for handling structs with
 * multiple variable length members is more mechanism than we want
 * here.  We resort to some special case hackery instead.
 *
 * The MPOL_BIND zonelists don't need this zonelist_cache (in good
 * part because they are shorter), so we put the fixed length stuff
 * at the front of the zonelist struct, ending in a variable length
 * zones[], as is needed by MPOL_BIND.
 *
 * Then we put the optional zonelist cache on the end of the zonelist
 * struct.  This optional stuff is found by a 'zlcache_ptr' pointer in
 * the fixed length portion at the front of the struct.  This pointer
 * both enables us to find the zonelist cache, and in the case of
 * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL)
 * to know that the zonelist cache is not there.
 *
 * The end result is that struct zonelists come in two flavors:
 * 1) The full, fixed length version, shown below, and
 * 2) The custom zonelists for MPOL_BIND.
 * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
 *
 * Even though there may be multiple CPU cores on a node modifying
 * fullzones or last_full_zap in the same zonelist_cache at the same
 * time, we don't lock it.  This is just hint data - if it is wrong now
 * and then, the allocator will still function, perhaps a bit slower.
 */

struct zonelist_cache {
        unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];          /* zone->nid */
        DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);      /* zone full? */
        unsigned long last_full_zap;            /* when last zap'd (jiffies) */
};
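
/*
 * Illustrative sketch (hypothetical helper, not part of this header): how
 * get_page_from_freelist() consults the cache above during its first pass.
 * The i-th zone of the zonelist is skipped when its 'fullzones' bit says it
 * recently failed to satisfy an allocation; being a hint, a stale bit only
 * costs a missed opportunity until the next once-per-second zap.
 */
static inline int example_zlc_zone_worth_trying(struct zonelist_cache *zlc, int i)
{
        return !test_bit(i, zlc->fullzones);
}
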
#else
#define MAX_ZONELISTS 1
struct zonelist_cache;
#endif

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
        struct zone *zone;      /* Pointer to actual zone */
        int zone_idx;           /* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * If zlcache_ptr is not NULL, then it is just the address of zlcache,
 * as explained above.  If zlcache_ptr is NULL, there is no zlcache.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()      - Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()  - Return the index of the zone for an entry
 * zonelist_node_idx()  - Return the index of the node for an entry
 */
struct zonelist {
        struct zonelist_cache *zlcache_ptr;                  // NULL or &zlcache
        struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
#ifdef CONFIG_NUMA
        struct zonelist_cache zlcache;                       // optional ...
#endif
};

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
struct node_active_region {
        unsigned long start_pfn;
        unsigned long end_pfn;
        int nid;
};
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
        struct zone node_zones[MAX_NR_ZONES];
        struct zonelist node_zonelists[MAX_ZONELISTS];
        int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
        struct page *node_mem_map;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
        struct page_cgroup *node_page_cgroup;
#endif
#endif
        struct bootmem_data *bdata;
#ifdef CONFIG_MEMORY_HOTPLUG
        /*
         * Must be held any time you expect node_start_pfn, node_present_pages
         * or node_spanned_pages to stay constant.  Holding this will also
         * guarantee that any pfn_valid() stays that way.
         *
         * Nests above zone->lock and zone->span_seqlock.
         */
        spinlock_t node_size_lock;
#endif
        unsigned long node_start_pfn;
        unsigned long node_present_pages; /* total number of physical pages */
        unsigned long node_spanned_pages; /* total size of physical page
                                             range, including holes */
        int node_id;
        wait_queue_head_t kswapd_wait;
        struct task_struct *kswapd;
        int kswapd_max_order;
} pg_data_t;

#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)    ((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)    pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)        pgdat_page_nr(NODE_DATA(nid),(pagenr))

#include <linux/memory_hotplug.h>

void get_zone_counts(unsigned long *active, unsigned long *inactive,
                        unsigned long *free);
void build_all_zonelists(void);
void wakeup_kswapd(struct zone *zone, int order);
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                int classzone_idx, int alloc_flags);
enum memmap_context {
        MEMMAP_EARLY,
        MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
                                     unsigned long size,
                                     enum memmap_context context);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)  ((zone) - (zone)->zone_pgdat->node_zones)

static inline int populated_zone(struct zone *zone)
{
        return (!!zone->present_pages);
}

extern int movable_zone;

static inline int zone_movable_is_highmem(void)
{
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
        return movable_zone == ZONE_HIGHMEM;
#else
        return 0;
#endif
}

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
        return (idx == ZONE_HIGHMEM ||
                (idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
        return 0;
#endif
}

static inline int is_normal_idx(enum zone_type idx)
{
        return (idx == ZONE_NORMAL);
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
        int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
        return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
               (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
                zone_movable_is_highmem());
#else
        return 0;
#endif
}

static inline int is_normal(struct zone *zone)
{
        return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}

static inline int is_dma32(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA32
        return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
#else
        return 0;
#endif
}

static inline int is_dma(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA
        return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
#else
        return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
struct file;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
                                        void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
                                        void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
                                        void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
                        struct file *, void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
                        struct file *, void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
                        struct file *, void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16      /* string buffer size */

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)          (&contig_page_data)
#define NODE_MEM_MAP(nid)       mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)                    \
        for (pgdat = first_online_pgdat();              \
             pgdat;                                     \
             pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)                             \
        for (zone = (first_online_pgdat())->node_zones; \
             zone;                                      \
             zone = next_zone(zone))
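
/*
 * Illustrative sketch (hypothetical helper, not part of this header): the
 * caller only declares the cursor variable and for_each_zone() fills it in,
 * visiting every zone of every online node.  Here it is used to add up the
 * present pages in the whole system.
 */
static inline unsigned long example_total_present_pages(void)
{
        struct zone *zone;
        unsigned long pages = 0;

        for_each_zone(zone)
                pages += zone->present_pages;
        return pages;
}
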
static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
        return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
        return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
#ifdef CONFIG_NUMA
        /* zone_to_nid not available in this context */
        return zoneref->zone->node;
#else
        return 0;
#endif /* CONFIG_NUMA */
}

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
struct zoneref *next_zones_zonelist(struct zoneref *z,
                                        enum zone_type highest_zoneidx,
                                        nodemask_t *nodes,
                                        struct zone **zone);

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
                                        enum zone_type highest_zoneidx,
                                        nodemask_t *nodes,
                                        struct zone **zone)
{
        return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
                                                                zone);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
        for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \
                zone;                                                   \
                z = next_zones_zonelist(++z, highidx, nodemask, &zone))

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
        for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
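
/*
 * Illustrative sketch (hypothetical helper, not part of this header): walking
 * a zonelist with the iterator above, visiting only zones at or below
 * 'high_zoneidx', and summing their free pages.  get_page_from_freelist()
 * uses the same pattern when it hunts for a zone to allocate from.
 */
static inline unsigned long example_zonelist_free_pages(struct zonelist *zonelist,
                                                enum zone_type high_zoneidx)
{
        struct zoneref *z;
        struct zone *zone;
        unsigned long free = 0;

        for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
                free += atomic_long_read(&zone->vm_stat[NR_FREE_PAGES]);
        return free;
}
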
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
        !defined(CONFIG_ARCH_POPULATES_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
        return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)         (0)
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT        #bits space required to store a section #
 *
 * PA_SECTION_SHIFT     physical address to/from section number
 * PFN_SECTION_SHIFT    pfn to/from section number
 */
#define SECTIONS_SHIFT          (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

#define PA_SECTION_SHIFT        (SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT       (SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS         (1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION       (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK       (~(PAGES_PER_SECTION-1))
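
/*
 * Worked example of the section arithmetic above (the numbers are an
 * assumption for illustration, not taken from any particular arch header):
 * with SECTION_SIZE_BITS == 27 and PAGE_SHIFT == 12, each section covers
 * 2^27 bytes = 128 MB, PFN_SECTION_SHIFT == 15, and PAGES_PER_SECTION ==
 * 2^15 == 32768, so pfn_to_section_nr(pfn) simply discards the low 15 bits
 * of the pfn.
 */
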
#define SECTION_BLOCKFLAGS_BITS \
        ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

struct page;
struct page_cgroup;
struct mem_section {
        /*
         * This is, logically, a pointer to an array of struct
         * pages.  However, it is stored with some other magic.
         * (see sparse.c::sparse_init_one_section())
         *
         * Additionally during early boot we encode node id of
         * the location of the section here to guide allocation.
         * (see sparse.c::memory_present())
         *
         * Making it a UL at least makes someone do a cast
         * before using it wrong.
         */
        unsigned long section_mem_map;

        /* See declaration of similar field in struct zone */
        unsigned long *pageblock_flags;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
        /*
         * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use
         * section. (see memcontrol.h/page_cgroup.h about this.)
         */
        struct page_cgroup *page_cgroup;
        unsigned long pad;
#endif
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT       (PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT       1
#endif

#define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS        (NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK       (SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
        if (!mem_section[SECTION_NR_TO_ROOT(nr)])
                return NULL;
        return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define SECTION_MARKED_PRESENT  (1UL<<0)
#define SECTION_HAS_MEM_MAP     (1UL<<1)
#define SECTION_MAP_LAST_BIT    (1UL<<2)
#define SECTION_MAP_MASK        (~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT       2

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
        unsigned long map = section->section_mem_map;
        map &= SECTION_MAP_MASK;
        return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
        return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
        return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
        return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
        return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
        return __nr_to_section(pfn_to_section_nr(pfn));
}
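
/*
 * Illustrative sketch (hypothetical helper, not part of this header): how a
 * pfn is translated to its struct page under classic SPARSEMEM.  Because
 * sparse.c encodes each section's mem_map with the section's start pfn
 * already subtracted, the decoded pointer can be indexed directly by the
 * full pfn; pfn_to_page() in asm-generic/memory_model.h does essentially
 * this.  The caller must know the pfn is valid (see pfn_valid() below).
 */
static inline struct page *example_pfn_to_page(unsigned long pfn)
{
        struct mem_section *sec = __pfn_to_section(pfn);

        return __section_mem_map_addr(sec) + pfn;
}
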
static inline int pfn_valid(unsigned long pfn)
{
        if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
                return 0;
        return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

static inline int pfn_present(unsigned long pfn)
{
        if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
                return 0;
        return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)                                                 \
({                                                                      \
        unsigned long __pfn_to_nid_pfn = (pfn);                         \
        page_to_nid(pfn_to_page(__pfn_to_nid_pfn));                     \
})
#else
#define pfn_to_nid(pfn)         (0)
#endif

#define early_pfn_valid(pfn)    pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()   do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
bool early_pfn_in_nid(unsigned long pfn, int nid);
#else
#define early_pfn_in_nid(pfn, nid)      (1)
#endif

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)    (1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */