/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
  11. #ifndef __MM_INTERNAL_H
  12. #define __MM_INTERNAL_H
  13. #include <linux/mm.h>
  14. void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
  15. unsigned long floor, unsigned long ceiling);
  16. extern void prep_compound_page(struct page *page, unsigned long order);
  17. static inline void set_page_count(struct page *page, int v)
  18. {
  19. atomic_set(&page->_count, v);
  20. }
  21. /*
  22. * Turn a non-refcounted page (->_count == 0) into refcounted with
  23. * a count of one.
  24. */
  25. static inline void set_page_refcounted(struct page *page)
  26. {
  27. VM_BUG_ON(PageTail(page));
  28. VM_BUG_ON(atomic_read(&page->_count));
  29. set_page_count(page, 1);
  30. }
  31. static inline void __put_page(struct page *page)
  32. {
  33. atomic_dec(&page->_count);
  34. }
  35. extern void __free_pages_bootmem(struct page *page, unsigned int order);
  36. /*
  37. * function for dealing with page's order in buddy system.
  38. * zone->lock is already acquired when we use these.
  39. * So, we don't need atomic page->flags operations here.
  40. */
  41. static inline unsigned long page_order(struct page *page)
  42. {
  43. VM_BUG_ON(!PageBuddy(page));
  44. return page_private(page);
  45. }
  46. /*
  47. * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
  48. * so all functions starting at paging_init should be marked __init
  49. * in those cases. SPARSEMEM, however, allows for memory hotplug,
  50. * and alloc_bootmem_node is not used.
  51. */
  52. #ifdef CONFIG_SPARSEMEM
  53. #define __paginginit __meminit
  54. #else
  55. #define __paginginit __init
  56. #endif
  57. /* Memory initialisation debug and verification */
  58. enum mminit_level {
  59. MMINIT_WARNING,
  60. MMINIT_VERIFY,
  61. MMINIT_TRACE
  62. };
  63. #ifdef CONFIG_DEBUG_MEMORY_INIT
  64. extern int mminit_loglevel;
  65. #define mminit_dprintk(level, prefix, fmt, arg...) \
  66. do { \
  67. if (level < mminit_loglevel) { \
  68. printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
  69. printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
  70. } \
  71. } while (0)
  72. extern void mminit_verify_pageflags_layout(void);
  73. extern void mminit_verify_page_links(struct page *page,
  74. enum zone_type zone, unsigned long nid, unsigned long pfn);
  75. extern void mminit_verify_zonelist(void);
  76. #else
  77. static inline void mminit_dprintk(enum mminit_level level,
  78. const char *prefix, const char *fmt, ...)
  79. {
  80. }
  81. static inline void mminit_verify_pageflags_layout(void)
  82. {
  83. }
  84. static inline void mminit_verify_page_links(struct page *page,
  85. enum zone_type zone, unsigned long nid, unsigned long pfn)
  86. {
  87. }
  88. static inline void mminit_verify_zonelist(void)
  89. {
  90. }
  91. #endif /* CONFIG_DEBUG_MEMORY_INIT */
  92. /* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
  93. #if defined(CONFIG_SPARSEMEM)
  94. extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
  95. unsigned long *end_pfn);
  96. #else
  97. static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
  98. unsigned long *end_pfn)
  99. {
  100. }
  101. #endif /* CONFIG_SPARSEMEM */
  102. #endif