@@ -1097,6 +1097,32 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
 #define pfn_valid_within(pfn) (1)
 #endif
 
+#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
+/*
+ * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
+ * associated with it or not. In FLATMEM, it is expected that holes always
+ * have valid memmap as long as there are valid PFNs on either side of the
+ * hole. In SPARSEMEM, it is assumed that a valid section has a memmap for
+ * the entire section.
+ *
+ * However, ARM (and maybe other embedded architectures in the future)
+ * frees the memmap backing holes to save memory, on the assumption the
+ * memmap is never used. The page_zone linkages are then broken even
+ * though pfn_valid() returns true. A walker of the full memmap must then
+ * do this additional check to ensure the memmap it is looking at is sane
+ * by making sure the zone and PFN linkages are still valid. This is
+ * expensive, but walkers of the full memmap are extremely rare.
+ */
+int memmap_valid_within(unsigned long pfn,
+					struct page *page, struct zone *zone);
+#else
+static inline int memmap_valid_within(unsigned long pfn,
+					struct page *page, struct zone *zone)
+{
+	return 1;
+}
+#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
+
 #endif /* !__GENERATING_BOUNDS.H */
 #endif /* !__ASSEMBLY__ */
 #endif /* _LINUX_MMZONE_H */
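The hunk only declares memmap_valid_within() for the CONFIG_ARCH_HAS_HOLES_MEMORYMODEL case; the definition lives out of line. As a sketch of what that definition could look like (the body below is an illustration built from the comment's description, not quoted from this hunk), the check only has to confirm that the pfn-to-page and page-to-zone linkages still agree:

#include <linux/mm.h>
#include <linux/mmzone.h>

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * Sketch: return 1 if the memmap entry for @pfn is sane, 0 if the
 * backing memmap was freed and the linkages are stale.
 */
int memmap_valid_within(unsigned long pfn,
			struct page *page, struct zone *zone)
{
	/* A freed memmap leaves stale data: the page no longer maps back to @pfn */
	if (page_to_pfn(page) != pfn)
		return 0;

	/* ...and its page->flags zone encoding no longer resolves to @zone */
	if (page_zone(page) != zone)
		return 0;

	return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */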
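For context, a caller walking a zone's full PFN range would pair pfn_valid() with this check before trusting the page's zone linkage. The walker below is hypothetical; the helper name and loop are illustrative only:

/* Hypothetical walker: count pages in @zone, skipping memmap holes */
static unsigned long count_valid_pages(struct zone *zone)
{
	unsigned long pfn, end_pfn, count = 0;

	end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	for (pfn = zone->zone_start_pfn; pfn < end_pfn; pfn++) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);

		/* Watch for unexpected holes punched in the memmap */
		if (!memmap_valid_within(pfn, page, zone))
			continue;

		count++;
	}

	return count;
}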