@@ -249,13 +249,47 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 extern void arch_unmap_area(struct mm_struct *, unsigned long);
 extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 
+#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+/*
+ * The mm counters are not protected by its page_table_lock,
+ * so must be incremented atomically.
+ */
+#ifdef ATOMIC64_INIT
+#define set_mm_counter(mm, member, value) atomic64_set(&(mm)->_##member, value)
+#define get_mm_counter(mm, member) ((unsigned long)atomic64_read(&(mm)->_##member))
+#define add_mm_counter(mm, member, value) atomic64_add(value, &(mm)->_##member)
+#define inc_mm_counter(mm, member) atomic64_inc(&(mm)->_##member)
+#define dec_mm_counter(mm, member) atomic64_dec(&(mm)->_##member)
+typedef atomic64_t mm_counter_t;
+#else /* !ATOMIC64_INIT */
+/*
+ * The counters wrap back to 0 at 2^32 * PAGE_SIZE,
+ * that is, at 16TB if using 4kB page size.
+ */
+#define set_mm_counter(mm, member, value) atomic_set(&(mm)->_##member, value)
+#define get_mm_counter(mm, member) ((unsigned long)atomic_read(&(mm)->_##member))
+#define add_mm_counter(mm, member, value) atomic_add(value, &(mm)->_##member)
+#define inc_mm_counter(mm, member) atomic_inc(&(mm)->_##member)
+#define dec_mm_counter(mm, member) atomic_dec(&(mm)->_##member)
+typedef atomic_t mm_counter_t;
+#endif /* !ATOMIC64_INIT */
+
+#else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+/*
+ * The mm counters are protected by its page_table_lock,
+ * so can be incremented directly.
+ */
 #define set_mm_counter(mm, member, value) (mm)->_##member = (value)
 #define get_mm_counter(mm, member) ((mm)->_##member)
 #define add_mm_counter(mm, member, value) (mm)->_##member += (value)
 #define inc_mm_counter(mm, member) (mm)->_##member++
 #define dec_mm_counter(mm, member) (mm)->_##member--
-#define get_mm_rss(mm) ((mm)->_file_rss + (mm)->_anon_rss)
+typedef unsigned long mm_counter_t;
+
+#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+
+#define get_mm_rss(mm) \
+	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
 #define update_hiwater_rss(mm)	do {			\
 	unsigned long _rss = get_mm_rss(mm);		\
 	if ((mm)->hiwater_rss < _rss)			\
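
For illustration, a minimal sketch of a caller, using a hypothetical helper name that is not part of this patch: the same accounting code compiles against all three definitions of mm_counter_t, because the macros hide the representation.

	/*
	 * Hypothetical caller, for illustration only: account one new
	 * anonymous page and one unmapped file page.  Each macro expands
	 * to atomic64_*(), atomic_*() or plain arithmetic on the
	 * _anon_rss and _file_rss fields, depending on which branch of
	 * the #if above was configured.
	 */
	static inline void example_rss_accounting(struct mm_struct *mm)
	{
		inc_mm_counter(mm, anon_rss);
		dec_mm_counter(mm, file_rss);
	}
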
@@ -266,8 +300,6 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 	(mm)->hiwater_vm = (mm)->total_vm;	\
 } while (0)
 
-typedef unsigned long mm_counter_t;
-
 struct mm_struct {
 	struct vm_area_struct * mmap;		/* list of VMAs */
 	struct rb_root mm_rb;
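
As a sketch of why the hiwater macros are shaped this way (the call site below is hypothetical, not from this patch): the high-water mark must be sampled before total_vm shrinks, or the peak is lost.

	/*
	 * Hypothetical call site, for illustration only: refresh the
	 * high-water mark before total_vm is reduced, so the peak
	 * value survives the unmap.
	 */
	static void example_shrink_vm(struct mm_struct *mm, unsigned long pages)
	{
		update_hiwater_vm(mm);		/* tail of its body shown above */
		mm->total_vm -= pages;
	}
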
@@ -291,7 +323,9 @@ struct mm_struct {
 					 * by mmlist_lock
 					 */
-	/* Special counters protected by the page_table_lock */
+	/* Special counters, in some configurations protected by the
+	 * page_table_lock, in other configurations by being atomic.
+	 */
 	mm_counter_t _file_rss;
 	mm_counter_t _anon_rss;
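
Finally, a sketch of zeroing the counters of a fresh mm, again with a hypothetical helper name: once the fields may be atomic, any real call site has to go through set_mm_counter() rather than assigning to them directly.

	/*
	 * Hypothetical helper, for illustration only.  set_mm_counter()
	 * is the one portable way to write these fields, whether they
	 * are atomic64_t, atomic_t or unsigned long.
	 */
	static void example_init_mm_counters(struct mm_struct *mm)
	{
		set_mm_counter(mm, file_rss, 0);
		set_mm_counter(mm, anon_rss, 0);
	}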