@@ -114,6 +114,25 @@ static void pad_len_spaces(struct seq_file *m, int len)
 	seq_printf(m, "%*c", len, ' ');
 }
 
+/*
+ * Proportional Set Size (PSS): my share of RSS.
+ *
+ * PSS of a process is the count of pages it has in memory, where each
+ * page is divided by the number of processes sharing it.  So if a
+ * process has 1000 pages all to itself, and 1000 shared with one other
+ * process, its PSS will be 1500.
+ *
+ * To keep (accumulated) division errors low, we maintain pss as a
+ * 64-bit fixed-point counter, so (pss >> PSS_SHIFT) is the real byte
+ * count.
+ *
+ * A shift of 12 before division means (assuming 4K page size):
+ * 	- 1M 3-user-pages add up to 8KB errors;
+ * 	- supports mapcount up to 2^24, or 16M;
+ * 	- supports PSS up to 2^52 bytes, or 4PB.
+ */
+#define PSS_SHIFT 12
+
 struct mem_size_stats
 {
 	unsigned long resident;
@@ -122,6 +141,7 @@ struct mem_size_stats
 	unsigned long private_clean;
 	unsigned long private_dirty;
 	unsigned long referenced;
+	u64 pss;
 };
 
 struct pmd_walker {
@@ -195,6 +215,7 @@ static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats
 	seq_printf(m,
 		   "Size:           %8lu kB\n"
 		   "Rss:            %8lu kB\n"
+		   "Pss:            %8lu kB\n"
 		   "Shared_Clean:   %8lu kB\n"
 		   "Shared_Dirty:   %8lu kB\n"
 		   "Private_Clean:  %8lu kB\n"
@@ -202,6 +223,7 @@ static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats
 		   "Referenced:     %8lu kB\n",
 		   (vma->vm_end - vma->vm_start) >> 10,
 		   mss->resident >> 10,
+		   (unsigned long)(mss->pss >> (10 + PSS_SHIFT)),
 		   mss->shared_clean  >> 10,
 		   mss->shared_dirty  >> 10,
 		   mss->private_clean >> 10,
@@ -226,6 +248,7 @@ static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	pte_t *pte, ptent;
 	spinlock_t *ptl;
 	struct page *page;
+	int mapcount;
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -242,16 +265,19 @@ static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		/* Accumulate the size in pages that have been accessed. */
 		if (pte_young(ptent) || PageReferenced(page))
 			mss->referenced += PAGE_SIZE;
-		if (page_mapcount(page) >= 2) {
+		mapcount = page_mapcount(page);
+		if (mapcount >= 2) {
 			if (pte_dirty(ptent))
 				mss->shared_dirty += PAGE_SIZE;
 			else
 				mss->shared_clean += PAGE_SIZE;
+			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
 		} else {
 			if (pte_dirty(ptent))
 				mss->private_dirty += PAGE_SIZE;
 			else
 				mss->private_clean += PAGE_SIZE;
+			mss->pss += (PAGE_SIZE << PSS_SHIFT);
 		}
 	}
 	pte_unmap_unlock(pte - 1, ptl);
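
The fixed-point arithmetic above can be checked in isolation. Below is
a minimal userspace sketch (not part of the patch; PAGE_SIZE,
PSS_SHIFT and the page counts are assumed here to reproduce the
1000-private plus 1000-shared-by-two example from the comment):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL	/* assumes 4K pages, as the comment does */
#define PSS_SHIFT 12

int main(void)
{
	uint64_t pss = 0;
	int i;

	/* 1000 pages mapped by this process alone: each contributes a
	 * whole page (mapcount == 1 takes the else branch above). */
	for (i = 0; i < 1000; i++)
		pss += (uint64_t)PAGE_SIZE << PSS_SHIFT;

	/* 1000 pages shared with one other process: each contributes
	 * half a page (mapcount == 2). */
	for (i = 0; i < 1000; i++)
		pss += ((uint64_t)PAGE_SIZE << PSS_SHIFT) / 2;

	/* Drop the fixed-point shift and convert bytes to kB, exactly
	 * as show_map_internal() does. */
	printf("Pss: %8lu kB\n", (unsigned long)(pss >> (10 + PSS_SHIFT)));
	return 0;
}

This prints "Pss:     6000 kB", i.e. the 1500 pages (at 4K each) that
the comment predicts. The per-page truncation error is below one
fixed-point unit, so it takes 2^PSS_SHIFT pages to accumulate even a
single byte of error after the final shift.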