task_mmu.c
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE * sizeof(pte_t) * mm->nr_ptes) >> 10);
}
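
/*
 * Illustrative only: task_mem() renders the Vm* block of
 * /proc/<pid>/status. With hypothetical values it looks like:
 *
 *	VmPeak:	   12004 kB
 *	VmSize:	   11996 kB
 *	VmLck:	       0 kB
 *	VmHWM:	    1040 kB
 *	VmRSS:	    1040 kB
 *	VmData:	     316 kB
 *	VmStk:	      88 kB
 *	VmExe:	     564 kB
 *	VmLib:	    1848 kB
 *	VmPTE:	      20 kB
 */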

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	*shared = get_mm_counter(mm, file_rss);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, anon_rss);
	return mm->total_vm;
}
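
/*
 * All the counts above are in pages. Per proc(5), the caller formats
 * them as the seven space-separated fields of /proc/<pid>/statm:
 * size resident shared text lib data dt (lib and dt reported as 0).
 */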

static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */
	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	mm = mm_for_maps(priv->task);
	if (!mm)
		return NULL;
	down_read(&mm->mmap_sem);

	tail_vma = get_gate_vma(priv->task);
	priv->tail_vma = tail_vma;

	/* Start with last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	int flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	dev_t dev = 0;
	int len;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
			vma->vm_start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "\n");
	} else {
		const char *name = arch_vma_name(vma);
		if (!name) {
			if (mm) {
				if (vma->vm_start <= mm->start_brk &&
						vma->vm_end >= mm->brk) {
					name = "[heap]";
				} else if (vma->vm_start <= mm->start_stack &&
					   vma->vm_end >= mm->start_stack) {
					name = "[stack]";
				}
			} else {
				name = "[vdso]";
			}
		}
		if (name) {
			pad_len_spaces(m, len);
			seq_puts(m, name);
		}
	}
	seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	show_map_vma(m, vma);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
	return 0;
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static int maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
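
/*
 * Illustrative only: one /proc/<pid>/maps line as emitted by
 * show_map_vma() above, with hypothetical addresses, device and inode:
 *
 *	08048000-08056000 r-xp 00000000 03:01 12345      /bin/cat
 */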

/*
 * Proportional Set Size(PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep accumulated division errors low, we use a 64-bit fixed-point
 * pss counter: (pss >> PSS_SHIFT) is the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
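
/*
 * A worked example of the fixed-point arithmetic (a sketch assuming a
 * 4 KiB page size): a page shared by 3 processes contributes
 * (4096 << 12) / 3 = 5592405 to pss, and 5592405 >> 12 = 1365 bytes,
 * i.e. a third of the page with under a byte of rounding error.  A
 * private page contributes 4096 << 12 exactly, i.e. 4096 bytes.
 */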

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long swap;
	u64 pss;
};

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;
	int mapcount;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (is_swap_pte(ptent)) {
			mss->swap += PAGE_SIZE;
			continue;
		}

		if (!pte_present(ptent))
			continue;

		mss->resident += PAGE_SIZE;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Accumulate the size in pages that have been accessed. */
		if (pte_young(ptent) || PageReferenced(page))
			mss->referenced += PAGE_SIZE;
		mapcount = page_mapcount(page);
		if (mapcount >= 2) {
			if (pte_dirty(ptent))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
		} else {
			if (pte_dirty(ptent))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT);
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int show_smap(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean >> 10,
		   mss.shared_dirty >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.swap >> 10,
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
	return 0;
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

static int smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

const struct file_operations proc_smaps_operations = {
	.open		= smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
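
/*
 * Illustrative only: for each vma, show_smap() appends a block like
 * this (hypothetical values; here 40 kB is shared clean between two
 * users, so it counts half toward Pss) under the maps line:
 *
 *	Size:               56 kB
 *	Rss:                48 kB
 *	Pss:                28 kB
 *	Shared_Clean:       40 kB
 *	Shared_Dirty:        0 kB
 *	Private_Clean:       0 kB
 *	Private_Dirty:       8 kB
 *	Referenced:         48 kB
 *	Swap:                0 kB
 *	KernelPageSize:      4 kB
 *	MMUPageSize:         4 kB
 */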

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

#define CLEAR_REFS_ALL 1
#define CLEAR_REFS_ANON 2
#define CLEAR_REFS_MAPPED 3

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF], *end;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int type;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	type = simple_strtol(buffer, &end, 0);
	if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
		return -EINVAL;
	if (*end == '\n')
		end++;
	task = get_proc_task(file->f_path.dentry->d_inode);
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
		};
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			clear_refs_walk.private = vma;
			if (is_vm_hugetlb_page(vma))
				continue;
			/*
			 * Writing 1 to /proc/pid/clear_refs affects all pages.
			 *
			 * Writing 2 to /proc/pid/clear_refs only affects
			 * anonymous pages.
			 *
			 * Writing 3 to /proc/pid/clear_refs only affects file
			 * mapped pages.
			 */
			if (type == CLEAR_REFS_ANON && vma->vm_file)
				continue;
			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
				continue;
			walk_page_range(vma->vm_start, vma->vm_end,
					&clear_refs_walk);
		}
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);
	if (end - buffer == 0)
		return -EIO;
	return end - buffer;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
};
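
/*
 * Example use of the interface above, e.g. to estimate a task's
 * working set over a ten second window: clear the referenced bits,
 * let the task run, then read the Referenced: lines of smaps:
 *
 *	echo 1 > /proc/<pid>/clear_refs
 *	sleep 10
 *	grep Referenced /proc/<pid>/smaps
 */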

struct pagemapread {
	u64 __user *out, *end;
};

#define PM_ENTRY_BYTES      sizeof(u64)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)

#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER    1

static int add_to_pagemap(unsigned long addr, u64 pfn,
			  struct pagemapread *pm)
{
	if (put_user(pfn, pm->out))
		return -EFAULT;
	pm->out++;
	if (pm->out >= pm->end)
		return PM_END_OF_BUFFER;
	return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
		if (err)
			break;
	}
	return err;
}

static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
	swp_entry_t e = pte_to_swp_entry(pte);
	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}

static u64 pte_to_pagemap_entry(pte_t pte)
{
	u64 pme = 0;

	if (is_swap_pte(pte))
		pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
	else if (pte_present(pte))
		pme = PM_PFRAME(pte_pfn(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return pme;
}

static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	pte_t *pte;
	int err = 0;

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	for (; addr != end; addr += PAGE_SIZE) {
		u64 pfn = PM_NOT_PRESENT;

		/* check to see if we've left 'vma' behind
		 * and need a new, higher one */
		if (vma && (addr >= vma->vm_end))
			vma = find_vma(walk->mm, addr);

		/* check that 'vma' actually covers this address,
		 * and that it isn't a huge page vma */
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pfn = pte_to_pagemap_entry(*pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}

		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following (bit ranges per the PM_* macros above):
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    reserved for future use
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
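
/*
 * A minimal userspace sketch of the lookup described above (assuming a
 * 4 KiB page size and the bit layout documented here; fd is an open
 * /proc/<pid>/pagemap and vaddr the virtual address of interest):
 *
 *	uint64_t entry;
 *	off_t offset = (vaddr / 4096) * 8;
 *	if (pread(fd, &entry, 8, offset) == 8) {
 *		int present = (entry >> 63) & 1;
 *		uint64_t pfn = entry & ((1ULL << 55) - 1);
 *	}
 */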

static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	struct page **pages, *page;
	unsigned long uaddr, uend;
	struct mm_struct *mm;
	struct pagemapread pm;
	int pagecount;
	int ret = -ESRCH;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;

	if (!task)
		goto out;

	ret = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto out_task;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_task;

	ret = 0;
	if (!count)
		goto out_task;

	mm = get_task_mm(task);
	if (!mm)
		goto out_task;

	uaddr = (unsigned long)buf & PAGE_MASK;
	uend = (unsigned long)(buf + count);
	pagecount = (PAGE_ALIGN(uend) - uaddr) / PAGE_SIZE;
	ret = 0;
	if (pagecount == 0)
		goto out_mm;
	pages = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
	ret = -ENOMEM;
	if (!pages)
		goto out_mm;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, uaddr, pagecount,
			     1, 0, pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (ret < 0)
		goto out_free;

	if (ret != pagecount) {
		pagecount = ret;
		ret = -EFAULT;
		goto out_pages;
	}

	pm.out = (u64 __user *)buf;
	pm.end = (u64 __user *)(buf + count);

	pagemap_walk.pmd_entry = pagemap_pte_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = TASK_SIZE_OF(task);

	/* watch out for wraparound */
	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = walk_page_range(start_vaddr, end_vaddr, &pagemap_walk);
	if (ret == PM_END_OF_BUFFER)
		ret = 0;
	/* don't need mmap_sem for these, but this looks cleaner */
	*ppos += (char __user *)pm.out - buf;
	if (!ret)
		ret = (char __user *)pm.out - buf;

out_pages:
	for (; pagecount; pagecount--) {
		page = pages[pagecount-1];
		if (!PageReserved(page))
			SetPageDirty(page);
		page_cache_release(page);
	}
out_free:
	kfree(pages);
out_mm:
	mmput(mm);
out_task:
	put_task_struct(task);
out:
	return ret;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);

static const struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

const struct file_operations proc_numa_maps_operations = {
	.open		= numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif