task_mmu.c
  1. #include <linux/mm.h>
  2. #include <linux/hugetlb.h>
  3. #include <linux/huge_mm.h>
  4. #include <linux/mount.h>
  5. #include <linux/seq_file.h>
  6. #include <linux/highmem.h>
  7. #include <linux/ptrace.h>
  8. #include <linux/slab.h>
  9. #include <linux/pagemap.h>
  10. #include <linux/mempolicy.h>
  11. #include <linux/rmap.h>
  12. #include <linux/swap.h>
  13. #include <linux/swapops.h>
  14. #include <linux/mmu_notifier.h>
  15. #include <asm/elf.h>
  16. #include <asm/uaccess.h>
  17. #include <asm/tlbflush.h>
  18. #include "internal.h"
  19. void task_mem(struct seq_file *m, struct mm_struct *mm)
  20. {
  21. unsigned long data, text, lib, swap;
  22. unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
  23. /*
  24. * Note: to minimize their overhead, mm maintains hiwater_vm and
  25. * hiwater_rss only when about to *lower* total_vm or rss. Any
  26. * collector of these hiwater stats must therefore get total_vm
  27. * and rss too, which will usually be the higher. Barriers? not
  28. * worth the effort, such snapshots can always be inconsistent.
  29. */
  30. hiwater_vm = total_vm = mm->total_vm;
  31. if (hiwater_vm < mm->hiwater_vm)
  32. hiwater_vm = mm->hiwater_vm;
  33. hiwater_rss = total_rss = get_mm_rss(mm);
  34. if (hiwater_rss < mm->hiwater_rss)
  35. hiwater_rss = mm->hiwater_rss;
  36. data = mm->total_vm - mm->shared_vm - mm->stack_vm;
  37. text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
  38. lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
  39. swap = get_mm_counter(mm, MM_SWAPENTS);
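/*
 * Note: every "x << (PAGE_SHIFT-10)" below converts a page count to kB,
 * i.e. x * PAGE_SIZE / 1024.
 */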
  40. seq_printf(m,
  41. "VmPeak:\t%8lu kB\n"
  42. "VmSize:\t%8lu kB\n"
  43. "VmLck:\t%8lu kB\n"
  44. "VmPin:\t%8lu kB\n"
  45. "VmHWM:\t%8lu kB\n"
  46. "VmRSS:\t%8lu kB\n"
  47. "VmData:\t%8lu kB\n"
  48. "VmStk:\t%8lu kB\n"
  49. "VmExe:\t%8lu kB\n"
  50. "VmLib:\t%8lu kB\n"
  51. "VmPTE:\t%8lu kB\n"
  52. "VmSwap:\t%8lu kB\n",
  53. hiwater_vm << (PAGE_SHIFT-10),
  54. total_vm << (PAGE_SHIFT-10),
  55. mm->locked_vm << (PAGE_SHIFT-10),
  56. mm->pinned_vm << (PAGE_SHIFT-10),
  57. hiwater_rss << (PAGE_SHIFT-10),
  58. total_rss << (PAGE_SHIFT-10),
  59. data << (PAGE_SHIFT-10),
  60. mm->stack_vm << (PAGE_SHIFT-10), text, lib,
  61. (PTRS_PER_PTE * sizeof(pte_t) *
  62. atomic_long_read(&mm->nr_ptes)) >> 10,
  63. swap << (PAGE_SHIFT-10));
  64. }
  65. unsigned long task_vsize(struct mm_struct *mm)
  66. {
  67. return PAGE_SIZE * mm->total_vm;
  68. }
  69. unsigned long task_statm(struct mm_struct *mm,
  70. unsigned long *shared, unsigned long *text,
  71. unsigned long *data, unsigned long *resident)
  72. {
  73. *shared = get_mm_counter(mm, MM_FILEPAGES);
  74. *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
  75. >> PAGE_SHIFT;
  76. *data = mm->total_vm - mm->shared_vm;
  77. *resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
  78. return mm->total_vm;
  79. }
  80. #ifdef CONFIG_NUMA
  81. /*
  82. * These functions are for numa_maps but called in generic **maps seq_file
  83. * ->start(), ->stop() ops.
  84. *
  85. * numa_maps scans all vmas under mmap_sem and checks their mempolicy.
  86. * Each mempolicy object is controlled by reference counting. The problem here
  87. * is how to avoid accessing a dead mempolicy object.
  88. *
  89. * Because we're holding mmap_sem while reading the seq_file, it's safe to
  90. * access each vma's mempolicy: no vma will drop its reference to a mempolicy.
  91. *
  92. * A task's mempolicy (task->mempolicy) has different behavior. task->mempolicy
  93. * is set and replaced under mmap_sem but unrefed and cleared under task_lock().
  94. * So, without task_lock(), we cannot trust get_vma_policy() because we cannot
  95. * guarantee the task never exits under us. But taking task_lock() around
  96. * get_vma_policy() causes a lock order problem.
  97. *
  98. * To access task->mempolicy without a lock, we take a reference on the
  99. * object pointed to by task->mempolicy and remember it. This will guarantee
  100. * that task->mempolicy points to an alive object or NULL in numa_maps accesses.
  101. */
  102. static void hold_task_mempolicy(struct proc_maps_private *priv)
  103. {
  104. struct task_struct *task = priv->task;
  105. task_lock(task);
  106. priv->task_mempolicy = task->mempolicy;
  107. mpol_get(priv->task_mempolicy);
  108. task_unlock(task);
  109. }
  110. static void release_task_mempolicy(struct proc_maps_private *priv)
  111. {
  112. mpol_put(priv->task_mempolicy);
  113. }
  114. #else
  115. static void hold_task_mempolicy(struct proc_maps_private *priv)
  116. {
  117. }
  118. static void release_task_mempolicy(struct proc_maps_private *priv)
  119. {
  120. }
  121. #endif
  122. static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
  123. {
  124. if (vma && vma != priv->tail_vma) {
  125. struct mm_struct *mm = vma->vm_mm;
  126. release_task_mempolicy(priv);
  127. up_read(&mm->mmap_sem);
  128. mmput(mm);
  129. }
  130. }
  131. static void *m_start(struct seq_file *m, loff_t *pos)
  132. {
  133. struct proc_maps_private *priv = m->private;
  134. unsigned long last_addr = m->version;
  135. struct mm_struct *mm;
  136. struct vm_area_struct *vma, *tail_vma = NULL;
  137. loff_t l = *pos;
  138. /* Clear the per syscall fields in priv */
  139. priv->task = NULL;
  140. priv->tail_vma = NULL;
  141. /*
  142. * We remember last_addr rather than next_addr so that we hit the
  143. * mmap_cache most of the time. We have zero last_addr at
  144. * the beginning and also after lseek. We will have -1 last_addr
  145. * after the end of the vmas.
  146. */
  147. if (last_addr == -1UL)
  148. return NULL;
  149. priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
  150. if (!priv->task)
  151. return ERR_PTR(-ESRCH);
  152. mm = mm_access(priv->task, PTRACE_MODE_READ);
  153. if (!mm || IS_ERR(mm))
  154. return mm;
  155. down_read(&mm->mmap_sem);
  156. tail_vma = get_gate_vma(priv->task->mm);
  157. priv->tail_vma = tail_vma;
  158. hold_task_mempolicy(priv);
  159. /* Start with last addr hint */
  160. vma = find_vma(mm, last_addr);
  161. if (last_addr && vma) {
  162. vma = vma->vm_next;
  163. goto out;
  164. }
  165. /*
  166. * Check that the vma index is within range and do a
  167. * sequential scan until m_index.
  168. */
  169. vma = NULL;
  170. if ((unsigned long)l < mm->map_count) {
  171. vma = mm->mmap;
  172. while (l-- && vma)
  173. vma = vma->vm_next;
  174. goto out;
  175. }
  176. if (l != mm->map_count)
  177. tail_vma = NULL; /* After gate vma */
  178. out:
  179. if (vma)
  180. return vma;
  181. release_task_mempolicy(priv);
  182. /* End of vmas has been reached */
  183. m->version = (tail_vma != NULL)? 0: -1UL;
  184. up_read(&mm->mmap_sem);
  185. mmput(mm);
  186. return tail_vma;
  187. }
  188. static void *m_next(struct seq_file *m, void *v, loff_t *pos)
  189. {
  190. struct proc_maps_private *priv = m->private;
  191. struct vm_area_struct *vma = v;
  192. struct vm_area_struct *tail_vma = priv->tail_vma;
  193. (*pos)++;
  194. if (vma && (vma != tail_vma) && vma->vm_next)
  195. return vma->vm_next;
  196. vma_stop(priv, vma);
  197. return (vma != tail_vma)? tail_vma: NULL;
  198. }
  199. static void m_stop(struct seq_file *m, void *v)
  200. {
  201. struct proc_maps_private *priv = m->private;
  202. struct vm_area_struct *vma = v;
  203. if (!IS_ERR(vma))
  204. vma_stop(priv, vma);
  205. if (priv->task)
  206. put_task_struct(priv->task);
  207. }
  208. static int do_maps_open(struct inode *inode, struct file *file,
  209. const struct seq_operations *ops)
  210. {
  211. struct proc_maps_private *priv;
  212. int ret = -ENOMEM;
  213. priv = kzalloc(sizeof(*priv), GFP_KERNEL);
  214. if (priv) {
  215. priv->pid = proc_pid(inode);
  216. ret = seq_open(file, ops);
  217. if (!ret) {
  218. struct seq_file *m = file->private_data;
  219. m->private = priv;
  220. } else {
  221. kfree(priv);
  222. }
  223. }
  224. return ret;
  225. }
  226. static void
  227. show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
  228. {
  229. struct mm_struct *mm = vma->vm_mm;
  230. struct file *file = vma->vm_file;
  231. struct proc_maps_private *priv = m->private;
  232. struct task_struct *task = priv->task;
  233. vm_flags_t flags = vma->vm_flags;
  234. unsigned long ino = 0;
  235. unsigned long long pgoff = 0;
  236. unsigned long start, end;
  237. dev_t dev = 0;
  238. const char *name = NULL;
  239. if (file) {
  240. struct inode *inode = file_inode(vma->vm_file);
  241. dev = inode->i_sb->s_dev;
  242. ino = inode->i_ino;
  243. pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
  244. }
  245. /* We don't show the stack guard page in /proc/maps */
  246. start = vma->vm_start;
  247. if (stack_guard_page_start(vma, start))
  248. start += PAGE_SIZE;
  249. end = vma->vm_end;
  250. if (stack_guard_page_end(vma, end))
  251. end -= PAGE_SIZE;
  252. seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
  253. seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
  254. start,
  255. end,
  256. flags & VM_READ ? 'r' : '-',
  257. flags & VM_WRITE ? 'w' : '-',
  258. flags & VM_EXEC ? 'x' : '-',
  259. flags & VM_MAYSHARE ? 's' : 'p',
  260. pgoff,
  261. MAJOR(dev), MINOR(dev), ino);
  262. /*
  263. * Print the dentry name for named mappings, and a
  264. * special [heap] marker for the heap:
  265. */
  266. if (file) {
  267. seq_pad(m, ' ');
  268. seq_path(m, &file->f_path, "\n");
  269. goto done;
  270. }
  271. name = arch_vma_name(vma);
  272. if (!name) {
  273. pid_t tid;
  274. if (!mm) {
  275. name = "[vdso]";
  276. goto done;
  277. }
  278. if (vma->vm_start <= mm->brk &&
  279. vma->vm_end >= mm->start_brk) {
  280. name = "[heap]";
  281. goto done;
  282. }
  283. tid = vm_is_stack(task, vma, is_pid);
  284. if (tid != 0) {
  285. /*
  286. * Thread stack in /proc/PID/task/TID/maps or
  287. * the main process stack.
  288. */
  289. if (!is_pid || (vma->vm_start <= mm->start_stack &&
  290. vma->vm_end >= mm->start_stack)) {
  291. name = "[stack]";
  292. } else {
  293. /* Thread stack in /proc/PID/maps */
  294. seq_pad(m, ' ');
  295. seq_printf(m, "[stack:%d]", tid);
  296. }
  297. }
  298. }
  299. done:
  300. if (name) {
  301. seq_pad(m, ' ');
  302. seq_puts(m, name);
  303. }
  304. seq_putc(m, '\n');
  305. }
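/*
 * The output produced above is one /proc/<pid>/maps line per vma, for
 * example (values purely illustrative):
 *
 *   00400000-0040c000 r-xp 00000000 08:01 1048602                  /bin/cat
 *
 * i.e. start-end, rwxp/s flags, file offset, device major:minor, inode,
 * and the pathname or [heap]/[stack]/[vdso] marker.
 */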
  306. static int show_map(struct seq_file *m, void *v, int is_pid)
  307. {
  308. struct vm_area_struct *vma = v;
  309. struct proc_maps_private *priv = m->private;
  310. struct task_struct *task = priv->task;
  311. show_map_vma(m, vma, is_pid);
  312. if (m->count < m->size) /* vma is copied successfully */
  313. m->version = (vma != get_gate_vma(task->mm))
  314. ? vma->vm_start : 0;
  315. return 0;
  316. }
  317. static int show_pid_map(struct seq_file *m, void *v)
  318. {
  319. return show_map(m, v, 1);
  320. }
  321. static int show_tid_map(struct seq_file *m, void *v)
  322. {
  323. return show_map(m, v, 0);
  324. }
  325. static const struct seq_operations proc_pid_maps_op = {
  326. .start = m_start,
  327. .next = m_next,
  328. .stop = m_stop,
  329. .show = show_pid_map
  330. };
  331. static const struct seq_operations proc_tid_maps_op = {
  332. .start = m_start,
  333. .next = m_next,
  334. .stop = m_stop,
  335. .show = show_tid_map
  336. };
  337. static int pid_maps_open(struct inode *inode, struct file *file)
  338. {
  339. return do_maps_open(inode, file, &proc_pid_maps_op);
  340. }
  341. static int tid_maps_open(struct inode *inode, struct file *file)
  342. {
  343. return do_maps_open(inode, file, &proc_tid_maps_op);
  344. }
  345. const struct file_operations proc_pid_maps_operations = {
  346. .open = pid_maps_open,
  347. .read = seq_read,
  348. .llseek = seq_lseek,
  349. .release = seq_release_private,
  350. };
  351. const struct file_operations proc_tid_maps_operations = {
  352. .open = tid_maps_open,
  353. .read = seq_read,
  354. .llseek = seq_lseek,
  355. .release = seq_release_private,
  356. };
  357. /*
  358. * Proportional Set Size (PSS): my share of RSS.
  359. *
  360. * PSS of a process is the count of pages it has in memory, where each
  361. * page is divided by the number of processes sharing it. So if a
  362. * process has 1000 pages all to itself, and 1000 shared with one other
  363. * process, its PSS will be 1500.
  364. *
  365. * To keep (accumulated) division errors low, we adopt a 64-bit
  366. * fixed-point pss counter, so (pss >>
  367. * PSS_SHIFT) is the real byte count.
  368. *
  369. * A shift of 12 before division means (assuming 4K page size):
  370. * - 1M 3-user-pages add up to 8KB errors;
  371. * - supports mapcount up to 2^24, or 16M;
  372. * - supports PSS up to 2^52 bytes, or 4PB.
  373. */
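/*
 * Worked example of the fixed-point accounting above (illustrative):
 * a 4096-byte page shared by 3 processes contributes
 *
 *   (4096 << PSS_SHIFT) / 3 == 5592405
 *
 * to pss; converting back with ">> PSS_SHIFT" yields 1365 bytes, versus
 * the exact share of 1365.33 bytes, so the per-page rounding error stays
 * well below one byte.
 */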
  374. #define PSS_SHIFT 12
  375. #ifdef CONFIG_PROC_PAGE_MONITOR
  376. struct mem_size_stats {
  377. struct vm_area_struct *vma;
  378. unsigned long resident;
  379. unsigned long shared_clean;
  380. unsigned long shared_dirty;
  381. unsigned long private_clean;
  382. unsigned long private_dirty;
  383. unsigned long referenced;
  384. unsigned long anonymous;
  385. unsigned long anonymous_thp;
  386. unsigned long swap;
  387. unsigned long nonlinear;
  388. u64 pss;
  389. };
  390. static void smaps_pte_entry(pte_t ptent, unsigned long addr,
  391. unsigned long ptent_size, struct mm_walk *walk)
  392. {
  393. struct mem_size_stats *mss = walk->private;
  394. struct vm_area_struct *vma = mss->vma;
  395. pgoff_t pgoff = linear_page_index(vma, addr);
  396. struct page *page = NULL;
  397. int mapcount;
  398. if (pte_present(ptent)) {
  399. page = vm_normal_page(vma, addr, ptent);
  400. } else if (is_swap_pte(ptent)) {
  401. swp_entry_t swpent = pte_to_swp_entry(ptent);
  402. if (!non_swap_entry(swpent))
  403. mss->swap += ptent_size;
  404. else if (is_migration_entry(swpent))
  405. page = migration_entry_to_page(swpent);
  406. } else if (pte_file(ptent)) {
  407. if (pte_to_pgoff(ptent) != pgoff)
  408. mss->nonlinear += ptent_size;
  409. }
  410. if (!page)
  411. return;
  412. if (PageAnon(page))
  413. mss->anonymous += ptent_size;
  414. if (page->index != pgoff)
  415. mss->nonlinear += ptent_size;
  416. mss->resident += ptent_size;
  417. /* Accumulate the size in pages that have been accessed. */
  418. if (pte_young(ptent) || PageReferenced(page))
  419. mss->referenced += ptent_size;
  420. mapcount = page_mapcount(page);
  421. if (mapcount >= 2) {
  422. if (pte_dirty(ptent) || PageDirty(page))
  423. mss->shared_dirty += ptent_size;
  424. else
  425. mss->shared_clean += ptent_size;
  426. mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
  427. } else {
  428. if (pte_dirty(ptent) || PageDirty(page))
  429. mss->private_dirty += ptent_size;
  430. else
  431. mss->private_clean += ptent_size;
  432. mss->pss += (ptent_size << PSS_SHIFT);
  433. }
  434. }
  435. static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
  436. struct mm_walk *walk)
  437. {
  438. struct mem_size_stats *mss = walk->private;
  439. struct vm_area_struct *vma = mss->vma;
  440. pte_t *pte;
  441. spinlock_t *ptl;
  442. if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
  443. smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
  444. spin_unlock(ptl);
  445. mss->anonymous_thp += HPAGE_PMD_SIZE;
  446. return 0;
  447. }
  448. if (pmd_trans_unstable(pmd))
  449. return 0;
  450. /*
  451. * The mmap_sem held all the way back in m_start() is what
  452. * keeps khugepaged out of here and from collapsing things
  453. * in here.
  454. */
  455. pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
  456. for (; addr != end; pte++, addr += PAGE_SIZE)
  457. smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
  458. pte_unmap_unlock(pte - 1, ptl);
  459. cond_resched();
  460. return 0;
  461. }
  462. static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
  463. {
  464. /*
  465. * Don't forget to update Documentation/ on changes.
  466. */
  467. static const char mnemonics[BITS_PER_LONG][2] = {
  468. /*
  469. * In case we meet a flag we don't know about.
  470. */
  471. [0 ... (BITS_PER_LONG-1)] = "??",
  472. [ilog2(VM_READ)] = "rd",
  473. [ilog2(VM_WRITE)] = "wr",
  474. [ilog2(VM_EXEC)] = "ex",
  475. [ilog2(VM_SHARED)] = "sh",
  476. [ilog2(VM_MAYREAD)] = "mr",
  477. [ilog2(VM_MAYWRITE)] = "mw",
  478. [ilog2(VM_MAYEXEC)] = "me",
  479. [ilog2(VM_MAYSHARE)] = "ms",
  480. [ilog2(VM_GROWSDOWN)] = "gd",
  481. [ilog2(VM_PFNMAP)] = "pf",
  482. [ilog2(VM_DENYWRITE)] = "dw",
  483. [ilog2(VM_LOCKED)] = "lo",
  484. [ilog2(VM_IO)] = "io",
  485. [ilog2(VM_SEQ_READ)] = "sr",
  486. [ilog2(VM_RAND_READ)] = "rr",
  487. [ilog2(VM_DONTCOPY)] = "dc",
  488. [ilog2(VM_DONTEXPAND)] = "de",
  489. [ilog2(VM_ACCOUNT)] = "ac",
  490. [ilog2(VM_NORESERVE)] = "nr",
  491. [ilog2(VM_HUGETLB)] = "ht",
  492. [ilog2(VM_NONLINEAR)] = "nl",
  493. [ilog2(VM_ARCH_1)] = "ar",
  494. [ilog2(VM_DONTDUMP)] = "dd",
  495. #ifdef CONFIG_MEM_SOFT_DIRTY
  496. [ilog2(VM_SOFTDIRTY)] = "sd",
  497. #endif
  498. [ilog2(VM_MIXEDMAP)] = "mm",
  499. [ilog2(VM_HUGEPAGE)] = "hg",
  500. [ilog2(VM_NOHUGEPAGE)] = "nh",
  501. [ilog2(VM_MERGEABLE)] = "mg",
  502. };
  503. size_t i;
  504. seq_puts(m, "VmFlags: ");
  505. for (i = 0; i < BITS_PER_LONG; i++) {
  506. if (vma->vm_flags & (1UL << i)) {
  507. seq_printf(m, "%c%c ",
  508. mnemonics[i][0], mnemonics[i][1]);
  509. }
  510. }
  511. seq_putc(m, '\n');
  512. }
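/*
 * Example VmFlags line emitted above for a typical read-only executable
 * file mapping (illustrative): "VmFlags: rd ex mr mw me dw "
 */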
  513. static int show_smap(struct seq_file *m, void *v, int is_pid)
  514. {
  515. struct proc_maps_private *priv = m->private;
  516. struct task_struct *task = priv->task;
  517. struct vm_area_struct *vma = v;
  518. struct mem_size_stats mss;
  519. struct mm_walk smaps_walk = {
  520. .pmd_entry = smaps_pte_range,
  521. .mm = vma->vm_mm,
  522. .private = &mss,
  523. };
  524. memset(&mss, 0, sizeof mss);
  525. mss.vma = vma;
  526. /* mmap_sem is held in m_start */
  527. if (vma->vm_mm && !is_vm_hugetlb_page(vma))
  528. walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
  529. show_map_vma(m, vma, is_pid);
  530. seq_printf(m,
  531. "Size: %8lu kB\n"
  532. "Rss: %8lu kB\n"
  533. "Pss: %8lu kB\n"
  534. "Shared_Clean: %8lu kB\n"
  535. "Shared_Dirty: %8lu kB\n"
  536. "Private_Clean: %8lu kB\n"
  537. "Private_Dirty: %8lu kB\n"
  538. "Referenced: %8lu kB\n"
  539. "Anonymous: %8lu kB\n"
  540. "AnonHugePages: %8lu kB\n"
  541. "Swap: %8lu kB\n"
  542. "KernelPageSize: %8lu kB\n"
  543. "MMUPageSize: %8lu kB\n"
  544. "Locked: %8lu kB\n",
  545. (vma->vm_end - vma->vm_start) >> 10,
  546. mss.resident >> 10,
  547. (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
  548. mss.shared_clean >> 10,
  549. mss.shared_dirty >> 10,
  550. mss.private_clean >> 10,
  551. mss.private_dirty >> 10,
  552. mss.referenced >> 10,
  553. mss.anonymous >> 10,
  554. mss.anonymous_thp >> 10,
  555. mss.swap >> 10,
  556. vma_kernel_pagesize(vma) >> 10,
  557. vma_mmu_pagesize(vma) >> 10,
  558. (vma->vm_flags & VM_LOCKED) ?
  559. (unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
  560. if (vma->vm_flags & VM_NONLINEAR)
  561. seq_printf(m, "Nonlinear: %8lu kB\n",
  562. mss.nonlinear >> 10);
  563. show_smap_vma_flags(m, vma);
  564. if (m->count < m->size) /* vma is copied successfully */
  565. m->version = (vma != get_gate_vma(task->mm))
  566. ? vma->vm_start : 0;
  567. return 0;
  568. }
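/*
 * A /proc/<pid>/smaps entry therefore looks like (values illustrative):
 *
 *   00400000-0040c000 r-xp 00000000 08:01 1048602     /bin/cat
 *   Size:                 48 kB
 *   Rss:                  20 kB
 *   Pss:                  12 kB
 *   ...
 *   VmFlags: rd ex mr mw me dw
 */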
  569. static int show_pid_smap(struct seq_file *m, void *v)
  570. {
  571. return show_smap(m, v, 1);
  572. }
  573. static int show_tid_smap(struct seq_file *m, void *v)
  574. {
  575. return show_smap(m, v, 0);
  576. }
  577. static const struct seq_operations proc_pid_smaps_op = {
  578. .start = m_start,
  579. .next = m_next,
  580. .stop = m_stop,
  581. .show = show_pid_smap
  582. };
  583. static const struct seq_operations proc_tid_smaps_op = {
  584. .start = m_start,
  585. .next = m_next,
  586. .stop = m_stop,
  587. .show = show_tid_smap
  588. };
  589. static int pid_smaps_open(struct inode *inode, struct file *file)
  590. {
  591. return do_maps_open(inode, file, &proc_pid_smaps_op);
  592. }
  593. static int tid_smaps_open(struct inode *inode, struct file *file)
  594. {
  595. return do_maps_open(inode, file, &proc_tid_smaps_op);
  596. }
  597. const struct file_operations proc_pid_smaps_operations = {
  598. .open = pid_smaps_open,
  599. .read = seq_read,
  600. .llseek = seq_lseek,
  601. .release = seq_release_private,
  602. };
  603. const struct file_operations proc_tid_smaps_operations = {
  604. .open = tid_smaps_open,
  605. .read = seq_read,
  606. .llseek = seq_lseek,
  607. .release = seq_release_private,
  608. };
  609. /*
  610. * We do not want to have constant page-shift bits sitting in
  611. * pagemap entries and are about to reuse them some time soon.
  612. *
  613. * Here's the "migration strategy":
  614. * 1. when the system boots these bits remain what they are,
  615. * but a warning about the future change is printed in the log;
  616. * 2. once anyone clears soft-dirty bits via the clear_refs file,
  617. * this flag is set to denote that the user is aware of the
  618. * new API and those page-shift bits change their meaning.
  619. * The respective warning is printed in dmesg;
  620. * 3. In a couple of releases we will remove all the mentions
  621. * of page-shift in pagemap entries.
  622. */
  623. static bool soft_dirty_cleared __read_mostly;
  624. enum clear_refs_types {
  625. CLEAR_REFS_ALL = 1,
  626. CLEAR_REFS_ANON,
  627. CLEAR_REFS_MAPPED,
  628. CLEAR_REFS_SOFT_DIRTY,
  629. CLEAR_REFS_LAST,
  630. };
  631. struct clear_refs_private {
  632. struct vm_area_struct *vma;
  633. enum clear_refs_types type;
  634. };
  635. static inline void clear_soft_dirty(struct vm_area_struct *vma,
  636. unsigned long addr, pte_t *pte)
  637. {
  638. #ifdef CONFIG_MEM_SOFT_DIRTY
  639. /*
  640. * The soft-dirty tracker uses #PF-s to catch writes
  641. * to pages, so write-protect the pte as well. See the
  642. * Documentation/vm/soft-dirty.txt for full description
  643. * of how soft-dirty works.
  644. */
  645. pte_t ptent = *pte;
  646. if (pte_present(ptent)) {
  647. ptent = pte_wrprotect(ptent);
  648. ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
  649. } else if (is_swap_pte(ptent)) {
  650. ptent = pte_swp_clear_soft_dirty(ptent);
  651. } else if (pte_file(ptent)) {
  652. ptent = pte_file_clear_soft_dirty(ptent);
  653. }
  654. if (vma->vm_flags & VM_SOFTDIRTY)
  655. vma->vm_flags &= ~VM_SOFTDIRTY;
  656. set_pte_at(vma->vm_mm, addr, pte, ptent);
  657. #endif
  658. }
  659. static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
  660. unsigned long end, struct mm_walk *walk)
  661. {
  662. struct clear_refs_private *cp = walk->private;
  663. struct vm_area_struct *vma = cp->vma;
  664. pte_t *pte, ptent;
  665. spinlock_t *ptl;
  666. struct page *page;
  667. split_huge_page_pmd(vma, addr, pmd);
  668. if (pmd_trans_unstable(pmd))
  669. return 0;
  670. pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
  671. for (; addr != end; pte++, addr += PAGE_SIZE) {
  672. ptent = *pte;
  673. if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
  674. clear_soft_dirty(vma, addr, pte);
  675. continue;
  676. }
  677. if (!pte_present(ptent))
  678. continue;
  679. page = vm_normal_page(vma, addr, ptent);
  680. if (!page)
  681. continue;
  682. /* Clear accessed and referenced bits. */
  683. ptep_test_and_clear_young(vma, addr, pte);
  684. ClearPageReferenced(page);
  685. }
  686. pte_unmap_unlock(pte - 1, ptl);
  687. cond_resched();
  688. return 0;
  689. }
  690. static ssize_t clear_refs_write(struct file *file, const char __user *buf,
  691. size_t count, loff_t *ppos)
  692. {
  693. struct task_struct *task;
  694. char buffer[PROC_NUMBUF];
  695. struct mm_struct *mm;
  696. struct vm_area_struct *vma;
  697. enum clear_refs_types type;
  698. int itype;
  699. int rv;
  700. memset(buffer, 0, sizeof(buffer));
  701. if (count > sizeof(buffer) - 1)
  702. count = sizeof(buffer) - 1;
  703. if (copy_from_user(buffer, buf, count))
  704. return -EFAULT;
  705. rv = kstrtoint(strstrip(buffer), 10, &itype);
  706. if (rv < 0)
  707. return rv;
  708. type = (enum clear_refs_types)itype;
  709. if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
  710. return -EINVAL;
  711. if (type == CLEAR_REFS_SOFT_DIRTY) {
  712. soft_dirty_cleared = true;
  713. pr_warn_once("The pagemap bits 55-60 has changed their meaning! "
  714. "See the linux/Documentation/vm/pagemap.txt for details.\n");
  715. }
  716. task = get_proc_task(file_inode(file));
  717. if (!task)
  718. return -ESRCH;
  719. mm = get_task_mm(task);
  720. if (mm) {
  721. struct clear_refs_private cp = {
  722. .type = type,
  723. };
  724. struct mm_walk clear_refs_walk = {
  725. .pmd_entry = clear_refs_pte_range,
  726. .mm = mm,
  727. .private = &cp,
  728. };
  729. down_read(&mm->mmap_sem);
  730. if (type == CLEAR_REFS_SOFT_DIRTY)
  731. mmu_notifier_invalidate_range_start(mm, 0, -1);
  732. for (vma = mm->mmap; vma; vma = vma->vm_next) {
  733. cp.vma = vma;
  734. if (is_vm_hugetlb_page(vma))
  735. continue;
  736. /*
  737. * Writing 1 to /proc/pid/clear_refs affects all pages.
  738. *
  739. * Writing 2 to /proc/pid/clear_refs only affects
  740. * Anonymous pages.
  741. *
  742. * Writing 3 to /proc/pid/clear_refs only affects file
  743. * mapped pages.
  744. */
  745. if (type == CLEAR_REFS_ANON && vma->vm_file)
  746. continue;
  747. if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
  748. continue;
  749. walk_page_range(vma->vm_start, vma->vm_end,
  750. &clear_refs_walk);
  751. }
  752. if (type == CLEAR_REFS_SOFT_DIRTY)
  753. mmu_notifier_invalidate_range_end(mm, 0, -1);
  754. flush_tlb_mm(mm);
  755. up_read(&mm->mmap_sem);
  756. mmput(mm);
  757. }
  758. put_task_struct(task);
  759. return count;
  760. }
  761. const struct file_operations proc_clear_refs_operations = {
  762. .write = clear_refs_write,
  763. .llseek = noop_llseek,
  764. };
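/*
 * Illustrative userspace usage (names and values hypothetical): clearing
 * the soft-dirty bits of a process before measuring which pages it dirties:
 *
 *   int fd = open("/proc/1234/clear_refs", O_WRONLY);
 *   write(fd, "4", 1);      // 4 == CLEAR_REFS_SOFT_DIRTY
 *   close(fd);
 */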
  765. typedef struct {
  766. u64 pme;
  767. } pagemap_entry_t;
  768. struct pagemapread {
  769. int pos, len; /* units: PM_ENTRY_BYTES, not bytes */
  770. pagemap_entry_t *buffer;
  771. bool v2;
  772. };
  773. #define PAGEMAP_WALK_SIZE (PMD_SIZE)
  774. #define PAGEMAP_WALK_MASK (PMD_MASK)
  775. #define PM_ENTRY_BYTES sizeof(pagemap_entry_t)
  776. #define PM_STATUS_BITS 3
  777. #define PM_STATUS_OFFSET (64 - PM_STATUS_BITS)
  778. #define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
  779. #define PM_STATUS(nr) (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
  780. #define PM_PSHIFT_BITS 6
  781. #define PM_PSHIFT_OFFSET (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
  782. #define PM_PSHIFT_MASK (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
  783. #define __PM_PSHIFT(x) (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
  784. #define PM_PFRAME_MASK ((1LL << PM_PSHIFT_OFFSET) - 1)
  785. #define PM_PFRAME(x) ((x) & PM_PFRAME_MASK)
  786. /* in the "new" pagemap, pshift bits are occupied by more status bits */
  787. #define PM_STATUS2(v2, x) (__PM_PSHIFT(v2 ? x : PAGE_SHIFT))
  788. #define __PM_SOFT_DIRTY (1LL)
  789. #define PM_PRESENT PM_STATUS(4LL)
  790. #define PM_SWAP PM_STATUS(2LL)
  791. #define PM_FILE PM_STATUS(1LL)
  792. #define PM_NOT_PRESENT(v2) PM_STATUS2(v2, 0)
  793. #define PM_END_OF_BUFFER 1
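/*
 * With the definitions above, each 64-bit pagemap entry is laid out as:
 *   bits 63-61  status (PM_PRESENT / PM_SWAP / PM_FILE)
 *   bits 60-55  page shift, or extra status bits in the "v2" format
 *   bits 54-0   page frame number (or the swap type/offset encoding)
 */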
  794. static inline pagemap_entry_t make_pme(u64 val)
  795. {
  796. return (pagemap_entry_t) { .pme = val };
  797. }
  798. static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
  799. struct pagemapread *pm)
  800. {
  801. pm->buffer[pm->pos++] = *pme;
  802. if (pm->pos >= pm->len)
  803. return PM_END_OF_BUFFER;
  804. return 0;
  805. }
  806. static int pagemap_pte_hole(unsigned long start, unsigned long end,
  807. struct mm_walk *walk)
  808. {
  809. struct pagemapread *pm = walk->private;
  810. unsigned long addr;
  811. int err = 0;
  812. pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
  813. for (addr = start; addr < end; addr += PAGE_SIZE) {
  814. err = add_to_pagemap(addr, &pme, pm);
  815. if (err)
  816. break;
  817. }
  818. return err;
  819. }
  820. static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
  821. struct vm_area_struct *vma, unsigned long addr, pte_t pte)
  822. {
  823. u64 frame, flags;
  824. struct page *page = NULL;
  825. int flags2 = 0;
  826. if (pte_present(pte)) {
  827. frame = pte_pfn(pte);
  828. flags = PM_PRESENT;
  829. page = vm_normal_page(vma, addr, pte);
  830. if (pte_soft_dirty(pte))
  831. flags2 |= __PM_SOFT_DIRTY;
  832. } else if (is_swap_pte(pte)) {
  833. swp_entry_t entry;
  834. if (pte_swp_soft_dirty(pte))
  835. flags2 |= __PM_SOFT_DIRTY;
  836. entry = pte_to_swp_entry(pte);
  837. frame = swp_type(entry) |
  838. (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
  839. flags = PM_SWAP;
  840. if (is_migration_entry(entry))
  841. page = migration_entry_to_page(entry);
  842. } else {
  843. if (vma->vm_flags & VM_SOFTDIRTY)
  844. flags2 |= __PM_SOFT_DIRTY;
  845. *pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
  846. return;
  847. }
  848. if (page && !PageAnon(page))
  849. flags |= PM_FILE;
  850. if ((vma->vm_flags & VM_SOFTDIRTY))
  851. flags2 |= __PM_SOFT_DIRTY;
  852. *pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags);
  853. }
  854. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  855. static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
  856. pmd_t pmd, int offset, int pmd_flags2)
  857. {
  858. /*
  859. * Currently the pmd for a thp is always present because thp cannot be
  860. * swapped out, migrated, or HWPOISONed (it is split in such cases instead).
  861. * This if-check is just to prepare for future implementation.
  862. */
  863. if (pmd_present(pmd))
  864. *pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
  865. | PM_STATUS2(pm->v2, pmd_flags2) | PM_PRESENT);
  866. else
  867. *pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, pmd_flags2));
  868. }
  869. #else
  870. static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
  871. pmd_t pmd, int offset, int pmd_flags2)
  872. {
  873. }
  874. #endif
  875. static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
  876. struct mm_walk *walk)
  877. {
  878. struct vm_area_struct *vma;
  879. struct pagemapread *pm = walk->private;
  880. spinlock_t *ptl;
  881. pte_t *pte;
  882. int err = 0;
  883. pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
  884. /* find the first VMA at or above 'addr' */
  885. vma = find_vma(walk->mm, addr);
  886. if (vma && pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
  887. int pmd_flags2;
  888. if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd))
  889. pmd_flags2 = __PM_SOFT_DIRTY;
  890. else
  891. pmd_flags2 = 0;
  892. for (; addr != end; addr += PAGE_SIZE) {
  893. unsigned long offset;
  894. offset = (addr & ~PAGEMAP_WALK_MASK) >>
  895. PAGE_SHIFT;
  896. thp_pmd_to_pagemap_entry(&pme, pm, *pmd, offset, pmd_flags2);
  897. err = add_to_pagemap(addr, &pme, pm);
  898. if (err)
  899. break;
  900. }
  901. spin_unlock(ptl);
  902. return err;
  903. }
  904. if (pmd_trans_unstable(pmd))
  905. return 0;
  906. for (; addr != end; addr += PAGE_SIZE) {
  907. int flags2;
  908. /* check to see if we've left 'vma' behind
  909. * and need a new, higher one */
  910. if (vma && (addr >= vma->vm_end)) {
  911. vma = find_vma(walk->mm, addr);
  912. if (vma && (vma->vm_flags & VM_SOFTDIRTY))
  913. flags2 = __PM_SOFT_DIRTY;
  914. else
  915. flags2 = 0;
  916. pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
  917. }
  918. /* check that 'vma' actually covers this address,
  919. * and that it isn't a huge page vma */
  920. if (vma && (vma->vm_start <= addr) &&
  921. !is_vm_hugetlb_page(vma)) {
  922. pte = pte_offset_map(pmd, addr);
  923. pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
  924. /* unmap before userspace copy */
  925. pte_unmap(pte);
  926. }
  927. err = add_to_pagemap(addr, &pme, pm);
  928. if (err)
  929. return err;
  930. }
  931. cond_resched();
  932. return err;
  933. }
  934. #ifdef CONFIG_HUGETLB_PAGE
  935. static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
  936. pte_t pte, int offset, int flags2)
  937. {
  938. if (pte_present(pte))
  939. *pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset) |
  940. PM_STATUS2(pm->v2, flags2) |
  941. PM_PRESENT);
  942. else
  943. *pme = make_pme(PM_NOT_PRESENT(pm->v2) |
  944. PM_STATUS2(pm->v2, flags2));
  945. }
  946. /* This function walks within one hugetlb entry in a single call */
  947. static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
  948. unsigned long addr, unsigned long end,
  949. struct mm_walk *walk)
  950. {
  951. struct pagemapread *pm = walk->private;
  952. struct vm_area_struct *vma;
  953. int err = 0;
  954. int flags2;
  955. pagemap_entry_t pme;
  956. vma = find_vma(walk->mm, addr);
  957. WARN_ON_ONCE(!vma);
  958. if (vma && (vma->vm_flags & VM_SOFTDIRTY))
  959. flags2 = __PM_SOFT_DIRTY;
  960. else
  961. flags2 = 0;
  962. for (; addr != end; addr += PAGE_SIZE) {
  963. int offset = (addr & ~hmask) >> PAGE_SHIFT;
  964. huge_pte_to_pagemap_entry(&pme, pm, *pte, offset, flags2);
  965. err = add_to_pagemap(addr, &pme, pm);
  966. if (err)
  967. return err;
  968. }
  969. cond_resched();
  970. return err;
  971. }
  972. #endif /* HUGETLB_PAGE */
  973. /*
  974. * /proc/pid/pagemap - an array mapping virtual pages to pfns
  975. *
  976. * For each page in the address space, this file contains one 64-bit entry
  977. * consisting of the following:
  978. *
  979. * Bits 0-54 page frame number (PFN) if present
  980. * Bits 0-4 swap type if swapped
  981. * Bits 5-54 swap offset if swapped
  982. * Bits 55-60 page shift (page size = 1<<page shift)
  983. * Bit 61 page is file-page or shared-anon
  984. * Bit 62 page swapped
  985. * Bit 63 page present
  986. *
  987. * If the page is not present but in swap, then the PFN contains an
  988. * encoding of the swap file number and the page's offset into the
  989. * swap. Unmapped pages return a null PFN. This allows determining
  990. * precisely which pages are mapped (or in swap) and comparing mapped
  991. * pages between processes.
  992. *
  993. * Efficient users of this interface will use /proc/pid/maps to
  994. * determine which areas of memory are actually mapped and llseek to
  995. * skip over unmapped regions.
  996. */
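/*
 * Illustrative userspace lookup (names and values hypothetical): to read
 * the entry describing virtual address 'vaddr' of process 'pid', seek to
 * (vaddr / PAGE_SIZE) * 8 in /proc/<pid>/pagemap and read 8 bytes:
 *
 *   uint64_t ent;
 *   off_t off = (vaddr / page_size) * sizeof(ent);
 *   if (pread(fd, &ent, sizeof(ent), off) == sizeof(ent) &&
 *       (ent & (1ULL << 63)))                    // bit 63: page present
 *           pfn = ent & ((1ULL << 55) - 1);      // bits 0-54: PFN
 */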
  997. static ssize_t pagemap_read(struct file *file, char __user *buf,
  998. size_t count, loff_t *ppos)
  999. {
  1000. struct task_struct *task = get_proc_task(file_inode(file));
  1001. struct mm_struct *mm;
  1002. struct pagemapread pm;
  1003. int ret = -ESRCH;
  1004. struct mm_walk pagemap_walk = {};
  1005. unsigned long src;
  1006. unsigned long svpfn;
  1007. unsigned long start_vaddr;
  1008. unsigned long end_vaddr;
  1009. int copied = 0;
  1010. if (!task)
  1011. goto out;
  1012. ret = -EINVAL;
  1013. /* file position must be aligned */
  1014. if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
  1015. goto out_task;
  1016. ret = 0;
  1017. if (!count)
  1018. goto out_task;
  1019. pm.v2 = soft_dirty_cleared;
  1020. pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
  1021. pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
  1022. ret = -ENOMEM;
  1023. if (!pm.buffer)
  1024. goto out_task;
  1025. mm = mm_access(task, PTRACE_MODE_READ);
  1026. ret = PTR_ERR(mm);
  1027. if (!mm || IS_ERR(mm))
  1028. goto out_free;
  1029. pagemap_walk.pmd_entry = pagemap_pte_range;
  1030. pagemap_walk.pte_hole = pagemap_pte_hole;
  1031. #ifdef CONFIG_HUGETLB_PAGE
  1032. pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
  1033. #endif
  1034. pagemap_walk.mm = mm;
  1035. pagemap_walk.private = &pm;
  1036. src = *ppos;
  1037. svpfn = src / PM_ENTRY_BYTES;
  1038. start_vaddr = svpfn << PAGE_SHIFT;
  1039. end_vaddr = TASK_SIZE_OF(task);
  1040. /* watch out for wraparound */
  1041. if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
  1042. start_vaddr = end_vaddr;
  1043. /*
  1044. * The odds are that this will stop walking way
  1045. * before end_vaddr, because the length of the
  1046. * user buffer is tracked in "pm", and the walk
  1047. * will stop when we hit the end of the buffer.
  1048. */
  1049. ret = 0;
  1050. while (count && (start_vaddr < end_vaddr)) {
  1051. int len;
  1052. unsigned long end;
  1053. pm.pos = 0;
  1054. end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
  1055. /* overflow ? */
  1056. if (end < start_vaddr || end > end_vaddr)
  1057. end = end_vaddr;
  1058. down_read(&mm->mmap_sem);
  1059. ret = walk_page_range(start_vaddr, end, &pagemap_walk);
  1060. up_read(&mm->mmap_sem);
  1061. start_vaddr = end;
  1062. len = min(count, PM_ENTRY_BYTES * pm.pos);
  1063. if (copy_to_user(buf, pm.buffer, len)) {
  1064. ret = -EFAULT;
  1065. goto out_mm;
  1066. }
  1067. copied += len;
  1068. buf += len;
  1069. count -= len;
  1070. }
  1071. *ppos += copied;
  1072. if (!ret || ret == PM_END_OF_BUFFER)
  1073. ret = copied;
  1074. out_mm:
  1075. mmput(mm);
  1076. out_free:
  1077. kfree(pm.buffer);
  1078. out_task:
  1079. put_task_struct(task);
  1080. out:
  1081. return ret;
  1082. }
  1083. static int pagemap_open(struct inode *inode, struct file *file)
  1084. {
  1085. pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about "
  1086. "to stop being page-shift some time soon. See the "
  1087. "linux/Documentation/vm/pagemap.txt for details.\n");
  1088. return 0;
  1089. }
  1090. const struct file_operations proc_pagemap_operations = {
  1091. .llseek = mem_lseek, /* borrow this */
  1092. .read = pagemap_read,
  1093. .open = pagemap_open,
  1094. };
  1095. #endif /* CONFIG_PROC_PAGE_MONITOR */
  1096. #ifdef CONFIG_NUMA
  1097. struct numa_maps {
  1098. struct vm_area_struct *vma;
  1099. unsigned long pages;
  1100. unsigned long anon;
  1101. unsigned long active;
  1102. unsigned long writeback;
  1103. unsigned long mapcount_max;
  1104. unsigned long dirty;
  1105. unsigned long swapcache;
  1106. unsigned long node[MAX_NUMNODES];
  1107. };
  1108. struct numa_maps_private {
  1109. struct proc_maps_private proc_maps;
  1110. struct numa_maps md;
  1111. };
  1112. static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
  1113. unsigned long nr_pages)
  1114. {
  1115. int count = page_mapcount(page);
  1116. md->pages += nr_pages;
  1117. if (pte_dirty || PageDirty(page))
  1118. md->dirty += nr_pages;
  1119. if (PageSwapCache(page))
  1120. md->swapcache += nr_pages;
  1121. if (PageActive(page) || PageUnevictable(page))
  1122. md->active += nr_pages;
  1123. if (PageWriteback(page))
  1124. md->writeback += nr_pages;
  1125. if (PageAnon(page))
  1126. md->anon += nr_pages;
  1127. if (count > md->mapcount_max)
  1128. md->mapcount_max = count;
  1129. md->node[page_to_nid(page)] += nr_pages;
  1130. }
  1131. static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
  1132. unsigned long addr)
  1133. {
  1134. struct page *page;
  1135. int nid;
  1136. if (!pte_present(pte))
  1137. return NULL;
  1138. page = vm_normal_page(vma, addr, pte);
  1139. if (!page)
  1140. return NULL;
  1141. if (PageReserved(page))
  1142. return NULL;
  1143. nid = page_to_nid(page);
  1144. if (!node_isset(nid, node_states[N_MEMORY]))
  1145. return NULL;
  1146. return page;
  1147. }
  1148. static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
  1149. unsigned long end, struct mm_walk *walk)
  1150. {
  1151. struct numa_maps *md;
  1152. spinlock_t *ptl;
  1153. pte_t *orig_pte;
  1154. pte_t *pte;
  1155. md = walk->private;
  1156. if (pmd_trans_huge_lock(pmd, md->vma, &ptl) == 1) {
  1157. pte_t huge_pte = *(pte_t *)pmd;
  1158. struct page *page;
  1159. page = can_gather_numa_stats(huge_pte, md->vma, addr);
  1160. if (page)
  1161. gather_stats(page, md, pte_dirty(huge_pte),
  1162. HPAGE_PMD_SIZE/PAGE_SIZE);
  1163. spin_unlock(ptl);
  1164. return 0;
  1165. }
  1166. if (pmd_trans_unstable(pmd))
  1167. return 0;
  1168. orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
  1169. do {
  1170. struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
  1171. if (!page)
  1172. continue;
  1173. gather_stats(page, md, pte_dirty(*pte), 1);
  1174. } while (pte++, addr += PAGE_SIZE, addr != end);
  1175. pte_unmap_unlock(orig_pte, ptl);
  1176. return 0;
  1177. }
  1178. #ifdef CONFIG_HUGETLB_PAGE
  1179. static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
  1180. unsigned long addr, unsigned long end, struct mm_walk *walk)
  1181. {
  1182. struct numa_maps *md;
  1183. struct page *page;
  1184. if (pte_none(*pte))
  1185. return 0;
  1186. page = pte_page(*pte);
  1187. if (!page)
  1188. return 0;
  1189. md = walk->private;
  1190. gather_stats(page, md, pte_dirty(*pte), 1);
  1191. return 0;
  1192. }
  1193. #else
  1194. static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
  1195. unsigned long addr, unsigned long end, struct mm_walk *walk)
  1196. {
  1197. return 0;
  1198. }
  1199. #endif
  1200. /*
  1201. * Display pages allocated per node and memory policy via /proc.
  1202. */
  1203. static int show_numa_map(struct seq_file *m, void *v, int is_pid)
  1204. {
  1205. struct numa_maps_private *numa_priv = m->private;
  1206. struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
  1207. struct vm_area_struct *vma = v;
  1208. struct numa_maps *md = &numa_priv->md;
  1209. struct file *file = vma->vm_file;
  1210. struct task_struct *task = proc_priv->task;
  1211. struct mm_struct *mm = vma->vm_mm;
  1212. struct mm_walk walk = {};
  1213. struct mempolicy *pol;
  1214. char buffer[64];
  1215. int nid;
  1216. if (!mm)
  1217. return 0;
  1218. /* Ensure we start with an empty set of numa_maps statistics. */
  1219. memset(md, 0, sizeof(*md));
  1220. md->vma = vma;
  1221. walk.hugetlb_entry = gather_hugetbl_stats;
  1222. walk.pmd_entry = gather_pte_stats;
  1223. walk.private = md;
  1224. walk.mm = mm;
  1225. pol = get_vma_policy(task, vma, vma->vm_start);
  1226. mpol_to_str(buffer, sizeof(buffer), pol);
  1227. mpol_cond_put(pol);
  1228. seq_printf(m, "%08lx %s", vma->vm_start, buffer);
  1229. if (file) {
  1230. seq_printf(m, " file=");
  1231. seq_path(m, &file->f_path, "\n\t= ");
  1232. } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
  1233. seq_printf(m, " heap");
  1234. } else {
  1235. pid_t tid = vm_is_stack(task, vma, is_pid);
  1236. if (tid != 0) {
  1237. /*
  1238. * Thread stack in /proc/PID/task/TID/maps or
  1239. * the main process stack.
  1240. */
  1241. if (!is_pid || (vma->vm_start <= mm->start_stack &&
  1242. vma->vm_end >= mm->start_stack))
  1243. seq_printf(m, " stack");
  1244. else
  1245. seq_printf(m, " stack:%d", tid);
  1246. }
  1247. }
  1248. if (is_vm_hugetlb_page(vma))
  1249. seq_printf(m, " huge");
  1250. walk_page_range(vma->vm_start, vma->vm_end, &walk);
  1251. if (!md->pages)
  1252. goto out;
  1253. if (md->anon)
  1254. seq_printf(m, " anon=%lu", md->anon);
  1255. if (md->dirty)
  1256. seq_printf(m, " dirty=%lu", md->dirty);
  1257. if (md->pages != md->anon && md->pages != md->dirty)
  1258. seq_printf(m, " mapped=%lu", md->pages);
  1259. if (md->mapcount_max > 1)
  1260. seq_printf(m, " mapmax=%lu", md->mapcount_max);
  1261. if (md->swapcache)
  1262. seq_printf(m, " swapcache=%lu", md->swapcache);
  1263. if (md->active < md->pages && !is_vm_hugetlb_page(vma))
  1264. seq_printf(m, " active=%lu", md->active);
  1265. if (md->writeback)
  1266. seq_printf(m, " writeback=%lu", md->writeback);
  1267. for_each_node_state(nid, N_MEMORY)
  1268. if (md->node[nid])
  1269. seq_printf(m, " N%d=%lu", nid, md->node[nid]);
  1270. out:
  1271. seq_putc(m, '\n');
  1272. if (m->count < m->size)
  1273. m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
  1274. return 0;
  1275. }
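/*
 * A resulting /proc/<pid>/numa_maps line looks like (illustrative):
 *
 *   7f1234567000 default file=/usr/lib/libc-2.17.so mapped=120 mapmax=34 N0=120
 *
 * i.e. the vma start address, the mempolicy string, and the per-node page
 * counts gathered by the walk above.
 */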
  1276. static int show_pid_numa_map(struct seq_file *m, void *v)
  1277. {
  1278. return show_numa_map(m, v, 1);
  1279. }
  1280. static int show_tid_numa_map(struct seq_file *m, void *v)
  1281. {
  1282. return show_numa_map(m, v, 0);
  1283. }
  1284. static const struct seq_operations proc_pid_numa_maps_op = {
  1285. .start = m_start,
  1286. .next = m_next,
  1287. .stop = m_stop,
  1288. .show = show_pid_numa_map,
  1289. };
  1290. static const struct seq_operations proc_tid_numa_maps_op = {
  1291. .start = m_start,
  1292. .next = m_next,
  1293. .stop = m_stop,
  1294. .show = show_tid_numa_map,
  1295. };
  1296. static int numa_maps_open(struct inode *inode, struct file *file,
  1297. const struct seq_operations *ops)
  1298. {
  1299. struct numa_maps_private *priv;
  1300. int ret = -ENOMEM;
  1301. priv = kzalloc(sizeof(*priv), GFP_KERNEL);
  1302. if (priv) {
  1303. priv->proc_maps.pid = proc_pid(inode);
  1304. ret = seq_open(file, ops);
  1305. if (!ret) {
  1306. struct seq_file *m = file->private_data;
  1307. m->private = priv;
  1308. } else {
  1309. kfree(priv);
  1310. }
  1311. }
  1312. return ret;
  1313. }
  1314. static int pid_numa_maps_open(struct inode *inode, struct file *file)
  1315. {
  1316. return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
  1317. }
  1318. static int tid_numa_maps_open(struct inode *inode, struct file *file)
  1319. {
  1320. return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
  1321. }
  1322. const struct file_operations proc_pid_numa_maps_operations = {
  1323. .open = pid_numa_maps_open,
  1324. .read = seq_read,
  1325. .llseek = seq_lseek,
  1326. .release = seq_release_private,
  1327. };
  1328. const struct file_operations proc_tid_numa_maps_operations = {
  1329. .open = tid_numa_maps_open,
  1330. .read = seq_read,
  1331. .llseek = seq_lseek,
  1332. .release = seq_release_private,
  1333. };
  1334. #endif /* CONFIG_NUMA */