task_nommu.c

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared" and
 * "non-shared".  Shared memory may get counted more than once, for
 * each process that owns it.  Non-shared memory is counted
 * accurately.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
        struct vm_list_struct *vml;
        unsigned long bytes = 0, sbytes = 0, slack = 0;

        down_read(&mm->mmap_sem);
        for (vml = mm->context.vmlist; vml; vml = vml->next) {
                if (!vml->vma)
                        continue;

                bytes += kobjsize(vml);
                if (atomic_read(&mm->mm_count) > 1 ||
                    atomic_read(&vml->vma->vm_usage) > 1
                    ) {
                        sbytes += kobjsize((void *) vml->vma->vm_start);
                        sbytes += kobjsize(vml->vma);
                } else {
                        bytes += kobjsize((void *) vml->vma->vm_start);
                        bytes += kobjsize(vml->vma);
                        slack += kobjsize((void *) vml->vma->vm_start) -
                                (vml->vma->vm_end - vml->vma->vm_start);
                }
        }

        if (atomic_read(&mm->mm_count) > 1)
                sbytes += kobjsize(mm);
        else
                bytes += kobjsize(mm);

        if (current->fs && atomic_read(&current->fs->count) > 1)
                sbytes += kobjsize(current->fs);
        else
                bytes += kobjsize(current->fs);

        if (current->files && atomic_read(&current->files->count) > 1)
                sbytes += kobjsize(current->files);
        else
                bytes += kobjsize(current->files);

        if (current->sighand && atomic_read(&current->sighand->count) > 1)
                sbytes += kobjsize(current->sighand);
        else
                bytes += kobjsize(current->sighand);

        bytes += kobjsize(current); /* includes kernel stack */

        seq_printf(m,
                   "Mem:\t%8lu bytes\n"
                   "Slack:\t%8lu bytes\n"
                   "Shared:\t%8lu bytes\n",
                   bytes, slack, sbytes);

        up_read(&mm->mmap_sem);
}
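
/*
 * Report the total address space consumed by this task's mappings: the
 * size of the allocation backing each VMA, as returned by kobjsize().
 */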
unsigned long task_vsize(struct mm_struct *mm)
{
        struct vm_list_struct *tbp;
        unsigned long vsize = 0;

        down_read(&mm->mmap_sem);
        for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
                if (tbp->vma)
                        vsize += kobjsize((void *) tbp->vma->vm_start);
        }
        up_read(&mm->mmap_sem);
        return vsize;
}
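
/*
 * Fill in the statm-style figures for this task.  Everything is charged
 * to "size"/"resident": the mm_struct, the VMA list entries, the VMAs
 * themselves and the regions backing them, plus the text and data
 * segment lengths taken from the mm.
 */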
int task_statm(struct mm_struct *mm, int *shared, int *text,
               int *data, int *resident)
{
        struct vm_list_struct *tbp;
        int size = kobjsize(mm);

        down_read(&mm->mmap_sem);
        for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
                size += kobjsize(tbp);
                if (tbp->vma) {
                        size += kobjsize(tbp->vma);
                        size += kobjsize((void *) tbp->vma->vm_start);
                }
        }

        size += (*text = mm->end_code - mm->start_code);
        size += (*data = mm->start_stack - mm->start_data);
        up_read(&mm->mmap_sem);
        *resident = size;
        return size;
}

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_vml)
{
        struct vm_list_struct *vml = _vml;
        struct proc_maps_private *priv = m->private;
        struct task_struct *task = priv->task;

        if (maps_protect && !ptrace_may_attach(task))
                return -EACCES;

        return nommu_vma_show(m, vml->vma);
}
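
/*
 * seq_file ->start(): pin the target task and its mm, then walk the
 * nommu VMA list to find the entry corresponding to *pos.  The
 * references taken here are dropped again in m_stop().
 */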
static void *m_start(struct seq_file *m, loff_t *pos)
{
        struct proc_maps_private *priv = m->private;
        struct vm_list_struct *vml;
        struct mm_struct *mm;
        loff_t n = *pos;

        /* pin the task and mm whilst we play with them */
        priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
        if (!priv->task)
                return NULL;

        mm = mm_for_maps(priv->task);
        if (!mm) {
                put_task_struct(priv->task);
                priv->task = NULL;
                return NULL;
        }

        /* start from the Nth VMA */
        for (vml = mm->context.vmlist; vml; vml = vml->next)
                if (n-- == 0)
                        return vml;
        return NULL;
}
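
/*
 * seq_file ->stop(): release the mmap_sem read lock and drop the mm and
 * task references pinned in m_start().
 */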
static void m_stop(struct seq_file *m, void *_vml)
{
        struct proc_maps_private *priv = m->private;

        if (priv->task) {
                struct mm_struct *mm = priv->task->mm;
                up_read(&mm->mmap_sem);
                mmput(mm);
                put_task_struct(priv->task);
        }
}
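
/*
 * seq_file ->next(): advance to the next entry in the VMA list.
 */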
static void *m_next(struct seq_file *m, void *_vml, loff_t *pos)
{
        struct vm_list_struct *vml = _vml;

        (*pos)++;
        return vml ? vml->next : NULL;
}
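
/* seq_file iterator operations for /proc/<pid>/maps on nommu */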
static const struct seq_operations proc_pid_maps_ops = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_map
};
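
/*
 * Open /proc/<pid>/maps: allocate the per-file private record, note
 * which pid we are looking at and hook the seq_file iterator up to it.
 */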
static int maps_open(struct inode *inode, struct file *file)
{
        struct proc_maps_private *priv;
        int ret = -ENOMEM;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (priv) {
                priv->pid = proc_pid(inode);
                ret = seq_open(file, &proc_pid_maps_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = priv;
                } else {
                        kfree(priv);
                }
        }
        return ret;
}
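
/* file operations backing /proc/<pid>/maps under nommu */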
const struct file_operations proc_maps_operations = {
        .open           = maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};