task_nommu.c

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared" and
 * "non-shared". Shared memory may get counted more than once, once
 * for each process that owns it. Non-shared memory is counted
 * accurately.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
        struct vm_list_struct *vml;
        unsigned long bytes = 0, sbytes = 0, slack = 0;

        down_read(&mm->mmap_sem);
        for (vml = mm->context.vmlist; vml; vml = vml->next) {
                if (!vml->vma)
                        continue;

                bytes += kobjsize(vml);

                /* a region counts as shared if the mm or the VMA itself
                 * has more than one user */
                if (atomic_read(&mm->mm_count) > 1 ||
                    atomic_read(&vml->vma->vm_usage) > 1
                    ) {
                        sbytes += kobjsize((void *) vml->vma->vm_start);
                        sbytes += kobjsize(vml->vma);
                } else {
                        bytes += kobjsize((void *) vml->vma->vm_start);
                        bytes += kobjsize(vml->vma);

                        /* slack: allocator rounding beyond the size the
                         * mapping actually asked for */
                        slack += kobjsize((void *) vml->vma->vm_start) -
                                 (vml->vma->vm_end - vml->vma->vm_start);
                }
        }

        if (atomic_read(&mm->mm_count) > 1)
                sbytes += kobjsize(mm);
        else
                bytes += kobjsize(mm);

        /* fs, files and sighand are shared when another task also holds
         * a reference to them */
        if (current->fs && atomic_read(&current->fs->count) > 1)
                sbytes += kobjsize(current->fs);
        else
                bytes += kobjsize(current->fs);

        if (current->files && atomic_read(&current->files->count) > 1)
                sbytes += kobjsize(current->files);
        else
                bytes += kobjsize(current->files);

        if (current->sighand && atomic_read(&current->sighand->count) > 1)
                sbytes += kobjsize(current->sighand);
        else
                bytes += kobjsize(current->sighand);

        bytes += kobjsize(current); /* includes kernel stack */

        seq_printf(m,
                   "Mem:\t%8lu bytes\n"
                   "Slack:\t%8lu bytes\n"
                   "Shared:\t%8lu bytes\n",
                   bytes, slack, sbytes);

        up_read(&mm->mmap_sem);
}

unsigned long task_vsize(struct mm_struct *mm)
{
        struct vm_list_struct *tbp;
        unsigned long vsize = 0;

        down_read(&mm->mmap_sem);
        for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
                if (tbp->vma)
                        vsize += kobjsize((void *) tbp->vma->vm_start);
        }
        up_read(&mm->mmap_sem);
        return vsize;
}

int task_statm(struct mm_struct *mm, int *shared, int *text,
               int *data, int *resident)
{
        struct vm_list_struct *tbp;
        int size = kobjsize(mm);

        down_read(&mm->mmap_sem);
        for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
                size += kobjsize(tbp);
                if (tbp->vma) {
                        size += kobjsize(tbp->vma);
                        size += kobjsize((void *) tbp->vma->vm_start);
                }
        }

        size += (*text = mm->end_code - mm->start_code);
        size += (*data = mm->start_stack - mm->start_data);
        up_read(&mm->mmap_sem);
        *resident = size;
        return size;
}

int proc_exe_link(struct inode *inode, struct path *path)
{
        struct vm_list_struct *vml;
        struct vm_area_struct *vma;
        struct task_struct *task = get_proc_task(inode);
        struct mm_struct *mm = get_task_mm(task);
        int result = -ENOENT;

        if (!mm)
                goto out;
        down_read(&mm->mmap_sem);

        vml = mm->context.vmlist;
        vma = NULL;
        while (vml) {
                if ((vml->vma->vm_flags & VM_EXECUTABLE) && vml->vma->vm_file) {
                        vma = vml->vma;
                        break;
                }
                vml = vml->next;
        }

        if (vma) {
                *path = vma->vm_file->f_path;
                path_get(&vma->vm_file->f_path);
                result = 0;
        }

        up_read(&mm->mmap_sem);
        mmput(mm);
out:
        return result;
}

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_vml)
{
        struct vm_list_struct *vml = _vml;
        struct proc_maps_private *priv = m->private;
        struct task_struct *task = priv->task;

        if (maps_protect && !ptrace_may_attach(task))
                return -EACCES;

        return nommu_vma_show(m, vml->vma);
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
        struct proc_maps_private *priv = m->private;
        struct vm_list_struct *vml;
        struct mm_struct *mm;
        loff_t n = *pos;

        /* pin the task and mm whilst we play with them */
        priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
        if (!priv->task)
                return NULL;

        /* mm_for_maps() takes mmap_sem for read on success; it is
         * released again in m_stop() */
        mm = mm_for_maps(priv->task);
        if (!mm) {
                put_task_struct(priv->task);
                priv->task = NULL;
                return NULL;
        }

        /* start from the Nth VMA */
        for (vml = mm->context.vmlist; vml; vml = vml->next)
                if (n-- == 0)
                        return vml;
        return NULL;
}

static void m_stop(struct seq_file *m, void *_vml)
{
        struct proc_maps_private *priv = m->private;

        if (priv->task) {
                struct mm_struct *mm = priv->task->mm;
                up_read(&mm->mmap_sem);
                mmput(mm);
                put_task_struct(priv->task);
        }
}

static void *m_next(struct seq_file *m, void *_vml, loff_t *pos)
{
        struct vm_list_struct *vml = _vml;

        (*pos)++;
        return vml ? vml->next : NULL;
}

static const struct seq_operations proc_pid_maps_ops = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_map
};

static int maps_open(struct inode *inode, struct file *file)
{
        struct proc_maps_private *priv;
        int ret = -ENOMEM;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (priv) {
                priv->pid = proc_pid(inode);
                ret = seq_open(file, &proc_pid_maps_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = priv;
                } else {
                        kfree(priv);
                }
        }
        return ret;
}

const struct file_operations proc_maps_operations = {
        .open           = maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};
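
The proc_maps_operations table above is what backs /proc/<pid>/maps on !MMU kernels, so the file is read from userspace like any other proc file. As a minimal, illustrative sketch (plain POSIX stdio; nothing below is part of task_nommu.c, and the program structure is just an example), a reader might look like this:

#include <stdio.h>
#include <stdlib.h>

/* Dump /proc/<pid>/maps, or /proc/self/maps when no PID is given. */
int main(int argc, char **argv)
{
        char path[64];
        char line[512];
        FILE *fp;

        if (argc > 1)
                snprintf(path, sizeof(path), "/proc/%s/maps", argv[1]);
        else
                snprintf(path, sizeof(path), "/proc/self/maps");

        fp = fopen(path, "r");
        if (!fp) {
                perror(path);
                return EXIT_FAILURE;
        }

        /* each line is one mapping, formatted by nommu_vma_show() via
         * the seq_file operations defined above */
        while (fgets(line, sizeof(line), fp))
                fputs(line, stdout);

        fclose(fp);
        return EXIT_SUCCESS;
}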