task_nommu.c

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared", and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
char *task_mem(struct mm_struct *mm, char *buffer)
{
        struct vm_list_struct *vml;
        unsigned long bytes = 0, sbytes = 0, slack = 0;

        down_read(&mm->mmap_sem);
        for (vml = mm->context.vmlist; vml; vml = vml->next) {
                if (!vml->vma)
                        continue;

                bytes += kobjsize(vml);
                if (atomic_read(&mm->mm_count) > 1 ||
                    atomic_read(&vml->vma->vm_usage) > 1
                    ) {
                        sbytes += kobjsize((void *) vml->vma->vm_start);
                        sbytes += kobjsize(vml->vma);
                } else {
                        bytes += kobjsize((void *) vml->vma->vm_start);
                        bytes += kobjsize(vml->vma);
                        slack += kobjsize((void *) vml->vma->vm_start) -
                                (vml->vma->vm_end - vml->vma->vm_start);
                }
        }

        if (atomic_read(&mm->mm_count) > 1)
                sbytes += kobjsize(mm);
        else
                bytes += kobjsize(mm);

        if (current->fs && atomic_read(&current->fs->count) > 1)
                sbytes += kobjsize(current->fs);
        else
                bytes += kobjsize(current->fs);

        if (current->files && atomic_read(&current->files->count) > 1)
                sbytes += kobjsize(current->files);
        else
                bytes += kobjsize(current->files);

        if (current->sighand && atomic_read(&current->sighand->count) > 1)
                sbytes += kobjsize(current->sighand);
        else
                bytes += kobjsize(current->sighand);

        bytes += kobjsize(current); /* includes kernel stack */

        buffer += sprintf(buffer,
                "Mem:\t%8lu bytes\n"
                "Slack:\t%8lu bytes\n"
                "Shared:\t%8lu bytes\n",
                bytes, slack, sbytes);

        up_read(&mm->mmap_sem);
        return buffer;
}
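
/*
 * Report the total address-space size of a no-MMU process: the sum of the
 * allocation sizes backing each of its VMAs.
 */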
unsigned long task_vsize(struct mm_struct *mm)
{
        struct vm_list_struct *tbp;
        unsigned long vsize = 0;

        down_read(&mm->mmap_sem);
        for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
                if (tbp->vma)
                        vsize += kobjsize((void *) tbp->vma->vm_start);
        }
        up_read(&mm->mmap_sem);
        return vsize;
}
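
/*
 * Fill in the /proc/<pid>/statm style counters for a no-MMU process: the
 * overall size is accumulated from kobjsize() of the mm, the VMA list
 * entries and their backing allocations, while text and data sizes are
 * taken from the mm's code and data segment bounds.
 */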
int task_statm(struct mm_struct *mm, int *shared, int *text,
               int *data, int *resident)
{
        struct vm_list_struct *tbp;
        int size = kobjsize(mm);

        down_read(&mm->mmap_sem);
        for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
                size += kobjsize(tbp);
                if (tbp->vma) {
                        size += kobjsize(tbp->vma);
                        size += kobjsize((void *) tbp->vma->vm_start);
                }
        }

        size += (*text = mm->end_code - mm->start_code);
        size += (*data = mm->start_stack - mm->start_data);
        up_read(&mm->mmap_sem);
        *resident = size;
        return size;
}
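
/*
 * Find the VMA that maps the process's executable (VM_EXECUTABLE and backed
 * by a file) and hand back pinned references to its mount and dentry so that
 * /proc/<pid>/exe can be resolved.
 */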
int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
        struct vm_list_struct *vml;
        struct vm_area_struct *vma;
        struct task_struct *task = get_proc_task(inode);
        struct mm_struct *mm = get_task_mm(task);
        int result = -ENOENT;

        if (!mm)
                goto out;
        down_read(&mm->mmap_sem);

        vml = mm->context.vmlist;
        vma = NULL;
        while (vml) {
                if ((vml->vma->vm_flags & VM_EXECUTABLE) && vml->vma->vm_file) {
                        vma = vml->vma;
                        break;
                }
                vml = vml->next;
        }

        if (vma) {
                *mnt = mntget(vma->vm_file->f_path.mnt);
                *dentry = dget(vma->vm_file->f_path.dentry);
                result = 0;
        }

        up_read(&mm->mmap_sem);
        mmput(mm);
out:
        return result;
}

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_vml)
{
        struct vm_list_struct *vml = _vml;
        struct proc_maps_private *priv = m->private;
        struct task_struct *task = priv->task;

        if (maps_protect && !ptrace_may_attach(task))
                return -EACCES;

        return nommu_vma_show(m, vml->vma);
}
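
/*
 * seq_file start operation: pin the task and its mm, take the mmap semaphore
 * and walk to the *pos'th entry in the VMA list.
 */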
static void *m_start(struct seq_file *m, loff_t *pos)
{
        struct proc_maps_private *priv = m->private;
        struct vm_list_struct *vml;
        struct mm_struct *mm;
        loff_t n = *pos;

        /* pin the task and mm whilst we play with them */
        priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
        if (!priv->task)
                return NULL;

        mm = get_task_mm(priv->task);
        if (!mm) {
                put_task_struct(priv->task);
                priv->task = NULL;
                return NULL;
        }

        down_read(&mm->mmap_sem);

        /* start from the Nth VMA */
        for (vml = mm->context.vmlist; vml; vml = vml->next)
                if (n-- == 0)
                        return vml;
        return NULL;
}
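
/*
 * seq_file stop operation: drop the mmap semaphore and the mm and task
 * references taken in m_start().
 */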
static void m_stop(struct seq_file *m, void *_vml)
{
        struct proc_maps_private *priv = m->private;

        if (priv->task) {
                struct mm_struct *mm = priv->task->mm;
                up_read(&mm->mmap_sem);
                mmput(mm);
                put_task_struct(priv->task);
        }
}
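
/* seq_file next operation: advance to the following VMA list entry */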
static void *m_next(struct seq_file *m, void *_vml, loff_t *pos)
{
        struct vm_list_struct *vml = _vml;

        (*pos)++;
        return vml ? vml->next : NULL;
}

static struct seq_operations proc_pid_maps_ops = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_map
};
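
/*
 * Open /proc/<pid>/maps: allocate the per-file private data, record the
 * target pid and attach the seq_file iterator defined above.
 */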
static int maps_open(struct inode *inode, struct file *file)
{
        struct proc_maps_private *priv;
        int ret = -ENOMEM;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (priv) {
                priv->pid = proc_pid(inode);
                ret = seq_open(file, &proc_pid_maps_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;

                        m->private = priv;
                } else {
                        kfree(priv);
                }
        }
        return ret;
}

const struct file_operations proc_maps_operations = {
        .open           = maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};