task_nommu.c
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared" and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
char *task_mem(struct mm_struct *mm, char *buffer)
{
	struct vm_list_struct *vml;
	unsigned long bytes = 0, sbytes = 0, slack = 0;

	down_read(&mm->mmap_sem);
	for (vml = mm->context.vmlist; vml; vml = vml->next) {
		if (!vml->vma)
			continue;

		bytes += kobjsize(vml);
		if (atomic_read(&mm->mm_count) > 1 ||
		    atomic_read(&vml->vma->vm_usage) > 1
		    ) {
			sbytes += kobjsize((void *) vml->vma->vm_start);
			sbytes += kobjsize(vml->vma);
		} else {
			bytes += kobjsize((void *) vml->vma->vm_start);
			bytes += kobjsize(vml->vma);
			slack += kobjsize((void *) vml->vma->vm_start) -
				(vml->vma->vm_end - vml->vma->vm_start);
		}
	}

	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && atomic_read(&current->fs->count) > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && atomic_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	buffer += sprintf(buffer,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);

	up_read(&mm->mmap_sem);
	return buffer;
}

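/*
 * Report the task's "virtual" size: with no MMU this is simply the sum
 * of the allocation sizes (kobjsize) backing each mapped region.
 */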
unsigned long task_vsize(struct mm_struct *mm)
{
	struct vm_list_struct *tbp;
	unsigned long vsize = 0;

	down_read(&mm->mmap_sem);
	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
		if (tbp->vma)
			vsize += kobjsize((void *) tbp->vma->vm_start);
	}
	up_read(&mm->mmap_sem);
	return vsize;
}

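/*
 * Fill in the statm figures: the total adds up the mm, each VMA list
 * entry, the VMA structures themselves and their backing allocations;
 * text and data sizes come from the code and data boundaries.  With no
 * paging, the whole total is reported as resident (*shared is left
 * untouched here).
 */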
int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	struct vm_list_struct *tbp;
	int size = kobjsize(mm);

	down_read(&mm->mmap_sem);
	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
		size += kobjsize(tbp);
		if (tbp->vma) {
			size += kobjsize(tbp->vma);
			size += kobjsize((void *) tbp->vma->vm_start);
		}
	}

	size += (*text = mm->end_code - mm->start_code);
	size += (*data = mm->start_stack - mm->start_data);
	up_read(&mm->mmap_sem);
	*resident = size;
	return size;
}

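/*
 * Resolve the /proc/<pid>/exe link: walk the VMA list for the first
 * executable, file-backed mapping and hand back references to its
 * vfsmount and dentry.
 */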
int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
	struct vm_list_struct *vml;
	struct vm_area_struct *vma;
	struct task_struct *task = proc_task(inode);
	struct mm_struct *mm = get_task_mm(task);
	int result = -ENOENT;

	if (!mm)
		goto out;
	down_read(&mm->mmap_sem);

	vml = mm->context.vmlist;
	vma = NULL;
	while (vml) {
		if ((vml->vma->vm_flags & VM_EXECUTABLE) && vml->vma->vm_file) {
			vma = vml->vma;
			break;
		}
		vml = vml->next;
	}

	if (vma) {
		*mnt = mntget(vma->vm_file->f_vfsmnt);
		*dentry = dget(vma->vm_file->f_dentry);
		result = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);
out:
	return result;
}

/*
 * Albert D. Cahalan suggested faking entries for the traditional
 * sections here.  This might be worth investigating.
 */
static int show_map(struct seq_file *m, void *v)
{
	return 0;
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	return NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	return NULL;
}

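/*
 * With the iterator stubbed out above (m_start() returns NULL), reading
 * /proc/<pid>/maps on a !MMU kernel simply yields an empty file.
 */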
struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};