  1. /*
  2. * linux/mm/mincore.c
  3. *
  4. * Copyright (C) 1994-1999 Linus Torvalds
  5. */
  6. /*
  7. * The mincore() system call.
  8. */
  9. #include <linux/slab.h>
  10. #include <linux/pagemap.h>
  11. #include <linux/mm.h>
  12. #include <linux/mman.h>
  13. #include <linux/syscalls.h>
  14. #include <asm/uaccess.h>
  15. #include <asm/pgtable.h>
  16. /*
  17. * Later we can get more picky about what "in core" means precisely.
  18. * For now, simply check to see if the page is in the page cache,
  19. * and is up to date; i.e. that no page-in operation would be required
  20. * at this time if an application were to map and access this page.
  21. */
  22. static unsigned char mincore_page(struct vm_area_struct * vma,
  23. unsigned long pgoff)
  24. {
  25. unsigned char present = 0;
  26. struct address_space * as = vma->vm_file->f_mapping;
  27. struct page * page;
  28. page = find_get_page(as, pgoff);
  29. if (page) {
  30. present = PageUptodate(page);
  31. page_cache_release(page);
  32. }
  33. return present;
  34. }
  35. static long mincore_vma(struct vm_area_struct * vma,
  36. unsigned long start, unsigned long end, unsigned char __user * vec)
  37. {
  38. long error, i, remaining;
  39. unsigned char * tmp;
  40. error = -ENOMEM;
  41. if (!vma->vm_file)
  42. return error;
  43. start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
  44. if (end > vma->vm_end)
  45. end = vma->vm_end;
  46. end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
  47. error = -EAGAIN;
  48. tmp = (unsigned char *) __get_free_page(GFP_KERNEL);
  49. if (!tmp)
  50. return error;
  51. /* (end - start) is # of pages, and also # of bytes in "vec */
  52. remaining = (end - start),
  53. error = 0;
  54. for (i = 0; remaining > 0; remaining -= PAGE_SIZE, i++) {
  55. int j = 0;
  56. long thispiece = (remaining < PAGE_SIZE) ?
  57. remaining : PAGE_SIZE;
  58. while (j < thispiece)
  59. tmp[j++] = mincore_page(vma, start++);
  60. if (copy_to_user(vec + PAGE_SIZE * i, tmp, thispiece)) {
  61. error = -EFAULT;
  62. break;
  63. }
  64. }
  65. free_page((unsigned long) tmp);
  66. return error;
  67. }
/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *            invalid for the address space of this process, or
 *            specify one or more pages which are not currently
 *            mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
asmlinkage long sys_mincore(unsigned long start, size_t len,
	unsigned char __user * vec)
{
	int index = 0;		/* byte offset into vec; one byte per page reported */
	unsigned long end, limit;
	struct vm_area_struct * vma;
	size_t max;
	int unmapped_error = 0;	/* remembers -ENOMEM for holes inside the range */
	long error;

	/* check the arguments */
	if (start & ~PAGE_CACHE_MASK)
		goto einval;

	limit = TASK_SIZE;
	if (start >= limit)
		goto enomem;

	if (!len)
		return 0;

	max = limit - start;
	len = PAGE_CACHE_ALIGN(len);
	/* !len after aligning catches a len so large the alignment wrapped */
	if (len > max || !len)
		goto enomem;

	end = start + len;

	/* check the output buffer whilst holding the lock */
	error = -EFAULT;
	down_read(&current->mm->mmap_sem);
	/* vec holds one byte per page, hence len >> PAGE_SHIFT bytes */
	if (!access_ok(VERIFY_WRITE, vec, len >> PAGE_SHIFT))
		goto out;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 */
	error = 0;

	vma = find_vma(current->mm, start);
	while (vma) {
		/* Here start < vma->vm_end. */
		if (start < vma->vm_start) {
			/* hole before this vma: skip it, fail at the end */
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
		}

		/* Here vma->vm_start <= start < vma->vm_end. */
		if (end <= vma->vm_end) {
			/* final vma: report the tail and finish */
			if (start < end) {
				error = mincore_vma(vma, start, end,
					&vec[index]);
				if (error)
					goto out;
			}
			error = unmapped_error;
			goto out;
		}

		/* Here vma->vm_start <= start < vma->vm_end < end. */
		error = mincore_vma(vma, start, vma->vm_end, &vec[index]);
		if (error)
			goto out;
		/* advance the output cursor past this vma's pages */
		index += (vma->vm_end - start) >> PAGE_CACHE_SHIFT;
		start = vma->vm_end;
		vma = vma->vm_next;
	}

	/* we found a hole in the area queried if we arrive here */
	error = -ENOMEM;

out:
	up_read(&current->mm->mmap_sem);
	return error;

einval:
	return -EINVAL;
enomem:
	return -ENOMEM;
}