@@ -45,9 +45,58 @@ static struct backing_dev_info hugetlbfs_backing_dev_info = {
 
 int sysctl_hugetlb_shm_group;
 
+static void huge_pagevec_release(struct pagevec *pvec)
+{
+	int i;
+
+	for (i = 0; i < pagevec_count(pvec); ++i)
+		put_page(pvec->pages[i]);
+
+	pagevec_reinit(pvec);
+}
+
+/*
+ * huge_pages_needed tries to determine the number of new huge pages that
+ * will be required to fully populate this VMA. This will be equal to
+ * the size of the VMA in huge pages minus the number of huge pages
+ * (covered by this VMA) that are found in the page cache.
+ *
+ * Result is in bytes to be compatible with is_hugepage_mem_enough()
+ */
+unsigned long
+huge_pages_needed(struct address_space *mapping, struct vm_area_struct *vma)
+{
+	int i;
+	struct pagevec pvec;
+	unsigned long start = vma->vm_start;
+	unsigned long end = vma->vm_end;
+	unsigned long hugepages = (end - start) >> HPAGE_SHIFT;
+	pgoff_t next = vma->vm_pgoff;
+	pgoff_t endpg = next + ((end - start) >> PAGE_SHIFT);
+
+	pagevec_init(&pvec, 0);
+	while (next < endpg) {
+		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
+			break;
+		for (i = 0; i < pagevec_count(&pvec); i++) {
+			struct page *page = pvec.pages[i];
+			if (page->index > next)
+				next = page->index;
+			if (page->index >= endpg)
+				break;
+			next++;
+			hugepages--;
+		}
+		huge_pagevec_release(&pvec);
+	}
+	return hugepages << HPAGE_SHIFT;
+}
+
 static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct inode *inode = file->f_dentry->d_inode;
+	struct address_space *mapping = inode->i_mapping;
+	unsigned long bytes;
 	loff_t len, vma_len;
 	int ret;
 
@@ -66,6 +115,10 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
 		return -EINVAL;
 
+	bytes = huge_pages_needed(mapping, vma);
+	if (!is_hugepage_mem_enough(bytes))
+		return -ENOMEM;
+
 	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
 
 	down(&inode->i_sem);
@@ -168,16 +221,6 @@ static int hugetlbfs_commit_write(struct file *file,
 	return -EINVAL;
 }
 
-static void huge_pagevec_release(struct pagevec *pvec)
-{
-	int i;
-
-	for (i = 0; i < pagevec_count(pvec); ++i)
-		put_page(pvec->pages[i]);
-
-	pagevec_reinit(pvec);
-}
-
 static void truncate_huge_page(struct page *page)
 {
 	clear_page_dirty(page);
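
With the check above in place, hugetlbfs_file_mmap() reports a huge page
shortage up front, as ENOMEM from mmap(), based on the huge_pages_needed()
estimate. As a worked example of the accounting: with 2 MB huge pages
(HPAGE_SHIFT = 21), an 8 MB VMA spans four huge pages; if one of them is
already in the file's page cache, huge_pages_needed() returns
3 << HPAGE_SHIFT = 6 MB, and the mmap() succeeds only if
is_hugepage_mem_enough() can cover those 6 MB.

The user-space sketch below (not part of the patch) shows the visible
effect. The hugetlbfs mount point /mnt/huge, the file name, and the 2 MB
huge page size are assumptions; adjust them to the local configuration.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_SIZE	(2UL * 1024 * 1024)	/* assumed huge page size */
#define NPAGES		4			/* 8 MB mapping, as in the example */

int main(void)
{
	size_t len = NPAGES * HPAGE_SIZE;
	void *p;
	int fd;

	/* Assumed hugetlbfs mount point; configuration-dependent. */
	fd = open("/mnt/huge/demo", O_CREAT | O_RDWR, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * With the patch applied, a shortage of huge pages is reported
	 * right here, as ENOMEM from mmap(), rather than once the
	 * mapping is in use.
	 */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		fprintf(stderr, "mmap: %s\n", strerror(errno));
	else
		munmap(p, len);

	close(fd);
	unlink("/mnt/huge/demo");
	return 0;
}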