@@ -801,6 +801,7 @@ retry:
 
 		if (vma->vm_flags & VM_SHARED) {
 			int err;
+			struct inode *inode = mapping->host;
 
 			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
 			if (err) {
@@ -809,6 +810,10 @@ retry:
 					goto retry;
 				goto out;
 			}
+
+			spin_lock(&inode->i_lock);
+			inode->i_blocks += BLOCKS_PER_HUGEPAGE;
+			spin_unlock(&inode->i_lock);
 		} else
 			lock_page(page);
 	}
@@ -1160,6 +1165,11 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to)
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
 {
 	long chg = region_truncate(&inode->i_mapping->private_list, offset);
+
+	spin_lock(&inode->i_lock);
+	inode->i_blocks -= BLOCKS_PER_HUGEPAGE * freed;
+	spin_unlock(&inode->i_lock);
+
 	hugetlb_put_quota(inode->i_mapping, (chg - freed));
 	hugetlb_acct_memory(-(chg - freed));
 }
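
For context: these hunks charge inode->i_blocks by BLOCKS_PER_HUGEPAGE under inode->i_lock whenever a huge page is added to a shared mapping's page cache, and debit it when reservations are released. Since i_blocks is kept in 512-byte units (BLOCKS_PER_HUGEPAGE is presumably HPAGE_SIZE / 512), the effect is that stat(2) and tools like du start reporting hugetlbfs space usage via st_blocks. A minimal userspace sketch of the observable effect follows; the mount point and file name are assumptions, not part of the patch:

/*
 * Hypothetical demo: with i_blocks accounted, stat(2) on a hugetlbfs
 * file reports faulted-in huge pages via st_blocks (512-byte units).
 * The /dev/hugepages mount point and file name below are assumptions.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	if (stat("/dev/hugepages/testfile", &st) != 0) {
		perror("stat");
		return 1;
	}
	/* Each huge page contributes BLOCKS_PER_HUGEPAGE 512-byte blocks,
	 * e.g. 4096 blocks for a 2 MB huge page. */
	printf("st_blocks = %lld\n", (long long)st.st_blocks);
	return 0;
}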