@@ -1602,7 +1602,9 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 							 loff_t len)
 {
 	struct inode *inode = file->f_path.dentry->d_inode;
-	int error = -EOPNOTSUPP;
+	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+	pgoff_t start, index, end;
+	int error;
 
 	mutex_lock(&inode->i_mutex);
 
@@ -1617,8 +1619,65 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 		shmem_truncate_range(inode, offset, offset + len - 1);
 		/* No need to unmap again: hole-punching leaves COWed pages */
 		error = 0;
+		goto out;
 	}
 
+	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
+	error = inode_newsize_ok(inode, offset + len);
+	if (error)
+		goto out;
+
+	start = offset >> PAGE_CACHE_SHIFT;
+	end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	/* Try to avoid a swapstorm if len is impossible to satisfy */
+	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
+		error = -ENOSPC;
+		goto out;
+	}
+
+	for (index = start; index < end; index++) {
+		struct page *page;
+
+		/*
+		 * Good, the fallocate(2) manpage permits EINTR: we may have
+		 * been interrupted because we are using up too much memory.
+		 */
+		if (signal_pending(current))
+			error = -EINTR;
+		else
+			error = shmem_getpage(inode, index, &page, SGP_WRITE,
+									NULL);
+		if (error) {
+			/*
+			 * We really ought to free what we allocated so far,
+			 * but it would be wrong to free pages allocated
+			 * earlier, or already now in use: i_mutex does not
+			 * exclude all cases.  We do not know what to free.
+			 */
+			goto ctime;
+		}
+
+		if (!PageUptodate(page)) {
+			clear_highpage(page);
+			flush_dcache_page(page);
+			SetPageUptodate(page);
+		}
+		/*
+		 * set_page_dirty so that memory pressure will swap rather
+		 * than free the pages we are allocating (and SGP_CACHE pages
+		 * might still be clean: we now need to mark those dirty too).
+		 */
+		set_page_dirty(page);
+		unlock_page(page);
+		page_cache_release(page);
+		cond_resched();
+	}
+
+	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
+		i_size_write(inode, offset + len);
+ctime:
+	inode->i_ctime = CURRENT_TIME;
+out:
 	mutex_unlock(&inode->i_mutex);
 	return error;
 }
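
For reference, a minimal userspace sketch that would exercise both branches of shmem_fallocate() above: a plain fallocate() call takes the preallocation loop through shmem_getpage(), while PUNCH_HOLE|KEEP_SIZE takes the shmem_truncate_range() path. The /dev/shm path, file name and sizes here are illustrative assumptions, not part of the patch.

/* Hypothetical test program; assumes a tmpfs mount at /dev/shm. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/shm/fallocate-test", O_RDWR | O_CREAT | O_TRUNC, 0600);
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/* Preallocate 4MB: extends i_size since FALLOC_FL_KEEP_SIZE is not set */
	if (fallocate(fd, 0, 0, 4 << 20) < 0)
		perror("fallocate(preallocate)");

	/* Punch a 1MB hole at offset 1MB: i_size is left untouched */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      1 << 20, 1 << 20) < 0)
		perror("fallocate(punch hole)");

	close(fd);
	unlink("/dev/shm/fallocate-test");
	return EXIT_SUCCESS;
}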