|
@@ -1846,13 +1846,8 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
|
|
if (block_start >= to)
|
|
if (block_start >= to)
|
|
break;
|
|
break;
|
|
if (buffer_new(bh)) {
|
|
if (buffer_new(bh)) {
|
|
- void *kaddr;
|
|
|
|
-
|
|
|
|
clear_buffer_new(bh);
|
|
clear_buffer_new(bh);
|
|
- kaddr = kmap_atomic(page, KM_USER0);
|
|
|
|
- memset(kaddr+block_start, 0, bh->b_size);
|
|
|
|
- flush_dcache_page(page);
|
|
|
|
- kunmap_atomic(kaddr, KM_USER0);
|
|
|
|
|
|
+ zero_user_page(page, block_start, bh->b_size, KM_USER0);
|
|
set_buffer_uptodate(bh);
|
|
set_buffer_uptodate(bh);
|
|
mark_buffer_dirty(bh);
|
|
mark_buffer_dirty(bh);
|
|
}
|
|
}
|
|
@@ -1940,10 +1935,8 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
|
|
SetPageError(page);
|
|
SetPageError(page);
|
|
}
|
|
}
|
|
if (!buffer_mapped(bh)) {
|
|
if (!buffer_mapped(bh)) {
|
|
- void *kaddr = kmap_atomic(page, KM_USER0);
|
|
|
|
- memset(kaddr + i * blocksize, 0, blocksize);
|
|
|
|
- flush_dcache_page(page);
|
|
|
|
- kunmap_atomic(kaddr, KM_USER0);
|
|
|
|
|
|
+ zero_user_page(page, i * blocksize, blocksize,
|
|
|
|
+ KM_USER0);
|
|
if (!err)
|
|
if (!err)
|
|
set_buffer_uptodate(bh);
|
|
set_buffer_uptodate(bh);
|
|
continue;
|
|
continue;
|
|
@@ -2086,7 +2079,6 @@ int cont_prepare_write(struct page *page, unsigned offset,
|
|
long status;
|
|
long status;
|
|
unsigned zerofrom;
|
|
unsigned zerofrom;
|
|
unsigned blocksize = 1 << inode->i_blkbits;
|
|
unsigned blocksize = 1 << inode->i_blkbits;
|
|
- void *kaddr;
|
|
|
|
|
|
|
|
while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
|
|
while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
|
|
status = -ENOMEM;
|
|
status = -ENOMEM;
|
|
@@ -2108,10 +2100,8 @@ int cont_prepare_write(struct page *page, unsigned offset,
|
|
PAGE_CACHE_SIZE, get_block);
|
|
PAGE_CACHE_SIZE, get_block);
|
|
if (status)
|
|
if (status)
|
|
goto out_unmap;
|
|
goto out_unmap;
|
|
- kaddr = kmap_atomic(new_page, KM_USER0);
|
|
|
|
- memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
|
|
|
|
- flush_dcache_page(new_page);
|
|
|
|
- kunmap_atomic(kaddr, KM_USER0);
|
|
|
|
|
|
+ zero_user_page(new_page, zerofrom, PAGE_CACHE_SIZE - zerofrom,
|


|


+ KM_USER0);
|
|
generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
|
|
generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
|
|
unlock_page(new_page);
|
|
unlock_page(new_page);
|
|
page_cache_release(new_page);
|
|
page_cache_release(new_page);
|
|
@@ -2138,10 +2128,7 @@ int cont_prepare_write(struct page *page, unsigned offset,
|
|
if (status)
|
|
if (status)
|
|
goto out1;
|
|
goto out1;
|
|
if (zerofrom < offset) {
|
|
if (zerofrom < offset) {
|
|
- kaddr = kmap_atomic(page, KM_USER0);
|
|
|
|
- memset(kaddr+zerofrom, 0, offset-zerofrom);
|
|
|
|
- flush_dcache_page(page);
|
|
|
|
- kunmap_atomic(kaddr, KM_USER0);
|
|
|
|
|
|
+ zero_user_page(page, zerofrom, offset - zerofrom, KM_USER0);
|
|
__block_commit_write(inode, page, zerofrom, offset);
|
|
__block_commit_write(inode, page, zerofrom, offset);
|
|
}
|
|
}
|
|
return 0;
|
|
return 0;
|
|
@@ -2340,10 +2327,7 @@ failed:
|
|
* Error recovery is pretty slack. Clear the page and mark it dirty
|
|
* Error recovery is pretty slack. Clear the page and mark it dirty
|
|
* so we'll later zero out any blocks which _were_ allocated.
|
|
* so we'll later zero out any blocks which _were_ allocated.
|
|
*/
|
|
*/
|
|
- kaddr = kmap_atomic(page, KM_USER0);
|
|
|
|
- memset(kaddr, 0, PAGE_CACHE_SIZE);
|
|
|
|
- flush_dcache_page(page);
|
|
|
|
- kunmap_atomic(kaddr, KM_USER0);
|
|
|
|
|
|
+ zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
|
|
SetPageUptodate(page);
|
|
SetPageUptodate(page);
|
|
set_page_dirty(page);
|
|
set_page_dirty(page);
|
|
return ret;
|
|
return ret;
|
|
@@ -2382,7 +2366,6 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
|
|
loff_t i_size = i_size_read(inode);
|
|
loff_t i_size = i_size_read(inode);
|
|
const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
|
|
const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
|
|
unsigned offset;
|
|
unsigned offset;
|
|
- void *kaddr;
|
|
|
|
int ret;
|
|
int ret;
|
|
|
|
|
|
/* Is the page fully inside i_size? */
|
|
/* Is the page fully inside i_size? */
|
|
@@ -2413,10 +2396,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
|
|
* the page size, the remaining memory is zeroed when mapped, and
|
|
* the page size, the remaining memory is zeroed when mapped, and
|
|
* writes to that region are not written out to the file."
|
|
* writes to that region are not written out to the file."
|
|
*/
|
|
*/
|
|
- kaddr = kmap_atomic(page, KM_USER0);
|
|
|
|
- memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
|
|
|
|
- flush_dcache_page(page);
|
|
|
|
- kunmap_atomic(kaddr, KM_USER0);
|
|
|
|
|
|
+ zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
|
|
out:
|
|
out:
|
|
ret = mpage_writepage(page, get_block, wbc);
|
|
ret = mpage_writepage(page, get_block, wbc);
|
|
if (ret == -EAGAIN)
|
|
if (ret == -EAGAIN)
|
|
@@ -2437,7 +2417,6 @@ int nobh_truncate_page(struct address_space *mapping, loff_t from)
|
|
unsigned to;
|
|
unsigned to;
|
|
struct page *page;
|
|
struct page *page;
|
|
const struct address_space_operations *a_ops = mapping->a_ops;
|
|
const struct address_space_operations *a_ops = mapping->a_ops;
|
|
- char *kaddr;
|
|
|
|
int ret = 0;
|
|
int ret = 0;
|
|
|
|
|
|
if ((offset & (blocksize - 1)) == 0)
|
|
if ((offset & (blocksize - 1)) == 0)
|
|
@@ -2451,10 +2430,8 @@ int nobh_truncate_page(struct address_space *mapping, loff_t from)
|
|
to = (offset + blocksize) & ~(blocksize - 1);
|
|
to = (offset + blocksize) & ~(blocksize - 1);
|
|
ret = a_ops->prepare_write(NULL, page, offset, to);
|
|
ret = a_ops->prepare_write(NULL, page, offset, to);
|
|
if (ret == 0) {
|
|
if (ret == 0) {
|
|
- kaddr = kmap_atomic(page, KM_USER0);
|
|
|
|
- memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
|
|
|
|
- flush_dcache_page(page);
|
|
|
|
- kunmap_atomic(kaddr, KM_USER0);
|
|
|
|
|
|
+ zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
|
|
|
|
+ KM_USER0);
|
|
/*
|
|
/*
|
|
* It would be more correct to call aops->commit_write()
|
|
* It would be more correct to call aops->commit_write()
|
|
* here, but this is more efficient.
|
|
* here, but this is more efficient.
|
|
@@ -2480,7 +2457,6 @@ int block_truncate_page(struct address_space *mapping,
|
|
struct inode *inode = mapping->host;
|
|
struct inode *inode = mapping->host;
|
|
struct page *page;
|
|
struct page *page;
|
|
struct buffer_head *bh;
|
|
struct buffer_head *bh;
|
|
- void *kaddr;
|
|
|
|
int err;
|
|
int err;
|
|
|
|
|
|
blocksize = 1 << inode->i_blkbits;
|
|
blocksize = 1 << inode->i_blkbits;
|
|
@@ -2534,11 +2510,7 @@ int block_truncate_page(struct address_space *mapping,
|
|
goto unlock;
|
|
goto unlock;
|
|
}
|
|
}
|
|
|
|
|
|
- kaddr = kmap_atomic(page, KM_USER0);
|
|
|
|
- memset(kaddr + offset, 0, length);
|
|
|
|
- flush_dcache_page(page);
|
|
|
|
- kunmap_atomic(kaddr, KM_USER0);
|
|
|
|
-
|
|
|
|
|
|
+ zero_user_page(page, offset, length, KM_USER0);
|
|
mark_buffer_dirty(bh);
|
|
mark_buffer_dirty(bh);
|
|
err = 0;
|
|
err = 0;
|
|
|
|
|
|
@@ -2559,7 +2531,6 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
|
|
loff_t i_size = i_size_read(inode);
|
|
loff_t i_size = i_size_read(inode);
|
|
const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
|
|
const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
|
|
unsigned offset;
|
|
unsigned offset;
|
|
- void *kaddr;
|
|
|
|
|
|
|
|
/* Is the page fully inside i_size? */
|
|
/* Is the page fully inside i_size? */
|
|
if (page->index < end_index)
|
|
if (page->index < end_index)
|
|
@@ -2585,10 +2556,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
|
|
* the page size, the remaining memory is zeroed when mapped, and
|
|
* the page size, the remaining memory is zeroed when mapped, and
|
|
* writes to that region are not written out to the file."
|
|
* writes to that region are not written out to the file."
|
|
*/
|
|
*/
|
|
- kaddr = kmap_atomic(page, KM_USER0);
|
|
|
|
- memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
|
|
|
|
- flush_dcache_page(page);
|
|
|
|
- kunmap_atomic(kaddr, KM_USER0);
|
|
|
|
|
|
+ zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
|
|
return __block_write_full_page(inode, page, get_block, wbc);
|
|
return __block_write_full_page(inode, page, get_block, wbc);
|
|
}
|
|
}
|
|
|
|
|