reiserfs: use zero_user_page

Use zero_user_page() instead of open-coding it.

Signed-off-by: Nate Diller <nate.diller@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Nate Diller <nate.diller@gmail.com>
Date: 2007-05-09 02:35:09 -07:00
Committed by: Linus Torvalds
parent 0c11d7a9e9
commit f2fff59695
2 changed files with 13 additions and 39 deletions

View File: fs/reiserfs/file.c

@@ -1059,20 +1059,12 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode
 	   maping blocks, since there is none, so we just zero out remaining
 	   parts of first and last pages in write area (if needed) */
 	if ((pos & ~((loff_t) PAGE_CACHE_SIZE - 1)) > inode->i_size) {
-		if (from != 0) {	/* First page needs to be partially zeroed */
-			char *kaddr = kmap_atomic(prepared_pages[0], KM_USER0);
-			memset(kaddr, 0, from);
-			kunmap_atomic(kaddr, KM_USER0);
-			flush_dcache_page(prepared_pages[0]);
-		}
-		if (to != PAGE_CACHE_SIZE) {	/* Last page needs to be partially zeroed */
-			char *kaddr =
-			    kmap_atomic(prepared_pages[num_pages - 1],
-					KM_USER0);
-			memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
-			kunmap_atomic(kaddr, KM_USER0);
-			flush_dcache_page(prepared_pages[num_pages - 1]);
-		}
+		if (from != 0)		/* First page needs to be partially zeroed */
+			zero_user_page(prepared_pages[0], 0, from, KM_USER0);
+
+		if (to != PAGE_CACHE_SIZE)	/* Last page needs to be partially zeroed */
+			zero_user_page(prepared_pages[num_pages-1], to,
+					PAGE_CACHE_SIZE - to, KM_USER0);
 
 		/* Since all blocks are new - use already calculated value */
 		return blocks;
@@ -1199,13 +1191,9 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode
 				ll_rw_block(READ, 1, &bh);
 				*wait_bh++ = bh;
 			} else {	/* Not mapped, zero it */
-				char *kaddr =
-				    kmap_atomic(prepared_pages[0],
-						KM_USER0);
-				memset(kaddr + block_start, 0,
-				       from - block_start);
-				kunmap_atomic(kaddr, KM_USER0);
-				flush_dcache_page(prepared_pages[0]);
+				zero_user_page(prepared_pages[0],
+					       block_start,
+					       from - block_start, KM_USER0);
 				set_buffer_uptodate(bh);
 			}
 		}
@@ -1237,13 +1225,8 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode
 				ll_rw_block(READ, 1, &bh);
 				*wait_bh++ = bh;
 			} else {	/* Not mapped, zero it */
-				char *kaddr =
-				    kmap_atomic(prepared_pages
-						[num_pages - 1],
-						KM_USER0);
-				memset(kaddr + to, 0, block_end - to);
-				kunmap_atomic(kaddr, KM_USER0);
-				flush_dcache_page(prepared_pages[num_pages - 1]);
+				zero_user_page(prepared_pages[num_pages-1],
+					       to, block_end - to, KM_USER0);
 				set_buffer_uptodate(bh);
 			}
 		}

View File: fs/reiserfs/inode.c

@@ -2148,13 +2148,8 @@ int reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps)
 		length = offset & (blocksize - 1);
 		/* if we are not on a block boundary */
 		if (length) {
-			char *kaddr;
-
 			length = blocksize - length;
-			kaddr = kmap_atomic(page, KM_USER0);
-			memset(kaddr + offset, 0, length);
-			flush_dcache_page(page);
-			kunmap_atomic(kaddr, KM_USER0);
+			zero_user_page(page, offset, length, KM_USER0);
 			if (buffer_mapped(bh) && bh->b_blocknr != 0) {
 				mark_buffer_dirty(bh);
 			}
@@ -2370,7 +2365,6 @@ static int reiserfs_write_full_page(struct page *page,
 	 ** last byte in the file
 	 */
 	if (page->index >= end_index) {
-		char *kaddr;
 		unsigned last_offset;
 
 		last_offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
@@ -2379,10 +2373,7 @@ static int reiserfs_write_full_page(struct page *page,
 			unlock_page(page);
 			return 0;
 		}
-		kaddr = kmap_atomic(page, KM_USER0);
-		memset(kaddr + last_offset, 0, PAGE_CACHE_SIZE - last_offset);
-		flush_dcache_page(page);
-		kunmap_atomic(kaddr, KM_USER0);
+		zero_user_page(page, last_offset, PAGE_CACHE_SIZE - last_offset, KM_USER0);
 	}
 	bh = head;
 	block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits);