Btrfs: walk compressed pages based on the nr_pages count instead of bytes

The byte walk counting was awkward and error prone.  This uses the
number of pages sent by the higher layer to build bios.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Chris Mason 2008-10-30 13:22:14 -04:00
parent 87ef2bb46b
commit cfbc246eaa
2 changed files with 6 additions and 3 deletions
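For readers skimming the diff below, here is a minimal user-space sketch of the pattern the patch moves to: drive the loop with the page count and keep the byte counter only as bookkeeping for the final, partially filled page. Everything in it (PAGE_SIZE_SIM, struct compressed_bio_sim, walk_by_page_count) is an illustrative stand-in, not kernel code.

/*
 * Illustration only: PAGE_SIZE_SIM, struct compressed_bio_sim and
 * walk_by_page_count() are stand-ins, not kernel definitions.
 */
#include <stdio.h>

#define PAGE_SIZE_SIM 4096UL

struct compressed_bio_sim {
        unsigned long compressed_len;   /* total compressed bytes */
        unsigned long nr_pages;         /* pages backing compressed_len */
};

static void walk_by_page_count(const struct compressed_bio_sim *cb)
{
        unsigned long bytes_left = cb->compressed_len;
        unsigned long page_index;

        /* the loop bound is the page count, not the byte counter */
        for (page_index = 0; page_index < cb->nr_pages; page_index++) {
                if (bytes_left < PAGE_SIZE_SIM)
                        printf("final page holds %lu of %lu bytes\n",
                               bytes_left, cb->compressed_len);
                /* bookkeeping only; it no longer decides when to stop */
                bytes_left -= PAGE_SIZE_SIM;
        }
}

int main(void)
{
        /* three pages, the last only partially filled */
        struct compressed_bio_sim cb = { .compressed_len = 10000, .nr_pages = 3 };

        walk_by_page_count(&cb);
        return 0;
}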

fs/btrfs/compression.c

@@ -296,7 +296,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
         /* create and submit bios for the compressed pages */
         bytes_left = compressed_len;
-        while(bytes_left > 0) {
+        for (page_index = 0; page_index < cb->nr_pages; page_index++) {
                 page = compressed_pages[page_index];
                 page->mapping = inode->i_mapping;
                 if (bio->bi_size)
@@ -324,7 +324,10 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
                         bio->bi_end_io = end_compressed_bio_write;
                         bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
                 }
-                page_index++;
+                if (bytes_left < PAGE_CACHE_SIZE) {
+                        printk("bytes left %lu compress len %lu nr %lu\n",
+                               bytes_left, cb->compressed_len, cb->nr_pages);
+                }
                 bytes_left -= PAGE_CACHE_SIZE;
                 first_byte += PAGE_CACHE_SIZE;
         }
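The new printk should only ever fire for the last page, since only the final page of a compressed extent can hold fewer than PAGE_CACHE_SIZE bytes. A tiny stand-alone calculation (illustrative values, not taken from the patch; PAGE_SIZE_SIM stands in for PAGE_CACHE_SIZE) shows the relationship:

#include <stdio.h>

#define PAGE_SIZE_SIM 4096UL

int main(void)
{
        unsigned long compressed_len = 10000;
        /* round up to the number of pages needed to hold compressed_len */
        unsigned long nr_pages =
                (compressed_len + PAGE_SIZE_SIM - 1) / PAGE_SIZE_SIM;
        unsigned long last_page_bytes =
                compressed_len - (nr_pages - 1) * PAGE_SIZE_SIM;

        /* prints: compressed_len=10000 nr_pages=3 last_page_bytes=1808 */
        printf("compressed_len=%lu nr_pages=%lu last_page_bytes=%lu\n",
               compressed_len, nr_pages, last_page_bytes);
        return 0;
}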

fs/btrfs/inode.c

@@ -338,7 +338,7 @@ again:
         if (!btrfs_test_flag(inode, NOCOMPRESS) &&
             btrfs_test_opt(root, COMPRESS)) {
                 WARN_ON(pages);
-                pages = kmalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
+                pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
                 /* we want to make sure the amount of IO required to satisfy
                  * a random read is reasonably small, so we limit the size
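The kmalloc-to-kzalloc change is not explained in the message, but the usual motivation is that a zero-filled pointer array lets cleanup and error paths free every slot unconditionally, since unused entries stay NULL. A user-space analogy with calloc/malloc (not the kernel code) of that pattern:

#include <stdlib.h>

int main(void)
{
        unsigned long nr_pages = 8;
        /* calloc zeroes the array, the way kzalloc does in the patch */
        void **pages = calloc(nr_pages, sizeof(void *));
        unsigned long i;

        if (!pages)
                return 1;

        /* pretend page allocation stopped partway: only 3 slots filled */
        for (i = 0; i < 3; i++)
                pages[i] = malloc(16);

        /* cleanup can free every slot unconditionally; free(NULL) is a no-op */
        for (i = 0; i < nr_pages; i++)
                free(pages[i]);
        free(pages);
        return 0;
}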