Merge branch 'ib/4.17-bitmap' into next

Bring in bitmap API improvements.

commit 5d81a787bd
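The hunks that follow are a mechanical rename of the MD-internal bitmap API from bitmap_* to md_bitmap_* (and, in the device-mapper persistent-data code, of a few static bitmap_* helpers to dm_bitmap_*); signatures and behaviour are unchanged, only the prefix is new, apparently so the names stop clashing with the generic bitmap_* helpers from <linux/bitmap.h>. As a minimal caller-side sketch of what the rename means in practice, the helper setup_md_bitmap() below is hypothetical (not part of this merge) and simply mirrors the create/load/destroy pattern visible in the md.c hunks later in the diff:

#include "md.h"        /* struct mddev */
#include "md-bitmap.h" /* md_bitmap_create(), md_bitmap_load(), md_bitmap_destroy() */

/* Hypothetical helper: create and load the write-intent bitmap for an array.
 * Before this merge the calls were bitmap_create()/bitmap_load()/bitmap_destroy();
 * only the md_ prefix changes, the arguments and return values are the same. */
static int setup_md_bitmap(struct mddev *mddev)
{
	struct bitmap *bitmap;
	int err;

	bitmap = md_bitmap_create(mddev, -1);	/* -1: not a clustered slot */
	if (IS_ERR(bitmap))
		return PTR_ERR(bitmap);

	mddev->bitmap = bitmap;
	err = md_bitmap_load(mddev);
	if (err)
		md_bitmap_destroy(mddev);	/* tears down mddev->bitmap again */

	return err;
}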
@@ -3859,7 +3859,7 @@ static int __load_dirty_region_bitmap(struct raid_set *rs)
 	/* Try loading the bitmap unless "raid0", which does not have one */
 	if (!rs_is_raid0(rs) &&
 	    !test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
-		r = bitmap_load(&rs->md);
+		r = md_bitmap_load(&rs->md);
 		if (r)
 			DMERR("Failed to load bitmap");
 	}

@@ -3987,8 +3987,8 @@ static int raid_preresume(struct dm_target *ti)
 	/* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
 	if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
 	    mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
-		r = bitmap_resize(mddev->bitmap, mddev->dev_sectors,
-				  to_bytes(rs->requested_bitmap_chunk_sectors), 0);
+		r = md_bitmap_resize(mddev->bitmap, mddev->dev_sectors,
+				     to_bytes(rs->requested_bitmap_chunk_sectors), 0);
 		if (r)
 			DMERR("Failed to resize bitmap");
 	}
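The second hunk above only resizes the MD bitmap when the requested region size no longer matches the chunk size the bitmap is currently using; to_bytes() is the device-mapper helper that converts a sector count into bytes, which is what makes the two quantities comparable. A minimal sketch of that check, where region_size_changed() and its parameters are hypothetical and only the conversion and comparison mirror the hunk:

#include <linux/device-mapper.h>	/* to_bytes(): sectors -> bytes */

/* Hypothetical helper mirroring the raid_preresume() test above: the bitmap
 * stores its granularity (chunksize) in bytes, while dm-raid tracks the
 * requested region size in 512-byte sectors. */
static bool region_size_changed(unsigned long bitmap_chunksize_bytes,
				sector_t requested_region_sectors)
{
	return bitmap_chunksize_bytes != to_bytes(requested_region_sectors);
}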
@@ -46,8 +46,8 @@ static inline char *bmname(struct bitmap *bitmap)
  * if we find our page, we increment the page's refcount so that it stays
  * allocated while we're using it
  */
-static int bitmap_checkpage(struct bitmap_counts *bitmap,
-			    unsigned long page, int create, int no_hijack)
+static int md_bitmap_checkpage(struct bitmap_counts *bitmap,
+			       unsigned long page, int create, int no_hijack)
 __releases(bitmap->lock)
 __acquires(bitmap->lock)
 {

@@ -115,7 +115,7 @@ __acquires(bitmap->lock)
 /* if page is completely empty, put it back on the free list, or dealloc it */
 /* if page was hijacked, unmark the flag so it might get alloced next time */
 /* Note: lock should be held when calling this */
-static void bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
+static void md_bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
 {
 	char *ptr;
|
@ -280,7 +280,7 @@ restart:
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void bitmap_file_kick(struct bitmap *bitmap);
|
||||
static void md_bitmap_file_kick(struct bitmap *bitmap);
|
||||
/*
|
||||
* write out a page to a file
|
||||
*/
|
||||
|
@ -310,7 +310,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
|
|||
atomic_read(&bitmap->pending_writes)==0);
|
||||
}
|
||||
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
|
||||
bitmap_file_kick(bitmap);
|
||||
md_bitmap_file_kick(bitmap);
|
||||
}
|
||||
|
||||
static void end_bitmap_write(struct buffer_head *bh, int uptodate)
|
||||
|
@ -421,11 +421,11 @@ out:
|
|||
*/
|
||||
|
||||
/*
|
||||
* bitmap_wait_writes() should be called before writing any bitmap
|
||||
* md_bitmap_wait_writes() should be called before writing any bitmap
|
||||
* blocks, to ensure previous writes, particularly from
|
||||
* bitmap_daemon_work(), have completed.
|
||||
* md_bitmap_daemon_work(), have completed.
|
||||
*/
|
||||
static void bitmap_wait_writes(struct bitmap *bitmap)
|
||||
static void md_bitmap_wait_writes(struct bitmap *bitmap)
|
||||
{
|
||||
if (bitmap->storage.file)
|
||||
wait_event(bitmap->write_wait,
|
||||
|
@ -443,7 +443,7 @@ static void bitmap_wait_writes(struct bitmap *bitmap)
|
|||
|
||||
|
||||
/* update the event counter and sync the superblock to disk */
|
||||
void bitmap_update_sb(struct bitmap *bitmap)
|
||||
void md_bitmap_update_sb(struct bitmap *bitmap)
|
||||
{
|
||||
bitmap_super_t *sb;
|
||||
|
||||
|
@ -476,10 +476,10 @@ void bitmap_update_sb(struct bitmap *bitmap)
|
|||
kunmap_atomic(sb);
|
||||
write_page(bitmap, bitmap->storage.sb_page, 1);
|
||||
}
|
||||
EXPORT_SYMBOL(bitmap_update_sb);
|
||||
EXPORT_SYMBOL(md_bitmap_update_sb);
|
||||
|
||||
/* print out the bitmap file superblock */
|
||||
void bitmap_print_sb(struct bitmap *bitmap)
|
||||
void md_bitmap_print_sb(struct bitmap *bitmap)
|
||||
{
|
||||
bitmap_super_t *sb;
|
||||
|
||||
|
@ -518,7 +518,7 @@ void bitmap_print_sb(struct bitmap *bitmap)
|
|||
*
|
||||
* Returns: 0 on success, -Exxx on error
|
||||
*/
|
||||
static int bitmap_new_disk_sb(struct bitmap *bitmap)
|
||||
static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
|
||||
{
|
||||
bitmap_super_t *sb;
|
||||
unsigned long chunksize, daemon_sleep, write_behind;
|
||||
|
@ -577,7 +577,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
|
|||
}
|
||||
|
||||
/* read the superblock from the bitmap file and initialize some bitmap fields */
|
||||
static int bitmap_read_sb(struct bitmap *bitmap)
|
||||
static int md_bitmap_read_sb(struct bitmap *bitmap)
|
||||
{
|
||||
char *reason = NULL;
|
||||
bitmap_super_t *sb;
|
||||
|
@ -727,7 +727,7 @@ out_no_sb:
|
|||
bitmap->mddev->bitmap_info.space > sectors_reserved)
|
||||
bitmap->mddev->bitmap_info.space = sectors_reserved;
|
||||
if (err) {
|
||||
bitmap_print_sb(bitmap);
|
||||
md_bitmap_print_sb(bitmap);
|
||||
if (bitmap->cluster_slot < 0)
|
||||
md_cluster_stop(bitmap->mddev);
|
||||
}
|
||||
|
@ -774,9 +774,9 @@ static inline struct page *filemap_get_page(struct bitmap_storage *store,
|
|||
return store->filemap[file_page_index(store, chunk)];
|
||||
}
|
||||
|
||||
static int bitmap_storage_alloc(struct bitmap_storage *store,
|
||||
unsigned long chunks, int with_super,
|
||||
int slot_number)
|
||||
static int md_bitmap_storage_alloc(struct bitmap_storage *store,
|
||||
unsigned long chunks, int with_super,
|
||||
int slot_number)
|
||||
{
|
||||
int pnum, offset = 0;
|
||||
unsigned long num_pages;
|
||||
|
@ -830,7 +830,7 @@ static int bitmap_storage_alloc(struct bitmap_storage *store,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void bitmap_file_unmap(struct bitmap_storage *store)
|
||||
static void md_bitmap_file_unmap(struct bitmap_storage *store)
|
||||
{
|
||||
struct page **map, *sb_page;
|
||||
int pages;
|
||||
|
@ -862,12 +862,12 @@ static void bitmap_file_unmap(struct bitmap_storage *store)
|
|||
* then it is no longer reliable, so we stop using it and we mark the file
|
||||
* as failed in the superblock
|
||||
*/
|
||||
static void bitmap_file_kick(struct bitmap *bitmap)
|
||||
static void md_bitmap_file_kick(struct bitmap *bitmap)
|
||||
{
|
||||
char *path, *ptr = NULL;
|
||||
|
||||
if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) {
|
||||
bitmap_update_sb(bitmap);
|
||||
md_bitmap_update_sb(bitmap);
|
||||
|
||||
if (bitmap->storage.file) {
|
||||
path = kmalloc(PAGE_SIZE, GFP_KERNEL);
|
||||
|
@ -923,7 +923,7 @@ static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum,
|
|||
* we set the bit immediately, then we record the page number so that
|
||||
* when an unplug occurs, we can flush the dirty pages out to disk
|
||||
*/
|
||||
static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
|
||||
static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
|
||||
{
|
||||
unsigned long bit;
|
||||
struct page *page;
|
||||
|
@ -952,7 +952,7 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
|
|||
set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_DIRTY);
|
||||
}
|
||||
|
||||
static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
|
||||
static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
|
||||
{
|
||||
unsigned long bit;
|
||||
struct page *page;
|
||||
|
@ -980,7 +980,7 @@ static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
|
|||
}
|
||||
}
|
||||
|
||||
static int bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
|
||||
static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
|
||||
{
|
||||
unsigned long bit;
|
||||
struct page *page;
|
||||
|
@ -1005,7 +1005,7 @@ static int bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
|
|||
/* this gets called when the md device is ready to unplug its underlying
|
||||
* (slave) device queues -- before we let any writes go down, we need to
|
||||
* sync the dirty pages of the bitmap file to disk */
|
||||
void bitmap_unplug(struct bitmap *bitmap)
|
||||
void md_bitmap_unplug(struct bitmap *bitmap)
|
||||
{
|
||||
unsigned long i;
|
||||
int dirty, need_write;
|
||||
|
@ -1025,7 +1025,7 @@ void bitmap_unplug(struct bitmap *bitmap)
|
|||
BITMAP_PAGE_NEEDWRITE);
|
||||
if (dirty || need_write) {
|
||||
if (!writing) {
|
||||
bitmap_wait_writes(bitmap);
|
||||
md_bitmap_wait_writes(bitmap);
|
||||
if (bitmap->mddev->queue)
|
||||
blk_add_trace_msg(bitmap->mddev->queue,
|
||||
"md bitmap_unplug");
|
||||
|
@ -1036,14 +1036,14 @@ void bitmap_unplug(struct bitmap *bitmap)
|
|||
}
|
||||
}
|
||||
if (writing)
|
||||
bitmap_wait_writes(bitmap);
|
||||
md_bitmap_wait_writes(bitmap);
|
||||
|
||||
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
|
||||
bitmap_file_kick(bitmap);
|
||||
md_bitmap_file_kick(bitmap);
|
||||
}
|
||||
EXPORT_SYMBOL(bitmap_unplug);
|
||||
EXPORT_SYMBOL(md_bitmap_unplug);
|
||||
|
||||
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
|
||||
static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
|
||||
/* * bitmap_init_from_disk -- called at bitmap_create time to initialize
|
||||
* the in-memory bitmap from the on-disk bitmap -- also, sets up the
|
||||
* memory mapping of the bitmap file
|
||||
|
@ -1055,7 +1055,7 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
|
|||
* We ignore all bits for sectors that end earlier than 'start'.
|
||||
* This is used when reading an out-of-date bitmap...
|
||||
*/
|
||||
static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
|
||||
static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
|
||||
{
|
||||
unsigned long i, chunks, index, oldindex, bit, node_offset = 0;
|
||||
struct page *page = NULL;
|
||||
|
@ -1078,9 +1078,9 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
|
|||
/* if the disk bit is set, set the memory bit */
|
||||
int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift)
|
||||
>= start);
|
||||
bitmap_set_memory_bits(bitmap,
|
||||
(sector_t)i << bitmap->counts.chunkshift,
|
||||
needed);
|
||||
md_bitmap_set_memory_bits(bitmap,
|
||||
(sector_t)i << bitmap->counts.chunkshift,
|
||||
needed);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -1159,9 +1159,9 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
|
|||
/* if the disk bit is set, set the memory bit */
|
||||
int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift
|
||||
>= start);
|
||||
bitmap_set_memory_bits(bitmap,
|
||||
(sector_t)i << bitmap->counts.chunkshift,
|
||||
needed);
|
||||
md_bitmap_set_memory_bits(bitmap,
|
||||
(sector_t)i << bitmap->counts.chunkshift,
|
||||
needed);
|
||||
bit_cnt++;
|
||||
}
|
||||
offset = 0;
|
||||
|
@ -1179,7 +1179,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
|
|||
return ret;
|
||||
}
|
||||
|
||||
void bitmap_write_all(struct bitmap *bitmap)
|
||||
void md_bitmap_write_all(struct bitmap *bitmap)
|
||||
{
|
||||
/* We don't actually write all bitmap blocks here,
|
||||
* just flag them as needing to be written
|
||||
|
@ -1198,16 +1198,16 @@ void bitmap_write_all(struct bitmap *bitmap)
|
|||
bitmap->allclean = 0;
|
||||
}
|
||||
|
||||
static void bitmap_count_page(struct bitmap_counts *bitmap,
|
||||
sector_t offset, int inc)
|
||||
static void md_bitmap_count_page(struct bitmap_counts *bitmap,
|
||||
sector_t offset, int inc)
|
||||
{
|
||||
sector_t chunk = offset >> bitmap->chunkshift;
|
||||
unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
|
||||
bitmap->bp[page].count += inc;
|
||||
bitmap_checkfree(bitmap, page);
|
||||
md_bitmap_checkfree(bitmap, page);
|
||||
}
|
||||
|
||||
static void bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
|
||||
static void md_bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
|
||||
{
|
||||
sector_t chunk = offset >> bitmap->chunkshift;
|
||||
unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
|
||||
|
@ -1217,16 +1217,16 @@ static void bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
|
|||
bp->pending = 1;
|
||||
}
|
||||
|
||||
static bitmap_counter_t *bitmap_get_counter(struct bitmap_counts *bitmap,
|
||||
sector_t offset, sector_t *blocks,
|
||||
int create);
|
||||
static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
|
||||
sector_t offset, sector_t *blocks,
|
||||
int create);
|
||||
|
||||
/*
|
||||
* bitmap daemon -- periodically wakes up to clean bits and flush pages
|
||||
* out to disk
|
||||
*/
|
||||
|
||||
void bitmap_daemon_work(struct mddev *mddev)
|
||||
void md_bitmap_daemon_work(struct mddev *mddev)
|
||||
{
|
||||
struct bitmap *bitmap;
|
||||
unsigned long j;
|
||||
|
@ -1301,10 +1301,8 @@ void bitmap_daemon_work(struct mddev *mddev)
|
|||
}
|
||||
counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0;
|
||||
}
|
||||
bmc = bitmap_get_counter(counts,
|
||||
block,
|
||||
&blocks, 0);
|
||||
|
||||
bmc = md_bitmap_get_counter(counts, block, &blocks, 0);
|
||||
if (!bmc) {
|
||||
j |= PAGE_COUNTER_MASK;
|
||||
continue;
|
||||
|
@ -1312,17 +1310,17 @@ void bitmap_daemon_work(struct mddev *mddev)
|
|||
if (*bmc == 1 && !bitmap->need_sync) {
|
||||
/* We can clear the bit */
|
||||
*bmc = 0;
|
||||
bitmap_count_page(counts, block, -1);
|
||||
bitmap_file_clear_bit(bitmap, block);
|
||||
md_bitmap_count_page(counts, block, -1);
|
||||
md_bitmap_file_clear_bit(bitmap, block);
|
||||
} else if (*bmc && *bmc <= 2) {
|
||||
*bmc = 1;
|
||||
bitmap_set_pending(counts, block);
|
||||
md_bitmap_set_pending(counts, block);
|
||||
bitmap->allclean = 0;
|
||||
}
|
||||
}
|
||||
spin_unlock_irq(&counts->lock);
|
||||
|
||||
bitmap_wait_writes(bitmap);
|
||||
md_bitmap_wait_writes(bitmap);
|
||||
/* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
|
||||
* DIRTY pages need to be written by bitmap_unplug so it can wait
|
||||
* for them.
|
||||
|
@ -1352,9 +1350,9 @@ void bitmap_daemon_work(struct mddev *mddev)
|
|||
mutex_unlock(&mddev->bitmap_info.mutex);
|
||||
}
|
||||
|
||||
static bitmap_counter_t *bitmap_get_counter(struct bitmap_counts *bitmap,
|
||||
sector_t offset, sector_t *blocks,
|
||||
int create)
|
||||
static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
|
||||
sector_t offset, sector_t *blocks,
|
||||
int create)
|
||||
__releases(bitmap->lock)
|
||||
__acquires(bitmap->lock)
|
||||
{
|
||||
|
@ -1368,7 +1366,7 @@ __acquires(bitmap->lock)
|
|||
sector_t csize;
|
||||
int err;
|
||||
|
||||
err = bitmap_checkpage(bitmap, page, create, 0);
|
||||
err = md_bitmap_checkpage(bitmap, page, create, 0);
|
||||
|
||||
if (bitmap->bp[page].hijacked ||
|
||||
bitmap->bp[page].map == NULL)
|
||||
|
@ -1394,7 +1392,7 @@ __acquires(bitmap->lock)
|
|||
&(bitmap->bp[page].map[pageoff]);
|
||||
}
|
||||
|
||||
int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
|
||||
int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
|
||||
{
|
||||
if (!bitmap)
|
||||
return 0;
|
||||
|
@ -1415,7 +1413,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
|
|||
bitmap_counter_t *bmc;
|
||||
|
||||
spin_lock_irq(&bitmap->counts.lock);
|
||||
bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
|
||||
bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
|
||||
if (!bmc) {
|
||||
spin_unlock_irq(&bitmap->counts.lock);
|
||||
return 0;
|
||||
|
@ -1437,8 +1435,8 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
|
|||
|
||||
switch (*bmc) {
|
||||
case 0:
|
||||
bitmap_file_set_bit(bitmap, offset);
|
||||
bitmap_count_page(&bitmap->counts, offset, 1);
|
||||
md_bitmap_file_set_bit(bitmap, offset);
|
||||
md_bitmap_count_page(&bitmap->counts, offset, 1);
|
||||
/* fall through */
|
||||
case 1:
|
||||
*bmc = 2;
|
||||
|
@ -1456,10 +1454,10 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
|
|||
}
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(bitmap_startwrite);
|
||||
EXPORT_SYMBOL(md_bitmap_startwrite);
|
||||
|
||||
void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors,
|
||||
int success, int behind)
|
||||
void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
|
||||
unsigned long sectors, int success, int behind)
|
||||
{
|
||||
if (!bitmap)
|
||||
return;
|
||||
|
@ -1477,7 +1475,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
|
|||
bitmap_counter_t *bmc;
|
||||
|
||||
spin_lock_irqsave(&bitmap->counts.lock, flags);
|
||||
bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 0);
|
||||
bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 0);
|
||||
if (!bmc) {
|
||||
spin_unlock_irqrestore(&bitmap->counts.lock, flags);
|
||||
return;
|
||||
|
@ -1498,7 +1496,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
|
|||
|
||||
(*bmc)--;
|
||||
if (*bmc <= 2) {
|
||||
bitmap_set_pending(&bitmap->counts, offset);
|
||||
md_bitmap_set_pending(&bitmap->counts, offset);
|
||||
bitmap->allclean = 0;
|
||||
}
|
||||
spin_unlock_irqrestore(&bitmap->counts.lock, flags);
|
||||
|
@ -1509,7 +1507,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
|
|||
sectors = 0;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(bitmap_endwrite);
|
||||
EXPORT_SYMBOL(md_bitmap_endwrite);
|
||||
|
||||
static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
|
||||
int degraded)
|
||||
|
@ -1521,7 +1519,7 @@ static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t
|
|||
return 1; /* always resync if no bitmap */
|
||||
}
|
||||
spin_lock_irq(&bitmap->counts.lock);
|
||||
bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
|
||||
bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
|
||||
rv = 0;
|
||||
if (bmc) {
|
||||
/* locked */
|
||||
|
@ -1539,8 +1537,8 @@ static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t
|
|||
return rv;
|
||||
}
|
||||
|
||||
int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
|
||||
int degraded)
|
||||
int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
|
||||
int degraded)
|
||||
{
|
||||
/* bitmap_start_sync must always report on multiples of whole
|
||||
* pages, otherwise resync (which is very PAGE_SIZE based) will
|
||||
|
@ -1561,9 +1559,9 @@ int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
|
|||
}
|
||||
return rv;
|
||||
}
|
||||
EXPORT_SYMBOL(bitmap_start_sync);
|
||||
EXPORT_SYMBOL(md_bitmap_start_sync);
|
||||
|
||||
void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
|
||||
void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
|
||||
{
|
||||
bitmap_counter_t *bmc;
|
||||
unsigned long flags;
|
||||
|
@ -1573,7 +1571,7 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, i
|
|||
return;
|
||||
}
|
||||
spin_lock_irqsave(&bitmap->counts.lock, flags);
|
||||
bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
|
||||
bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
|
||||
if (bmc == NULL)
|
||||
goto unlock;
|
||||
/* locked */
|
||||
|
@ -1584,7 +1582,7 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, i
|
|||
*bmc |= NEEDED_MASK;
|
||||
else {
|
||||
if (*bmc <= 2) {
|
||||
bitmap_set_pending(&bitmap->counts, offset);
|
||||
md_bitmap_set_pending(&bitmap->counts, offset);
|
||||
bitmap->allclean = 0;
|
||||
}
|
||||
}
|
||||
|
@ -1592,9 +1590,9 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, i
|
|||
unlock:
|
||||
spin_unlock_irqrestore(&bitmap->counts.lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(bitmap_end_sync);
|
||||
EXPORT_SYMBOL(md_bitmap_end_sync);
|
||||
|
||||
void bitmap_close_sync(struct bitmap *bitmap)
|
||||
void md_bitmap_close_sync(struct bitmap *bitmap)
|
||||
{
|
||||
/* Sync has finished, and any bitmap chunks that weren't synced
|
||||
* properly have been aborted. It remains to us to clear the
|
||||
|
@ -1605,13 +1603,13 @@ void bitmap_close_sync(struct bitmap *bitmap)
|
|||
if (!bitmap)
|
||||
return;
|
||||
while (sector < bitmap->mddev->resync_max_sectors) {
|
||||
bitmap_end_sync(bitmap, sector, &blocks, 0);
|
||||
md_bitmap_end_sync(bitmap, sector, &blocks, 0);
|
||||
sector += blocks;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(bitmap_close_sync);
|
||||
EXPORT_SYMBOL(md_bitmap_close_sync);
|
||||
|
||||
void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
|
||||
void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
|
||||
{
|
||||
sector_t s = 0;
|
||||
sector_t blocks;
|
||||
|
@ -1633,15 +1631,15 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
|
|||
sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
|
||||
s = 0;
|
||||
while (s < sector && s < bitmap->mddev->resync_max_sectors) {
|
||||
bitmap_end_sync(bitmap, s, &blocks, 0);
|
||||
md_bitmap_end_sync(bitmap, s, &blocks, 0);
|
||||
s += blocks;
|
||||
}
|
||||
bitmap->last_end_sync = jiffies;
|
||||
sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
|
||||
}
|
||||
EXPORT_SYMBOL(bitmap_cond_end_sync);
|
||||
EXPORT_SYMBOL(md_bitmap_cond_end_sync);
|
||||
|
||||
void bitmap_sync_with_cluster(struct mddev *mddev,
|
||||
void md_bitmap_sync_with_cluster(struct mddev *mddev,
|
||||
sector_t old_lo, sector_t old_hi,
|
||||
sector_t new_lo, sector_t new_hi)
|
||||
{
|
||||
|
@ -1649,20 +1647,20 @@ void bitmap_sync_with_cluster(struct mddev *mddev,
|
|||
sector_t sector, blocks = 0;
|
||||
|
||||
for (sector = old_lo; sector < new_lo; ) {
|
||||
bitmap_end_sync(bitmap, sector, &blocks, 0);
|
||||
md_bitmap_end_sync(bitmap, sector, &blocks, 0);
|
||||
sector += blocks;
|
||||
}
|
||||
WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n");
|
||||
|
||||
for (sector = old_hi; sector < new_hi; ) {
|
||||
bitmap_start_sync(bitmap, sector, &blocks, 0);
|
||||
md_bitmap_start_sync(bitmap, sector, &blocks, 0);
|
||||
sector += blocks;
|
||||
}
|
||||
WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
|
||||
}
|
||||
EXPORT_SYMBOL(bitmap_sync_with_cluster);
|
||||
EXPORT_SYMBOL(md_bitmap_sync_with_cluster);
|
||||
|
||||
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
|
||||
static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
|
||||
{
|
||||
/* For each chunk covered by any of these sectors, set the
|
||||
* counter to 2 and possibly set resync_needed. They should all
|
||||
|
@ -1672,15 +1670,15 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
|
|||
sector_t secs;
|
||||
bitmap_counter_t *bmc;
|
||||
spin_lock_irq(&bitmap->counts.lock);
|
||||
bmc = bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
|
||||
bmc = md_bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
|
||||
if (!bmc) {
|
||||
spin_unlock_irq(&bitmap->counts.lock);
|
||||
return;
|
||||
}
|
||||
if (!*bmc) {
|
||||
*bmc = 2;
|
||||
bitmap_count_page(&bitmap->counts, offset, 1);
|
||||
bitmap_set_pending(&bitmap->counts, offset);
|
||||
md_bitmap_count_page(&bitmap->counts, offset, 1);
|
||||
md_bitmap_set_pending(&bitmap->counts, offset);
|
||||
bitmap->allclean = 0;
|
||||
}
|
||||
if (needed)
|
||||
|
@ -1689,14 +1687,14 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
|
|||
}
|
||||
|
||||
/* dirty the memory and file bits for bitmap chunks "s" to "e" */
|
||||
void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
|
||||
void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
|
||||
{
|
||||
unsigned long chunk;
|
||||
|
||||
for (chunk = s; chunk <= e; chunk++) {
|
||||
sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
|
||||
bitmap_set_memory_bits(bitmap, sec, 1);
|
||||
bitmap_file_set_bit(bitmap, sec);
|
||||
md_bitmap_set_memory_bits(bitmap, sec, 1);
|
||||
md_bitmap_file_set_bit(bitmap, sec);
|
||||
if (sec < bitmap->mddev->recovery_cp)
|
||||
/* We are asserting that the array is dirty,
|
||||
* so move the recovery_cp address back so
|
||||
|
@ -1709,7 +1707,7 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
|
|||
/*
|
||||
* flush out any pending updates
|
||||
*/
|
||||
void bitmap_flush(struct mddev *mddev)
|
||||
void md_bitmap_flush(struct mddev *mddev)
|
||||
{
|
||||
struct bitmap *bitmap = mddev->bitmap;
|
||||
long sleep;
|
||||
|
@ -1722,18 +1720,18 @@ void bitmap_flush(struct mddev *mddev)
|
|||
*/
|
||||
sleep = mddev->bitmap_info.daemon_sleep * 2;
|
||||
bitmap->daemon_lastrun -= sleep;
|
||||
bitmap_daemon_work(mddev);
|
||||
md_bitmap_daemon_work(mddev);
|
||||
bitmap->daemon_lastrun -= sleep;
|
||||
bitmap_daemon_work(mddev);
|
||||
md_bitmap_daemon_work(mddev);
|
||||
bitmap->daemon_lastrun -= sleep;
|
||||
bitmap_daemon_work(mddev);
|
||||
bitmap_update_sb(bitmap);
|
||||
md_bitmap_daemon_work(mddev);
|
||||
md_bitmap_update_sb(bitmap);
|
||||
}
|
||||
|
||||
/*
|
||||
* free memory that was allocated
|
||||
*/
|
||||
void bitmap_free(struct bitmap *bitmap)
|
||||
void md_bitmap_free(struct bitmap *bitmap)
|
||||
{
|
||||
unsigned long k, pages;
|
||||
struct bitmap_page *bp;
|
||||
|
@ -1753,7 +1751,7 @@ void bitmap_free(struct bitmap *bitmap)
|
|||
atomic_read(&bitmap->pending_writes) == 0);
|
||||
|
||||
/* release the bitmap file */
|
||||
bitmap_file_unmap(&bitmap->storage);
|
||||
md_bitmap_file_unmap(&bitmap->storage);
|
||||
|
||||
bp = bitmap->counts.bp;
|
||||
pages = bitmap->counts.pages;
|
||||
|
@ -1767,9 +1765,9 @@ void bitmap_free(struct bitmap *bitmap)
|
|||
kfree(bp);
|
||||
kfree(bitmap);
|
||||
}
|
||||
EXPORT_SYMBOL(bitmap_free);
|
||||
EXPORT_SYMBOL(md_bitmap_free);
|
||||
|
||||
void bitmap_wait_behind_writes(struct mddev *mddev)
|
||||
void md_bitmap_wait_behind_writes(struct mddev *mddev)
|
||||
{
|
||||
struct bitmap *bitmap = mddev->bitmap;
|
||||
|
||||
|
@ -1783,14 +1781,14 @@ void bitmap_wait_behind_writes(struct mddev *mddev)
|
|||
}
|
||||
}
|
||||
|
||||
void bitmap_destroy(struct mddev *mddev)
|
||||
void md_bitmap_destroy(struct mddev *mddev)
|
||||
{
|
||||
struct bitmap *bitmap = mddev->bitmap;
|
||||
|
||||
if (!bitmap) /* there was no bitmap */
|
||||
return;
|
||||
|
||||
bitmap_wait_behind_writes(mddev);
|
||||
md_bitmap_wait_behind_writes(mddev);
|
||||
|
||||
mutex_lock(&mddev->bitmap_info.mutex);
|
||||
spin_lock(&mddev->lock);
|
||||
|
@ -1800,7 +1798,7 @@ void bitmap_destroy(struct mddev *mddev)
|
|||
if (mddev->thread)
|
||||
mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
|
||||
|
||||
bitmap_free(bitmap);
|
||||
md_bitmap_free(bitmap);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1808,7 +1806,7 @@ void bitmap_destroy(struct mddev *mddev)
|
|||
* if this returns an error, bitmap_destroy must be called to do clean up
|
||||
* once mddev->bitmap is set
|
||||
*/
|
||||
struct bitmap *bitmap_create(struct mddev *mddev, int slot)
|
||||
struct bitmap *md_bitmap_create(struct mddev *mddev, int slot)
|
||||
{
|
||||
struct bitmap *bitmap;
|
||||
sector_t blocks = mddev->resync_max_sectors;
|
||||
|
@ -1863,9 +1861,9 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot)
|
|||
* instructing us to create a new on-disk bitmap instance.
|
||||
*/
|
||||
if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
|
||||
err = bitmap_new_disk_sb(bitmap);
|
||||
err = md_bitmap_new_disk_sb(bitmap);
|
||||
else
|
||||
err = bitmap_read_sb(bitmap);
|
||||
err = md_bitmap_read_sb(bitmap);
|
||||
} else {
|
||||
err = 0;
|
||||
if (mddev->bitmap_info.chunksize == 0 ||
|
||||
|
@ -1878,7 +1876,7 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot)
|
|||
goto error;
|
||||
|
||||
bitmap->daemon_lastrun = jiffies;
|
||||
err = bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
|
||||
err = md_bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
|
||||
if (err)
|
||||
goto error;
|
||||
|
||||
|
@ -1891,11 +1889,11 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot)
|
|||
|
||||
return bitmap;
|
||||
error:
|
||||
bitmap_free(bitmap);
|
||||
md_bitmap_free(bitmap);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
int bitmap_load(struct mddev *mddev)
|
||||
int md_bitmap_load(struct mddev *mddev)
|
||||
{
|
||||
int err = 0;
|
||||
sector_t start = 0;
|
||||
|
@ -1915,10 +1913,10 @@ int bitmap_load(struct mddev *mddev)
|
|||
*/
|
||||
while (sector < mddev->resync_max_sectors) {
|
||||
sector_t blocks;
|
||||
bitmap_start_sync(bitmap, sector, &blocks, 0);
|
||||
md_bitmap_start_sync(bitmap, sector, &blocks, 0);
|
||||
sector += blocks;
|
||||
}
|
||||
bitmap_close_sync(bitmap);
|
||||
md_bitmap_close_sync(bitmap);
|
||||
|
||||
if (mddev->degraded == 0
|
||||
|| bitmap->events_cleared == mddev->events)
|
||||
|
@ -1927,7 +1925,7 @@ int bitmap_load(struct mddev *mddev)
|
|||
start = mddev->recovery_cp;
|
||||
|
||||
mutex_lock(&mddev->bitmap_info.mutex);
|
||||
err = bitmap_init_from_disk(bitmap, start);
|
||||
err = md_bitmap_init_from_disk(bitmap, start);
|
||||
mutex_unlock(&mddev->bitmap_info.mutex);
|
||||
|
||||
if (err)
|
||||
|
@ -1940,29 +1938,29 @@ int bitmap_load(struct mddev *mddev)
|
|||
mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
|
||||
md_wakeup_thread(mddev->thread);
|
||||
|
||||
bitmap_update_sb(bitmap);
|
||||
md_bitmap_update_sb(bitmap);
|
||||
|
||||
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
|
||||
err = -EIO;
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bitmap_load);
|
||||
EXPORT_SYMBOL_GPL(md_bitmap_load);
|
||||
|
||||
struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
|
||||
{
|
||||
int rv = 0;
|
||||
struct bitmap *bitmap;
|
||||
|
||||
bitmap = bitmap_create(mddev, slot);
|
||||
bitmap = md_bitmap_create(mddev, slot);
|
||||
if (IS_ERR(bitmap)) {
|
||||
rv = PTR_ERR(bitmap);
|
||||
return ERR_PTR(rv);
|
||||
}
|
||||
|
||||
rv = bitmap_init_from_disk(bitmap, 0);
|
||||
rv = md_bitmap_init_from_disk(bitmap, 0);
|
||||
if (rv) {
|
||||
bitmap_free(bitmap);
|
||||
md_bitmap_free(bitmap);
|
||||
return ERR_PTR(rv);
|
||||
}
|
||||
|
||||
|
@ -1973,7 +1971,7 @@ EXPORT_SYMBOL(get_bitmap_from_slot);
|
|||
/* Loads the bitmap associated with slot and copies the resync information
|
||||
* to our bitmap
|
||||
*/
|
||||
int bitmap_copy_from_slot(struct mddev *mddev, int slot,
|
||||
int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
|
||||
sector_t *low, sector_t *high, bool clear_bits)
|
||||
{
|
||||
int rv = 0, i, j;
|
||||
|
@ -1990,35 +1988,35 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
|
|||
counts = &bitmap->counts;
|
||||
for (j = 0; j < counts->chunks; j++) {
|
||||
block = (sector_t)j << counts->chunkshift;
|
||||
if (bitmap_file_test_bit(bitmap, block)) {
|
||||
if (md_bitmap_file_test_bit(bitmap, block)) {
|
||||
if (!lo)
|
||||
lo = block;
|
||||
hi = block;
|
||||
bitmap_file_clear_bit(bitmap, block);
|
||||
bitmap_set_memory_bits(mddev->bitmap, block, 1);
|
||||
bitmap_file_set_bit(mddev->bitmap, block);
|
||||
md_bitmap_file_clear_bit(bitmap, block);
|
||||
md_bitmap_set_memory_bits(mddev->bitmap, block, 1);
|
||||
md_bitmap_file_set_bit(mddev->bitmap, block);
|
||||
}
|
||||
}
|
||||
|
||||
if (clear_bits) {
|
||||
bitmap_update_sb(bitmap);
|
||||
md_bitmap_update_sb(bitmap);
|
||||
/* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs
|
||||
* BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */
|
||||
for (i = 0; i < bitmap->storage.file_pages; i++)
|
||||
if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING))
|
||||
set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
|
||||
bitmap_unplug(bitmap);
|
||||
md_bitmap_unplug(bitmap);
|
||||
}
|
||||
bitmap_unplug(mddev->bitmap);
|
||||
md_bitmap_unplug(mddev->bitmap);
|
||||
*low = lo;
|
||||
*high = hi;
|
||||
|
||||
return rv;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bitmap_copy_from_slot);
|
||||
EXPORT_SYMBOL_GPL(md_bitmap_copy_from_slot);
|
||||
|
||||
|
||||
void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
|
||||
void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
|
||||
{
|
||||
unsigned long chunk_kb;
|
||||
struct bitmap_counts *counts;
|
||||
|
@ -2045,7 +2043,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
|
|||
seq_printf(seq, "\n");
|
||||
}
|
||||
|
||||
int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
|
||||
int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
|
||||
int chunksize, int init)
|
||||
{
|
||||
/* If chunk_size is 0, choose an appropriate chunk size.
|
||||
|
@ -2106,12 +2104,12 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
|
|||
chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
|
||||
memset(&store, 0, sizeof(store));
|
||||
if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
|
||||
ret = bitmap_storage_alloc(&store, chunks,
|
||||
!bitmap->mddev->bitmap_info.external,
|
||||
mddev_is_clustered(bitmap->mddev)
|
||||
? bitmap->cluster_slot : 0);
|
||||
ret = md_bitmap_storage_alloc(&store, chunks,
|
||||
!bitmap->mddev->bitmap_info.external,
|
||||
mddev_is_clustered(bitmap->mddev)
|
||||
? bitmap->cluster_slot : 0);
|
||||
if (ret) {
|
||||
bitmap_file_unmap(&store);
|
||||
md_bitmap_file_unmap(&store);
|
||||
goto err;
|
||||
}
|
||||
|
||||
|
@ -2120,7 +2118,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
|
|||
new_bp = kzalloc(pages * sizeof(*new_bp), GFP_KERNEL);
|
||||
ret = -ENOMEM;
|
||||
if (!new_bp) {
|
||||
bitmap_file_unmap(&store);
|
||||
md_bitmap_file_unmap(&store);
|
||||
goto err;
|
||||
}
|
||||
|
||||
|
@ -2134,7 +2132,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
|
|||
memcpy(page_address(store.sb_page),
|
||||
page_address(bitmap->storage.sb_page),
|
||||
sizeof(bitmap_super_t));
|
||||
bitmap_file_unmap(&bitmap->storage);
|
||||
md_bitmap_file_unmap(&bitmap->storage);
|
||||
bitmap->storage = store;
|
||||
|
||||
old_counts = bitmap->counts;
|
||||
|
@ -2154,7 +2152,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
|
|||
if (mddev_is_clustered(bitmap->mddev)) {
|
||||
unsigned long page;
|
||||
for (page = 0; page < pages; page++) {
|
||||
ret = bitmap_checkpage(&bitmap->counts, page, 1, 1);
|
||||
ret = md_bitmap_checkpage(&bitmap->counts, page, 1, 1);
|
||||
if (ret) {
|
||||
unsigned long k;
|
||||
|
||||
|
@ -2184,27 +2182,23 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
|
|||
bitmap_counter_t *bmc_old, *bmc_new;
|
||||
int set;
|
||||
|
||||
bmc_old = bitmap_get_counter(&old_counts, block,
|
||||
&old_blocks, 0);
|
||||
bmc_old = md_bitmap_get_counter(&old_counts, block, &old_blocks, 0);
|
||||
set = bmc_old && NEEDED(*bmc_old);
|
||||
|
||||
if (set) {
|
||||
bmc_new = bitmap_get_counter(&bitmap->counts, block,
|
||||
&new_blocks, 1);
|
||||
bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
|
||||
if (*bmc_new == 0) {
|
||||
/* need to set on-disk bits too. */
|
||||
sector_t end = block + new_blocks;
|
||||
sector_t start = block >> chunkshift;
|
||||
start <<= chunkshift;
|
||||
while (start < end) {
|
||||
bitmap_file_set_bit(bitmap, block);
|
||||
md_bitmap_file_set_bit(bitmap, block);
|
||||
start += 1 << chunkshift;
|
||||
}
|
||||
*bmc_new = 2;
|
||||
bitmap_count_page(&bitmap->counts,
|
||||
block, 1);
|
||||
bitmap_set_pending(&bitmap->counts,
|
||||
block);
|
||||
md_bitmap_count_page(&bitmap->counts, block, 1);
|
||||
md_bitmap_set_pending(&bitmap->counts, block);
|
||||
}
|
||||
*bmc_new |= NEEDED_MASK;
|
||||
if (new_blocks < old_blocks)
|
||||
|
@ -2225,18 +2219,15 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
|
|||
int i;
|
||||
while (block < (chunks << chunkshift)) {
|
||||
bitmap_counter_t *bmc;
|
||||
bmc = bitmap_get_counter(&bitmap->counts, block,
|
||||
&new_blocks, 1);
|
||||
bmc = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
|
||||
if (bmc) {
|
||||
/* new space. It needs to be resynced, so
|
||||
* we set NEEDED_MASK.
|
||||
*/
|
||||
if (*bmc == 0) {
|
||||
*bmc = NEEDED_MASK | 2;
|
||||
bitmap_count_page(&bitmap->counts,
|
||||
block, 1);
|
||||
bitmap_set_pending(&bitmap->counts,
|
||||
block);
|
||||
md_bitmap_count_page(&bitmap->counts, block, 1);
|
||||
md_bitmap_set_pending(&bitmap->counts, block);
|
||||
}
|
||||
}
|
||||
block += new_blocks;
|
||||
|
@ -2247,14 +2238,14 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
|
|||
spin_unlock_irq(&bitmap->counts.lock);
|
||||
|
||||
if (!init) {
|
||||
bitmap_unplug(bitmap);
|
||||
md_bitmap_unplug(bitmap);
|
||||
bitmap->mddev->pers->quiesce(bitmap->mddev, 0);
|
||||
}
|
||||
ret = 0;
|
||||
err:
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bitmap_resize);
|
||||
EXPORT_SYMBOL_GPL(md_bitmap_resize);
|
||||
|
||||
static ssize_t
|
||||
location_show(struct mddev *mddev, char *page)
|
||||
|
@ -2298,7 +2289,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
|
|||
}
|
||||
if (mddev->pers) {
|
||||
mddev->pers->quiesce(mddev, 1);
|
||||
bitmap_destroy(mddev);
|
||||
md_bitmap_destroy(mddev);
|
||||
mddev->pers->quiesce(mddev, 0);
|
||||
}
|
||||
mddev->bitmap_info.offset = 0;
|
||||
|
@ -2337,18 +2328,18 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
|
|||
if (mddev->pers) {
|
||||
struct bitmap *bitmap;
|
||||
mddev->pers->quiesce(mddev, 1);
|
||||
bitmap = bitmap_create(mddev, -1);
|
||||
bitmap = md_bitmap_create(mddev, -1);
|
||||
if (IS_ERR(bitmap))
|
||||
rv = PTR_ERR(bitmap);
|
||||
else {
|
||||
mddev->bitmap = bitmap;
|
||||
rv = bitmap_load(mddev);
|
||||
rv = md_bitmap_load(mddev);
|
||||
if (rv)
|
||||
mddev->bitmap_info.offset = 0;
|
||||
}
|
||||
mddev->pers->quiesce(mddev, 0);
|
||||
if (rv) {
|
||||
bitmap_destroy(mddev);
|
||||
md_bitmap_destroy(mddev);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -236,43 +236,43 @@ struct bitmap {
 /* the bitmap API */

 /* these are used only by md/bitmap */
-struct bitmap *bitmap_create(struct mddev *mddev, int slot);
-int bitmap_load(struct mddev *mddev);
-void bitmap_flush(struct mddev *mddev);
-void bitmap_destroy(struct mddev *mddev);
+struct bitmap *md_bitmap_create(struct mddev *mddev, int slot);
+int md_bitmap_load(struct mddev *mddev);
+void md_bitmap_flush(struct mddev *mddev);
+void md_bitmap_destroy(struct mddev *mddev);

-void bitmap_print_sb(struct bitmap *bitmap);
-void bitmap_update_sb(struct bitmap *bitmap);
-void bitmap_status(struct seq_file *seq, struct bitmap *bitmap);
+void md_bitmap_print_sb(struct bitmap *bitmap);
+void md_bitmap_update_sb(struct bitmap *bitmap);
+void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap);

-int bitmap_setallbits(struct bitmap *bitmap);
-void bitmap_write_all(struct bitmap *bitmap);
+int md_bitmap_setallbits(struct bitmap *bitmap);
+void md_bitmap_write_all(struct bitmap *bitmap);

-void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e);
+void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e);

 /* these are exported */
-int bitmap_startwrite(struct bitmap *bitmap, sector_t offset,
-			unsigned long sectors, int behind);
-void bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
+int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset,
+			 unsigned long sectors, int behind);
+void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
 			unsigned long sectors, int success, int behind);
-int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded);
-void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted);
-void bitmap_close_sync(struct bitmap *bitmap);
-void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force);
-void bitmap_sync_with_cluster(struct mddev *mddev,
-			      sector_t old_lo, sector_t old_hi,
-			      sector_t new_lo, sector_t new_hi);
+int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded);
+void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted);
+void md_bitmap_close_sync(struct bitmap *bitmap);
+void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force);
+void md_bitmap_sync_with_cluster(struct mddev *mddev,
+				 sector_t old_lo, sector_t old_hi,
+				 sector_t new_lo, sector_t new_hi);

-void bitmap_unplug(struct bitmap *bitmap);
-void bitmap_daemon_work(struct mddev *mddev);
+void md_bitmap_unplug(struct bitmap *bitmap);
+void md_bitmap_daemon_work(struct mddev *mddev);

-int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
-		  int chunksize, int init);
+int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+		     int chunksize, int init);
 struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot);
-int bitmap_copy_from_slot(struct mddev *mddev, int slot,
-			  sector_t *lo, sector_t *hi, bool clear_bits);
-void bitmap_free(struct bitmap *bitmap);
-void bitmap_wait_behind_writes(struct mddev *mddev);
+int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
+			     sector_t *lo, sector_t *hi, bool clear_bits);
+void md_bitmap_free(struct bitmap *bitmap);
+void md_bitmap_wait_behind_writes(struct mddev *mddev);
 #endif

 #endif
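The declarations above are the exported half of the renamed API that the RAID personalities call around normal writes: md_bitmap_startwrite() marks the affected region dirty before the member-device I/O is issued, and md_bitmap_endwrite() drops that reference once it completes (the raid1 and raid10 hunks later in this merge show the real call sites). A minimal sketch of that pairing, assuming a hypothetical personality helper write_region() with a plain (non-write-behind) request:

#include "md.h"
#include "md-bitmap.h"

/* Hypothetical write path: bracket a normal write with the renamed exported
 * helpers. 'behind' would be non-zero for write-behind I/O. */
static void write_region(struct mddev *mddev, sector_t offset,
			 unsigned long sectors, int success)
{
	struct bitmap *bitmap = mddev->bitmap;	/* may be NULL; the helpers tolerate that */

	md_bitmap_startwrite(bitmap, offset, sectors, 0 /* behind */);

	/* ... submit the member-device bios and wait for them here ... */

	/* success == 0 leaves the region dirty so it is resynced later */
	md_bitmap_endwrite(bitmap, offset, sectors, success, 0 /* behind */);
}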
@@ -326,7 +326,7 @@ static void recover_bitmaps(struct md_thread *thread)
 			       str, ret);
 			goto clear_bit;
 		}
-		ret = bitmap_copy_from_slot(mddev, slot, &lo, &hi, true);
+		ret = md_bitmap_copy_from_slot(mddev, slot, &lo, &hi, true);
 		if (ret) {
 			pr_err("md-cluster: Could not copy data from bitmap %d\n", slot);
 			goto clear_bit;

@@ -480,9 +480,7 @@ static void process_suspend_info(struct mddev *mddev,
 		 * resync thread is running in another node,
 		 * so we don't need to do the resync again
 		 * with the same section */
-		bitmap_sync_with_cluster(mddev, cinfo->sync_low,
-					 cinfo->sync_hi,
-					 lo, hi);
+		md_bitmap_sync_with_cluster(mddev, cinfo->sync_low, cinfo->sync_hi, lo, hi);
 		cinfo->sync_low = lo;
 		cinfo->sync_hi = hi;

@@ -829,7 +827,7 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
 		}

 		/* Read the disk bitmap sb and check if it needs recovery */
-		ret = bitmap_copy_from_slot(mddev, i, &lo, &hi, false);
+		ret = md_bitmap_copy_from_slot(mddev, i, &lo, &hi, false);
 		if (ret) {
 			pr_warn("md-cluster: Could not gather bitmaps from slot %d", i);
 			lockres_free(bm_lockres);

@@ -1127,13 +1125,13 @@ static int cluster_check_sync_size(struct mddev *mddev)
 		bm_lockres = lockres_init(mddev, str, NULL, 1);
 		if (!bm_lockres) {
 			pr_err("md-cluster: Cannot initialize %s\n", str);
-			bitmap_free(bitmap);
+			md_bitmap_free(bitmap);
 			return -1;
 		}
 		bm_lockres->flags |= DLM_LKF_NOQUEUE;
 		rv = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
 		if (!rv)
-			bitmap_update_sb(bitmap);
+			md_bitmap_update_sb(bitmap);
 		lockres_free(bm_lockres);

 		sb = kmap_atomic(bitmap->storage.sb_page);

@@ -1141,11 +1139,11 @@ static int cluster_check_sync_size(struct mddev *mddev)
 			sync_size = sb->sync_size;
 		else if (sync_size != sb->sync_size) {
 			kunmap_atomic(sb);
-			bitmap_free(bitmap);
+			md_bitmap_free(bitmap);
 			return -1;
 		}
 		kunmap_atomic(sb);
-		bitmap_free(bitmap);
+		md_bitmap_free(bitmap);
 	}

 	return (my_sync_size == sync_size) ? 0 : -1;

@@ -1442,7 +1440,7 @@ static int gather_bitmaps(struct md_rdev *rdev)
 	for (sn = 0; sn < mddev->bitmap_info.nodes; sn++) {
 		if (sn == (cinfo->slot_number - 1))
 			continue;
-		err = bitmap_copy_from_slot(mddev, sn, &lo, &hi, false);
+		err = md_bitmap_copy_from_slot(mddev, sn, &lo, &hi, false);
 		if (err) {
 			pr_warn("md-cluster: Could not gather bitmaps from slot %d", sn);
 			goto out;
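md_bitmap_copy_from_slot() is the clustered-MD entry point used in the hunks above: it loads another node's bitmap slot, folds that slot's dirty bits into the local bitmap, and reports the low/high dirty range. A minimal sketch of the gathering loop, modelled on gather_bitmaps() above; the name gather_other_slots() is illustrative and not part of the patch:

#include "md.h"
#include "md-bitmap.h"

/* Hypothetical: merge the write-intent bits recorded by every other cluster
 * node into this node's bitmap, as done when gathering resync information. */
static int gather_other_slots(struct mddev *mddev, int my_slot)
{
	sector_t lo, hi;
	int slot, err;

	for (slot = 0; slot < mddev->bitmap_info.nodes; slot++) {
		if (slot == my_slot)
			continue;
		/* false: keep the source slot's bits; recovery passes true to clear them */
		err = md_bitmap_copy_from_slot(mddev, slot, &lo, &hi, false);
		if (err)
			return err;
	}
	return 0;
}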
|
@ -2560,7 +2560,7 @@ repeat:
|
|||
if (mddev->queue)
|
||||
blk_add_trace_msg(mddev->queue, "md md_update_sb");
|
||||
rewrite:
|
||||
bitmap_update_sb(mddev->bitmap);
|
||||
md_bitmap_update_sb(mddev->bitmap);
|
||||
rdev_for_each(rdev, mddev) {
|
||||
char b[BDEVNAME_SIZE];
|
||||
|
||||
|
@ -4372,10 +4372,10 @@ bitmap_store(struct mddev *mddev, const char *buf, size_t len)
|
|||
if (buf == end) break;
|
||||
}
|
||||
if (*end && !isspace(*end)) break;
|
||||
bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
|
||||
md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
|
||||
buf = skip_spaces(end);
|
||||
}
|
||||
bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
|
||||
md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
|
||||
out:
|
||||
mddev_unlock(mddev);
|
||||
return len;
|
||||
|
@ -5588,7 +5588,7 @@ int md_run(struct mddev *mddev)
|
|||
(mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
|
||||
struct bitmap *bitmap;
|
||||
|
||||
bitmap = bitmap_create(mddev, -1);
|
||||
bitmap = md_bitmap_create(mddev, -1);
|
||||
if (IS_ERR(bitmap)) {
|
||||
err = PTR_ERR(bitmap);
|
||||
pr_warn("%s: failed to create bitmap (%d)\n",
|
||||
|
@ -5603,7 +5603,7 @@ int md_run(struct mddev *mddev)
|
|||
pers->free(mddev, mddev->private);
|
||||
mddev->private = NULL;
|
||||
module_put(pers->owner);
|
||||
bitmap_destroy(mddev);
|
||||
md_bitmap_destroy(mddev);
|
||||
goto abort;
|
||||
}
|
||||
if (mddev->queue) {
|
||||
|
@ -5688,9 +5688,9 @@ static int do_md_run(struct mddev *mddev)
|
|||
err = md_run(mddev);
|
||||
if (err)
|
||||
goto out;
|
||||
err = bitmap_load(mddev);
|
||||
err = md_bitmap_load(mddev);
|
||||
if (err) {
|
||||
bitmap_destroy(mddev);
|
||||
md_bitmap_destroy(mddev);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -5832,7 +5832,7 @@ static void __md_stop_writes(struct mddev *mddev)
|
|||
mddev->pers->quiesce(mddev, 1);
|
||||
mddev->pers->quiesce(mddev, 0);
|
||||
}
|
||||
bitmap_flush(mddev);
|
||||
md_bitmap_flush(mddev);
|
||||
|
||||
if (mddev->ro == 0 &&
|
||||
((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
|
||||
|
@ -5854,7 +5854,7 @@ EXPORT_SYMBOL_GPL(md_stop_writes);
|
|||
|
||||
static void mddev_detach(struct mddev *mddev)
|
||||
{
|
||||
bitmap_wait_behind_writes(mddev);
|
||||
md_bitmap_wait_behind_writes(mddev);
|
||||
if (mddev->pers && mddev->pers->quiesce) {
|
||||
mddev->pers->quiesce(mddev, 1);
|
||||
mddev->pers->quiesce(mddev, 0);
|
||||
|
@ -5867,7 +5867,7 @@ static void mddev_detach(struct mddev *mddev)
|
|||
static void __md_stop(struct mddev *mddev)
|
||||
{
|
||||
struct md_personality *pers = mddev->pers;
|
||||
bitmap_destroy(mddev);
|
||||
md_bitmap_destroy(mddev);
|
||||
mddev_detach(mddev);
|
||||
/* Ensure ->event_work is done */
|
||||
flush_workqueue(md_misc_wq);
|
||||
|
@ -6681,21 +6681,21 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
|
|||
if (fd >= 0) {
|
||||
struct bitmap *bitmap;
|
||||
|
||||
bitmap = bitmap_create(mddev, -1);
|
||||
bitmap = md_bitmap_create(mddev, -1);
|
||||
mddev_suspend(mddev);
|
||||
if (!IS_ERR(bitmap)) {
|
||||
mddev->bitmap = bitmap;
|
||||
err = bitmap_load(mddev);
|
||||
err = md_bitmap_load(mddev);
|
||||
} else
|
||||
err = PTR_ERR(bitmap);
|
||||
if (err) {
|
||||
bitmap_destroy(mddev);
|
||||
md_bitmap_destroy(mddev);
|
||||
fd = -1;
|
||||
}
|
||||
mddev_resume(mddev);
|
||||
} else if (fd < 0) {
|
||||
mddev_suspend(mddev);
|
||||
bitmap_destroy(mddev);
|
||||
md_bitmap_destroy(mddev);
|
||||
mddev_resume(mddev);
|
||||
}
|
||||
}
|
||||
|
@ -6981,15 +6981,15 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
|
|||
mddev->bitmap_info.default_offset;
|
||||
mddev->bitmap_info.space =
|
||||
mddev->bitmap_info.default_space;
|
||||
bitmap = bitmap_create(mddev, -1);
|
||||
bitmap = md_bitmap_create(mddev, -1);
|
||||
mddev_suspend(mddev);
|
||||
if (!IS_ERR(bitmap)) {
|
||||
mddev->bitmap = bitmap;
|
||||
rv = bitmap_load(mddev);
|
||||
rv = md_bitmap_load(mddev);
|
||||
} else
|
||||
rv = PTR_ERR(bitmap);
|
||||
if (rv)
|
||||
bitmap_destroy(mddev);
|
||||
md_bitmap_destroy(mddev);
|
||||
mddev_resume(mddev);
|
||||
} else {
|
||||
/* remove the bitmap */
|
||||
|
@ -7014,7 +7014,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
|
|||
md_cluster_ops->leave(mddev);
|
||||
}
|
||||
mddev_suspend(mddev);
|
||||
bitmap_destroy(mddev);
|
||||
md_bitmap_destroy(mddev);
|
||||
mddev_resume(mddev);
|
||||
mddev->bitmap_info.offset = 0;
|
||||
}
|
||||
|
@ -7877,7 +7877,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
|
|||
} else
|
||||
seq_printf(seq, "\n ");
|
||||
|
||||
bitmap_status(seq, mddev->bitmap);
|
||||
md_bitmap_status(seq, mddev->bitmap);
|
||||
|
||||
seq_printf(seq, "\n");
|
||||
}
|
||||
|
@ -8748,7 +8748,7 @@ void md_check_recovery(struct mddev *mddev)
|
|||
return;
|
||||
|
||||
if (mddev->bitmap)
|
||||
bitmap_daemon_work(mddev);
|
||||
md_bitmap_daemon_work(mddev);
|
||||
|
||||
if (signal_pending(current)) {
|
||||
if (mddev->pers->sync_request && !mddev->external) {
|
||||
|
@ -8885,7 +8885,7 @@ void md_check_recovery(struct mddev *mddev)
|
|||
* which has the bitmap stored on all devices.
|
||||
* So make sure all bitmap pages get written
|
||||
*/
|
||||
bitmap_write_all(mddev->bitmap);
|
||||
md_bitmap_write_all(mddev->bitmap);
|
||||
}
|
||||
INIT_WORK(&mddev->del_work, md_start_sync);
|
||||
queue_work(md_misc_wq, &mddev->del_work);
|
||||
|
@ -9133,7 +9133,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
|
|||
if (ret)
|
||||
pr_info("md-cluster: resize failed\n");
|
||||
else
|
||||
bitmap_update_sb(mddev->bitmap);
|
||||
md_bitmap_update_sb(mddev->bitmap);
|
||||
}
|
||||
|
||||
/* Check for change of roles in the active devices */
|
||||
|
|
|
@@ -69,9 +69,9 @@ static struct dm_block_validator index_validator = {
  */
 #define BITMAP_CSUM_XOR 240779

-static void bitmap_prepare_for_write(struct dm_block_validator *v,
-				     struct dm_block *b,
-				     size_t block_size)
+static void dm_bitmap_prepare_for_write(struct dm_block_validator *v,
+					struct dm_block *b,
+					size_t block_size)
 {
 	struct disk_bitmap_header *disk_header = dm_block_data(b);

@@ -81,9 +81,9 @@ static void bitmap_prepare_for_write(struct dm_block_validator *v,
 						 BITMAP_CSUM_XOR));
 }

-static int bitmap_check(struct dm_block_validator *v,
-			struct dm_block *b,
-			size_t block_size)
+static int dm_bitmap_check(struct dm_block_validator *v,
+			   struct dm_block *b,
+			   size_t block_size)
 {
 	struct disk_bitmap_header *disk_header = dm_block_data(b);
 	__le32 csum_disk;

@@ -108,8 +108,8 @@ static int bitmap_check(struct dm_block_validator *v,

 static struct dm_block_validator dm_sm_bitmap_validator = {
 	.name = "sm_bitmap",
-	.prepare_for_write = bitmap_prepare_for_write,
-	.check = bitmap_check
+	.prepare_for_write = dm_bitmap_prepare_for_write,
+	.check = dm_bitmap_check,
 };

 /*----------------------------------------------------------------*/

@@ -124,7 +124,7 @@ static void *dm_bitmap_data(struct dm_block *b)

 #define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL

-static unsigned bitmap_word_used(void *addr, unsigned b)
+static unsigned dm_bitmap_word_used(void *addr, unsigned b)
 {
 	__le64 *words_le = addr;
 	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);

@@ -170,7 +170,7 @@ static int sm_find_free(void *addr, unsigned begin, unsigned end,
 {
 	while (begin < end) {
 		if (!(begin & (ENTRIES_PER_WORD - 1)) &&
-		    bitmap_word_used(addr, begin)) {
+		    dm_bitmap_word_used(addr, begin)) {
 			begin += ENTRIES_PER_WORD;
 			continue;
 		}
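The persistent-data hunks above are the same idea on the device-mapper side: the static bitmap validator helpers gain a dm_ prefix so they no longer reuse the generic bitmap_* names, and the validator table is updated to match. As a short sketch of how such a dm_block_validator is wired up with the renamed callbacks (checksum details elided; only the struct wiring mirrors the hunk):

#include "dm-block-manager.h"	/* struct dm_block_validator, dm_block_data() */

/* Sketch of the two hooks a validator supplies: prepare_for_write fills in
 * the checksum before a bitmap block goes to disk, check verifies it after
 * a read. Bodies are placeholders, not the real implementations. */
static void dm_bitmap_prepare_for_write(struct dm_block_validator *v,
					struct dm_block *b, size_t block_size)
{
	/* ... recompute and store the bitmap block checksum ... */
}

static int dm_bitmap_check(struct dm_block_validator *v,
			   struct dm_block *b, size_t block_size)
{
	/* ... verify signature and checksum, return 0 or -EILSEQ ... */
	return 0;
}

static struct dm_block_validator dm_sm_bitmap_validator = {
	.name = "sm_bitmap",
	.prepare_for_write = dm_bitmap_prepare_for_write,
	.check = dm_bitmap_check,
};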
|
@@ -385,10 +385,10 @@ static void close_write(struct r1bio *r1_bio)
 		r1_bio->behind_master_bio = NULL;
 	}
 	/* clear the bitmap if all writes complete successfully */
-	bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
-			r1_bio->sectors,
-			!test_bit(R1BIO_Degraded, &r1_bio->state),
-			test_bit(R1BIO_BehindIO, &r1_bio->state));
+	md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
+			   r1_bio->sectors,
+			   !test_bit(R1BIO_Degraded, &r1_bio->state),
+			   test_bit(R1BIO_BehindIO, &r1_bio->state));
 	md_write_end(r1_bio->mddev);
 }

||||
|
@ -781,7 +781,7 @@ static int raid1_congested(struct mddev *mddev, int bits)
|
|||
static void flush_bio_list(struct r1conf *conf, struct bio *bio)
|
||||
{
|
||||
/* flush any pending bitmap writes to disk before proceeding w/ I/O */
|
||||
bitmap_unplug(conf->mddev->bitmap);
|
||||
md_bitmap_unplug(conf->mddev->bitmap);
|
||||
wake_up(&conf->wait_barrier);
|
||||
|
||||
while (bio) { /* submit pending writes */
|
||||
|
@ -1470,10 +1470,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
|
|||
alloc_behind_master_bio(r1_bio, bio);
|
||||
}
|
||||
|
||||
bitmap_startwrite(bitmap, r1_bio->sector,
|
||||
r1_bio->sectors,
|
||||
test_bit(R1BIO_BehindIO,
|
||||
&r1_bio->state));
|
||||
md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
|
||||
test_bit(R1BIO_BehindIO, &r1_bio->state));
|
||||
first_clone = 0;
|
||||
}
|
||||
|
||||
|
@ -1881,8 +1879,7 @@ static void end_sync_write(struct bio *bio)
|
|||
long sectors_to_go = r1_bio->sectors;
|
||||
/* make sure these bits doesn't get cleared. */
|
||||
do {
|
||||
bitmap_end_sync(mddev->bitmap, s,
|
||||
&sync_blocks, 1);
|
||||
md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
|
||||
s += sync_blocks;
|
||||
sectors_to_go -= sync_blocks;
|
||||
} while (sectors_to_go > 0);
|
||||
|
@ -2629,12 +2626,12 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
|
|||
* We can find the current addess in mddev->curr_resync
|
||||
*/
|
||||
if (mddev->curr_resync < max_sector) /* aborted */
|
||||
bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
|
||||
&sync_blocks, 1);
|
||||
md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
|
||||
&sync_blocks, 1);
|
||||
else /* completed sync */
|
||||
conf->fullsync = 0;
|
||||
|
||||
bitmap_close_sync(mddev->bitmap);
|
||||
md_bitmap_close_sync(mddev->bitmap);
|
||||
close_sync(conf);
|
||||
|
||||
if (mddev_is_clustered(mddev)) {
|
||||
|
@ -2654,7 +2651,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
|
|||
/* before building a request, check if we can skip these blocks..
|
||||
* This call the bitmap_start_sync doesn't actually record anything
|
||||
*/
|
||||
if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
|
||||
if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
|
||||
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
|
||||
/* We can skip this block, and probably several more */
|
||||
*skipped = 1;
|
||||
|
@ -2672,7 +2669,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
|
|||
* sector_nr + two times RESYNC_SECTORS
|
||||
*/
|
||||
|
||||
bitmap_cond_end_sync(mddev->bitmap, sector_nr,
|
||||
md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
|
||||
mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
|
||||
|
||||
|
||||
|
@ -2831,8 +2828,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
|
|||
if (len == 0)
|
||||
break;
|
||||
if (sync_blocks == 0) {
|
||||
if (!bitmap_start_sync(mddev->bitmap, sector_nr,
|
||||
&sync_blocks, still_degraded) &&
|
||||
if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
|
||||
&sync_blocks, still_degraded) &&
|
||||
!conf->fullsync &&
|
||||
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
|
||||
break;
|
||||
|
@@ -3171,7 +3168,7 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors)
mddev->array_sectors > newsize)
return -EINVAL;
if (mddev->bitmap) {
int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0);
int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
if (ret)
return ret;
}

@@ -438,10 +438,10 @@ static void raid10_end_read_request(struct bio *bio)
static void close_write(struct r10bio *r10_bio)
{
/* clear the bitmap if all writes complete successfully */
bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
r10_bio->sectors,
!test_bit(R10BIO_Degraded, &r10_bio->state),
0);
md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
r10_bio->sectors,
!test_bit(R10BIO_Degraded, &r10_bio->state),
0);
md_write_end(r10_bio->mddev);
}

@@ -915,7 +915,7 @@ static void flush_pending_writes(struct r10conf *conf)
blk_start_plug(&plug);
/* flush any pending bitmap writes to disk
* before proceeding w/ I/O */
bitmap_unplug(conf->mddev->bitmap);
md_bitmap_unplug(conf->mddev->bitmap);
wake_up(&conf->wait_barrier);

while (bio) { /* submit pending writes */

@@ -1100,7 +1100,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)

/* we aren't scheduling, so we can do the write-out directly. */
bio = bio_list_get(&plug->pending);
bitmap_unplug(mddev->bitmap);
md_bitmap_unplug(mddev->bitmap);
wake_up(&conf->wait_barrier);

while (bio) { /* submit pending writes */

@@ -1517,7 +1517,7 @@ retry_write:
}

atomic_set(&r10_bio->remaining, 1);
bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);

for (i = 0; i < conf->copies; i++) {
if (r10_bio->devs[i].bio)

@@ -2990,13 +2990,13 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,

if (mddev->curr_resync < max_sector) { /* aborted */
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
&sync_blocks, 1);
md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
&sync_blocks, 1);
else for (i = 0; i < conf->geo.raid_disks; i++) {
sector_t sect =
raid10_find_virt(conf, mddev->curr_resync, i);
bitmap_end_sync(mddev->bitmap, sect,
&sync_blocks, 1);
md_bitmap_end_sync(mddev->bitmap, sect,
&sync_blocks, 1);
}
} else {
/* completed sync */

@@ -3017,7 +3017,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
}
conf->fullsync = 0;
}
bitmap_close_sync(mddev->bitmap);
md_bitmap_close_sync(mddev->bitmap);
close_sync(conf);
*skipped = 1;
return sectors_skipped;

@@ -3111,8 +3111,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
* we only need to recover the block if it is set in
* the bitmap
*/
must_sync = bitmap_start_sync(mddev->bitmap, sect,
&sync_blocks, 1);
must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
&sync_blocks, 1);
if (sync_blocks < max_sync)
max_sync = sync_blocks;
if (!must_sync &&

@@ -3157,8 +3157,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
}
}

must_sync = bitmap_start_sync(mddev->bitmap, sect,
&sync_blocks, still_degraded);
must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
&sync_blocks, still_degraded);

any_working = 0;
for (j=0; j<conf->copies;j++) {

@@ -3334,13 +3334,12 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
* safety reason, which ensures curr_resync_completed is
* updated in bitmap_cond_end_sync.
*/
bitmap_cond_end_sync(mddev->bitmap, sector_nr,
mddev_is_clustered(mddev) &&
(sector_nr + 2 * RESYNC_SECTORS >
conf->cluster_sync_high));
md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
mddev_is_clustered(mddev) &&
(sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));

if (!bitmap_start_sync(mddev->bitmap, sector_nr,
&sync_blocks, mddev->degraded) &&
if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
&sync_blocks, mddev->degraded) &&
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
&mddev->recovery)) {
/* We can skip this block */

@@ -4015,7 +4014,7 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
mddev->array_sectors > size)
return -EINVAL;
if (mddev->bitmap) {
int ret = bitmap_resize(mddev->bitmap, size, 0, 0);
int ret = md_bitmap_resize(mddev->bitmap, size, 0, 0);
if (ret)
return ret;
}

@@ -4281,10 +4280,9 @@ static int raid10_start_reshape(struct mddev *mddev)
spin_unlock_irq(&conf->device_lock);

if (mddev->delta_disks && mddev->bitmap) {
ret = bitmap_resize(mddev->bitmap,
raid10_size(mddev, 0,
conf->geo.raid_disks),
0, 0);
ret = md_bitmap_resize(mddev->bitmap,
raid10_size(mddev, 0, conf->geo.raid_disks),
0, 0);
if (ret)
goto abort;
}

@@ -324,10 +324,10 @@ void r5c_handle_cached_data_endio(struct r5conf *conf,
if (sh->dev[i].written) {
set_bit(R5_UPTODATE, &sh->dev[i].flags);
r5c_return_dev_pending_writes(conf, &sh->dev[i]);
bitmap_endwrite(conf->mddev->bitmap, sh->sector,
STRIPE_SECTORS,
!test_bit(STRIPE_DEGRADED, &sh->state),
0);
md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
STRIPE_SECTORS,
!test_bit(STRIPE_DEGRADED, &sh->state),
0);
}
}
}

@@ -3295,8 +3295,8 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
*/
set_bit(STRIPE_BITMAP_PENDING, &sh->state);
spin_unlock_irq(&sh->stripe_lock);
bitmap_startwrite(conf->mddev->bitmap, sh->sector,
STRIPE_SECTORS, 0);
md_bitmap_startwrite(conf->mddev->bitmap, sh->sector,
STRIPE_SECTORS, 0);
spin_lock_irq(&sh->stripe_lock);
clear_bit(STRIPE_BITMAP_PENDING, &sh->state);
if (!sh->batch_head) {

@@ -3386,8 +3386,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
bi = nextbi;
}
if (bitmap_end)
bitmap_endwrite(conf->mddev->bitmap, sh->sector,
STRIPE_SECTORS, 0, 0);
md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
STRIPE_SECTORS, 0, 0);
bitmap_end = 0;
/* and fail all 'written' */
bi = sh->dev[i].written;

@@ -3432,8 +3432,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
}
}
if (bitmap_end)
bitmap_endwrite(conf->mddev->bitmap, sh->sector,
STRIPE_SECTORS, 0, 0);
md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
STRIPE_SECTORS, 0, 0);
/* If we were in the middle of a write the parity block might
* still be locked - so just clear all R5_LOCKED flags
*/

@@ -3773,10 +3773,10 @@ returnbi:
bio_endio(wbi);
wbi = wbi2;
}
bitmap_endwrite(conf->mddev->bitmap, sh->sector,
STRIPE_SECTORS,
!test_bit(STRIPE_DEGRADED, &sh->state),
0);
md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
STRIPE_SECTORS,
!test_bit(STRIPE_DEGRADED, &sh->state),
0);
if (head_sh->batch_head) {
sh = list_first_entry(&sh->batch_list,
struct stripe_head,

@@ -5533,10 +5533,10 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
for (d = 0;
d < conf->raid_disks - conf->max_degraded;
d++)
bitmap_startwrite(mddev->bitmap,
sh->sector,
STRIPE_SECTORS,
0);
md_bitmap_startwrite(mddev->bitmap,
sh->sector,
STRIPE_SECTORS,
0);
sh->bm_seq = conf->seq_flush + 1;
set_bit(STRIPE_BIT_DELAY, &sh->state);
}

@@ -6014,11 +6014,11 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr,
}

if (mddev->curr_resync < max_sector) /* aborted */
bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
&sync_blocks, 1);
md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
&sync_blocks, 1);
else /* completed sync */
conf->fullsync = 0;
bitmap_close_sync(mddev->bitmap);
md_bitmap_close_sync(mddev->bitmap);

return 0;
}

@@ -6047,7 +6047,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr,
}
if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
!conf->fullsync &&
!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
sync_blocks >= STRIPE_SECTORS) {
/* we can skip this block, and probably more */
sync_blocks /= STRIPE_SECTORS;

@@ -6055,7 +6055,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr,
return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
}

bitmap_cond_end_sync(mddev->bitmap, sector_nr, false);
md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, false);

sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0);
if (sh == NULL) {

@@ -6078,7 +6078,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr,
}
rcu_read_unlock();

bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);

set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state);

@@ -6279,7 +6279,7 @@ static void raid5d(struct md_thread *thread)
/* Now is a good time to flush some bitmap updates */
conf->seq_flush++;
spin_unlock_irq(&conf->device_lock);
bitmap_unplug(mddev->bitmap);
md_bitmap_unplug(mddev->bitmap);
spin_lock_irq(&conf->device_lock);
conf->seq_write = conf->seq_flush;
activate_bit_delay(conf, conf->temp_inactive_list);

@@ -7734,7 +7734,7 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
mddev->array_sectors > newsize)
return -EINVAL;
if (mddev->bitmap) {
int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0);
int ret = md_bitmap_resize(mddev->bitmap, sectors, 0, 0);
if (ret)
return ret;
}

@@ -104,6 +104,14 @@
* contain all bit positions from 0 to 'bits' - 1.
*/

/*
* Allocation and deallocation of bitmap.
* Provided in lib/bitmap.c to avoid circular dependency.
*/
extern unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags);
extern unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags);
extern void bitmap_free(const unsigned long *bitmap);

/*
* lib/bitmap.c provides these functions:
*/

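For context only (not part of this change set): a minimal sketch of how a caller might use the allocation helpers declared above. The struct, its field names, and the nr_channels parameter are hypothetical, invented for illustration.

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct my_dev {
	unsigned long *busy_map;	/* one bit per hypothetical channel */
	unsigned int nr_channels;
};

static int my_dev_init(struct my_dev *dev, unsigned int nr_channels)
{
	/* bitmap_zalloc() sizes the buffer to BITS_TO_LONGS(nbits) longs
	 * and zeroes it via __GFP_ZERO, per the definitions below. */
	dev->busy_map = bitmap_zalloc(nr_channels, GFP_KERNEL);
	if (!dev->busy_map)
		return -ENOMEM;
	dev->nr_channels = nr_channels;
	return 0;
}

static void my_dev_exit(struct my_dev *dev)
{
	bitmap_free(dev->busy_map);	/* plain kfree() underneath */
}
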
lib/bitmap.c
@@ -13,6 +13,7 @@
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

@@ -1128,6 +1129,25 @@ void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits)
EXPORT_SYMBOL(bitmap_copy_le);
#endif

unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags)
{
return kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long),
flags);
}
EXPORT_SYMBOL(bitmap_alloc);

unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags)
{
return bitmap_alloc(nbits, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(bitmap_zalloc);

void bitmap_free(const unsigned long *bitmap)
{
kfree(bitmap);
}
EXPORT_SYMBOL(bitmap_free);

#if BITS_PER_LONG == 64
/**
* bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap

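As a further illustration (again, not part of this commit), the helpers are a drop-in replacement for the open-coded sizing pattern they are presumably meant to absorb; the function names below are made up:

#include <linux/bitmap.h>
#include <linux/slab.h>

/* Hypothetical caller: the open-coded pattern. */
static unsigned long *alloc_mask_old(unsigned int nbits)
{
	return kcalloc(BITS_TO_LONGS(nbits), sizeof(unsigned long), GFP_KERNEL);
}

/* Same result via the new helper; bitmap_zalloc() is simply bitmap_alloc()
 * with __GFP_ZERO, so both variants return zeroed storage, and the matching
 * release call is bitmap_free() instead of kfree(). */
static unsigned long *alloc_mask_new(unsigned int nbits)
{
	return bitmap_zalloc(nbits, GFP_KERNEL);
}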