Merge tag 'md/3.18' of git://neil.brown.name/md

Pull md updates from Neil Brown:
 - a few minor bug fixes
 - quite a lot of code tidy-up and simplification
 - remove PRINT_RAID_DEBUG ioctl.  I'm fairly sure it is unused, and it
   isn't particularly useful.

* tag 'md/3.18' of git://neil.brown.name/md: (21 commits)
  lib/raid6: Add log level to printks
  md: move EXPORT_SYMBOL to after function in md.c
  md: discard PRINT_RAID_DEBUG ioctl
  md: remove MD_BUG()
  md: clean up 'exit' labels in md_ioctl().
  md: remove unnecessary test for MD_MAJOR in md_ioctl()
  md: don't allow "-sync" to be set for device in an active array.
  md: remove unwanted white space from md.c
  md: don't start resync thread directly from md thread.
  md: Just use RCU when checking for overlap between arrays.
  md: avoid potential long delay under pers_lock
  md: simplify export_array()
  md: discard find_rdev_nr in favour of find_rdev_nr_rcu
  md: use wait_event() to simplify md_super_wait()
  md: be more relaxed about stopping an array which isn't started.
  md/raid1: process_checks doesn't use its return value.
  md/raid5: fix init_stripe() inconsistencies
  md/raid10: another memory leak due to reshape.
  md: use set_bit/clear_bit instead of shift/mask for bi_flags changes.
  md/raid1: minor typos and reformatting.
  ...
Linus Torvalds 2014-10-18 11:39:52 -07:00
commit 88ed806abb
13 changed files with 266 additions and 533 deletions

drivers/md/bitmap.c

@ -879,7 +879,6 @@ void bitmap_unplug(struct bitmap *bitmap)
{
unsigned long i;
int dirty, need_write;
int wait = 0;
if (!bitmap || !bitmap->storage.filemap ||
test_bit(BITMAP_STALE, &bitmap->flags))
@ -897,16 +896,13 @@ void bitmap_unplug(struct bitmap *bitmap)
clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
write_page(bitmap, bitmap->storage.filemap[i], 0);
}
if (dirty)
wait = 1;
}
if (wait) { /* if any writes were performed, we need to wait on them */
if (bitmap->storage.file)
wait_event(bitmap->write_wait,
atomic_read(&bitmap->pending_writes)==0);
else
md_super_wait(bitmap->mddev);
}
if (bitmap->storage.file)
wait_event(bitmap->write_wait,
atomic_read(&bitmap->pending_writes)==0);
else
md_super_wait(bitmap->mddev);
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
bitmap_file_kick(bitmap);
}
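
The conditional wait dropped in the hunk above was redundant: waiting on a pending-writes count that is already zero completes immediately, so bitmap_unplug() can always fall through to the wait. A minimal userspace sketch of that wait-for-zero pattern, using pthreads as a stand-in for the kernel's wait_event()/md_super_wait() (all names here are invented for the demo):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t zero = PTHREAD_COND_INITIALIZER;
static int pending_writes;

static void wait_for_pending_writes(void)
{
    pthread_mutex_lock(&lock);
    while (pending_writes > 0)      /* already zero: falls straight through */
        pthread_cond_wait(&zero, &lock);
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    wait_for_pending_writes();      /* no writer ran, returns immediately */
    printf("nothing pending, the wait was a no-op\n");
    return 0;
}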

drivers/md/linear.c

@ -10,10 +10,10 @@
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
You should have received a copy of the GNU General Public License
(for example /usr/src/linux/COPYING); if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/blkdev.h>
@ -25,7 +25,7 @@
#include "linear.h"
/*
* find which device holds a particular offset
* find which device holds a particular offset
*/
static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
{
@ -355,7 +355,6 @@ static void linear_status (struct seq_file *seq, struct mddev *mddev)
seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
}
static struct md_personality linear_personality =
{
.name = "linear",
@ -379,7 +378,6 @@ static void linear_exit (void)
unregister_md_personality (&linear_personality);
}
module_init(linear_init);
module_exit(linear_exit);
MODULE_LICENSE("GPL");

drivers/md/md.c (file diff suppressed because it is too large)

drivers/md/md.h

@ -1,15 +1,15 @@
/*
md.h : kernel internal structure of the Linux MD driver
Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
You should have received a copy of the GNU General Public License
(for example /usr/src/linux/COPYING); if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _MD_MD_H
@ -56,7 +56,7 @@ struct md_rdev {
__u64 sb_events;
sector_t data_offset; /* start of data in array */
sector_t new_data_offset;/* only relevant while reshaping */
sector_t sb_start; /* offset of the super block (in 512byte sectors) */
sector_t sb_start; /* offset of the super block (in 512byte sectors) */
int sb_size; /* bytes in the superblock */
int preferred_minor; /* autorun support */
@ -239,7 +239,7 @@ struct mddev {
minor_version,
patch_version;
int persistent;
int external; /* metadata is
int external; /* metadata is
* managed externally */
char metadata_type[17]; /* externally set*/
int chunk_sectors;
@ -248,7 +248,7 @@ struct mddev {
char clevel[16];
int raid_disks;
int max_disks;
sector_t dev_sectors; /* used size of
sector_t dev_sectors; /* used size of
* component devices */
sector_t array_sectors; /* exported array size */
int external_size; /* size managed
@ -312,7 +312,7 @@ struct mddev {
int parallel_resync;
int ok_start_degraded;
/* recovery/resync flags
/* recovery/resync flags
* NEEDED: we might need to start a resync/recover
* RUNNING: a thread is running, or about to be started
* SYNC: actually doing a resync, not a recovery
@ -392,20 +392,20 @@ struct mddev {
unsigned int safemode; /* if set, update "clean" superblock
* when no writes pending.
*/
*/
unsigned int safemode_delay;
struct timer_list safemode_timer;
atomic_t writes_pending;
atomic_t writes_pending;
struct request_queue *queue; /* for plugging ... */
struct bitmap *bitmap; /* the bitmap for the device */
struct bitmap *bitmap; /* the bitmap for the device */
struct {
struct file *file; /* the bitmap file */
loff_t offset; /* offset from superblock of
* start of bitmap. May be
* negative, but not '0'
* For external metadata, offset
* from start of device.
* from start of device.
*/
unsigned long space; /* space available at this offset */
loff_t default_offset; /* this is the offset to use when
@ -421,7 +421,7 @@ struct mddev {
int external;
} bitmap_info;
atomic_t max_corr_read_errors; /* max read retries */
atomic_t max_corr_read_errors; /* max read retries */
struct list_head all_mddevs;
struct attribute_group *to_remove;
@ -439,7 +439,6 @@ struct mddev {
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
};
static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
int faulty = test_bit(Faulty, &rdev->flags);
@ -449,7 +448,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
}
struct md_personality
@ -463,7 +462,7 @@ struct md_personality
int (*stop)(struct mddev *mddev);
void (*status)(struct seq_file *seq, struct mddev *mddev);
/* error_handler must set ->faulty and clear ->in_sync
* if appropriate, and should abort recovery if needed
* if appropriate, and should abort recovery if needed
*/
void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
@ -493,7 +492,6 @@ struct md_personality
void *(*takeover) (struct mddev *mddev);
};
struct md_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct mddev *, char *);
@ -560,7 +558,7 @@ struct md_thread {
void (*run) (struct md_thread *thread);
struct mddev *mddev;
wait_queue_head_t wqueue;
unsigned long flags;
unsigned long flags;
struct task_struct *tsk;
unsigned long timeout;
void *private;
@ -594,7 +592,7 @@ extern void md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
sector_t sector, int size, struct page *page);
extern void md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int rw, bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
extern void md_new_event(struct mddev *mddev);

drivers/md/multipath.c

@ -31,13 +31,12 @@
#define NR_RESERVED_BUFS 32
static int multipath_map (struct mpconf *conf)
{
int i, disks = conf->raid_disks;
/*
* Later we do read balancing on the read side
* Later we do read balancing on the read side
* now we use the first available disk.
*/
@ -68,7 +67,6 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
md_wakeup_thread(mddev->thread);
}
/*
* multipath_end_bh_io() is called when we have finished servicing a multipathed
* operation and are ready to return a success/failure code to the buffer
@ -98,8 +96,8 @@ static void multipath_end_request(struct bio *bio, int error)
*/
char b[BDEVNAME_SIZE];
md_error (mp_bh->mddev, rdev);
printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
bdevname(rdev->bdev,b),
printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
bdevname(rdev->bdev,b),
(unsigned long long)bio->bi_iter.bi_sector);
multipath_reschedule_retry(mp_bh);
} else
@ -145,12 +143,12 @@ static void multipath_status (struct seq_file *seq, struct mddev *mddev)
{
struct mpconf *conf = mddev->private;
int i;
seq_printf (seq, " [%d/%d] [", conf->raid_disks,
conf->raid_disks - mddev->degraded);
for (i = 0; i < conf->raid_disks; i++)
seq_printf (seq, "%s",
conf->multipaths[i].rdev &&
conf->multipaths[i].rdev &&
test_bit(In_sync, &conf->multipaths[i].rdev->flags) ? "U" : "_");
seq_printf (seq, "]");
}
@ -195,7 +193,7 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
* first check if this is a queued request for a device
* which has just failed.
*/
printk(KERN_ALERT
printk(KERN_ALERT
"multipath: only one IO path left and IO error.\n");
/* leave it active... it's all we have */
return;
@ -242,7 +240,6 @@ static void print_multipath_conf (struct mpconf *conf)
}
}
static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct mpconf *conf = mddev->private;
@ -325,8 +322,6 @@ abort:
return err;
}
/*
* This is a kernel thread which:
*
@ -356,7 +351,7 @@ static void multipathd(struct md_thread *thread)
bio = &mp_bh->bio;
bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
if ((mp_bh->path = multipath_map (conf))<0) {
printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
" error for block %llu\n",
@ -414,7 +409,7 @@ static int multipath_run (struct mddev *mddev)
conf = kzalloc(sizeof(struct mpconf), GFP_KERNEL);
mddev->private = conf;
if (!conf) {
printk(KERN_ERR
printk(KERN_ERR
"multipath: couldn't allocate memory for %s\n",
mdname(mddev));
goto out;
@ -423,7 +418,7 @@ static int multipath_run (struct mddev *mddev)
conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks,
GFP_KERNEL);
if (!conf->multipaths) {
printk(KERN_ERR
printk(KERN_ERR
"multipath: couldn't allocate memory for %s\n",
mdname(mddev));
goto out_free_conf;
@ -469,7 +464,7 @@ static int multipath_run (struct mddev *mddev)
conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS,
sizeof(struct multipath_bh));
if (conf->pool == NULL) {
printk(KERN_ERR
printk(KERN_ERR
"multipath: couldn't allocate memory for %s\n",
mdname(mddev));
goto out_free_conf;
@ -485,7 +480,7 @@ static int multipath_run (struct mddev *mddev)
}
}
printk(KERN_INFO
printk(KERN_INFO
"multipath: array %s active with %d out of %d IO paths\n",
mdname(mddev), conf->raid_disks - mddev->degraded,
mddev->raid_disks);
@ -512,7 +507,6 @@ out:
return -EIO;
}
static int multipath_stop (struct mddev *mddev)
{
struct mpconf *conf = mddev->private;

drivers/md/raid0.c

@ -1,10 +1,9 @@
/*
raid0.c : Multiple Devices driver for Linux
Copyright (C) 1994-96 Marc ZYNGIER
Copyright (C) 1994-96 Marc ZYNGIER
<zyngier@ufr-info-p7.ibp.fr> or
<maz@gloups.fdn.fr>
Copyright (C) 1999, 2000 Ingo Molnar, Red Hat
Copyright (C) 1999, 2000 Ingo Molnar, Red Hat
RAID-0 management functions.
@ -12,10 +11,10 @@
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
You should have received a copy of the GNU General Public License
(for example /usr/src/linux/COPYING); if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/blkdev.h>

drivers/md/raid1.c

@ -494,7 +494,6 @@ static void raid1_end_write_request(struct bio *bio, int error)
bio_put(to_put);
}
/*
* This routine returns the disk from which the requested read should
* be done. There is a per-array 'next expected sequential IO' sector
@ -901,18 +900,18 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
* However if there are already pending
* requests (preventing the barrier from
* rising completely), and the
* pre-process bio queue isn't empty,
* per-process bio queue isn't empty,
* then don't wait, as we need to empty
* that queue to get the nr_pending
* count down.
* that queue to allow conf->start_next_window
* to increase.
*/
wait_event_lock_irq(conf->wait_barrier,
!conf->array_frozen &&
(!conf->barrier ||
((conf->start_next_window <
conf->next_resync + RESYNC_SECTORS) &&
current->bio_list &&
!bio_list_empty(current->bio_list))),
((conf->start_next_window <
conf->next_resync + RESYNC_SECTORS) &&
current->bio_list &&
!bio_list_empty(current->bio_list))),
conf->resync_lock);
conf->nr_waiting--;
}
@ -1001,8 +1000,7 @@ static void unfreeze_array(struct r1conf *conf)
spin_unlock_irq(&conf->resync_lock);
}
/* duplicate the data pages for behind I/O
/* duplicate the data pages for behind I/O
*/
static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
{
@ -1471,7 +1469,6 @@ static void status(struct seq_file *seq, struct mddev *mddev)
seq_printf(seq, "]");
}
static void error(struct mddev *mddev, struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
@ -1565,7 +1562,7 @@ static int raid1_spare_active(struct mddev *mddev)
unsigned long flags;
/*
* Find all failed disks within the RAID1 configuration
* Find all failed disks within the RAID1 configuration
* and mark them readable.
* Called under mddev lock, so rcu protection not needed.
*/
@ -1606,7 +1603,6 @@ static int raid1_spare_active(struct mddev *mddev)
return count;
}
static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct r1conf *conf = mddev->private;
@ -1735,7 +1731,6 @@ abort:
return err;
}
static void end_sync_read(struct bio *bio, int error)
{
struct r1bio *r1_bio = bio->bi_private;
@ -1947,7 +1942,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
return 1;
}
static int process_checks(struct r1bio *r1_bio)
static void process_checks(struct r1bio *r1_bio)
{
/* We have read all readable devices. If we haven't
* got the block, then there is no hope left.
@ -2039,7 +2034,6 @@ static int process_checks(struct r1bio *r1_bio)
bio_copy_data(sbio, pbio);
}
return 0;
}
static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
@ -2057,8 +2051,8 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
return;
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
if (process_checks(r1_bio) < 0)
return;
process_checks(r1_bio);
/*
* schedule writes
*/
@ -2458,7 +2452,6 @@ static void raid1d(struct md_thread *thread)
blk_finish_plug(&plug);
}
static int init_resync(struct r1conf *conf)
{
int buffs;
@ -2722,7 +2715,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
/* remove last page from this bio */
bio->bi_vcnt--;
bio->bi_iter.bi_size -= len;
bio->bi_flags &= ~(1<< BIO_SEG_VALID);
__clear_bit(BIO_SEG_VALID, &bio->bi_flags);
}
goto bio_full;
}
@ -2947,9 +2940,9 @@ static int run(struct mddev *mddev)
printk(KERN_NOTICE "md/raid1:%s: not clean"
" -- starting background reconstruction\n",
mdname(mddev));
printk(KERN_INFO
printk(KERN_INFO
"md/raid1:%s: active with %d out of %d mirrors\n",
mdname(mddev), mddev->raid_disks - mddev->degraded,
mdname(mddev), mddev->raid_disks - mddev->degraded,
mddev->raid_disks);
/*
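
The bi_flags change above comes from "md: use set_bit/clear_bit instead of shift/mask for bi_flags changes"; the kernel's __set_bit()/__clear_bit() are the non-atomic bitops, so the conversion is a readability fix, not a behaviour change. A userspace sketch of the equivalence (the helper and the BIO_SEG_VALID value below are stand-ins for the demo, not the kernel definitions):

#include <stdio.h>

#define BIO_SEG_VALID 4                 /* assumed value, demo only */

static void demo_clear_bit(int nr, unsigned long *addr)
{
    *addr &= ~(1UL << nr);              /* what the open-coded mask did */
}

int main(void)
{
    unsigned long flags = (1UL << BIO_SEG_VALID) | 1UL;

    flags &= ~(1UL << BIO_SEG_VALID);       /* old shift/mask style */
    flags |= 1UL << BIO_SEG_VALID;          /* set it again */
    demo_clear_bit(BIO_SEG_VALID, &flags);  /* new helper style */
    printf("flags = %#lx\n", flags);        /* 0x1 either way */
    return 0;
}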

drivers/md/raid1.h

@ -90,7 +90,6 @@ struct r1conf {
*/
int recovery_disabled;
/* poolinfo contains information about the content of the
* mempools - it changes when the array grows or shrinks
*/
@ -103,7 +102,6 @@ struct r1conf {
*/
struct page *tmppage;
/* When taking over an array from a different personality, we store
* the new thread here until we fully activate the array.
*/

drivers/md/raid10.c

@ -366,7 +366,6 @@ static void raid10_end_read_request(struct bio *bio, int error)
struct md_rdev *rdev;
struct r10conf *conf = r10_bio->mddev->private;
slot = r10_bio->read_slot;
dev = r10_bio->devs[slot].devnum;
rdev = r10_bio->devs[slot].rdev;
@ -1559,7 +1558,6 @@ static void make_request(struct mddev *mddev, struct bio *bio)
md_write_start(mddev, bio);
do {
/*
@ -1782,7 +1780,6 @@ static int raid10_spare_active(struct mddev *mddev)
return count;
}
static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct r10conf *conf = mddev->private;
@ -1929,7 +1926,6 @@ abort:
return err;
}
static void end_sync_read(struct bio *bio, int error)
{
struct r10bio *r10_bio = bio->bi_private;
@ -2295,7 +2291,6 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
}
}
/*
* Used by fix_read_error() to decay the per rdev read_errors.
* We halve the read error count for every hour that has elapsed
@ -2852,7 +2847,6 @@ static void raid10d(struct md_thread *thread)
blk_finish_plug(&plug);
}
static int init_resync(struct r10conf *conf)
{
int buffs;
@ -3388,7 +3382,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
/* remove last page from this bio */
bio2->bi_vcnt--;
bio2->bi_iter.bi_size -= len;
bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
__clear_bit(BIO_SEG_VALID, &bio2->bi_flags);
}
goto bio_full;
}
@ -3776,7 +3770,6 @@ static int run(struct mddev *mddev)
blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
}
if (md_integrity_register(mddev))
goto out_free_conf;
@ -3834,6 +3827,8 @@ static int stop(struct mddev *mddev)
mempool_destroy(conf->r10bio_pool);
safe_put_page(conf->tmppage);
kfree(conf->mirrors);
kfree(conf->mirrors_old);
kfree(conf->mirrors_new);
kfree(conf);
mddev->private = NULL;
return 0;
@ -4121,7 +4116,7 @@ static int raid10_start_reshape(struct mddev *mddev)
memcpy(conf->mirrors_new, conf->mirrors,
sizeof(struct raid10_info)*conf->prev.raid_disks);
smp_mb();
kfree(conf->mirrors_old); /* FIXME and elsewhere */
kfree(conf->mirrors_old);
conf->mirrors_old = conf->mirrors;
conf->mirrors = conf->mirrors_new;
conf->mirrors_new = NULL;
@ -4416,7 +4411,7 @@ read_more:
read_bio->bi_end_io = end_sync_read;
read_bio->bi_rw = READ;
read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
read_bio->bi_flags |= 1 << BIO_UPTODATE;
__set_bit(BIO_UPTODATE, &read_bio->bi_flags);
read_bio->bi_vcnt = 0;
read_bio->bi_iter.bi_size = 0;
r10_bio->master_bio = read_bio;
@ -4473,7 +4468,7 @@ read_more:
/* Remove last page from this bio */
bio2->bi_vcnt--;
bio2->bi_iter.bi_size -= len;
bio2->bi_flags &= ~(1<<BIO_SEG_VALID);
__clear_bit(BIO_SEG_VALID, &bio2->bi_flags);
}
goto bio_full;
}
@ -4575,7 +4570,6 @@ static void end_reshape(struct r10conf *conf)
conf->fullsync = 0;
}
static int handle_reshape_read_error(struct mddev *mddev,
struct r10bio *r10_bio)
{
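
The extra kfree() calls added to stop() above are the fix for "md/raid10: another memory leak due to reshape": a reshape can leave conf->mirrors_old (and, on error paths, conf->mirrors_new) still allocated at teardown, so all three arrays must be freed. A freestanding sketch of the ownership rule with simplified stand-in types; kfree(NULL), like free(NULL), is a safe no-op, which is why the unconditional frees work:

#include <stdlib.h>

struct demo_conf {
    void *mirrors;          /* current layout */
    void *mirrors_old;      /* pre-reshape layout, freed late */
    void *mirrors_new;      /* staged layout, may linger on errors */
};

static void demo_stop(struct demo_conf *conf)
{
    free(conf->mirrors);
    free(conf->mirrors_old);    /* these two frees were missing */
    free(conf->mirrors_new);    /* free(NULL) is a no-op */
    free(conf);
}

int main(void)
{
    struct demo_conf *conf = calloc(1, sizeof(*conf));

    if (!conf)
        return 1;
    conf->mirrors = malloc(64);
    conf->mirrors_old = malloc(64);     /* left over from a reshape */
    demo_stop(conf);                    /* no leaks: all three freed */
    return 0;
}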

drivers/md/raid5.c

@ -463,7 +463,6 @@ static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
hlist_add_head(&sh->hash, hp);
}
/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash)
{
@ -531,9 +530,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
BUG_ON(stripe_operations_active(sh));
pr_debug("init_stripe called, stripe %llu\n",
(unsigned long long)sh->sector);
remove_hash(sh);
(unsigned long long)sector);
retry:
seq = read_seqcount_begin(&conf->gen_lock);
sh->generation = conf->generation - previous;
@ -542,7 +539,6 @@ retry:
stripe_set_idx(sector, conf, previous, sh);
sh->state = 0;
for (i = sh->disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
@ -1350,7 +1346,6 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
}
}
static void ops_complete_prexor(void *stripe_head_ref)
{
struct stripe_head *sh = stripe_head_ref;
@ -2419,7 +2414,6 @@ static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
return new_sector;
}
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
{
struct r5conf *conf = sh->raid_conf;
@ -2437,7 +2431,6 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
sector_t r_sector;
struct stripe_head sh2;
chunk_offset = sector_div(new_sector, sectors_per_chunk);
stripe = new_sector;
@ -2541,7 +2534,6 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
return r_sector;
}
static void
schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
int rcw, int expand)
@ -3013,7 +3005,6 @@ static void handle_stripe_fill(struct stripe_head *sh,
set_bit(STRIPE_HANDLE, &sh->state);
}
/* handle_stripe_clean_event
* any written block on an uptodate or failed drive can be returned.
* Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
@ -3304,7 +3295,6 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
}
}
static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
struct stripe_head_state *s,
int disks)
@ -3939,7 +3929,6 @@ static void handle_stripe(struct stripe_head *sh)
}
}
/* Finish reconstruct operations initiated by the expansion process */
if (sh->reconstruct_state == reconstruct_state_result) {
struct stripe_head *sh_src
@ -4137,7 +4126,6 @@ static int raid5_mergeable_bvec(struct request_queue *q,
return max;
}
static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
{
sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
@ -4167,7 +4155,6 @@ static void add_bio_to_retry(struct bio *bi,struct r5conf *conf)
md_wakeup_thread(conf->mddev->thread);
}
static struct bio *remove_bio_from_retry(struct r5conf *conf)
{
struct bio *bi;
@ -4191,7 +4178,6 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf)
return bi;
}
/*
* The "raid5_align_endio" should check if the read succeeded and if it
* did, call bio_endio on the original bio (having bio_put the new bio
@ -4224,7 +4210,6 @@ static void raid5_align_endio(struct bio *bi, int error)
return;
}
pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
add_bio_to_retry(raid_bi, conf);
@ -4249,7 +4234,6 @@ static int bio_fits_rdev(struct bio *bi)
return 1;
}
static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
{
struct r5conf *conf = mddev->private;
@ -4301,7 +4285,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
rcu_read_unlock();
raid_bio->bi_next = (void*)rdev;
align_bi->bi_bdev = rdev->bdev;
align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
__clear_bit(BIO_SEG_VALID, &align_bi->bi_flags);
if (!bio_fits_rdev(align_bi) ||
is_badblock(rdev, align_bi->bi_iter.bi_sector,
@ -5446,7 +5430,6 @@ raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR,
raid5_show_skip_copy,
raid5_store_skip_copy);
static ssize_t
stripe_cache_active_show(struct mddev *mddev, char *page)
{
@ -5898,7 +5881,6 @@ static struct r5conf *setup_conf(struct mddev *mddev)
return ERR_PTR(-ENOMEM);
}
static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
{
switch (algo) {
@ -5911,7 +5893,7 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded
return 1;
break;
case ALGORITHM_PARITY_0_6:
if (raid_disk == 0 ||
if (raid_disk == 0 ||
raid_disk == raid_disks - 1)
return 1;
break;
@ -6165,7 +6147,6 @@ static int run(struct mddev *mddev)
"reshape");
}
/* Ok, everything is just fine now */
if (mddev->to_remove == &raid5_attrs_group)
mddev->to_remove = NULL;
@ -6814,7 +6795,6 @@ static void raid5_quiesce(struct mddev *mddev, int state)
}
}
static void *raid45_takeover_raid0(struct mddev *mddev, int level)
{
struct r0conf *raid0_conf = mddev->private;
@ -6841,7 +6821,6 @@ static void *raid45_takeover_raid0(struct mddev *mddev, int level)
return setup_conf(mddev);
}
static void *raid5_takeover_raid1(struct mddev *mddev)
{
int chunksect;
@ -6902,7 +6881,6 @@ static void *raid5_takeover_raid6(struct mddev *mddev)
return setup_conf(mddev);
}
static int raid5_check_reshape(struct mddev *mddev)
{
/* For a 2-drive array, the layout and chunk size can be changed
@ -7051,7 +7029,6 @@ static void *raid6_takeover(struct mddev *mddev)
return setup_conf(mddev);
}
static struct md_personality raid6_personality =
{
.name = "raid6",

drivers/md/raid5.h

@ -155,7 +155,7 @@
*/
/*
* Operations state - intermediate states that are visible outside of
* Operations state - intermediate states that are visible outside of
* STRIPE_ACTIVE.
* In general _idle indicates nothing is running, _run indicates a data
* processing operation is active, and _result means the data processing result
@ -364,7 +364,6 @@ enum {
* HANDLE gets cleared if stripe_handle leaves nothing locked.
*/
struct disk_info {
struct md_rdev *rdev, *replacement;
};
@ -528,7 +527,6 @@ struct r5conf {
#define ALGORITHM_ROTATING_N_RESTART 9 /* DDF PRL=6 RLQ=2 */
#define ALGORITHM_ROTATING_N_CONTINUE 10 /*DDF PRL=6 RLQ=3 */
/* For every RAID5 algorithm we define a RAID6 algorithm
* with exactly the same layout for data and parity, and
* with the Q block always on the last device (N-1).

include/uapi/linux/raid/md_u.h

@ -39,7 +39,6 @@
#define RAID_VERSION _IOR (MD_MAJOR, 0x10, mdu_version_t)
#define GET_ARRAY_INFO _IOR (MD_MAJOR, 0x11, mdu_array_info_t)
#define GET_DISK_INFO _IOR (MD_MAJOR, 0x12, mdu_disk_info_t)
#define PRINT_RAID_DEBUG _IO (MD_MAJOR, 0x13)
#define RAID_AUTORUN _IO (MD_MAJOR, 0x14)
#define GET_BITMAP_FILE _IOR (MD_MAJOR, 0x15, mdu_bitmap_file_t)
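
For context on the deleted line above: md's command numbers are built with the standard _IO()/_IOR() macros, so retiring PRINT_RAID_DEBUG simply abandons command 0x13; nothing else is renumbered. A userspace sketch that prints the encodings (MD_MAJOR is 9, and on Linux the macros come in via <sys/ioctl.h>):

#include <stdio.h>
#include <sys/ioctl.h>

#define MD_MAJOR 9      /* block major of the md driver */

int main(void)
{
    /* the retired command and its still-live neighbour */
    printf("PRINT_RAID_DEBUG (removed) = %#lx\n",
           (unsigned long)_IO(MD_MAJOR, 0x13));
    printf("RAID_AUTORUN               = %#lx\n",
           (unsigned long)_IO(MD_MAJOR, 0x14));
    return 0;
}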

lib/raid6/algos.c

@ -121,9 +121,9 @@ static inline const struct raid6_recov_calls *raid6_choose_recov(void)
raid6_2data_recov = best->data2;
raid6_datap_recov = best->datap;
printk("raid6: using %s recovery algorithm\n", best->name);
pr_info("raid6: using %s recovery algorithm\n", best->name);
} else
printk("raid6: Yikes! No recovery algorithm found!\n");
pr_err("raid6: Yikes! No recovery algorithm found!\n");
return best;
}
@ -157,18 +157,18 @@ static inline const struct raid6_calls *raid6_choose_gen(
bestperf = perf;
best = *algo;
}
printk("raid6: %-8s %5ld MB/s\n", (*algo)->name,
pr_info("raid6: %-8s %5ld MB/s\n", (*algo)->name,
(perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
}
}
if (best) {
printk("raid6: using algorithm %s (%ld MB/s)\n",
pr_info("raid6: using algorithm %s (%ld MB/s)\n",
best->name,
(bestperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
raid6_call = *best;
} else
printk("raid6: Yikes! No algorithm found!\n");
pr_err("raid6: Yikes! No algorithm found!\n");
return best;
}
@ -194,7 +194,7 @@ int __init raid6_select_algo(void)
syndromes = (void *) __get_free_pages(GFP_KERNEL, 1);
if (!syndromes) {
printk("raid6: Yikes! No memory available.\n");
pr_err("raid6: Yikes! No memory available.\n");
return -ENOMEM;
}
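
The conversions above ("lib/raid6: Add log level to printks") rely on pr_info()/pr_err() being thin macros that prepend a log-level prefix to the format string, as in include/linux/printk.h. A userspace sketch of the expansion, using the historic "<6>"/"<3>" prefix form and printf() as a stand-in:

#include <stdio.h>

#define KERN_INFO "<6>"                 /* historic level-prefix form */
#define KERN_ERR  "<3>"
#define printk(...) printf(__VA_ARGS__) /* stand-in for the demo */
#define pr_info(fmt, ...) printk(KERN_INFO fmt, ##__VA_ARGS__)
#define pr_err(fmt, ...)  printk(KERN_ERR fmt, ##__VA_ARGS__)

int main(void)
{
    pr_info("raid6: using %s recovery algorithm\n", "avx2x2");
    pr_err("raid6: Yikes! No memory available.\n");
    return 0;
}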