This pull request contains:

* Fixes for both UBI and UBIFS
* overlayfs support (O_TMPFILE, RENAME_WHITEOUT/EXCHANGE)
* Code refactoring for the upcoming MLC support
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2
 
 iQIcBAABAgAGBQJX/QOCAAoJEEtJtSqsAOnWtp4QAKItkx/LrW44rHhkoJfqG62i
 o+OaxMKNu43/v/io+68JNEkIqgEap2vMZVkfoIgIyuyPxMG7nA/zG3c2JFvQ/ReS
 uH0PmcpkIXbRBKe9IEn6rXmRz9q9UTNGhP2U5kg0rL22vwVGYIuzF4Bny25Irzf/
 LLtYOkpfZfaNTSjs1pmuJMWVFF1Rj68eVJEWL6JZ1BPQ4bRPbn5sNgOKNTJYkrJs
 GcXXNtonf3B0zOzFnmfFhVO5neo4FEG3QEQafR+qbhoNBvXSluVIAFoO4VKEcyHD
 BJbotsT64TBsBj7ol97EXxz+N6LkB3tNM3bFBvhAFXZ+EvrJ0o+2QoEOH0igWjMI
 4AXwSl6htCs+wRmqAqpJfZpfI7kv2MDUB9ZGAbuXRS888OK78Dzt1CupPW7Q12xh
 yYMNsXZvRvK82n0DfqBLQ53SIe/L3PotG2Cc29hjGaHjK+YcwVRvdp/2B3ID3O2L
 6ap/M6KA+i1SiYZI6yAEYT76jKOam9YG/psb76q66xILJ7h5XQOZODYQ9zC2towo
 Pjb+bCPzHZPm+v7xtSsP6aanZ+5xRXO91JjvsWl9UOQVDCA/Jt98H5qhCJZjIeIs
 OJ7z9PbTv0/jcBBRrjJyZIUE85omDliY4h04B3Yu44xa7Q9e7wbE+Vs/6L9txS0e
 L8TBNHmrYB7ZIprCIhcE
 =UB7l
 -----END PGP SIGNATURE-----

Merge tag 'upstream-4.9-rc1' of git://git.infradead.org/linux-ubifs

Pull UBI/UBIFS updates from Richard Weinberger:
 "This pull request contains:

   - Fixes for both UBI and UBIFS
   - overlayfs support (O_TMPFILE, RENAME_WHITEOUT/EXCHANGE)
   - Code refactoring for the upcoming MLC support"

[ Ugh, we just got rid of the "rename2()" naming for the extended rename
  functionality. And this re-introduces it in ubifs with the cross-
  renaming and whiteout support.

  But rather than do any re-organizations in the merge itself, the
  naming can be cleaned up later ]

* tag 'upstream-4.9-rc1' of git://git.infradead.org/linux-ubifs: (27 commits)
  UBIFS: improve function-level documentation
  ubifs: fix host xattr_len when changing xattr
  ubifs: Use move variable in ubifs_rename()
  ubifs: Implement RENAME_EXCHANGE
  ubifs: Implement RENAME_WHITEOUT
  ubifs: Implement O_TMPFILE
  ubi: Fix Fastmap's update_vol()
  ubi: Fix races around ubi_refill_pools()
  ubi: Deal with interrupted erasures in WL
  UBI: introduce the VID buffer concept
  UBI: hide EBA internals
  UBI: provide an helper to query LEB information
  UBI: provide an helper to check whether a LEB is mapped or not
  UBI: add an helper to check lnum validity
  UBI: simplify LEB write and atomic LEB change code
  UBI: simplify recover_peb() code
  UBI: move the global ech and vidh variables into struct ubi_attach_info
  UBI: provide helpers to allocate and free aeb elements
  UBI: fastmap: use ubi_io_{read, write}_data() instead of ubi_io_{read, write}()
  UBI: fastmap: use ubi_rb_for_each_entry() in unmap_peb()
  ...
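
As a reading aid (not part of the merge itself): the RENAME_WHITEOUT/RENAME_EXCHANGE
support mentioned above is reached from user space through renameat2(). A minimal,
hypothetical sketch; the mount point and file names are made up, and it assumes a
kernel carrying this series:

	/* build: cc -o xrename xrename.c */
	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	#ifndef RENAME_EXCHANGE
	#define RENAME_EXCHANGE (1 << 1)	/* atomically swap two paths */
	#endif
	#ifndef RENAME_WHITEOUT
	#define RENAME_WHITEOUT (1 << 2)	/* leave a whiteout at the source */
	#endif

	int main(void)
	{
		/* Atomically exchange two files on a UBIFS volume. */
		if (syscall(SYS_renameat2, AT_FDCWD, "/mnt/ubifs/a",
			    AT_FDCWD, "/mnt/ubifs/b", RENAME_EXCHANGE))
			perror("RENAME_EXCHANGE");

		/* Rename and leave a whiteout behind (mainly used by overlayfs;
		 * typically needs privilege). */
		if (syscall(SYS_renameat2, AT_FDCWD, "/mnt/ubifs/b",
			    AT_FDCWD, "/mnt/ubifs/c", RENAME_WHITEOUT))
			perror("RENAME_WHITEOUT");

		return 0;
	}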
Linus Torvalds 2016-10-11 10:49:44 -07:00
commit 4c609922a3
21 changed files with 1330 additions and 657 deletions


@ -91,9 +91,132 @@
static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai);
/* Temporary variables used during scanning */
static struct ubi_ec_hdr *ech;
static struct ubi_vid_hdr *vidh;
#define AV_FIND BIT(0)
#define AV_ADD BIT(1)
#define AV_FIND_OR_ADD (AV_FIND | AV_ADD)
/**
* find_or_add_av - internal function to find a volume, add a volume or do
* both (find and add if missing).
* @ai: attaching information
* @vol_id: the requested volume ID
* @flags: a combination of the %AV_FIND and %AV_ADD flags describing the
* expected operation. If only %AV_ADD is set, -EEXIST is returned
* if the volume already exists. If only %AV_FIND is set, NULL is
* returned if the volume does not exist. And if both flags are
* set, the helper first tries to find an existing volume, and if
* it does not exist it creates a new one.
* @created: output value used to inform the caller whether it's a newly created
* volume or not.
*
* This function returns a pointer to a volume description or an ERR_PTR if
* the operation failed. It can also return NULL if only %AV_FIND is set and
* the volume does not exist.
*/
static struct ubi_ainf_volume *find_or_add_av(struct ubi_attach_info *ai,
int vol_id, unsigned int flags,
bool *created)
{
struct ubi_ainf_volume *av;
struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
/* Walk the volume RB-tree to look if this volume is already present */
while (*p) {
parent = *p;
av = rb_entry(parent, struct ubi_ainf_volume, rb);
if (vol_id == av->vol_id) {
*created = false;
if (!(flags & AV_FIND))
return ERR_PTR(-EEXIST);
return av;
}
if (vol_id > av->vol_id)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
if (!(flags & AV_ADD))
return NULL;
/* The volume is absent - add it */
av = kzalloc(sizeof(*av), GFP_KERNEL);
if (!av)
return ERR_PTR(-ENOMEM);
av->vol_id = vol_id;
if (vol_id > ai->highest_vol_id)
ai->highest_vol_id = vol_id;
rb_link_node(&av->rb, parent, p);
rb_insert_color(&av->rb, &ai->volumes);
ai->vols_found += 1;
*created = true;
dbg_bld("added volume %d", vol_id);
return av;
}
/**
* ubi_find_or_add_av - search for a volume in the attaching information and
* add one if it does not exist.
* @ai: attaching information
* @vol_id: the requested volume ID
* @created: whether the volume has been created or not
*
* This function returns a pointer to the new volume description or an
* ERR_PTR if the operation failed.
*/
static struct ubi_ainf_volume *ubi_find_or_add_av(struct ubi_attach_info *ai,
int vol_id, bool *created)
{
return find_or_add_av(ai, vol_id, AV_FIND_OR_ADD, created);
}
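
The wrappers built on top of this helper only differ in the flags they pass. A
condensed, hypothetical fragment illustrating the resulting calling conventions
(ubi_find_av() and ubi_add_av() are added further down in this file):

	struct ubi_ainf_volume *av;
	bool created;

	/* AV_FIND only: never allocates, NULL means the volume is unknown. */
	av = ubi_find_av(ai, vol_id);

	/* AV_ADD only: always allocates, ERR_PTR(-EEXIST) if it already exists. */
	av = ubi_add_av(ai, vol_id);

	/*
	 * AV_FIND | AV_ADD: return the existing volume or create a new one;
	 * @created tells the caller which of the two happened, IS_ERR(av)
	 * covers allocation failures.
	 */
	av = ubi_find_or_add_av(ai, vol_id, &created);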
/**
* ubi_alloc_aeb - allocate an aeb element
* @ai: attaching information
* @pnum: physical eraseblock number
* @ec: erase counter of the physical eraseblock
*
* Allocate an aeb object and initialize the pnum and ec information.
* vol_id and lnum are set to UBI_UNKNOWN, and the other fields are
* initialized to zero.
* Note that the element is not added in any list or RB tree.
*/
struct ubi_ainf_peb *ubi_alloc_aeb(struct ubi_attach_info *ai, int pnum,
int ec)
{
struct ubi_ainf_peb *aeb;
aeb = kmem_cache_zalloc(ai->aeb_slab_cache, GFP_KERNEL);
if (!aeb)
return NULL;
aeb->pnum = pnum;
aeb->ec = ec;
aeb->vol_id = UBI_UNKNOWN;
aeb->lnum = UBI_UNKNOWN;
return aeb;
}
/**
* ubi_free_aeb - free an aeb element
* @ai: attaching information
* @aeb: the element to free
*
* Free an aeb object. The caller must have removed the element from any list
* or RB tree.
*/
void ubi_free_aeb(struct ubi_attach_info *ai, struct ubi_ainf_peb *aeb)
{
kmem_cache_free(ai->aeb_slab_cache, aeb);
}
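
The remainder of this file (and fastmap.c below) converts the open-coded slab
allocations to these helpers; the recurring pattern, shown here as a sketch rather
than an exact hunk from the diff:

	/* Before: raw slab allocation plus manual pnum/ec setup. */
	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;
	aeb->pnum = pnum;
	aeb->ec = ec;

	/* After: the helper fills pnum/ec and defaults the remaining fields. */
	aeb = ubi_alloc_aeb(ai, pnum, ec);
	if (!aeb)
		return -ENOMEM;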
/**
* add_to_list - add physical eraseblock to a list.
@ -131,14 +254,12 @@ static int add_to_list(struct ubi_attach_info *ai, int pnum, int vol_id,
} else
BUG();
aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
aeb = ubi_alloc_aeb(ai, pnum, ec);
if (!aeb)
return -ENOMEM;
aeb->pnum = pnum;
aeb->vol_id = vol_id;
aeb->lnum = lnum;
aeb->ec = ec;
if (to_head)
list_add(&aeb->u.list, list);
else
@ -163,13 +284,11 @@ static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
aeb = ubi_alloc_aeb(ai, pnum, ec);
if (!aeb)
return -ENOMEM;
ai->corr_peb_count += 1;
aeb->pnum = pnum;
aeb->ec = ec;
list_add(&aeb->u.list, &ai->corr);
return 0;
}
@ -192,14 +311,12 @@ static int add_fastmap(struct ubi_attach_info *ai, int pnum,
{
struct ubi_ainf_peb *aeb;
aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
aeb = ubi_alloc_aeb(ai, pnum, ec);
if (!aeb)
return -ENOMEM;
aeb->pnum = pnum;
aeb->vol_id = be32_to_cpu(vidh->vol_id);
aeb->sqnum = be64_to_cpu(vidh->sqnum);
aeb->ec = ec;
aeb->vol_id = be32_to_cpu(vid_hdr->vol_id);
aeb->sqnum = be64_to_cpu(vid_hdr->sqnum);
list_add(&aeb->u.list, &ai->fastmap);
dbg_bld("add to fastmap list: PEB %d, vol_id %d, sqnum: %llu", pnum,
@ -294,44 +411,20 @@ static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
const struct ubi_vid_hdr *vid_hdr)
{
struct ubi_ainf_volume *av;
struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
bool created;
ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));
/* Walk the volume RB-tree to look if this volume is already present */
while (*p) {
parent = *p;
av = rb_entry(parent, struct ubi_ainf_volume, rb);
av = ubi_find_or_add_av(ai, vol_id, &created);
if (IS_ERR(av) || !created)
return av;
if (vol_id == av->vol_id)
return av;
if (vol_id > av->vol_id)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
/* The volume is absent - add it */
av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
if (!av)
return ERR_PTR(-ENOMEM);
av->highest_lnum = av->leb_count = 0;
av->vol_id = vol_id;
av->root = RB_ROOT;
av->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
av->data_pad = be32_to_cpu(vid_hdr->data_pad);
av->compat = vid_hdr->compat;
av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
: UBI_STATIC_VOLUME;
if (vol_id > ai->highest_vol_id)
ai->highest_vol_id = vol_id;
rb_link_node(&av->rb, parent, p);
rb_insert_color(&av->rb, &ai->volumes);
ai->vols_found += 1;
dbg_bld("added volume %d", vol_id);
return av;
}
@ -360,7 +453,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
{
int len, err, second_is_newer, bitflips = 0, corrupted = 0;
uint32_t data_crc, crc;
struct ubi_vid_hdr *vh = NULL;
struct ubi_vid_io_buf *vidb = NULL;
unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
if (sqnum2 == aeb->sqnum) {
@ -403,12 +496,12 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
return bitflips << 1;
}
vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vh)
vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
if (!vidb)
return -ENOMEM;
pnum = aeb->pnum;
err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0);
if (err) {
if (err == UBI_IO_BITFLIPS)
bitflips = 1;
@ -422,7 +515,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
}
}
vid_hdr = vh;
vid_hdr = ubi_get_vid_hdr(vidb);
}
/* Read the data of the copy and check the CRC */
@ -448,7 +541,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
}
mutex_unlock(&ubi->buf_mutex);
ubi_free_vid_hdr(ubi, vh);
ubi_free_vid_buf(vidb);
if (second_is_newer)
dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
@ -460,7 +553,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
out_unlock:
mutex_unlock(&ubi->buf_mutex);
out_free_vidh:
ubi_free_vid_hdr(ubi, vh);
ubi_free_vid_buf(vidb);
return err;
}
@ -605,12 +698,10 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
if (err)
return err;
aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
aeb = ubi_alloc_aeb(ai, pnum, ec);
if (!aeb)
return -ENOMEM;
aeb->ec = ec;
aeb->pnum = pnum;
aeb->vol_id = vol_id;
aeb->lnum = lnum;
aeb->scrub = bitflips;
@ -628,6 +719,21 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
return 0;
}
/**
* ubi_add_av - add volume to the attaching information.
* @ai: attaching information
* @vol_id: the requested volume ID
*
* This function returns a pointer to the new volume description or an
* ERR_PTR if the operation failed.
*/
struct ubi_ainf_volume *ubi_add_av(struct ubi_attach_info *ai, int vol_id)
{
bool created;
return find_or_add_av(ai, vol_id, AV_ADD, &created);
}
/**
* ubi_find_av - find volume in the attaching information.
* @ai: attaching information
@ -639,24 +745,15 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
int vol_id)
{
struct ubi_ainf_volume *av;
struct rb_node *p = ai->volumes.rb_node;
bool created;
while (p) {
av = rb_entry(p, struct ubi_ainf_volume, rb);
if (vol_id == av->vol_id)
return av;
if (vol_id > av->vol_id)
p = p->rb_left;
else
p = p->rb_right;
}
return NULL;
return find_or_add_av((struct ubi_attach_info *)ai, vol_id, AV_FIND,
&created);
}
static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av,
struct list_head *list);
/**
* ubi_remove_av - delete attaching information about a volume.
* @ai: attaching information
@ -664,19 +761,10 @@ struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
*/
void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
{
struct rb_node *rb;
struct ubi_ainf_peb *aeb;
dbg_bld("remove attaching information about volume %d", av->vol_id);
while ((rb = rb_first(&av->root))) {
aeb = rb_entry(rb, struct ubi_ainf_peb, u.rb);
rb_erase(&aeb->u.rb, &av->root);
list_add_tail(&aeb->u.list, &ai->erase);
}
rb_erase(&av->rb, &ai->volumes);
kfree(av);
destroy_av(ai, av, &ai->erase);
ai->vols_found -= 1;
}
@ -866,6 +954,9 @@ static bool vol_ignored(int vol_id)
static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
int pnum, bool fast)
{
struct ubi_ec_hdr *ech = ai->ech;
struct ubi_vid_io_buf *vidb = ai->vidb;
struct ubi_vid_hdr *vidh = ubi_get_vid_hdr(vidb);
long long ec;
int err, bitflips = 0, vol_id = -1, ec_err = 0;
@ -963,7 +1054,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
/* OK, we've done with the EC header, let's look at the VID header */
err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0);
if (err < 0)
return err;
switch (err) {
@ -1191,10 +1282,12 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
* destroy_av - free volume attaching information.
* @av: volume attaching information
* @ai: attaching information
* @list: put the aeb elements in there if !NULL, otherwise free them
*
* This function destroys the volume attaching information.
*/
static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av,
struct list_head *list)
{
struct ubi_ainf_peb *aeb;
struct rb_node *this = av->root.rb_node;
@ -1214,7 +1307,10 @@ static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
this->rb_right = NULL;
}
kmem_cache_free(ai->aeb_slab_cache, aeb);
if (list)
list_add_tail(&aeb->u.list, list);
else
ubi_free_aeb(ai, aeb);
}
}
kfree(av);
@ -1232,23 +1328,23 @@ static void destroy_ai(struct ubi_attach_info *ai)
list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
list_del(&aeb->u.list);
kmem_cache_free(ai->aeb_slab_cache, aeb);
ubi_free_aeb(ai, aeb);
}
list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
list_del(&aeb->u.list);
kmem_cache_free(ai->aeb_slab_cache, aeb);
ubi_free_aeb(ai, aeb);
}
list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
list_del(&aeb->u.list);
kmem_cache_free(ai->aeb_slab_cache, aeb);
ubi_free_aeb(ai, aeb);
}
list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
list_del(&aeb->u.list);
kmem_cache_free(ai->aeb_slab_cache, aeb);
ubi_free_aeb(ai, aeb);
}
list_for_each_entry_safe(aeb, aeb_tmp, &ai->fastmap, u.list) {
list_del(&aeb->u.list);
kmem_cache_free(ai->aeb_slab_cache, aeb);
ubi_free_aeb(ai, aeb);
}
/* Destroy the volume RB-tree */
@ -1269,7 +1365,7 @@ static void destroy_ai(struct ubi_attach_info *ai)
rb->rb_right = NULL;
}
destroy_av(ai, av);
destroy_av(ai, av, NULL);
}
}
@ -1297,12 +1393,12 @@ static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
err = -ENOMEM;
ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ech)
ai->ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ai->ech)
return err;
vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vidh)
ai->vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
if (!ai->vidb)
goto out_ech;
for (pnum = start; pnum < ubi->peb_count; pnum++) {
@ -1351,15 +1447,15 @@ static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
if (err)
goto out_vidh;
ubi_free_vid_hdr(ubi, vidh);
kfree(ech);
ubi_free_vid_buf(ai->vidb);
kfree(ai->ech);
return 0;
out_vidh:
ubi_free_vid_hdr(ubi, vidh);
ubi_free_vid_buf(ai->vidb);
out_ech:
kfree(ech);
kfree(ai->ech);
return err;
}
@ -1411,12 +1507,12 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
if (!scan_ai)
goto out;
ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ech)
scan_ai->ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!scan_ai->ech)
goto out_ai;
vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vidh)
scan_ai->vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
if (!scan_ai->vidb)
goto out_ech;
for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
@ -1428,8 +1524,8 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
goto out_vidh;
}
ubi_free_vid_hdr(ubi, vidh);
kfree(ech);
ubi_free_vid_buf(scan_ai->vidb);
kfree(scan_ai->ech);
if (scan_ai->force_full_scan)
err = UBI_NO_FASTMAP;
@ -1449,9 +1545,9 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
return err;
out_vidh:
ubi_free_vid_hdr(ubi, vidh);
ubi_free_vid_buf(scan_ai->vidb);
out_ech:
kfree(ech);
kfree(scan_ai->ech);
out_ai:
destroy_ai(scan_ai);
out:
@ -1573,6 +1669,8 @@ out_ai:
*/
static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
struct ubi_vid_io_buf *vidb = ai->vidb;
struct ubi_vid_hdr *vidh = ubi_get_vid_hdr(vidb);
int pnum, err, vols_found = 0;
struct rb_node *rb1, *rb2;
struct ubi_ainf_volume *av;
@ -1708,7 +1806,7 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
last_aeb = aeb;
err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1);
err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidb, 1);
if (err && err != UBI_IO_BITFLIPS) {
ubi_err(ubi, "VID header is not OK (%d)",
err);


@ -574,7 +574,7 @@ void ubi_free_internal_volumes(struct ubi_device *ubi)
for (i = ubi->vtbl_slots;
i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
kfree(ubi->volumes[i]->eba_tbl);
ubi_eba_replace_table(ubi->volumes[i], NULL);
kfree(ubi->volumes[i]);
}
}


@ -416,7 +416,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
}
rsvd_bytes = (long long)vol->reserved_pebs *
ubi->leb_size-vol->data_pad;
vol->usable_leb_size;
if (bytes < 0 || bytes > rsvd_bytes) {
err = -EINVAL;
break;
@ -454,7 +454,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
/* Validate the request */
err = -EINVAL;
if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
if (!ubi_leb_valid(vol, req.lnum) ||
req.bytes < 0 || req.bytes > vol->usable_leb_size)
break;
@ -485,7 +485,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
break;
}
if (lnum < 0 || lnum >= vol->reserved_pebs) {
if (!ubi_leb_valid(vol, lnum)) {
err = -EINVAL;
break;
}
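
The open-coded range checks above are folded into the new ubi_leb_valid() helper.
Its definition is not part of this hunk; judging from the conditions it replaces,
it is equivalent to the following (assumed sketch):

	/* Assumed shape of the helper, mirroring the checks it replaces. */
	static inline bool ubi_leb_valid(struct ubi_volume *vol, int lnum)
	{
		return lnum >= 0 && lnum < vol->reserved_pebs;
	}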


@ -49,6 +49,30 @@
/* Number of physical eraseblocks reserved for atomic LEB change operation */
#define EBA_RESERVED_PEBS 1
/**
* struct ubi_eba_entry - structure encoding a single LEB -> PEB association
* @pnum: the physical eraseblock number attached to the LEB
*
* This structure is encoding a LEB -> PEB association. Note that the LEB
* number is not stored here, because it is the index used to access the
* entries table.
*/
struct ubi_eba_entry {
int pnum;
};
/**
* struct ubi_eba_table - LEB -> PEB association information
* @entries: the LEB to PEB mapping (one entry per LEB).
*
* This structure is private to the EBA logic and should be kept here.
* It is encoding the LEB to PEB association table, and is subject to
* changes.
*/
struct ubi_eba_table {
struct ubi_eba_entry *entries;
};
/**
* next_sqnum - get next sequence number.
* @ubi: UBI device description object
@ -83,6 +107,110 @@ static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
return 0;
}
/**
* ubi_eba_get_ldesc - get information about a LEB
* @vol: volume description object
* @lnum: logical eraseblock number
* @ldesc: the LEB descriptor to fill
*
* Used to query information about a specific LEB.
* It is currently only returning the physical position of the LEB, but will be
* extended to provide more information.
*/
void ubi_eba_get_ldesc(struct ubi_volume *vol, int lnum,
struct ubi_eba_leb_desc *ldesc)
{
ldesc->lnum = lnum;
ldesc->pnum = vol->eba_tbl->entries[lnum].pnum;
}
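
ubi_write_fastmap() later in this series switches to this descriptor instead of
reading vol->eba_tbl directly; a minimal usage fragment:

	/* Query the PEB currently backing LEB @lnum without touching eba_tbl. */
	struct ubi_eba_leb_desc ldesc;

	ubi_eba_get_ldesc(vol, lnum, &ldesc);
	if (ldesc.pnum >= 0)
		dbg_eba("LEB %d is mapped to PEB %d", ldesc.lnum, ldesc.pnum);
	else
		dbg_eba("LEB %d is unmapped", ldesc.lnum);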
/**
* ubi_eba_create_table - allocate a new EBA table and initialize it with all
* LEBs unmapped
* @vol: volume containing the EBA table to copy
* @nentries: number of entries in the table
*
* Allocate a new EBA table and initialize it with all LEBs unmapped.
* Returns a valid pointer if it succeeds, an ERR_PTR() otherwise.
*/
struct ubi_eba_table *ubi_eba_create_table(struct ubi_volume *vol,
int nentries)
{
struct ubi_eba_table *tbl;
int err = -ENOMEM;
int i;
tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
if (!tbl)
return ERR_PTR(-ENOMEM);
tbl->entries = kmalloc_array(nentries, sizeof(*tbl->entries),
GFP_KERNEL);
if (!tbl->entries)
goto err;
for (i = 0; i < nentries; i++)
tbl->entries[i].pnum = UBI_LEB_UNMAPPED;
return tbl;
err:
kfree(tbl->entries);
kfree(tbl);
return ERR_PTR(err);
}
/**
* ubi_eba_destroy_table - destroy an EBA table
* @tbl: the table to destroy
*
* Destroy an EBA table.
*/
void ubi_eba_destroy_table(struct ubi_eba_table *tbl)
{
if (!tbl)
return;
kfree(tbl->entries);
kfree(tbl);
}
/**
* ubi_eba_copy_table - copy the EBA table attached to vol into another table
* @vol: volume containing the EBA table to copy
* @dst: destination
* @nentries: number of entries to copy
*
* Copy the EBA table stored in vol into the one pointed by dst.
*/
void ubi_eba_copy_table(struct ubi_volume *vol, struct ubi_eba_table *dst,
int nentries)
{
struct ubi_eba_table *src;
int i;
ubi_assert(dst && vol && vol->eba_tbl);
src = vol->eba_tbl;
for (i = 0; i < nentries; i++)
dst->entries[i].pnum = src->entries[i].pnum;
}
/**
* ubi_eba_replace_table - assign a new EBA table to a volume
* @vol: volume containing the EBA table to copy
* @tbl: new EBA table
*
* Assign a new EBA table to the volume and release the old one.
*/
void ubi_eba_replace_table(struct ubi_volume *vol, struct ubi_eba_table *tbl)
{
ubi_eba_destroy_table(vol->eba_tbl);
vol->eba_tbl = tbl;
}
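
Taken together, these helpers replace the old bare array of ints. A minimal sketch
of the intended lifecycle, using a hypothetical resize-style caller (the real call
sites live in parts of the series not shown in this excerpt):

	/* Grow or shrink a volume's LEB -> PEB table to @new_reserved_pebs entries. */
	static int example_resize_eba(struct ubi_volume *vol, int new_reserved_pebs)
	{
		struct ubi_eba_table *new_tbl;
		int ncopy = min(vol->reserved_pebs, new_reserved_pebs);

		new_tbl = ubi_eba_create_table(vol, new_reserved_pebs);
		if (IS_ERR(new_tbl))
			return PTR_ERR(new_tbl);

		/* Carry over the existing mappings, then swap the tables. */
		ubi_eba_copy_table(vol, new_tbl, ncopy);
		ubi_eba_replace_table(vol, new_tbl);	/* frees the old table */

		return 0;
	}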
/**
* ltree_lookup - look up the lock tree.
* @ubi: UBI device description object
@ -311,6 +439,18 @@ static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
spin_unlock(&ubi->ltree_lock);
}
/**
* ubi_eba_is_mapped - check if a LEB is mapped.
* @vol: volume description object
* @lnum: logical eraseblock number
*
* This function returns true if the LEB is mapped, false otherwise.
*/
bool ubi_eba_is_mapped(struct ubi_volume *vol, int lnum)
{
return vol->eba_tbl->entries[lnum].pnum >= 0;
}
/**
* ubi_eba_unmap_leb - un-map logical eraseblock.
* @ubi: UBI device description object
@ -333,7 +473,7 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
if (err)
return err;
pnum = vol->eba_tbl[lnum];
pnum = vol->eba_tbl->entries[lnum].pnum;
if (pnum < 0)
/* This logical eraseblock is already unmapped */
goto out_unlock;
@ -341,7 +481,7 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
down_read(&ubi->fm_eba_sem);
vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
up_read(&ubi->fm_eba_sem);
err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
@ -373,6 +513,7 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
void *buf, int offset, int len, int check)
{
int err, pnum, scrub = 0, vol_id = vol->vol_id;
struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
uint32_t uninitialized_var(crc);
@ -380,7 +521,7 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
if (err)
return err;
pnum = vol->eba_tbl[lnum];
pnum = vol->eba_tbl->entries[lnum].pnum;
if (pnum < 0) {
/*
* The logical eraseblock is not mapped, fill the whole buffer
@ -403,13 +544,15 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
retry:
if (check) {
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
if (!vid_hdr) {
vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
if (!vidb) {
err = -ENOMEM;
goto out_unlock;
}
err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
vid_hdr = ubi_get_vid_hdr(vidb);
err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
if (err && err != UBI_IO_BITFLIPS) {
if (err > 0) {
/*
@ -455,7 +598,7 @@ retry:
ubi_assert(len == be32_to_cpu(vid_hdr->data_size));
crc = be32_to_cpu(vid_hdr->data_crc);
ubi_free_vid_hdr(ubi, vid_hdr);
ubi_free_vid_buf(vidb);
}
err = ubi_io_read_data(ubi, buf, pnum, offset, len);
@ -492,7 +635,7 @@ retry:
return err;
out_free:
ubi_free_vid_hdr(ubi, vid_hdr);
ubi_free_vid_buf(vidb);
out_unlock:
leb_read_unlock(ubi, vol_id, lnum);
return err;
@ -553,6 +696,103 @@ int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
return ret;
}
/**
* try_recover_peb - try to recover from write failure.
* @vol: volume description object
* @pnum: the physical eraseblock to recover
* @lnum: logical eraseblock number
* @buf: data which was not written because of the write failure
* @offset: offset of the failed write
* @len: how many bytes should have been written
* @vidb: VID buffer
* @retry: whether the caller should retry in case of failure
*
* This function is called in case of a write failure and moves all good data
* from the potentially bad physical eraseblock to a good physical eraseblock.
* This function also writes the data which was not written due to the failure.
* Returns 0 in case of success, and a negative error code in case of failure.
* In case of failure, the %retry parameter is set to false if this is a fatal
* error (retrying won't help), and true otherwise.
*/
static int try_recover_peb(struct ubi_volume *vol, int pnum, int lnum,
const void *buf, int offset, int len,
struct ubi_vid_io_buf *vidb, bool *retry)
{
struct ubi_device *ubi = vol->ubi;
struct ubi_vid_hdr *vid_hdr;
int new_pnum, err, vol_id = vol->vol_id, data_size;
uint32_t crc;
*retry = false;
new_pnum = ubi_wl_get_peb(ubi);
if (new_pnum < 0) {
err = new_pnum;
goto out_put;
}
ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
pnum, new_pnum);
err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
if (err && err != UBI_IO_BITFLIPS) {
if (err > 0)
err = -EIO;
goto out_put;
}
vid_hdr = ubi_get_vid_hdr(vidb);
ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
mutex_lock(&ubi->buf_mutex);
memset(ubi->peb_buf + offset, 0xFF, len);
/* Read everything before the area where the write failure happened */
if (offset > 0) {
err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
if (err && err != UBI_IO_BITFLIPS)
goto out_unlock;
}
*retry = true;
memcpy(ubi->peb_buf + offset, buf, len);
data_size = offset + len;
crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->copy_flag = 1;
vid_hdr->data_size = cpu_to_be32(data_size);
vid_hdr->data_crc = cpu_to_be32(crc);
err = ubi_io_write_vid_hdr(ubi, new_pnum, vidb);
if (err)
goto out_unlock;
err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
out_unlock:
mutex_unlock(&ubi->buf_mutex);
if (!err)
vol->eba_tbl->entries[lnum].pnum = new_pnum;
out_put:
up_read(&ubi->fm_eba_sem);
if (!err) {
ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
ubi_msg(ubi, "data was successfully recovered");
} else if (new_pnum >= 0) {
/*
* Bad luck? This physical eraseblock is bad too? Crud. Let's
* try to get another one.
*/
ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
}
return err;
}
/**
* recover_peb - recover from write failure.
* @ubi: UBI device description object
@ -566,106 +806,97 @@ int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
* This function is called in case of a write failure and moves all good data
* from the potentially bad physical eraseblock to a good physical eraseblock.
* This function also writes the data which was not written due to the failure.
* Returns new physical eraseblock number in case of success, and a negative
* error code in case of failure.
* Returns 0 in case of success, and a negative error code in case of failure.
* This function tries %UBI_IO_RETRIES before giving up.
*/
static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
const void *buf, int offset, int len)
{
int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
int err, idx = vol_id2idx(ubi, vol_id), tries;
struct ubi_volume *vol = ubi->volumes[idx];
struct ubi_vid_hdr *vid_hdr;
uint32_t crc;
struct ubi_vid_io_buf *vidb;
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
if (!vid_hdr)
vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
if (!vidb)
return -ENOMEM;
retry:
new_pnum = ubi_wl_get_peb(ubi);
if (new_pnum < 0) {
ubi_free_vid_hdr(ubi, vid_hdr);
up_read(&ubi->fm_eba_sem);
return new_pnum;
for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
bool retry;
err = try_recover_peb(vol, pnum, lnum, buf, offset, len, vidb,
&retry);
if (!err || !retry)
break;
ubi_msg(ubi, "try again");
}
ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
pnum, new_pnum);
ubi_free_vid_buf(vidb);
err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
if (err && err != UBI_IO_BITFLIPS) {
if (err > 0)
err = -EIO;
up_read(&ubi->fm_eba_sem);
return err;
}
/**
* try_write_vid_and_data - try to write VID header and data to a new PEB.
* @vol: volume description object
* @lnum: logical eraseblock number
* @vidb: the VID buffer to write
* @buf: buffer containing the data
* @offset: where to start writing data
* @len: how many bytes should be written
*
* This function tries to write VID header and data belonging to logical
* eraseblock @lnum of volume @vol to a new physical eraseblock. Returns zero
* in case of success and a negative error code in case of failure.
* In case of error, it is possible that something was still written to the
* flash media, but may be some garbage.
*/
static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
struct ubi_vid_io_buf *vidb, const void *buf,
int offset, int len)
{
struct ubi_device *ubi = vol->ubi;
int pnum, opnum, err, vol_id = vol->vol_id;
pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
err = pnum;
goto out_put;
}
ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
opnum = vol->eba_tbl->entries[lnum].pnum;
mutex_lock(&ubi->buf_mutex);
memset(ubi->peb_buf + offset, 0xFF, len);
dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
len, offset, vol_id, lnum, pnum);
/* Read everything before the area where the write failure happened */
if (offset > 0) {
err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
if (err && err != UBI_IO_BITFLIPS) {
up_read(&ubi->fm_eba_sem);
goto out_unlock;
err = ubi_io_write_vid_hdr(ubi, pnum, vidb);
if (err) {
ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
vol_id, lnum, pnum);
goto out_put;
}
if (len) {
err = ubi_io_write_data(ubi, buf, pnum, offset, len);
if (err) {
ubi_warn(ubi,
"failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
len, offset, vol_id, lnum, pnum);
goto out_put;
}
}
memcpy(ubi->peb_buf + offset, buf, len);
vol->eba_tbl->entries[lnum].pnum = pnum;
data_size = offset + len;
crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->copy_flag = 1;
vid_hdr->data_size = cpu_to_be32(data_size);
vid_hdr->data_crc = cpu_to_be32(crc);
err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
if (err) {
mutex_unlock(&ubi->buf_mutex);
up_read(&ubi->fm_eba_sem);
goto write_error;
}
err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
if (err) {
mutex_unlock(&ubi->buf_mutex);
up_read(&ubi->fm_eba_sem);
goto write_error;
}
mutex_unlock(&ubi->buf_mutex);
ubi_free_vid_hdr(ubi, vid_hdr);
vol->eba_tbl[lnum] = new_pnum;
up_read(&ubi->fm_eba_sem);
ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
ubi_msg(ubi, "data was successfully recovered");
return 0;
out_unlock:
mutex_unlock(&ubi->buf_mutex);
out_put:
ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
up_read(&ubi->fm_eba_sem);
write_error:
/*
* Bad luck? This physical eraseblock is bad too? Crud. Let's try to
* get another one.
*/
ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
if (++tries > UBI_IO_RETRIES) {
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
}
ubi_msg(ubi, "try again");
goto retry;
if (err && pnum >= 0)
err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
else if (!err && opnum >= 0)
err = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
return err;
}
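
All three write paths below (ubi_eba_write_leb(), ubi_eba_write_leb_st() and
ubi_eba_atomic_leb_change()) now wrap this helper in the same retry skeleton;
condensed sketch of that shared pattern:

	for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
		err = try_write_vid_and_data(vol, lnum, vidb, buf, offset, len);
		if (err != -EIO || !ubi->bad_allowed)
			break;	/* success, or an error a retry cannot fix */

		/* A fresh PEB gets a fresh sequence number before the next try. */
		vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		ubi_msg(ubi, "try another PEB");
	}
	if (err)
		ubi_ro_mode(ubi);	/* give up and switch to read-only mode */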
/**
@ -681,11 +912,13 @@ write_error:
* @vol. Returns zero in case of success and a negative error code in case
* of failure. In case of error, it is possible that something was still
* written to the flash media, but may be some garbage.
* This function retries %UBI_IO_RETRIES times before giving up.
*/
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
const void *buf, int offset, int len)
{
int err, pnum, tries = 0, vol_id = vol->vol_id;
int err, pnum, tries, vol_id = vol->vol_id;
struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
if (ubi->ro_mode)
@ -695,7 +928,7 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
if (err)
return err;
pnum = vol->eba_tbl[lnum];
pnum = vol->eba_tbl->entries[lnum].pnum;
if (pnum >= 0) {
dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
len, offset, vol_id, lnum, pnum);
@ -706,23 +939,23 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
if (err == -EIO && ubi->bad_allowed)
err = recover_peb(ubi, pnum, vol_id, lnum, buf,
offset, len);
if (err)
ubi_ro_mode(ubi);
}
leb_write_unlock(ubi, vol_id, lnum);
return err;
goto out;
}
/*
* The logical eraseblock is not mapped. We have to get a free physical
* eraseblock and write the volume identifier header there first.
*/
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
if (!vid_hdr) {
vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
if (!vidb) {
leb_write_unlock(ubi, vol_id, lnum);
return -ENOMEM;
}
vid_hdr = ubi_get_vid_hdr(vidb);
vid_hdr->vol_type = UBI_VID_DYNAMIC;
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
@ -730,67 +963,30 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
retry:
pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
ubi_free_vid_hdr(ubi, vid_hdr);
leb_write_unlock(ubi, vol_id, lnum);
up_read(&ubi->fm_eba_sem);
return pnum;
for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
err = try_write_vid_and_data(vol, lnum, vidb, buf, offset, len);
if (err != -EIO || !ubi->bad_allowed)
break;
/*
* Fortunately, this is the first write operation to this
* physical eraseblock, so just put it and request a new one.
* We assume that if this physical eraseblock went bad, the
* erase code will handle that.
*/
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg(ubi, "try another PEB");
}
dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
len, offset, vol_id, lnum, pnum);
ubi_free_vid_buf(vidb);
err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
if (err) {
ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
vol_id, lnum, pnum);
up_read(&ubi->fm_eba_sem);
goto write_error;
}
if (len) {
err = ubi_io_write_data(ubi, buf, pnum, offset, len);
if (err) {
ubi_warn(ubi, "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
len, offset, vol_id, lnum, pnum);
up_read(&ubi->fm_eba_sem);
goto write_error;
}
}
vol->eba_tbl[lnum] = pnum;
up_read(&ubi->fm_eba_sem);
out:
if (err)
ubi_ro_mode(ubi);
leb_write_unlock(ubi, vol_id, lnum);
ubi_free_vid_hdr(ubi, vid_hdr);
return 0;
write_error:
if (err != -EIO || !ubi->bad_allowed) {
ubi_ro_mode(ubi);
leb_write_unlock(ubi, vol_id, lnum);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
}
/*
* Fortunately, this is the first write operation to this physical
* eraseblock, so just put it and request a new one. We assume that if
* this physical eraseblock went bad, the erase code will handle that.
*/
err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
if (err || ++tries > UBI_IO_RETRIES) {
ubi_ro_mode(ubi);
leb_write_unlock(ubi, vol_id, lnum);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
}
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg(ubi, "try another PEB");
goto retry;
return err;
}
/**
@ -818,7 +1014,8 @@ write_error:
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
int lnum, const void *buf, int len, int used_ebs)
{
int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
int err, tries, data_size = len, vol_id = vol->vol_id;
struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
uint32_t crc;
@ -831,15 +1028,15 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
else
ubi_assert(!(len & (ubi->min_io_size - 1)));
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
if (!vid_hdr)
vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
if (!vidb)
return -ENOMEM;
vid_hdr = ubi_get_vid_hdr(vidb);
err = leb_write_lock(ubi, vol_id, lnum);
if (err) {
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
}
if (err)
goto out;
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
@ -853,66 +1050,26 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
vid_hdr->used_ebs = cpu_to_be32(used_ebs);
vid_hdr->data_crc = cpu_to_be32(crc);
retry:
pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
ubi_free_vid_hdr(ubi, vid_hdr);
leb_write_unlock(ubi, vol_id, lnum);
up_read(&ubi->fm_eba_sem);
return pnum;
ubi_assert(vol->eba_tbl->entries[lnum].pnum < 0);
for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len);
if (err != -EIO || !ubi->bad_allowed)
break;
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg(ubi, "try another PEB");
}
dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d",
len, vol_id, lnum, pnum, used_ebs);
err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
if (err) {
ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
vol_id, lnum, pnum);
up_read(&ubi->fm_eba_sem);
goto write_error;
}
err = ubi_io_write_data(ubi, buf, pnum, 0, len);
if (err) {
ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
len, pnum);
up_read(&ubi->fm_eba_sem);
goto write_error;
}
ubi_assert(vol->eba_tbl[lnum] < 0);
vol->eba_tbl[lnum] = pnum;
up_read(&ubi->fm_eba_sem);
if (err)
ubi_ro_mode(ubi);
leb_write_unlock(ubi, vol_id, lnum);
ubi_free_vid_hdr(ubi, vid_hdr);
return 0;
write_error:
if (err != -EIO || !ubi->bad_allowed) {
/*
* This flash device does not admit of bad eraseblocks or
* something nasty and unexpected happened. Switch to read-only
* mode just in case.
*/
ubi_ro_mode(ubi);
leb_write_unlock(ubi, vol_id, lnum);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
}
out:
ubi_free_vid_buf(vidb);
err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
if (err || ++tries > UBI_IO_RETRIES) {
ubi_ro_mode(ubi);
leb_write_unlock(ubi, vol_id, lnum);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
}
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg(ubi, "try another PEB");
goto retry;
return err;
}
/*
@ -935,7 +1092,8 @@ write_error:
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
int lnum, const void *buf, int len)
{
int err, pnum, old_pnum, tries = 0, vol_id = vol->vol_id;
int err, tries, vol_id = vol->vol_id;
struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
uint32_t crc;
@ -953,10 +1111,12 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
}
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
if (!vid_hdr)
vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
if (!vidb)
return -ENOMEM;
vid_hdr = ubi_get_vid_hdr(vidb);
mutex_lock(&ubi->alc_mutex);
err = leb_write_lock(ubi, vol_id, lnum);
if (err)
@ -974,70 +1134,31 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
vid_hdr->copy_flag = 1;
vid_hdr->data_crc = cpu_to_be32(crc);
retry:
pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
err = pnum;
up_read(&ubi->fm_eba_sem);
goto out_leb_unlock;
dbg_eba("change LEB %d:%d", vol_id, lnum);
for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len);
if (err != -EIO || !ubi->bad_allowed)
break;
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg(ubi, "try another PEB");
}
dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
vol_id, lnum, vol->eba_tbl[lnum], pnum);
/*
* This flash device does not admit of bad eraseblocks or
* something nasty and unexpected happened. Switch to read-only
* mode just in case.
*/
if (err)
ubi_ro_mode(ubi);
err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
if (err) {
ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
vol_id, lnum, pnum);
up_read(&ubi->fm_eba_sem);
goto write_error;
}
err = ubi_io_write_data(ubi, buf, pnum, 0, len);
if (err) {
ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
len, pnum);
up_read(&ubi->fm_eba_sem);
goto write_error;
}
old_pnum = vol->eba_tbl[lnum];
vol->eba_tbl[lnum] = pnum;
up_read(&ubi->fm_eba_sem);
if (old_pnum >= 0) {
err = ubi_wl_put_peb(ubi, vol_id, lnum, old_pnum, 0);
if (err)
goto out_leb_unlock;
}
out_leb_unlock:
leb_write_unlock(ubi, vol_id, lnum);
out_mutex:
mutex_unlock(&ubi->alc_mutex);
ubi_free_vid_hdr(ubi, vid_hdr);
ubi_free_vid_buf(vidb);
return err;
write_error:
if (err != -EIO || !ubi->bad_allowed) {
/*
* This flash device does not admit of bad eraseblocks or
* something nasty and unexpected happened. Switch to read-only
* mode just in case.
*/
ubi_ro_mode(ubi);
goto out_leb_unlock;
}
err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
if (err || ++tries > UBI_IO_RETRIES) {
ubi_ro_mode(ubi);
goto out_leb_unlock;
}
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg(ubi, "try another PEB");
goto retry;
}
/**
@ -1082,12 +1203,15 @@ static int is_error_sane(int err)
* o a negative error code in case of failure.
*/
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
struct ubi_vid_hdr *vid_hdr)
struct ubi_vid_io_buf *vidb)
{
int err, vol_id, lnum, data_size, aldata_size, idx;
struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
struct ubi_volume *vol;
uint32_t crc;
ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
vol_id = be32_to_cpu(vid_hdr->vol_id);
lnum = be32_to_cpu(vid_hdr->lnum);
@ -1142,9 +1266,9 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
* probably waiting on @ubi->move_mutex. No need to continue the work,
* cancel it.
*/
if (vol->eba_tbl[lnum] != from) {
if (vol->eba_tbl->entries[lnum].pnum != from) {
dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
vol_id, lnum, from, vol->eba_tbl[lnum]);
vol_id, lnum, from, vol->eba_tbl->entries[lnum].pnum);
err = MOVE_CANCEL_RACE;
goto out_unlock_leb;
}
@ -1196,7 +1320,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
}
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
err = ubi_io_write_vid_hdr(ubi, to, vidb);
if (err) {
if (err == -EIO)
err = MOVE_TARGET_WR_ERR;
@ -1206,7 +1330,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
cond_resched();
/* Read the VID header back and check if it was written correctly */
err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
err = ubi_io_read_vid_hdr(ubi, to, vidb, 1);
if (err) {
if (err != UBI_IO_BITFLIPS) {
ubi_warn(ubi, "error %d while reading VID header back from PEB %d",
@ -1229,10 +1353,8 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
cond_resched();
}
ubi_assert(vol->eba_tbl[lnum] == from);
down_read(&ubi->fm_eba_sem);
vol->eba_tbl[lnum] = to;
up_read(&ubi->fm_eba_sem);
ubi_assert(vol->eba_tbl->entries[lnum].pnum == from);
vol->eba_tbl->entries[lnum].pnum = to;
out_unlock_buf:
mutex_unlock(&ubi->buf_mutex);
@ -1388,7 +1510,7 @@ out_free:
*/
int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
int i, j, err, num_volumes;
int i, err, num_volumes;
struct ubi_ainf_volume *av;
struct ubi_volume *vol;
struct ubi_ainf_peb *aeb;
@ -1404,35 +1526,39 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
for (i = 0; i < num_volumes; i++) {
struct ubi_eba_table *tbl;
vol = ubi->volumes[i];
if (!vol)
continue;
cond_resched();
vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int),
GFP_KERNEL);
if (!vol->eba_tbl) {
err = -ENOMEM;
tbl = ubi_eba_create_table(vol, vol->reserved_pebs);
if (IS_ERR(tbl)) {
err = PTR_ERR(tbl);
goto out_free;
}
for (j = 0; j < vol->reserved_pebs; j++)
vol->eba_tbl[j] = UBI_LEB_UNMAPPED;
ubi_eba_replace_table(vol, tbl);
av = ubi_find_av(ai, idx2vol_id(ubi, i));
if (!av)
continue;
ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
if (aeb->lnum >= vol->reserved_pebs)
if (aeb->lnum >= vol->reserved_pebs) {
/*
* This may happen in case of an unclean reboot
* during re-size.
*/
ubi_move_aeb_to_list(av, aeb, &ai->erase);
else
vol->eba_tbl[aeb->lnum] = aeb->pnum;
} else {
struct ubi_eba_entry *entry;
entry = &vol->eba_tbl->entries[aeb->lnum];
entry->pnum = aeb->pnum;
}
}
}
@ -1469,8 +1595,7 @@ out_free:
for (i = 0; i < num_volumes; i++) {
if (!ubi->volumes[i])
continue;
kfree(ubi->volumes[i]->eba_tbl);
ubi->volumes[i]->eba_tbl = NULL;
ubi_eba_replace_table(ubi->volumes[i], NULL);
}
return err;
}


@ -262,6 +262,8 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
int pnum;
ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
if (pool->used == pool->size) {
/* We cannot update the fastmap here because this
* function is called in atomic context.
@ -303,7 +305,7 @@ int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
wrk->anchor = 1;
wrk->func = &wear_leveling_worker;
schedule_ubi_work(ubi, wrk);
__schedule_ubi_work(ubi, wrk);
return 0;
}
@ -344,7 +346,7 @@ int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
spin_unlock(&ubi->wl_lock);
vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
return schedule_erase(ubi, e, vol_id, lnum, torture);
return schedule_erase(ubi, e, vol_id, lnum, torture, true);
}
/**


@ -110,21 +110,23 @@ size_t ubi_calc_fm_size(struct ubi_device *ubi)
* Returns a new struct ubi_vid_hdr on success.
* NULL indicates out of memory.
*/
static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
{
struct ubi_vid_hdr *new;
struct ubi_vid_io_buf *new;
struct ubi_vid_hdr *vh;
new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
new = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
if (!new)
goto out;
new->vol_type = UBI_VID_DYNAMIC;
new->vol_id = cpu_to_be32(vol_id);
vh = ubi_get_vid_hdr(new);
vh->vol_type = UBI_VID_DYNAMIC;
vh->vol_id = cpu_to_be32(vol_id);
/* UBI implementations without fastmap support have to delete the
* fastmap.
*/
new->compat = UBI_COMPAT_DELETE;
vh->compat = UBI_COMPAT_DELETE;
out:
return new;
@ -145,12 +147,10 @@ static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
{
struct ubi_ainf_peb *aeb;
aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
aeb = ubi_alloc_aeb(ai, pnum, ec);
if (!aeb)
return -ENOMEM;
aeb->pnum = pnum;
aeb->ec = ec;
aeb->lnum = -1;
aeb->scrub = scrub;
aeb->copy_flag = aeb->sqnum = 0;
@ -186,40 +186,19 @@ static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
int last_eb_bytes)
{
struct ubi_ainf_volume *av;
struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
while (*p) {
parent = *p;
av = rb_entry(parent, struct ubi_ainf_volume, rb);
av = ubi_add_av(ai, vol_id);
if (IS_ERR(av))
return av;
if (vol_id > av->vol_id)
p = &(*p)->rb_left;
else if (vol_id < av->vol_id)
p = &(*p)->rb_right;
else
return ERR_PTR(-EINVAL);
}
av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
if (!av)
goto out;
av->highest_lnum = av->leb_count = av->used_ebs = 0;
av->vol_id = vol_id;
av->data_pad = data_pad;
av->last_data_size = last_eb_bytes;
av->compat = 0;
av->vol_type = vol_type;
av->root = RB_ROOT;
if (av->vol_type == UBI_STATIC_VOLUME)
av->used_ebs = used_ebs;
dbg_bld("found volume (ID %i)", vol_id);
rb_link_node(&av->rb, parent, p);
rb_insert_color(&av->rb, &ai->volumes);
out:
return av;
}
@ -297,7 +276,7 @@ static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
*/
if (aeb->pnum == new_aeb->pnum) {
ubi_assert(aeb->lnum == new_aeb->lnum);
kmem_cache_free(ai->aeb_slab_cache, new_aeb);
ubi_free_aeb(ai, new_aeb);
return 0;
}
@ -308,13 +287,10 @@ static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
/* new_aeb is newer */
if (cmp_res & 1) {
victim = kmem_cache_alloc(ai->aeb_slab_cache,
GFP_KERNEL);
victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
if (!victim)
return -ENOMEM;
victim->ec = aeb->ec;
victim->pnum = aeb->pnum;
list_add_tail(&victim->u.list, &ai->erase);
if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
@ -328,7 +304,8 @@ static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
aeb->pnum = new_aeb->pnum;
aeb->copy_flag = new_vh->copy_flag;
aeb->scrub = new_aeb->scrub;
kmem_cache_free(ai->aeb_slab_cache, new_aeb);
aeb->sqnum = new_aeb->sqnum;
ubi_free_aeb(ai, new_aeb);
/* new_aeb is older */
} else {
@ -370,41 +347,24 @@ static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
struct ubi_vid_hdr *new_vh,
struct ubi_ainf_peb *new_aeb)
{
struct ubi_ainf_volume *av, *tmp_av = NULL;
struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
int found = 0;
int vol_id = be32_to_cpu(new_vh->vol_id);
struct ubi_ainf_volume *av;
if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
kmem_cache_free(ai->aeb_slab_cache, new_aeb);
if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) {
ubi_free_aeb(ai, new_aeb);
return 0;
}
/* Find the volume this SEB belongs to */
while (*p) {
parent = *p;
tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
p = &(*p)->rb_left;
else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
p = &(*p)->rb_right;
else {
found = 1;
break;
}
}
if (found)
av = tmp_av;
else {
av = ubi_find_av(ai, vol_id);
if (!av) {
ubi_err(ubi, "orphaned volume in fastmap pool!");
kmem_cache_free(ai->aeb_slab_cache, new_aeb);
ubi_free_aeb(ai, new_aeb);
return UBI_BAD_FASTMAP;
}
ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
ubi_assert(vol_id == av->vol_id);
return update_vol(ubi, ai, av, new_vh, new_aeb);
}
@ -423,16 +383,12 @@ static void unmap_peb(struct ubi_attach_info *ai, int pnum)
struct rb_node *node, *node2;
struct ubi_ainf_peb *aeb;
for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
av = rb_entry(node, struct ubi_ainf_volume, rb);
for (node2 = rb_first(&av->root); node2;
node2 = rb_next(node2)) {
aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
if (aeb->pnum == pnum) {
rb_erase(&aeb->u.rb, &av->root);
av->leb_count--;
kmem_cache_free(ai->aeb_slab_cache, aeb);
ubi_free_aeb(ai, aeb);
return;
}
}
@ -455,6 +411,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
__be32 *pebs, int pool_size, unsigned long long *max_sqnum,
struct list_head *free)
{
struct ubi_vid_io_buf *vb;
struct ubi_vid_hdr *vh;
struct ubi_ec_hdr *ech;
struct ubi_ainf_peb *new_aeb;
@ -464,12 +421,14 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
if (!ech)
return -ENOMEM;
vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vh) {
vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
if (!vb) {
kfree(ech);
return -ENOMEM;
}
vh = ubi_get_vid_hdr(vb);
dbg_bld("scanning fastmap pool: size = %i", pool_size);
/*
@ -510,15 +469,16 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
goto out;
}
err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
unsigned long long ec = be64_to_cpu(ech->ec);
unmap_peb(ai, pnum);
dbg_bld("Adding PEB to free: %i", pnum);
if (err == UBI_IO_FF_BITFLIPS)
add_aeb(ai, free, pnum, ec, 1);
else
add_aeb(ai, free, pnum, ec, 0);
scrub = 1;
add_aeb(ai, free, pnum, ec, scrub);
continue;
} else if (err == 0 || err == UBI_IO_BITFLIPS) {
dbg_bld("Found non empty PEB:%i in pool", pnum);
@ -526,15 +486,12 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
if (err == UBI_IO_BITFLIPS)
scrub = 1;
new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
GFP_KERNEL);
new_aeb = ubi_alloc_aeb(ai, pnum, be64_to_cpu(ech->ec));
if (!new_aeb) {
ret = -ENOMEM;
goto out;
}
new_aeb->ec = be64_to_cpu(ech->ec);
new_aeb->pnum = pnum;
new_aeb->lnum = be32_to_cpu(vh->lnum);
new_aeb->sqnum = be64_to_cpu(vh->sqnum);
new_aeb->copy_flag = vh->copy_flag;
@ -558,7 +515,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
}
out:
ubi_free_vid_hdr(ubi, vh);
ubi_free_vid_buf(vb);
kfree(ech);
return ret;
}
@ -841,11 +798,11 @@ fail_bad:
fail:
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
list_del(&tmp_aeb->u.list);
kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
ubi_free_aeb(ai, tmp_aeb);
}
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
list_del(&tmp_aeb->u.list);
kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
ubi_free_aeb(ai, tmp_aeb);
}
return ret;
@ -886,6 +843,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
struct ubi_attach_info *scan_ai)
{
struct ubi_fm_sb *fmsb, *fmsb2;
struct ubi_vid_io_buf *vb;
struct ubi_vid_hdr *vh;
struct ubi_ec_hdr *ech;
struct ubi_fastmap_layout *fm;
@ -919,7 +877,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
goto out;
}
ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
ret = ubi_io_read_data(ubi, fmsb, fm_anchor, 0, sizeof(*fmsb));
if (ret && ret != UBI_IO_BITFLIPS)
goto free_fm_sb;
else if (ret == UBI_IO_BITFLIPS)
@ -961,12 +919,14 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
goto free_fm_sb;
}
vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vh) {
vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
if (!vb) {
ret = -ENOMEM;
goto free_hdr;
}
vh = ubi_get_vid_hdr(vb);
for (i = 0; i < used_blocks; i++) {
int image_seq;
@ -1009,7 +969,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
goto free_hdr;
}
ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
if (ret && ret != UBI_IO_BITFLIPS) {
ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
i, pnum);
@ -1037,8 +997,8 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
if (sqnum < be64_to_cpu(vh->sqnum))
sqnum = be64_to_cpu(vh->sqnum);
ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
ubi->leb_start, ubi->leb_size);
ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i),
pnum, 0, ubi->leb_size);
if (ret && ret != UBI_IO_BITFLIPS) {
ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
"err: %i)", i, pnum, ret);
@ -1099,7 +1059,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
ubi->fm_disabled = 0;
ubi->fast_attach = 1;
ubi_free_vid_hdr(ubi, vh);
ubi_free_vid_buf(vb);
kfree(ech);
out:
up_write(&ubi->fm_protect);
@ -1108,7 +1068,7 @@ out:
return ret;
free_hdr:
ubi_free_vid_hdr(ubi, vh);
ubi_free_vid_buf(vb);
kfree(ech);
free_fm_sb:
kfree(fmsb);
@ -1136,6 +1096,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
struct ubi_fm_eba *feba;
struct ubi_wl_entry *wl_e;
struct ubi_volume *vol;
struct ubi_vid_io_buf *avbuf, *dvbuf;
struct ubi_vid_hdr *avhdr, *dvhdr;
struct ubi_work *ubi_wrk;
struct rb_node *tmp_rb;
@ -1146,18 +1107,21 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
fm_raw = ubi->fm_buf;
memset(ubi->fm_buf, 0, ubi->fm_size);
avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
if (!avhdr) {
avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
if (!avbuf) {
ret = -ENOMEM;
goto out;
}
dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
if (!dvhdr) {
dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
if (!dvbuf) {
ret = -ENOMEM;
goto out_kfree;
}
avhdr = ubi_get_vid_hdr(avbuf);
dvhdr = ubi_get_vid_hdr(dvbuf);
seen_pebs = init_seen(ubi);
if (IS_ERR(seen_pebs)) {
ret = PTR_ERR(seen_pebs);
@ -1306,8 +1270,12 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
ubi_assert(fm_pos <= ubi->fm_size);
for (j = 0; j < vol->reserved_pebs; j++)
feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
for (j = 0; j < vol->reserved_pebs; j++) {
struct ubi_eba_leb_desc ldesc;
ubi_eba_get_ldesc(vol, j, &ldesc);
feba->pnum[j] = cpu_to_be32(ldesc.pnum);
}
feba->reserved_pebs = cpu_to_be32(j);
feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
@ -1322,7 +1290,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
spin_unlock(&ubi->volumes_lock);
dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
if (ret) {
ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
goto out_kfree;
@ -1343,7 +1311,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
dvhdr->lnum = cpu_to_be32(i);
dbg_bld("writing fastmap data to PEB %i sqnum %llu",
new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
if (ret) {
ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
new_fm->e[i]->pnum);
@ -1352,8 +1320,8 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
}
for (i = 0; i < new_fm->used_blocks; i++) {
ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size),
new_fm->e[i]->pnum, 0, ubi->leb_size);
if (ret) {
ubi_err(ubi, "unable to write fastmap to PEB %i!",
new_fm->e[i]->pnum);
@ -1368,8 +1336,8 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
dbg_bld("fastmap written!");
out_kfree:
ubi_free_vid_hdr(ubi, avhdr);
ubi_free_vid_hdr(ubi, dvhdr);
ubi_free_vid_buf(avbuf);
ubi_free_vid_buf(dvbuf);
free_seen(seen_pebs);
out:
return ret;
@ -1439,7 +1407,8 @@ static int invalidate_fastmap(struct ubi_device *ubi)
int ret;
struct ubi_fastmap_layout *fm;
struct ubi_wl_entry *e;
struct ubi_vid_hdr *vh = NULL;
struct ubi_vid_io_buf *vb = NULL;
struct ubi_vid_hdr *vh;
if (!ubi->fm)
return 0;
@ -1451,10 +1420,12 @@ static int invalidate_fastmap(struct ubi_device *ubi)
if (!fm)
goto out;
vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
if (!vh)
vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
if (!vb)
goto out_free_fm;
vh = ubi_get_vid_hdr(vb);
ret = -ENOSPC;
e = ubi_wl_get_fm_peb(ubi, 1);
if (!e)
@ -1465,7 +1436,7 @@ static int invalidate_fastmap(struct ubi_device *ubi)
* to scanning mode.
*/
vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ret = ubi_io_write_vid_hdr(ubi, e->pnum, vh);
ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
if (ret < 0) {
ubi_wl_put_fm_peb(ubi, e, 0, 0);
goto out_free_fm;
@ -1477,7 +1448,7 @@ static int invalidate_fastmap(struct ubi_device *ubi)
ubi->fm = fm;
out:
ubi_free_vid_hdr(ubi, vh);
ubi_free_vid_buf(vb);
return ret;
out_free_fm:
@ -1522,22 +1493,30 @@ int ubi_update_fastmap(struct ubi_device *ubi)
struct ubi_wl_entry *tmp_e;
down_write(&ubi->fm_protect);
down_write(&ubi->work_sem);
down_write(&ubi->fm_eba_sem);
ubi_refill_pools(ubi);
if (ubi->ro_mode || ubi->fm_disabled) {
up_write(&ubi->fm_eba_sem);
up_write(&ubi->work_sem);
up_write(&ubi->fm_protect);
return 0;
}
ret = ubi_ensure_anchor_pebs(ubi);
if (ret) {
up_write(&ubi->fm_eba_sem);
up_write(&ubi->work_sem);
up_write(&ubi->fm_protect);
return ret;
}
new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
if (!new_fm) {
up_write(&ubi->fm_eba_sem);
up_write(&ubi->work_sem);
up_write(&ubi->fm_protect);
return -ENOMEM;
}
@ -1646,16 +1625,14 @@ int ubi_update_fastmap(struct ubi_device *ubi)
new_fm->e[0] = tmp_e;
}
down_write(&ubi->work_sem);
down_write(&ubi->fm_eba_sem);
ret = ubi_write_fastmap(ubi, new_fm);
up_write(&ubi->fm_eba_sem);
up_write(&ubi->work_sem);
if (ret)
goto err;
out_unlock:
up_write(&ubi->fm_eba_sem);
up_write(&ubi->work_sem);
up_write(&ubi->fm_protect);
kfree(old_fm);
return ret;


@ -502,6 +502,7 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
loff_t addr;
uint32_t data = 0;
struct ubi_ec_hdr ec_hdr;
struct ubi_vid_io_buf vidb;
/*
* Note, we cannot generally define VID header buffers on stack,
@ -528,7 +529,10 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
goto error;
}
err = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
ubi_init_vid_buf(ubi, &vidb, &vid_hdr);
ubi_assert(&vid_hdr == ubi_get_vid_hdr(&vidb));
err = ubi_io_read_vid_hdr(ubi, pnum, &vidb, 0);
if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
err != UBI_IO_FF){
addr += ubi->vid_hdr_aloffset;
@ -995,12 +999,11 @@ bad:
* ubi_io_read_vid_hdr - read and check a volume identifier header.
* @ubi: UBI device description object
* @pnum: physical eraseblock number to read from
* @vid_hdr: &struct ubi_vid_hdr object where to store the read volume
* identifier header
* @vidb: the volume identifier buffer to store data in
* @verbose: be verbose if the header is corrupted or wasn't found
*
* This function reads the volume identifier header from physical eraseblock
* @pnum and stores it in @vid_hdr. It also checks CRC checksum of the read
* @pnum and stores it in @vidb. It also checks CRC checksum of the read
* volume identifier header. The error codes are the same as in
* 'ubi_io_read_ec_hdr()'.
*
@ -1008,16 +1011,16 @@ bad:
* 'ubi_io_read_ec_hdr()', so refer commentaries in 'ubi_io_read_ec_hdr()'.
*/
int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
struct ubi_vid_hdr *vid_hdr, int verbose)
struct ubi_vid_io_buf *vidb, int verbose)
{
int err, read_err;
uint32_t crc, magic, hdr_crc;
void *p;
struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
void *p = vidb->buffer;
dbg_io("read VID header from PEB %d", pnum);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
p = (char *)vid_hdr - ubi->vid_hdr_shift;
read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi->vid_hdr_shift + UBI_VID_HDR_SIZE);
if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
@ -1080,23 +1083,24 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
* ubi_io_write_vid_hdr - write a volume identifier header.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to write to
* @vid_hdr: the volume identifier header to write
* @vidb: the volume identifier buffer to write
*
* This function writes the volume identifier header described by @vid_hdr to
* physical eraseblock @pnum. This function automatically fills the
* @vid_hdr->magic and the @vid_hdr->version fields, as well as calculates
* header CRC checksum and stores it at vid_hdr->hdr_crc.
* @vidb->hdr->magic and the @vidb->hdr->version fields, as well as calculates
* header CRC checksum and stores it at vidb->hdr->hdr_crc.
*
* This function returns zero in case of success and a negative error code in
* case of failure. If %-EIO is returned, the physical eraseblock probably went
* bad.
*/
int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
struct ubi_vid_hdr *vid_hdr)
struct ubi_vid_io_buf *vidb)
{
struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
int err;
uint32_t crc;
void *p;
void *p = vidb->buffer;
dbg_io("write VID header to PEB %d", pnum);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
@ -1117,7 +1121,6 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
if (ubi_dbg_power_cut(ubi, POWER_CUT_VID_WRITE))
return -EROFS;
p = (char *)vid_hdr - ubi->vid_hdr_shift;
err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi->vid_hdr_alsize);
return err;
@ -1283,17 +1286,19 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
{
int err;
uint32_t crc, hdr_crc;
struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
void *p;
if (!ubi_dbg_chk_io(ubi))
return 0;
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
if (!vid_hdr)
vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
if (!vidb)
return -ENOMEM;
p = (char *)vid_hdr - ubi->vid_hdr_shift;
vid_hdr = ubi_get_vid_hdr(vidb);
p = vidb->buffer;
err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi->vid_hdr_alsize);
if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
@ -1314,7 +1319,7 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
err = self_check_vid_hdr(ubi, pnum, vid_hdr);
exit:
ubi_free_vid_hdr(ubi, vid_hdr);
ubi_free_vid_buf(vidb);
return err;
}


@ -538,7 +538,7 @@ int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
if (lnum < 0 || lnum >= vol->reserved_pebs || offset < 0 || len < 0 ||
if (!ubi_leb_valid(vol, lnum) || offset < 0 || len < 0 ||
offset + len > vol->usable_leb_size ||
offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1))
return -EINVAL;
@ -583,7 +583,7 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
if (lnum < 0 || lnum >= vol->reserved_pebs || len < 0 ||
if (!ubi_leb_valid(vol, lnum) || len < 0 ||
len > vol->usable_leb_size || len & (ubi->min_io_size - 1))
return -EINVAL;
@ -620,7 +620,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
if (lnum < 0 || lnum >= vol->reserved_pebs)
if (!ubi_leb_valid(vol, lnum))
return -EINVAL;
if (vol->upd_marker)
@ -680,7 +680,7 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
if (lnum < 0 || lnum >= vol->reserved_pebs)
if (!ubi_leb_valid(vol, lnum))
return -EINVAL;
if (vol->upd_marker)
@ -716,13 +716,13 @@ int ubi_leb_map(struct ubi_volume_desc *desc, int lnum)
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
if (lnum < 0 || lnum >= vol->reserved_pebs)
if (!ubi_leb_valid(vol, lnum))
return -EINVAL;
if (vol->upd_marker)
return -EBADF;
if (vol->eba_tbl[lnum] >= 0)
if (ubi_eba_is_mapped(vol, lnum))
return -EBADMSG;
return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
@ -751,13 +751,13 @@ int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum)
dbg_gen("test LEB %d:%d", vol->vol_id, lnum);
if (lnum < 0 || lnum >= vol->reserved_pebs)
if (!ubi_leb_valid(vol, lnum))
return -EINVAL;
if (vol->upd_marker)
return -EBADF;
return vol->eba_tbl[lnum] >= 0;
return ubi_eba_is_mapped(vol, lnum);
}
EXPORT_SYMBOL_GPL(ubi_is_mapped);
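Seen from an in-kernel user of the UBI API (declared in include/linux/mtd/ubi.h), the behaviour is unchanged: an out-of-range LEB number still yields -EINVAL, the range check is simply centralised in ubi_leb_valid(). A minimal sketch, where example_touch_leb() is a made-up name and error handling is trimmed:

static int example_touch_leb(int ubi_num, int vol_id, int lnum)
{
        struct ubi_volume_desc *desc;
        int err;

        desc = ubi_open_volume(ubi_num, vol_id, UBI_READWRITE);
        if (IS_ERR(desc))
                return PTR_ERR(desc);

        err = ubi_is_mapped(desc, lnum);        /* -EINVAL if lnum is invalid */
        if (err == 0)
                err = ubi_leb_map(desc, lnum);  /* map a still-unmapped LEB */

        ubi_close_volume(desc);
        return err;
}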


@ -166,6 +166,17 @@ enum {
POWER_CUT_VID_WRITE = 0x02,
};
/**
* struct ubi_vid_io_buf - VID buffer used to read/write VID info to/from the
* flash.
* @hdr: a pointer to the VID header stored in buffer
* @buffer: underlying buffer
*/
struct ubi_vid_io_buf {
struct ubi_vid_hdr *hdr;
void *buffer;
};
/**
* struct ubi_wl_entry - wear-leveling entry.
* @u.rb: link in the corresponding (free/used) RB-tree
@ -266,6 +277,21 @@ struct ubi_fm_pool {
int max_size;
};
/**
* struct ubi_eba_leb_desc - EBA logical eraseblock descriptor
* @lnum: the logical eraseblock number
* @pnum: the physical eraseblock where the LEB can be found
*
* This structure is here to hide EBA's internals from other parts of the
* UBI implementation.
*
* One can query the position of a LEB by calling ubi_eba_get_ldesc().
*/
struct ubi_eba_leb_desc {
int lnum;
int pnum;
};
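To illustrate the new EBA interface, the sketch below (not taken from the series itself; example_leb_to_peb() is a made-up name) shows how a caller resolves a LEB to its PEB through ubi_leb_valid(), ubi_eba_is_mapped() and ubi_eba_get_ldesc() instead of dereferencing vol->eba_tbl directly:

static int example_leb_to_peb(struct ubi_volume *vol, int lnum)
{
        struct ubi_eba_leb_desc ldesc;

        /* Reject out-of-range logical eraseblock numbers first. */
        if (!ubi_leb_valid(vol, lnum))
                return -EINVAL;

        /* No physical eraseblock is attached to this LEB yet. */
        if (!ubi_eba_is_mapped(vol, lnum))
                return -ENODATA;

        ubi_eba_get_ldesc(vol, lnum, &ldesc);
        return ldesc.pnum;
}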
/**
* struct ubi_volume - UBI volume description data structure.
* @dev: device object to make use of the Linux device model
@ -344,7 +370,7 @@ struct ubi_volume {
long long upd_received;
void *upd_buf;
int *eba_tbl;
struct ubi_eba_table *eba_tbl;
unsigned int checked:1;
unsigned int corrupted:1;
unsigned int upd_marker:1;
@ -724,6 +750,8 @@ struct ubi_ainf_volume {
* @ec_sum: a temporary variable used when calculating @mean_ec
* @ec_count: a temporary variable used when calculating @mean_ec
* @aeb_slab_cache: slab cache for &struct ubi_ainf_peb objects
* @ech: temporary EC header. Only available during scan
* @vidb: temporary VID buffer. Only available during scan
*
* This data structure contains the result of attaching an MTD device and may
* be used by other UBI sub-systems to build final UBI data structures, further
@ -752,6 +780,8 @@ struct ubi_attach_info {
uint64_t ec_sum;
int ec_count;
struct kmem_cache *aeb_slab_cache;
struct ubi_ec_hdr *ech;
struct ubi_vid_io_buf *vidb;
};
/**
@ -792,8 +822,12 @@ extern struct mutex ubi_devices_mutex;
extern struct blocking_notifier_head ubi_notifiers;
/* attach.c */
struct ubi_ainf_peb *ubi_alloc_aeb(struct ubi_attach_info *ai, int pnum,
int ec);
void ubi_free_aeb(struct ubi_attach_info *ai, struct ubi_ainf_peb *aeb);
int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips);
struct ubi_ainf_volume *ubi_add_av(struct ubi_attach_info *ai, int vol_id);
struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
int vol_id);
void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av);
@ -835,7 +869,21 @@ void ubi_update_reserved(struct ubi_device *ubi);
void ubi_calculate_reserved(struct ubi_device *ubi);
int ubi_check_pattern(const void *buf, uint8_t patt, int size);
static inline bool ubi_leb_valid(struct ubi_volume *vol, int lnum)
{
return lnum >= 0 && lnum < vol->reserved_pebs;
}
/* eba.c */
struct ubi_eba_table *ubi_eba_create_table(struct ubi_volume *vol,
int nentries);
void ubi_eba_destroy_table(struct ubi_eba_table *tbl);
void ubi_eba_copy_table(struct ubi_volume *vol, struct ubi_eba_table *dst,
int nentries);
void ubi_eba_replace_table(struct ubi_volume *vol, struct ubi_eba_table *tbl);
void ubi_eba_get_ldesc(struct ubi_volume *vol, int lnum,
struct ubi_eba_leb_desc *ldesc);
bool ubi_eba_is_mapped(struct ubi_volume *vol, int lnum);
int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
int lnum);
int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
@ -850,7 +898,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
int lnum, const void *buf, int len);
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
struct ubi_vid_hdr *vid_hdr);
struct ubi_vid_io_buf *vidb);
int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
unsigned long long ubi_next_sqnum(struct ubi_device *ubi);
int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
@ -885,9 +933,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
struct ubi_ec_hdr *ec_hdr);
int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
struct ubi_vid_hdr *vid_hdr, int verbose);
struct ubi_vid_io_buf *vidb, int verbose);
int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
struct ubi_vid_hdr *vid_hdr);
struct ubi_vid_io_buf *vidb);
/* build.c */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
@ -1008,44 +1056,68 @@ static inline void ubi_move_aeb_to_list(struct ubi_ainf_volume *av,
}
/**
* ubi_zalloc_vid_hdr - allocate a volume identifier header object.
* @ubi: UBI device description object
* @gfp_flags: GFP flags to allocate with
*
* This function returns a pointer to the newly allocated and zero-filled
* volume identifier header object in case of success and %NULL in case of
* failure.
* ubi_init_vid_buf - Initialize a VID buffer
* @ubi: the UBI device
* @vidb: the VID buffer to initialize
* @buf: the underlying buffer
*/
static inline struct ubi_vid_hdr *
ubi_zalloc_vid_hdr(const struct ubi_device *ubi, gfp_t gfp_flags)
static inline void ubi_init_vid_buf(const struct ubi_device *ubi,
struct ubi_vid_io_buf *vidb,
void *buf)
{
void *vid_hdr;
if (buf)
memset(buf, 0, ubi->vid_hdr_alsize);
vid_hdr = kzalloc(ubi->vid_hdr_alsize, gfp_flags);
if (!vid_hdr)
return NULL;
/*
* VID headers may be stored at un-aligned flash offsets, so we shift
* the pointer.
*/
return vid_hdr + ubi->vid_hdr_shift;
vidb->buffer = buf;
vidb->hdr = buf + ubi->vid_hdr_shift;
}
/**
* ubi_free_vid_hdr - free a volume identifier header object.
* @ubi: UBI device description object
* @vid_hdr: the object to free
* ubi_alloc_vid_buf - Allocate a VID buffer
* @ubi: the UBI device
* @gfp_flags: GFP flags to use for the allocation
*/
static inline void ubi_free_vid_hdr(const struct ubi_device *ubi,
struct ubi_vid_hdr *vid_hdr)
static inline struct ubi_vid_io_buf *
ubi_alloc_vid_buf(const struct ubi_device *ubi, gfp_t gfp_flags)
{
void *p = vid_hdr;
struct ubi_vid_io_buf *vidb;
void *buf;
if (!p)
vidb = kzalloc(sizeof(*vidb), gfp_flags);
if (!vidb)
return NULL;
buf = kmalloc(ubi->vid_hdr_alsize, gfp_flags);
if (!buf) {
kfree(vidb);
return NULL;
}
ubi_init_vid_buf(ubi, vidb, buf);
return vidb;
}
/**
* ubi_free_vid_buf - Free a VID buffer
* @vidb: the VID buffer to free
*/
static inline void ubi_free_vid_buf(struct ubi_vid_io_buf *vidb)
{
if (!vidb)
return;
kfree(p - ubi->vid_hdr_shift);
kfree(vidb->buffer);
kfree(vidb);
}
/**
* ubi_get_vid_hdr - Get the VID header attached to a VID buffer
* @vidb: VID buffer
*/
static inline struct ubi_vid_hdr *ubi_get_vid_hdr(struct ubi_vid_io_buf *vidb)
{
return vidb->hdr;
}
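Taken together, the helpers above give every call site the same allocate/inspect/free pattern. A minimal sketch mirroring the converted call sites (example_read_vid() is a made-up name):

static int example_read_vid(struct ubi_device *ubi, int pnum)
{
        struct ubi_vid_io_buf *vidb;
        struct ubi_vid_hdr *vid_hdr;
        int err;

        vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
        if (!vidb)
                return -ENOMEM;

        /* The header pointer lives inside the buffer, already shifted. */
        vid_hdr = ubi_get_vid_hdr(vidb);

        err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0);
        if (!err || err == UBI_IO_BITFLIPS)
                dbg_gen("PEB %d holds LEB %d of volume %d", pnum,
                        be32_to_cpu(vid_hdr->lnum),
                        be32_to_cpu(vid_hdr->vol_id));

        ubi_free_vid_buf(vidb);
        return err;
}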
/*


@ -138,7 +138,7 @@ static void vol_release(struct device *dev)
{
struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
kfree(vol->eba_tbl);
ubi_eba_replace_table(vol, NULL);
kfree(vol);
}
@ -158,6 +158,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
int i, err, vol_id = req->vol_id, do_free = 1;
struct ubi_volume *vol;
struct ubi_vtbl_record vtbl_rec;
struct ubi_eba_table *eba_tbl = NULL;
dev_t dev;
if (ubi->ro_mode)
@ -241,14 +242,13 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
if (err)
goto out_acc;
vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int), GFP_KERNEL);
if (!vol->eba_tbl) {
err = -ENOMEM;
eba_tbl = ubi_eba_create_table(vol, vol->reserved_pebs);
if (IS_ERR(eba_tbl)) {
err = PTR_ERR(eba_tbl);
goto out_acc;
}
for (i = 0; i < vol->reserved_pebs; i++)
vol->eba_tbl[i] = UBI_LEB_UNMAPPED;
ubi_eba_replace_table(vol, eba_tbl);
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
vol->used_ebs = vol->reserved_pebs;
@ -329,7 +329,7 @@ out_cdev:
cdev_del(&vol->cdev);
out_mapping:
if (do_free)
kfree(vol->eba_tbl);
ubi_eba_destroy_table(eba_tbl);
out_acc:
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= vol->reserved_pebs;
@ -427,10 +427,11 @@ out_unlock:
*/
int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
{
int i, err, pebs, *new_mapping;
int i, err, pebs;
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
struct ubi_vtbl_record vtbl_rec;
struct ubi_eba_table *new_eba_tbl = NULL;
int vol_id = vol->vol_id;
if (ubi->ro_mode)
@ -450,12 +451,9 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (reserved_pebs == vol->reserved_pebs)
return 0;
new_mapping = kmalloc(reserved_pebs * sizeof(int), GFP_KERNEL);
if (!new_mapping)
return -ENOMEM;
for (i = 0; i < reserved_pebs; i++)
new_mapping[i] = UBI_LEB_UNMAPPED;
new_eba_tbl = ubi_eba_create_table(vol, reserved_pebs);
if (IS_ERR(new_eba_tbl))
return PTR_ERR(new_eba_tbl);
spin_lock(&ubi->volumes_lock);
if (vol->ref_count > 1) {
@ -481,10 +479,8 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
}
ubi->avail_pebs -= pebs;
ubi->rsvd_pebs += pebs;
for (i = 0; i < vol->reserved_pebs; i++)
new_mapping[i] = vol->eba_tbl[i];
kfree(vol->eba_tbl);
vol->eba_tbl = new_mapping;
ubi_eba_copy_table(vol, new_eba_tbl, vol->reserved_pebs);
ubi_eba_replace_table(vol, new_eba_tbl);
spin_unlock(&ubi->volumes_lock);
}
@ -498,10 +494,8 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
ubi->rsvd_pebs += pebs;
ubi->avail_pebs -= pebs;
ubi_update_reserved(ubi);
for (i = 0; i < reserved_pebs; i++)
new_mapping[i] = vol->eba_tbl[i];
kfree(vol->eba_tbl);
vol->eba_tbl = new_mapping;
ubi_eba_copy_table(vol, new_eba_tbl, reserved_pebs);
ubi_eba_replace_table(vol, new_eba_tbl);
spin_unlock(&ubi->volumes_lock);
}
@ -543,7 +537,7 @@ out_acc:
spin_unlock(&ubi->volumes_lock);
}
out_free:
kfree(new_mapping);
kfree(new_eba_tbl);
return err;
}


@ -299,15 +299,18 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai,
int copy, void *vtbl)
{
int err, tries = 0;
struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
struct ubi_ainf_peb *new_aeb;
dbg_gen("create volume table (copy #%d)", copy + 1);
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vid_hdr)
vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
if (!vidb)
return -ENOMEM;
vid_hdr = ubi_get_vid_hdr(vidb);
retry:
new_aeb = ubi_early_get_peb(ubi, ai);
if (IS_ERR(new_aeb)) {
@ -324,7 +327,7 @@ retry:
vid_hdr->sqnum = cpu_to_be64(++ai->max_sqnum);
/* The EC header is already there, write the VID header */
err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vid_hdr);
err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vidb);
if (err)
goto write_error;
@ -338,8 +341,8 @@ retry:
* of this LEB as it will be deleted and freed in 'ubi_add_to_av()'.
*/
err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0);
kmem_cache_free(ai->aeb_slab_cache, new_aeb);
ubi_free_vid_hdr(ubi, vid_hdr);
ubi_free_aeb(ai, new_aeb);
ubi_free_vid_buf(vidb);
return err;
write_error:
@ -351,9 +354,9 @@ write_error:
list_add(&new_aeb->u.list, &ai->erase);
goto retry;
}
kmem_cache_free(ai->aeb_slab_cache, new_aeb);
ubi_free_aeb(ai, new_aeb);
out_free:
ubi_free_vid_hdr(ubi, vid_hdr);
ubi_free_vid_buf(vidb);
return err;
}


@ -580,7 +580,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
* failure.
*/
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
int vol_id, int lnum, int torture)
int vol_id, int lnum, int torture, bool nested)
{
struct ubi_work *wl_wrk;
@ -599,7 +599,10 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
wl_wrk->lnum = lnum;
wl_wrk->torture = torture;
schedule_ubi_work(ubi, wl_wrk);
if (nested)
__schedule_ubi_work(ubi, wl_wrk);
else
schedule_ubi_work(ubi, wl_wrk);
return 0;
}
@ -644,11 +647,12 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
int shutdown)
{
int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
int vol_id = -1, lnum = -1;
int erase = 0, keep = 0, vol_id = -1, lnum = -1;
#ifdef CONFIG_MTD_UBI_FASTMAP
int anchor = wrk->anchor;
#endif
struct ubi_wl_entry *e1, *e2;
struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
int dst_leb_clean = 0;
@ -656,10 +660,13 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
if (shutdown)
return 0;
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
if (!vid_hdr)
vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
if (!vidb)
return -ENOMEM;
vid_hdr = ubi_get_vid_hdr(vidb);
down_read(&ubi->fm_eba_sem);
mutex_lock(&ubi->move_mutex);
spin_lock(&ubi->wl_lock);
ubi_assert(!ubi->move_from && !ubi->move_to);
@ -753,7 +760,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
* which is being moved was unmapped.
*/
err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0);
if (err && err != UBI_IO_BITFLIPS) {
dst_leb_clean = 1;
if (err == UBI_IO_FF) {
@ -780,6 +787,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
e1->pnum);
scrubbing = 1;
goto out_not_moved;
} else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
/*
* While a full scan would detect interrupted erasures
* at attach time we can face them here when attached from
* Fastmap.
*/
dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure",
e1->pnum);
erase = 1;
goto out_not_moved;
}
ubi_err(ubi, "error %d while reading VID header from PEB %d",
@ -790,7 +807,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
vol_id = be32_to_cpu(vid_hdr->vol_id);
lnum = be32_to_cpu(vid_hdr->lnum);
err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb);
if (err) {
if (err == MOVE_CANCEL_RACE) {
/*
@ -815,6 +832,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
* Target PEB had bit-flips or write error - torture it.
*/
torture = 1;
keep = 1;
goto out_not_moved;
}
@ -847,7 +865,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
if (scrubbing)
ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
e1->pnum, vol_id, lnum, e2->pnum);
ubi_free_vid_hdr(ubi, vid_hdr);
ubi_free_vid_buf(vidb);
spin_lock(&ubi->wl_lock);
if (!ubi->move_to_put) {
@ -879,6 +897,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
dbg_wl("done");
mutex_unlock(&ubi->move_mutex);
up_read(&ubi->fm_eba_sem);
return 0;
/*
@ -901,7 +920,7 @@ out_not_moved:
ubi->erroneous_peb_count += 1;
} else if (scrubbing)
wl_tree_add(e1, &ubi->scrub);
else
else if (keep)
wl_tree_add(e1, &ubi->used);
if (dst_leb_clean) {
wl_tree_add(e2, &ubi->free);
@ -913,7 +932,7 @@ out_not_moved:
ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
ubi_free_vid_hdr(ubi, vid_hdr);
ubi_free_vid_buf(vidb);
if (dst_leb_clean) {
ensure_wear_leveling(ubi, 1);
} else {
@ -922,7 +941,14 @@ out_not_moved:
goto out_ro;
}
if (erase) {
err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
if (err)
goto out_ro;
}
mutex_unlock(&ubi->move_mutex);
up_read(&ubi->fm_eba_sem);
return 0;
out_error:
@ -937,13 +963,14 @@ out_error:
ubi->move_to_put = ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
ubi_free_vid_hdr(ubi, vid_hdr);
ubi_free_vid_buf(vidb);
wl_entry_destroy(ubi, e1);
wl_entry_destroy(ubi, e2);
out_ro:
ubi_ro_mode(ubi);
mutex_unlock(&ubi->move_mutex);
up_read(&ubi->fm_eba_sem);
ubi_assert(err != 0);
return err < 0 ? err : -EIO;
@ -951,7 +978,8 @@ out_cancel:
ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
mutex_unlock(&ubi->move_mutex);
ubi_free_vid_hdr(ubi, vid_hdr);
up_read(&ubi->fm_eba_sem);
ubi_free_vid_buf(vidb);
return 0;
}
@ -1073,7 +1101,7 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
int err1;
/* Re-schedule the LEB for erasure */
err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
if (err1) {
wl_entry_destroy(ubi, e);
err = err1;
@ -1254,7 +1282,7 @@ retry:
}
spin_unlock(&ubi->wl_lock);
err = schedule_erase(ubi, e, vol_id, lnum, torture);
err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
if (err) {
spin_lock(&ubi->wl_lock);
wl_tree_add(e, &ubi->used);
@ -1545,7 +1573,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
e->pnum = aeb->pnum;
e->ec = aeb->ec;
ubi->lookuptbl[e->pnum] = e;
if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
wl_entry_destroy(ubi, e);
goto out_free;
}
@ -1624,7 +1652,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
e->ec = aeb->ec;
ubi_assert(!ubi->lookuptbl[e->pnum]);
ubi->lookuptbl[e->pnum] = e;
if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
wl_entry_destroy(ubi, e);
goto out_free;
}


@ -301,6 +301,95 @@ out_budg:
return err;
}
static int do_tmpfile(struct inode *dir, struct dentry *dentry,
umode_t mode, struct inode **whiteout)
{
struct inode *inode;
struct ubifs_info *c = dir->i_sb->s_fs_info;
struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1};
struct ubifs_budget_req ino_req = { .dirtied_ino = 1 };
struct ubifs_inode *ui, *dir_ui = ubifs_inode(dir);
int err, instantiated = 0;
/*
* Budget request settings: new dirty inode, new direntry,
* budget for dirtied inode will be released via writeback.
*/
dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
dentry, mode, dir->i_ino);
err = ubifs_budget_space(c, &req);
if (err)
return err;
err = ubifs_budget_space(c, &ino_req);
if (err) {
ubifs_release_budget(c, &req);
return err;
}
inode = ubifs_new_inode(c, dir, mode);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_budg;
}
ui = ubifs_inode(inode);
if (whiteout) {
init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
ubifs_assert(inode->i_op == &ubifs_file_inode_operations);
}
err = ubifs_init_security(dir, inode, &dentry->d_name);
if (err)
goto out_inode;
mutex_lock(&ui->ui_mutex);
insert_inode_hash(inode);
if (whiteout) {
mark_inode_dirty(inode);
drop_nlink(inode);
*whiteout = inode;
} else {
d_tmpfile(dentry, inode);
}
ubifs_assert(ui->dirty);
instantiated = 1;
mutex_unlock(&ui->ui_mutex);
mutex_lock(&dir_ui->ui_mutex);
err = ubifs_jnl_update(c, dir, &dentry->d_name, inode, 1, 0);
if (err)
goto out_cancel;
mutex_unlock(&dir_ui->ui_mutex);
ubifs_release_budget(c, &req);
return 0;
out_cancel:
mutex_unlock(&dir_ui->ui_mutex);
out_inode:
make_bad_inode(inode);
if (!instantiated)
iput(inode);
out_budg:
ubifs_release_budget(c, &req);
if (!instantiated)
ubifs_release_budget(c, &ino_req);
ubifs_err(c, "cannot create temporary file, error %d", err);
return err;
}
static int ubifs_tmpfile(struct inode *dir, struct dentry *dentry,
umode_t mode)
{
return do_tmpfile(dir, dentry, mode, NULL);
}
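From user space the new hook is reached through the regular O_TMPFILE interface. A self-contained sketch (the mount point /mnt/ubifs and the final name are assumptions):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char proc_path[64];
        int fd;

        /* Create an anonymous (unlinked) file inside the UBIFS mount. */
        fd = open("/mnt/ubifs", O_TMPFILE | O_WRONLY, 0600);
        if (fd < 0) {
                perror("open(O_TMPFILE)");
                return 1;
        }

        if (write(fd, "scratch\n", 8) != 8)
                perror("write");

        /* Optionally give the file a name once its content is complete. */
        snprintf(proc_path, sizeof(proc_path), "/proc/self/fd/%d", fd);
        if (linkat(AT_FDCWD, proc_path, AT_FDCWD, "/mnt/ubifs/visible",
                   AT_SYMLINK_FOLLOW) < 0)
                perror("linkat");

        close(fd);
        return 0;
}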
/**
* vfs_dent_type - get VFS directory entry type.
* @type: UBIFS directory entry type
@ -927,37 +1016,43 @@ out_budg:
}
/**
* lock_3_inodes - a wrapper for locking three UBIFS inodes.
* lock_4_inodes - a wrapper for locking four UBIFS inodes.
* @inode1: first inode
* @inode2: second inode
* @inode3: third inode
* @inode4: fourth inode
*
* This function is used for 'ubifs_rename()' and @inode1 may be the same as
* @inode2 whereas @inode3 may be %NULL.
* @inode2 whereas @inode3 and @inode4 may be %NULL.
*
* We do not implement any tricks to guarantee strict lock ordering, because
* VFS has already done it for us on the @i_mutex. So this is just a simple
* wrapper function.
*/
static void lock_3_inodes(struct inode *inode1, struct inode *inode2,
struct inode *inode3)
static void lock_4_inodes(struct inode *inode1, struct inode *inode2,
struct inode *inode3, struct inode *inode4)
{
mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1);
if (inode2 != inode1)
mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2);
if (inode3)
mutex_lock_nested(&ubifs_inode(inode3)->ui_mutex, WB_MUTEX_3);
if (inode4)
mutex_lock_nested(&ubifs_inode(inode4)->ui_mutex, WB_MUTEX_4);
}
/**
* unlock_3_inodes - a wrapper for unlocking three UBIFS inodes for rename.
* unlock_4_inodes - a wrapper for unlocking four UBIFS inodes for rename.
* @inode1: first inode
* @inode2: second inode
* @inode3: third inode
* @inode4: fourth inode
*/
static void unlock_3_inodes(struct inode *inode1, struct inode *inode2,
struct inode *inode3)
static void unlock_4_inodes(struct inode *inode1, struct inode *inode2,
struct inode *inode3, struct inode *inode4)
{
if (inode4)
mutex_unlock(&ubifs_inode(inode4)->ui_mutex);
if (inode3)
mutex_unlock(&ubifs_inode(inode3)->ui_mutex);
if (inode1 != inode2)
@ -972,7 +1067,9 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct ubifs_info *c = old_dir->i_sb->s_fs_info;
struct inode *old_inode = d_inode(old_dentry);
struct inode *new_inode = d_inode(new_dentry);
struct inode *whiteout = NULL;
struct ubifs_inode *old_inode_ui = ubifs_inode(old_inode);
struct ubifs_inode *whiteout_ui = NULL;
int err, release, sync = 0, move = (new_dir != old_dir);
int is_dir = S_ISDIR(old_inode->i_mode);
int unlink = !!new_inode;
@ -997,15 +1094,13 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
* separately.
*/
dbg_gen("dent '%pd' ino %lu in dir ino %lu to dent '%pd' in dir ino %lu",
dbg_gen("dent '%pd' ino %lu in dir ino %lu to dent '%pd' in dir ino %lu flags 0x%x",
old_dentry, old_inode->i_ino, old_dir->i_ino,
new_dentry, new_dir->i_ino);
ubifs_assert(inode_is_locked(old_dir));
ubifs_assert(inode_is_locked(new_dir));
new_dentry, new_dir->i_ino, flags);
if (unlink)
ubifs_assert(inode_is_locked(new_inode));
if (unlink && is_dir) {
err = check_dir_empty(c, new_inode);
if (err)
@ -1021,7 +1116,32 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
return err;
}
lock_3_inodes(old_dir, new_dir, new_inode);
if (flags & RENAME_WHITEOUT) {
union ubifs_dev_desc *dev = NULL;
dev = kmalloc(sizeof(union ubifs_dev_desc), GFP_NOFS);
if (!dev) {
ubifs_release_budget(c, &req);
ubifs_release_budget(c, &ino_req);
return -ENOMEM;
}
err = do_tmpfile(old_dir, old_dentry, S_IFCHR | WHITEOUT_MODE, &whiteout);
if (err) {
ubifs_release_budget(c, &req);
ubifs_release_budget(c, &ino_req);
kfree(dev);
return err;
}
whiteout->i_state |= I_LINKABLE;
whiteout_ui = ubifs_inode(whiteout);
whiteout_ui->data = dev;
whiteout_ui->data_len = ubifs_encode_dev(dev, MKDEV(0, 0));
ubifs_assert(!whiteout_ui->dirty);
}
lock_4_inodes(old_dir, new_dir, new_inode, whiteout);
/*
* Like most other Unix systems, set the @i_ctime for inodes on a
@ -1091,12 +1211,34 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (unlink && IS_SYNC(new_inode))
sync = 1;
}
err = ubifs_jnl_rename(c, old_dir, old_dentry, new_dir, new_dentry,
if (whiteout) {
struct ubifs_budget_req wht_req = { .dirtied_ino = 1,
.dirtied_ino_d = \
ALIGN(ubifs_inode(whiteout)->data_len, 8) };
err = ubifs_budget_space(c, &wht_req);
if (err) {
ubifs_release_budget(c, &req);
ubifs_release_budget(c, &ino_req);
kfree(whiteout_ui->data);
whiteout_ui->data_len = 0;
iput(whiteout);
return err;
}
inc_nlink(whiteout);
mark_inode_dirty(whiteout);
whiteout->i_state &= ~I_LINKABLE;
iput(whiteout);
}
err = ubifs_jnl_rename(c, old_dir, old_dentry, new_dir, new_dentry, whiteout,
sync);
if (err)
goto out_cancel;
unlock_3_inodes(old_dir, new_dir, new_inode);
unlock_4_inodes(old_dir, new_dir, new_inode, whiteout);
ubifs_release_budget(c, &req);
mutex_lock(&old_inode_ui->ui_mutex);
@ -1129,12 +1271,74 @@ out_cancel:
inc_nlink(old_dir);
}
}
unlock_3_inodes(old_dir, new_dir, new_inode);
if (whiteout) {
drop_nlink(whiteout);
iput(whiteout);
}
unlock_4_inodes(old_dir, new_dir, new_inode, whiteout);
ubifs_release_budget(c, &ino_req);
ubifs_release_budget(c, &req);
return err;
}
static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct ubifs_info *c = old_dir->i_sb->s_fs_info;
struct ubifs_budget_req req = { .new_dent = 1, .mod_dent = 1,
.dirtied_ino = 2 };
int sync = IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir);
struct inode *fst_inode = d_inode(old_dentry);
struct inode *snd_inode = d_inode(new_dentry);
struct timespec time;
int err;
ubifs_assert(fst_inode && snd_inode);
lock_4_inodes(old_dir, new_dir, NULL, NULL);
time = ubifs_current_time(old_dir);
fst_inode->i_ctime = time;
snd_inode->i_ctime = time;
old_dir->i_mtime = old_dir->i_ctime = time;
new_dir->i_mtime = new_dir->i_ctime = time;
if (old_dir != new_dir) {
if (S_ISDIR(fst_inode->i_mode) && !S_ISDIR(snd_inode->i_mode)) {
inc_nlink(new_dir);
drop_nlink(old_dir);
}
else if (!S_ISDIR(fst_inode->i_mode) && S_ISDIR(snd_inode->i_mode)) {
drop_nlink(new_dir);
inc_nlink(old_dir);
}
}
err = ubifs_jnl_xrename(c, old_dir, old_dentry, new_dir, new_dentry,
sync);
unlock_4_inodes(old_dir, new_dir, NULL, NULL);
ubifs_release_budget(c, &req);
return err;
}
static int ubifs_rename2(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
if (flags & ~(RENAME_NOREPLACE | RENAME_WHITEOUT | RENAME_EXCHANGE))
return -EINVAL;
ubifs_assert(inode_is_locked(old_dir));
ubifs_assert(inode_is_locked(new_dir));
if (flags & RENAME_EXCHANGE)
return ubifs_xrename(old_dir, old_dentry, new_dir, new_dentry);
return ubifs_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
}
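The new flags are exercised from user space via renameat2(). Since the glibc of this era does not always ship a wrapper, the sketch below issues the raw system call; the paths and the availability of SYS_renameat2 and the RENAME_* flags in the installed headers are assumptions:

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        /* Atomically swap two directory entries. */
        if (syscall(SYS_renameat2, AT_FDCWD, "/mnt/ubifs/a",
                    AT_FDCWD, "/mnt/ubifs/b", RENAME_EXCHANGE) < 0)
                perror("RENAME_EXCHANGE");

        /* Rename and leave a whiteout behind, as overlayfs does. */
        if (syscall(SYS_renameat2, AT_FDCWD, "/mnt/ubifs/upper/file",
                    AT_FDCWD, "/mnt/ubifs/new-name", RENAME_WHITEOUT) < 0)
                perror("RENAME_WHITEOUT");

        return 0;
}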
int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
@ -1183,13 +1387,14 @@ const struct inode_operations ubifs_dir_inode_operations = {
.mkdir = ubifs_mkdir,
.rmdir = ubifs_rmdir,
.mknod = ubifs_mknod,
.rename = ubifs_rename,
.rename = ubifs_rename2,
.setattr = ubifs_setattr,
.getattr = ubifs_getattr,
.listxattr = ubifs_listxattr,
#ifdef CONFIG_UBIFS_ATIME_SUPPORT
.update_time = ubifs_update_time,
#endif
.tmpfile = ubifs_tmpfile,
};
const struct file_operations ubifs_dir_operations = {


@ -1397,7 +1397,7 @@ int ubifs_update_time(struct inode *inode, struct timespec *time,
#endif
/**
* update_ctime - update mtime and ctime of an inode.
* update_mctime - update mtime and ctime of an inode.
* @inode: inode to update
*
* This function updates mtime and ctime of the inode if it is not equivalent to


@ -113,7 +113,7 @@ static int switch_gc_head(struct ubifs_info *c)
* data_nodes_cmp - compare 2 data nodes.
* @priv: UBIFS file-system description object
* @a: first data node
* @a: second data node
* @b: second data node
*
* This function compares data nodes @a and @b. Returns %1 if @a has greater
* inode or block number, and %-1 otherwise.


@ -907,6 +907,147 @@ int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode)
return err;
}
/**
* ubifs_jnl_xrename - cross rename two directory entries.
* @c: UBIFS file-system description object
* @fst_dir: parent inode of 1st directory entry to exchange
* @fst_dentry: 1st directory entry to exchange
* @snd_dir: parent inode of 2nd directory entry to exchange
* @snd_dentry: 2nd directory entry to exchange
* @sync: non-zero if the write-buffer has to be synchronized
*
* This function implements the cross rename operation which may involve
* writing 2 inodes and 2 directory entries. It marks the written inodes as clean
* and returns zero on success. In case of failure, a negative error code is
* returned.
*/
int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
const struct dentry *fst_dentry,
const struct inode *snd_dir,
const struct dentry *snd_dentry, int sync)
{
union ubifs_key key;
struct ubifs_dent_node *dent1, *dent2;
int err, dlen1, dlen2, lnum, offs, len, plen = UBIFS_INO_NODE_SZ;
int aligned_dlen1, aligned_dlen2;
int twoparents = (fst_dir != snd_dir);
const struct inode *fst_inode = d_inode(fst_dentry);
const struct inode *snd_inode = d_inode(snd_dentry);
void *p;
dbg_jnl("dent '%pd' in dir ino %lu between dent '%pd' in dir ino %lu",
fst_dentry, fst_dir->i_ino, snd_dentry, snd_dir->i_ino);
ubifs_assert(ubifs_inode(fst_dir)->data_len == 0);
ubifs_assert(ubifs_inode(snd_dir)->data_len == 0);
ubifs_assert(mutex_is_locked(&ubifs_inode(fst_dir)->ui_mutex));
ubifs_assert(mutex_is_locked(&ubifs_inode(snd_dir)->ui_mutex));
dlen1 = UBIFS_DENT_NODE_SZ + snd_dentry->d_name.len + 1;
dlen2 = UBIFS_DENT_NODE_SZ + fst_dentry->d_name.len + 1;
aligned_dlen1 = ALIGN(dlen1, 8);
aligned_dlen2 = ALIGN(dlen2, 8);
len = aligned_dlen1 + aligned_dlen2 + ALIGN(plen, 8);
if (twoparents)
len += plen;
dent1 = kmalloc(len, GFP_NOFS);
if (!dent1)
return -ENOMEM;
/* Make reservation before allocating sequence numbers */
err = make_reservation(c, BASEHD, len);
if (err)
goto out_free;
/* Make new dent for 1st entry */
dent1->ch.node_type = UBIFS_DENT_NODE;
dent_key_init_flash(c, &dent1->key, snd_dir->i_ino, &snd_dentry->d_name);
dent1->inum = cpu_to_le64(fst_inode->i_ino);
dent1->type = get_dent_type(fst_inode->i_mode);
dent1->nlen = cpu_to_le16(snd_dentry->d_name.len);
memcpy(dent1->name, snd_dentry->d_name.name, snd_dentry->d_name.len);
dent1->name[snd_dentry->d_name.len] = '\0';
zero_dent_node_unused(dent1);
ubifs_prep_grp_node(c, dent1, dlen1, 0);
/* Make new dent for 2nd entry */
dent2 = (void *)dent1 + aligned_dlen1;
dent2->ch.node_type = UBIFS_DENT_NODE;
dent_key_init_flash(c, &dent2->key, fst_dir->i_ino, &fst_dentry->d_name);
dent2->inum = cpu_to_le64(snd_inode->i_ino);
dent2->type = get_dent_type(snd_inode->i_mode);
dent2->nlen = cpu_to_le16(fst_dentry->d_name.len);
memcpy(dent2->name, fst_dentry->d_name.name, fst_dentry->d_name.len);
dent2->name[fst_dentry->d_name.len] = '\0';
zero_dent_node_unused(dent2);
ubifs_prep_grp_node(c, dent2, dlen2, 0);
p = (void *)dent2 + aligned_dlen2;
if (!twoparents)
pack_inode(c, p, fst_dir, 1);
else {
pack_inode(c, p, fst_dir, 0);
p += ALIGN(plen, 8);
pack_inode(c, p, snd_dir, 1);
}
err = write_head(c, BASEHD, dent1, len, &lnum, &offs, sync);
if (err)
goto out_release;
if (!sync) {
struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;
ubifs_wbuf_add_ino_nolock(wbuf, fst_dir->i_ino);
ubifs_wbuf_add_ino_nolock(wbuf, snd_dir->i_ino);
}
release_head(c, BASEHD);
dent_key_init(c, &key, snd_dir->i_ino, &snd_dentry->d_name);
err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, &snd_dentry->d_name);
if (err)
goto out_ro;
offs += aligned_dlen1;
dent_key_init(c, &key, fst_dir->i_ino, &fst_dentry->d_name);
err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, &fst_dentry->d_name);
if (err)
goto out_ro;
offs += aligned_dlen2;
ino_key_init(c, &key, fst_dir->i_ino);
err = ubifs_tnc_add(c, &key, lnum, offs, plen);
if (err)
goto out_ro;
if (twoparents) {
offs += ALIGN(plen, 8);
ino_key_init(c, &key, snd_dir->i_ino);
err = ubifs_tnc_add(c, &key, lnum, offs, plen);
if (err)
goto out_ro;
}
finish_reservation(c);
mark_inode_clean(c, ubifs_inode(fst_dir));
if (twoparents)
mark_inode_clean(c, ubifs_inode(snd_dir));
kfree(dent1);
return 0;
out_release:
release_head(c, BASEHD);
out_ro:
ubifs_ro_mode(c, err);
finish_reservation(c);
out_free:
kfree(dent1);
return err;
}
/**
* ubifs_jnl_rename - rename a directory entry.
* @c: UBIFS file-system description object
@ -917,14 +1058,15 @@ int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode)
* @sync: non-zero if the write-buffer has to be synchronized
*
* This function implements the re-name operation which may involve writing up
* to 3 inodes and 2 directory entries. It marks the written inodes as clean
* to 4 inodes and 2 directory entries. It marks the written inodes as clean
* and returns zero on success. In case of failure, a negative error code is
* returned.
*/
int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
const struct dentry *old_dentry,
const struct inode *new_dir,
const struct dentry *new_dentry, int sync)
const struct dentry *new_dentry,
const struct inode *whiteout, int sync)
{
void *p;
union ubifs_key key;
@ -958,7 +1100,7 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
aligned_dlen1 = ALIGN(dlen1, 8);
aligned_dlen2 = ALIGN(dlen2, 8);
len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8);
if (old_dir != new_dir)
if (move)
len += plen;
dent = kmalloc(len, GFP_NOFS);
if (!dent)
@ -980,13 +1122,19 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
zero_dent_node_unused(dent);
ubifs_prep_grp_node(c, dent, dlen1, 0);
/* Make deletion dent */
dent2 = (void *)dent + aligned_dlen1;
dent2->ch.node_type = UBIFS_DENT_NODE;
dent_key_init_flash(c, &dent2->key, old_dir->i_ino,
&old_dentry->d_name);
dent2->inum = 0;
dent2->type = DT_UNKNOWN;
if (whiteout) {
dent2->inum = cpu_to_le64(whiteout->i_ino);
dent2->type = get_dent_type(whiteout->i_mode);
} else {
/* Make deletion dent */
dent2->inum = 0;
dent2->type = DT_UNKNOWN;
}
dent2->nlen = cpu_to_le16(old_dentry->d_name.len);
memcpy(dent2->name, old_dentry->d_name.name, old_dentry->d_name.len);
dent2->name[old_dentry->d_name.len] = '\0';
@ -1035,16 +1183,26 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
if (err)
goto out_ro;
err = ubifs_add_dirt(c, lnum, dlen2);
if (err)
goto out_ro;
offs += aligned_dlen1;
if (whiteout) {
dent_key_init(c, &key, old_dir->i_ino, &old_dentry->d_name);
err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, &old_dentry->d_name);
if (err)
goto out_ro;
dent_key_init(c, &key, old_dir->i_ino, &old_dentry->d_name);
err = ubifs_tnc_remove_nm(c, &key, &old_dentry->d_name);
if (err)
goto out_ro;
ubifs_delete_orphan(c, whiteout->i_ino);
} else {
err = ubifs_add_dirt(c, lnum, dlen2);
if (err)
goto out_ro;
offs += aligned_dlen1 + aligned_dlen2;
dent_key_init(c, &key, old_dir->i_ino, &old_dentry->d_name);
err = ubifs_tnc_remove_nm(c, &key, &old_dentry->d_name);
if (err)
goto out_ro;
}
offs += aligned_dlen2;
if (new_inode) {
ino_key_init(c, &key, new_inode->i_ino);
err = ubifs_tnc_add(c, &key, lnum, offs, ilen);
@ -1058,7 +1216,7 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
if (err)
goto out_ro;
if (old_dir != new_dir) {
if (move) {
offs += ALIGN(plen, 8);
ino_key_init(c, &key, new_dir->i_ino);
err = ubifs_tnc_add(c, &key, lnum, offs, plen);


@ -636,7 +636,7 @@ const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c,
/**
* ubifs_get_lp_stats - get lprops statistics.
* @c: UBIFS file-system description object
* @st: return statistics
* @lst: return statistics
*/
void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *lst)
{


@ -34,7 +34,6 @@ static int dbg_populate_lsave(struct ubifs_info *c);
/**
* first_dirty_cnode - find first dirty cnode.
* @c: UBIFS file-system description object
* @nnode: nnode at which to start
*
* This function returns the first dirty cnode or %NULL if there is not one.
@ -1623,7 +1622,6 @@ static int dbg_is_node_dirty(struct ubifs_info *c, int node_type, int lnum,
* dbg_check_ltab_lnum - check the ltab for a LPT LEB number.
* @c: the UBIFS file-system description object
* @lnum: LEB number where node was written
* @offs: offset where node was written
*
* This function returns %0 on success and a negative error code on failure.
*/
@ -1870,7 +1868,7 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len)
}
/**
* ubifs_dump_lpt_leb - dump an LPT LEB.
* dump_lpt_leb - dump an LPT LEB.
* @c: UBIFS file-system description object
* @lnum: LEB number to dump
*


@ -267,7 +267,7 @@ static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r)
* replay_entries_cmp - compare 2 replay entries.
* @priv: UBIFS file-system description object
* @a: first replay entry
* @a: second replay entry
* @b: second replay entry
*
* This is a comparison function for 'list_sort()' which compares 2 replay
* entries @a and @b by comparing their sequence number. Returns %1 if @a has


@ -157,6 +157,7 @@ enum {
WB_MUTEX_1 = 0,
WB_MUTEX_2 = 1,
WB_MUTEX_3 = 2,
WB_MUTEX_4 = 3,
};
/*
@ -1520,10 +1521,15 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
const union ubifs_key *key, const void *buf, int len);
int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode);
int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode);
int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
const struct dentry *fst_dentry,
const struct inode *snd_dir,
const struct dentry *snd_dentry, int sync);
int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
const struct dentry *old_dentry,
const struct inode *new_dir,
const struct dentry *new_dentry, int sync);
const struct dentry *new_dentry,
const struct inode *whiteout, int sync);
int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
loff_t old_size, loff_t new_size);
int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,


@ -200,6 +200,7 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
struct ubifs_inode *host_ui = ubifs_inode(host);
struct ubifs_inode *ui = ubifs_inode(inode);
void *buf = NULL;
int old_size;
struct ubifs_budget_req req = { .dirtied_ino = 2,
.dirtied_ino_d = ALIGN(size, 8) + ALIGN(host_ui->data_len, 8) };
@ -217,12 +218,13 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
kfree(ui->data);
ui->data = buf;
inode->i_size = ui->ui_size = size;
old_size = ui->data_len;
ui->data_len = size;
mutex_unlock(&ui->ui_mutex);
mutex_lock(&host_ui->ui_mutex);
host->i_ctime = ubifs_current_time(host);
host_ui->xattr_size -= CALC_XATTR_BYTES(ui->data_len);
host_ui->xattr_size -= CALC_XATTR_BYTES(old_size);
host_ui->xattr_size += CALC_XATTR_BYTES(size);
/*
@ -241,7 +243,7 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
out_cancel:
host_ui->xattr_size -= CALC_XATTR_BYTES(size);
host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
host_ui->xattr_size += CALC_XATTR_BYTES(old_size);
mutex_unlock(&host_ui->ui_mutex);
make_bad_inode(inode);
out_free: