Merge tag 'upstream-3.13-rc1' of git://git.infradead.org/linux-ubi

Pull UBI changes from Artem Bityutskiy:
 "A bunch of fixes for the fastmap feature, which is still new and
  rather experimental.  It looks like it starts getting more users.

  No significant changes for the "classical" non-fastmap UBI"

* tag 'upstream-3.13-rc1' of git://git.infradead.org/linux-ubi:
  UBI: Add some asserts to ubi_attach_fastmap()
  UBI: Fix memory leak in ubi_attach_fastmap() error path
  UBI: simplify image sequence test
  UBI: fastmap: fix backward compatibility with image_seq
  UBI: Call scan_all() with correct offset in error case
  UBI: Fix error path in scan_pool()
  UBI: fix refill_wl_user_pool()
Linus Torvalds 2013-11-13 15:29:38 +09:00
commit 39222c82f7
3 changed files with 43 additions and 13 deletions

drivers/mtd/ubi/attach.c

@@ -900,10 +900,9 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
 	 * number.
 	 */
 	image_seq = be32_to_cpu(ech->image_seq);
-	if (!ubi->image_seq && image_seq)
+	if (!ubi->image_seq)
 		ubi->image_seq = image_seq;
-	if (ubi->image_seq && image_seq &&
-	    ubi->image_seq != image_seq) {
+	if (image_seq && ubi->image_seq != image_seq) {
 		ubi_err("bad image sequence number %d in PEB %d, expected %d",
 			image_seq, pnum, ubi->image_seq);
 		ubi_dump_ec_hdr(ech);
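
Taken together with the matching hunks in fastmap.c below, the image_seq changes implement one rule: remember the first sequence number seen, and treat a stored value of zero as "no sequence number" (images written by older UBI versions) rather than as a mismatch. A minimal user-space sketch of that rule; the names below are illustrative and not taken from the UBI sources:

/*
 * Stand-alone sketch (not kernel code) of the image_seq rule above:
 * adopt the first value seen, and treat 0 as "no sequence number"
 * (legacy images) instead of a mismatch.
 */
#include <stdint.h>
#include <stdio.h>

static int image_seq_ok(uint32_t *known, uint32_t peb_seq)
{
	if (!*known)			/* nothing adopted yet: take this one */
		*known = peb_seq;
	return !peb_seq || peb_seq == *known;
}

int main(void)
{
	uint32_t seq = 0;

	printf("%d\n", image_seq_ok(&seq, 0x1234));	/* adopt 0x1234 -> 1 */
	printf("%d\n", image_seq_ok(&seq, 0));		/* legacy zero  -> 1 */
	printf("%d\n", image_seq_ok(&seq, 0x9999));	/* mismatch     -> 0 */
	return 0;
}

Compiled stand-alone, the three calls print 1, 1 and 0.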
@@ -1417,9 +1416,11 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
 				ai = alloc_ai("ubi_aeb_slab_cache2");
 				if (!ai)
 					return -ENOMEM;
-			}
 
-			err = scan_all(ubi, ai, UBI_FM_MAX_START);
+				err = scan_all(ubi, ai, 0);
+			} else {
+				err = scan_all(ubi, ai, UBI_FM_MAX_START);
+			}
 		}
 	}
 #else
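
The point of the new branch: when the fastmap attach fails with anything other than UBI_NO_FASTMAP, a fresh attach info is allocated (alloc_ai() above), so nothing scanned so far survives and the fallback scan has to start at PEB 0; only when no fastmap exists at all can scan_all() keep starting at UBI_FM_MAX_START. A toy sketch of that decision, using invented stand-ins for the UBI constants:

/*
 * Sketch (not kernel code) of the fallback rule in ubi_attach() above.
 * FM_MAX_START and the fm_result values are stand-ins, not UBI's.
 */
#include <assert.h>

enum { FM_MAX_START = 64 };			/* stand-in for UBI_FM_MAX_START */
enum fm_result { FM_OK = 0, FM_NONE, FM_BAD };	/* stand-ins for the error codes */

static int scan_start_peb(enum fm_result r)
{
	/* bad fastmap: everything must be rescanned from the beginning */
	return r == FM_BAD ? 0 : FM_MAX_START;
}

int main(void)
{
	assert(scan_start_peb(FM_BAD) == 0);
	assert(scan_start_peb(FM_NONE) == FM_MAX_START);
	return 0;
}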

drivers/mtd/ubi/fastmap.c

@@ -407,6 +407,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
 	 */
 	for (i = 0; i < pool_size; i++) {
 		int scrub = 0;
+		int image_seq;
 
 		pnum = be32_to_cpu(pebs[i]);
@@ -425,10 +426,16 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
 		} else if (ret == UBI_IO_BITFLIPS)
 			scrub = 1;
 
-		if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
+		/*
+		 * Older UBI implementations have image_seq set to zero, so
+		 * we shouldn't fail if image_seq == 0.
+		 */
+		image_seq = be32_to_cpu(ech->image_seq);
+
+		if (image_seq && (image_seq != ubi->image_seq)) {
 			ubi_err("bad image seq: 0x%x, expected: 0x%x",
 				be32_to_cpu(ech->image_seq), ubi->image_seq);
-			err = UBI_BAD_FASTMAP;
+			ret = UBI_BAD_FASTMAP;
 			goto out;
 		}
@@ -819,6 +826,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
 		list_move_tail(&tmp_aeb->u.list, &ai->free);
 
+	ubi_assert(list_empty(&used));
+	ubi_assert(list_empty(&eba_orphans));
+	ubi_assert(list_empty(&free));
+
 	/*
 	 * If fastmap is leaking PEBs (must not happen), raise a
 	 * fat warning and fall back to scanning mode.
@@ -834,6 +845,19 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
 fail_bad:
 	ret = UBI_BAD_FASTMAP;
 fail:
+	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
+		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+		list_del(&tmp_aeb->u.list);
+	}
+	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) {
+		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+		list_del(&tmp_aeb->u.list);
+	}
+	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
+		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+		list_del(&tmp_aeb->u.list);
+	}
+
 	return ret;
 }
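
The cleanup added to the fail path walks each list with list_for_each_entry_safe(), which caches the next entry before the loop body runs, so the current entry can be dropped from the list and released without derailing the iteration. A stand-alone illustration of the same idea on a plain singly linked list (nothing below is kernel code):

/*
 * "Remember the successor, then free the current node" teardown pattern,
 * sketched with a user-space singly linked list.
 */
#include <stdlib.h>

struct node {
	struct node *next;
	int payload;
};

static void free_all(struct node **head)
{
	struct node *cur = *head, *next;

	while (cur) {
		next = cur->next;	/* grab the successor first... */
		free(cur);		/* ...then it is safe to free cur */
		cur = next;
	}
	*head = NULL;
}

int main(void)
{
	struct node *head = NULL;

	/* build a short list, then tear it down */
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		if (!n)
			break;
		n->payload = i;
		n->next = head;
		head = n;
	}
	free_all(&head);
	return 0;
}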
@@ -923,6 +947,8 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
 	}
 
 	for (i = 0; i < used_blocks; i++) {
+		int image_seq;
+
 		pnum = be32_to_cpu(fmsb->block_loc[i]);
 
 		if (ubi_io_is_bad(ubi, pnum)) {
@@ -940,10 +966,17 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
 		} else if (ret == UBI_IO_BITFLIPS)
 			fm->to_be_tortured[i] = 1;
 
+		image_seq = be32_to_cpu(ech->image_seq);
 		if (!ubi->image_seq)
-			ubi->image_seq = be32_to_cpu(ech->image_seq);
+			ubi->image_seq = image_seq;
 
-		if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
+		/*
+		 * Older UBI implementations have image_seq set to zero, so
+		 * we shouldn't fail if image_seq == 0.
+		 */
+		if (image_seq && (image_seq != ubi->image_seq)) {
 			ubi_err("wrong image seq:%d instead of %d",
 				be32_to_cpu(ech->image_seq), ubi->image_seq);
 			ret = UBI_BAD_FASTMAP;
 			goto free_hdr;
 		}
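
Every on-flash field used in these hunks (ech->image_seq, fmsb->block_loc[i]) is stored big-endian and passed through be32_to_cpu() before use. For readers unfamiliar with the helper, a rough user-space equivalent of the conversion, written against raw bytes rather than the kernel's __be32 type:

/*
 * Interpret 4 raw bytes as a big-endian 32-bit value regardless of the
 * host's endianness; a user-space stand-in for be32_to_cpu() semantics.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t be32_to_host(const unsigned char b[4])
{
	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}

int main(void)
{
	/* image_seq 0x00001234 as it would sit in a header on flash */
	const unsigned char raw[4] = { 0x00, 0x00, 0x12, 0x34 };

	printf("0x%x\n", be32_to_host(raw));	/* prints 0x1234 */
	return 0;
}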

drivers/mtd/ubi/wl.c

@@ -599,10 +599,6 @@ static void refill_wl_user_pool(struct ubi_device *ubi)
 	return_unused_pool_pebs(ubi, pool);
 
 	for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
-		if (!ubi->free.rb_node ||
-		   (ubi->free_count - ubi->beb_rsvd_pebs < 1))
-			break;
-
 		pool->pebs[pool->size] = __wl_get_peb(ubi);
 		if (pool->pebs[pool->size] < 0)
 			break;
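
With the up-front free-count check gone, the refill loop relies on the loop bound and on __wl_get_peb()'s return value to decide when to stop. A stand-alone sketch of that "fill until the allocator says no" pattern; get_entry() and POOL_MAX are invented stand-ins, not UBI code:

/*
 * Fill a fixed-size pool from an allocator and let the allocator's
 * return value, not a separate count, terminate the refill early.
 */
#include <stdio.h>

#define POOL_MAX 8

static int next_id;

/* pretend allocator: hands out 5 entries, then reports exhaustion */
static int get_entry(void)
{
	return next_id < 5 ? next_id++ : -1;
}

int main(void)
{
	int pool[POOL_MAX];
	int size;

	for (size = 0; size < POOL_MAX; size++) {
		pool[size] = get_entry();
		if (pool[size] < 0)
			break;		/* allocator said no: stop refilling */
	}
	printf("filled %d of %d slots\n", size, POOL_MAX);
	return 0;
}

Run as-is it reports 5 of 8 slots filled, mirroring a pool that stops growing as soon as the allocator runs dry.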