@@ -407,6 +407,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
 	 */
 	for (i = 0; i < pool_size; i++) {
 		int scrub = 0;
+		int image_seq;
 
 		pnum = be32_to_cpu(pebs[i]);
 
@@ -425,10 +426,16 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
 		} else if (ret == UBI_IO_BITFLIPS)
 			scrub = 1;
 
-		if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
+		/*
+		 * Older UBI implementations have image_seq set to zero, so
+		 * we shouldn't fail if image_seq == 0.
+		 */
+		image_seq = be32_to_cpu(ech->image_seq);
+
+		if (image_seq && (image_seq != ubi->image_seq)) {
 			ubi_err("bad image seq: 0x%x, expected: 0x%x",
 				be32_to_cpu(ech->image_seq), ubi->image_seq);
-			err = UBI_BAD_FASTMAP;
+			ret = UBI_BAD_FASTMAP;
 			goto out;
 		}
 
@@ -819,6 +826,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
 		list_move_tail(&tmp_aeb->u.list, &ai->free);
 
+	ubi_assert(list_empty(&used));
+	ubi_assert(list_empty(&eba_orphans));
+	ubi_assert(list_empty(&free));
+
 	/*
 	 * If fastmap is leaking PEBs (must not happen), raise a
 	 * fat warning and fall back to scanning mode.
@@ -834,6 +845,19 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
 fail_bad:
 	ret = UBI_BAD_FASTMAP;
 fail:
+	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
+		list_del(&tmp_aeb->u.list);
+		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+	}
+	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) {
+		list_del(&tmp_aeb->u.list);
+		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+	}
+	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
+		list_del(&tmp_aeb->u.list);
+		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+	}
+
 	return ret;
 }
 
@@ -923,6 +947,8 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
 	}
 
 	for (i = 0; i < used_blocks; i++) {
+		int image_seq;
+
 		pnum = be32_to_cpu(fmsb->block_loc[i]);
 
 		if (ubi_io_is_bad(ubi, pnum)) {
@@ -940,10 +966,17 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
 		} else if (ret == UBI_IO_BITFLIPS)
 			fm->to_be_tortured[i] = 1;
 
+		image_seq = be32_to_cpu(ech->image_seq);
 		if (!ubi->image_seq)
-			ubi->image_seq = be32_to_cpu(ech->image_seq);
+			ubi->image_seq = image_seq;
 
-		if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
+		/*
+		 * Older UBI implementations have image_seq set to zero, so
+		 * we shouldn't fail if image_seq == 0.
+		 */
+		if (image_seq && (image_seq != ubi->image_seq)) {
+			ubi_err("wrong image seq:%d instead of %d",
+				be32_to_cpu(ech->image_seq), ubi->image_seq);
 			ret = UBI_BAD_FASTMAP;
 			goto free_hdr;
 		}