@@ -407,6 +407,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
 	 */
 	for (i = 0; i < pool_size; i++) {
 		int scrub = 0;
+		int image_seq;
 
 		pnum = be32_to_cpu(pebs[i]);
 
@@ -425,7 +426,13 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
 		} else if (ret == UBI_IO_BITFLIPS)
 			scrub = 1;
 
-		if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
+		/*
+		 * Older UBI implementations have image_seq set to zero, so
+		 * we shouldn't fail if image_seq == 0.
+		 */
+		image_seq = be32_to_cpu(ech->image_seq);
+
+		if (image_seq && (image_seq != ubi->image_seq)) {
 			ubi_err("bad image seq: 0x%x, expected: 0x%x",
 				be32_to_cpu(ech->image_seq), ubi->image_seq);
 			ret = UBI_BAD_FASTMAP;
@@ -923,6 +930,8 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
 	}
 
 	for (i = 0; i < used_blocks; i++) {
+		int image_seq;
+
 		pnum = be32_to_cpu(fmsb->block_loc[i]);
 
 		if (ubi_io_is_bad(ubi, pnum)) {
@@ -940,10 +949,17 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
 		} else if (ret == UBI_IO_BITFLIPS)
 			fm->to_be_tortured[i] = 1;
 
+		image_seq = be32_to_cpu(ech->image_seq);
 		if (!ubi->image_seq)
-			ubi->image_seq = be32_to_cpu(ech->image_seq);
+			ubi->image_seq = image_seq;
 
-		if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
+		/*
+		 * Older UBI implementations have image_seq set to zero, so
+		 * we shouldn't fail if image_seq == 0.
+		 */
+		if (image_seq && (image_seq != ubi->image_seq)) {
+			ubi_err("wrong image seq:%d instead of %d",
+				be32_to_cpu(ech->image_seq), ubi->image_seq);
 			ret = UBI_BAD_FASTMAP;
 			goto free_hdr;
 		}