@@ -830,8 +830,8 @@ static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
  * @q: queue the io is for
  * @bio: the source bio
  * @dev: target device
- * @from: source sector
  * @to: target sector
+ * @from: source sector
  *
  * Description:
  *     Device mapper or raid target sometimes need to split a bio because
@@ -839,7 +839,7 @@ static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
  *
  **/
 static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
-				dev_t dev, sector_t from, sector_t to)
+				dev_t dev, sector_t to, sector_t from)
 {
 	struct blk_trace *bt = q->blk_trace;
 	struct blk_io_trace_remap r;
@@ -847,9 +847,9 @@ static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
 	if (likely(!bt))
 		return;
 
-	r.device = cpu_to_be32(dev);
-	r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
-	r.sector = cpu_to_be64(to);
+	r.device_from = cpu_to_be32(dev);
+	r.device_to = cpu_to_be32(bio->bi_bdev->bd_dev);
+	r.sector_from = cpu_to_be64(from);
 
 	__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP,
 			!bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
@@ -1028,11 +1028,11 @@ static void get_pdu_remap(const struct trace_entry *ent,
 			  struct blk_io_trace_remap *r)
 {
 	const struct blk_io_trace_remap *__r = pdu_start(ent);
-	__u64 sector = __r->sector;
+	__u64 sector_from = __r->sector_from;
 
-	r->device = be32_to_cpu(__r->device);
 	r->device_from = be32_to_cpu(__r->device_from);
-	r->sector = be64_to_cpu(sector);
+	r->device_to = be32_to_cpu(__r->device_to);
+	r->sector_from = be64_to_cpu(sector_from);
 }
 
 typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act);
@@ -1148,13 +1148,13 @@ static int blk_log_with_error(struct trace_seq *s,
 
 static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
 {
-	struct blk_io_trace_remap r = { .device = 0, };
+	struct blk_io_trace_remap r = { .device_from = 0, };
 
 	get_pdu_remap(ent, &r);
 	return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
-			t_sector(ent),
-			t_sec(ent), MAJOR(r.device), MINOR(r.device),
-			(unsigned long long)r.sector);
+			t_sector(ent), t_sec(ent),
+			MAJOR(r.device_from), MINOR(r.device_from),
+			(unsigned long long)r.sector_from);
 }
 
 static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)