
blktrace: port to tracepoints, update

Port to the new tracepoints API: split DEFINE_TRACE() and DECLARE_TRACE()
sites. Spread them out to the usage sites, as suggested by
Mathieu Desnoyers.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
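
For context, a minimal sketch of the pattern this commit adopts (the tracepoint names below are taken from the diff; the wrapper function is only illustrative): the header carries a single DECLARE_TRACE() per tracepoint, exactly one compilation unit near the usage site provides the matching DEFINE_TRACE(), and callers simply invoke the generated trace_*() stub, which is a no-op until a probe is registered.

	/* include/trace/block.h: declaration only, usable from any file */
	DECLARE_TRACE(block_plug,
		TPPROTO(struct request_queue *q),
		TPARGS(q));

	/* block/blk-core.c: the single definition that instantiates the tracepoint */
	DEFINE_TRACE(block_plug);

	/* hypothetical call site in block/blk-core.c */
	static void plug_example(struct request_queue *q)
	{
		trace_block_plug(q);	/* no-op unless a probe is attached */
	}

Tracepoints referenced from modules additionally need EXPORT_TRACEPOINT_SYMBOL_GPL(), which is why block_remap is exported from block/blk-core.c below while being fired from drivers/md/dm.c as well.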
Ingo Molnar, 16 years ago
parent
commit
0bfc24559d
6 changed files with 74 additions and 34 deletions
  1. block/blk-core.c  (+13 -0)
  2. block/elevator.c  (+5 -0)
  3. drivers/md/dm.c  (+2 -0)
  4. fs/bio.c  (+2 -0)
  5. include/trace/block.h  (+50 -34)
  6. mm/bounce.c  (+2 -0)

+ 13 - 0
block/blk-core.c

@@ -32,6 +32,19 @@
 
 #include "blk.h"
 
+DEFINE_TRACE(block_plug);
+DEFINE_TRACE(block_unplug_io);
+DEFINE_TRACE(block_unplug_timer);
+DEFINE_TRACE(block_getrq);
+DEFINE_TRACE(block_sleeprq);
+DEFINE_TRACE(block_rq_requeue);
+DEFINE_TRACE(block_bio_backmerge);
+DEFINE_TRACE(block_bio_frontmerge);
+DEFINE_TRACE(block_bio_queue);
+DEFINE_TRACE(block_rq_complete);
+DEFINE_TRACE(block_remap);	/* Also used in drivers/md/dm.c */
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
+
 static int __make_request(struct request_queue *q, struct bio *bio);
 
 /*

+ 5 - 0
block/elevator.c

@@ -42,6 +42,8 @@
 static DEFINE_SPINLOCK(elv_list_lock);
 static LIST_HEAD(elv_list);
 
+DEFINE_TRACE(block_rq_abort);
+
 /*
  * Merge hash stuff.
  */
@@ -53,6 +55,9 @@ static const int elv_hash_shift = 6;
 #define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
 #define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
 
+DEFINE_TRACE(block_rq_insert);
+DEFINE_TRACE(block_rq_issue);
+
 /*
  * Query io scheduler to see if the current process issuing bio may be
  * merged with rq.

+ 2 - 0
drivers/md/dm.c

@@ -52,6 +52,8 @@ struct dm_target_io {
 	union map_info info;
 };
 
+DEFINE_TRACE(block_bio_complete);
+
 union map_info *dm_get_mapinfo(struct bio *bio)
 {
 	if (bio && bio->bi_private)

+ 2 - 0
fs/bio.c

@@ -29,6 +29,8 @@
 #include <trace/block.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 
+DEFINE_TRACE(block_split);
+
 static struct kmem_cache *bio_slab __read_mostly;
 
 static mempool_t *bio_split_pool __read_mostly;

+ 50 - 34
include/trace/block.h

@@ -4,57 +4,73 @@
 #include <linux/blkdev.h>
 #include <linux/tracepoint.h>
 
-DEFINE_TRACE(block_rq_abort,
+DECLARE_TRACE(block_rq_abort,
 	TPPROTO(struct request_queue *q, struct request *rq),
-	TPARGS(q, rq));
-DEFINE_TRACE(block_rq_insert,
+		TPARGS(q, rq));
+
+DECLARE_TRACE(block_rq_insert,
 	TPPROTO(struct request_queue *q, struct request *rq),
-	TPARGS(q, rq));
-DEFINE_TRACE(block_rq_issue,
+		TPARGS(q, rq));
+
+DECLARE_TRACE(block_rq_issue,
 	TPPROTO(struct request_queue *q, struct request *rq),
-	TPARGS(q, rq));
-DEFINE_TRACE(block_rq_requeue,
+		TPARGS(q, rq));
+
+DECLARE_TRACE(block_rq_requeue,
 	TPPROTO(struct request_queue *q, struct request *rq),
-	TPARGS(q, rq));
-DEFINE_TRACE(block_rq_complete,
+		TPARGS(q, rq));
+
+DECLARE_TRACE(block_rq_complete,
 	TPPROTO(struct request_queue *q, struct request *rq),
-	TPARGS(q, rq));
-DEFINE_TRACE(block_bio_bounce,
+		TPARGS(q, rq));
+
+DECLARE_TRACE(block_bio_bounce,
 	TPPROTO(struct request_queue *q, struct bio *bio),
-	TPARGS(q, bio));
-DEFINE_TRACE(block_bio_complete,
+		TPARGS(q, bio));
+
+DECLARE_TRACE(block_bio_complete,
 	TPPROTO(struct request_queue *q, struct bio *bio),
-	TPARGS(q, bio));
-DEFINE_TRACE(block_bio_backmerge,
+		TPARGS(q, bio));
+
+DECLARE_TRACE(block_bio_backmerge,
 	TPPROTO(struct request_queue *q, struct bio *bio),
-	TPARGS(q, bio));
-DEFINE_TRACE(block_bio_frontmerge,
+		TPARGS(q, bio));
+
+DECLARE_TRACE(block_bio_frontmerge,
 	TPPROTO(struct request_queue *q, struct bio *bio),
-	TPARGS(q, bio));
-DEFINE_TRACE(block_bio_queue,
+		TPARGS(q, bio));
+
+DECLARE_TRACE(block_bio_queue,
 	TPPROTO(struct request_queue *q, struct bio *bio),
-	TPARGS(q, bio));
-DEFINE_TRACE(block_getrq,
+		TPARGS(q, bio));
+
+DECLARE_TRACE(block_getrq,
 	TPPROTO(struct request_queue *q, struct bio *bio, int rw),
-	TPARGS(q, bio, rw));
-DEFINE_TRACE(block_sleeprq,
+		TPARGS(q, bio, rw));
+
+DECLARE_TRACE(block_sleeprq,
 	TPPROTO(struct request_queue *q, struct bio *bio, int rw),
-	TPARGS(q, bio, rw));
-DEFINE_TRACE(block_plug,
+		TPARGS(q, bio, rw));
+
+DECLARE_TRACE(block_plug,
 	TPPROTO(struct request_queue *q),
-	TPARGS(q));
-DEFINE_TRACE(block_unplug_timer,
+		TPARGS(q));
+
+DECLARE_TRACE(block_unplug_timer,
 	TPPROTO(struct request_queue *q),
-	TPARGS(q));
-DEFINE_TRACE(block_unplug_io,
+		TPARGS(q));
+
+DECLARE_TRACE(block_unplug_io,
 	TPPROTO(struct request_queue *q),
-	TPARGS(q));
-DEFINE_TRACE(block_split,
+		TPARGS(q));
+
+DECLARE_TRACE(block_split,
 	TPPROTO(struct request_queue *q, struct bio *bio, unsigned int pdu),
-	TPARGS(q, bio, pdu));
-DEFINE_TRACE(block_remap,
+		TPARGS(q, bio, pdu));
+
+DECLARE_TRACE(block_remap,
 	TPPROTO(struct request_queue *q, struct bio *bio, dev_t dev,
 		sector_t from, sector_t to),
-	TPARGS(q, bio, dev, from, to));
+		TPARGS(q, bio, dev, from, to));
 
 #endif
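
As a rough illustration of the consumer side (probe name and module boilerplate here are hypothetical, not taken from the blktrace code): DECLARE_TRACE() also generates register_trace_*()/unregister_trace_*() helpers, so a tracer attaches a callback whose signature matches the TPPROTO and detaches it when done.

	#include <trace/block.h>

	/* hypothetical probe matching TPPROTO(struct request_queue *q) */
	static void my_block_plug_probe(struct request_queue *q)
	{
		/* record the plug event for queue q */
	}

	static int my_tracer_attach(void)
	{
		/* helper generated by DECLARE_TRACE(block_plug, ...) */
		return register_trace_block_plug(my_block_plug_probe);
	}

	static void my_tracer_detach(void)
	{
		unregister_trace_block_plug(my_block_plug_probe);
		tracepoint_synchronize_unregister();	/* wait for in-flight probes */
	}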

+ 2 - 0
mm/bounce.c

@@ -22,6 +22,8 @@
 
 static mempool_t *page_pool, *isa_page_pool;
 
+DEFINE_TRACE(block_bio_bounce);
+
 #ifdef CONFIG_HIGHMEM
 static __init int init_emergency_pool(void)
 {