/*
 * block.h - block layer tracepoint definitions (trace/events/block.h).
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM block

#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BLOCK_H

#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
#include <linux/tracepoint.h>
  8. DECLARE_EVENT_CLASS(block_rq_with_error,
  9. TP_PROTO(struct request_queue *q, struct request *rq),
  10. TP_ARGS(q, rq),
  11. TP_STRUCT__entry(
  12. __field( dev_t, dev )
  13. __field( sector_t, sector )
  14. __field( unsigned int, nr_sector )
  15. __field( int, errors )
  16. __array( char, rwbs, 6 )
  17. __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
  18. ),
  19. TP_fast_assign(
  20. __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
  21. __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
  22. __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
  23. __entry->errors = rq->errors;
  24. blk_fill_rwbs_rq(__entry->rwbs, rq);
  25. blk_dump_cmd(__get_str(cmd), rq);
  26. ),
  27. TP_printk("%d,%d %s (%s) %llu + %u [%d]",
  28. MAJOR(__entry->dev), MINOR(__entry->dev),
  29. __entry->rwbs, __get_str(cmd),
  30. (unsigned long long)__entry->sector,
  31. __entry->nr_sector, __entry->errors)
  32. );
  33. DEFINE_EVENT(block_rq_with_error, block_rq_abort,
  34. TP_PROTO(struct request_queue *q, struct request *rq),
  35. TP_ARGS(q, rq)
  36. );
  37. DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
  38. TP_PROTO(struct request_queue *q, struct request *rq),
  39. TP_ARGS(q, rq)
  40. );
  41. DEFINE_EVENT(block_rq_with_error, block_rq_complete,
  42. TP_PROTO(struct request_queue *q, struct request *rq),
  43. TP_ARGS(q, rq)
  44. );
  45. DECLARE_EVENT_CLASS(block_rq,
  46. TP_PROTO(struct request_queue *q, struct request *rq),
  47. TP_ARGS(q, rq),
  48. TP_STRUCT__entry(
  49. __field( dev_t, dev )
  50. __field( sector_t, sector )
  51. __field( unsigned int, nr_sector )
  52. __field( unsigned int, bytes )
  53. __array( char, rwbs, 6 )
  54. __array( char, comm, TASK_COMM_LEN )
  55. __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
  56. ),
  57. TP_fast_assign(
  58. __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
  59. __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
  60. __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
  61. __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0;
  62. blk_fill_rwbs_rq(__entry->rwbs, rq);
  63. blk_dump_cmd(__get_str(cmd), rq);
  64. memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
  65. ),
  66. TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
  67. MAJOR(__entry->dev), MINOR(__entry->dev),
  68. __entry->rwbs, __entry->bytes, __get_str(cmd),
  69. (unsigned long long)__entry->sector,
  70. __entry->nr_sector, __entry->comm)
  71. );
  72. DEFINE_EVENT(block_rq, block_rq_insert,
  73. TP_PROTO(struct request_queue *q, struct request *rq),
  74. TP_ARGS(q, rq)
  75. );
  76. DEFINE_EVENT(block_rq, block_rq_issue,
  77. TP_PROTO(struct request_queue *q, struct request *rq),
  78. TP_ARGS(q, rq)
  79. );
  80. TRACE_EVENT(block_bio_bounce,
  81. TP_PROTO(struct request_queue *q, struct bio *bio),
  82. TP_ARGS(q, bio),
  83. TP_STRUCT__entry(
  84. __field( dev_t, dev )
  85. __field( sector_t, sector )
  86. __field( unsigned int, nr_sector )
  87. __array( char, rwbs, 6 )
  88. __array( char, comm, TASK_COMM_LEN )
  89. ),
  90. TP_fast_assign(
  91. __entry->dev = bio->bi_bdev ?
  92. bio->bi_bdev->bd_dev : 0;
  93. __entry->sector = bio->bi_sector;
  94. __entry->nr_sector = bio->bi_size >> 9;
  95. blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
  96. memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
  97. ),
  98. TP_printk("%d,%d %s %llu + %u [%s]",
  99. MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
  100. (unsigned long long)__entry->sector,
  101. __entry->nr_sector, __entry->comm)
  102. );
  103. TRACE_EVENT(block_bio_complete,
  104. TP_PROTO(struct request_queue *q, struct bio *bio),
  105. TP_ARGS(q, bio),
  106. TP_STRUCT__entry(
  107. __field( dev_t, dev )
  108. __field( sector_t, sector )
  109. __field( unsigned, nr_sector )
  110. __field( int, error )
  111. __array( char, rwbs, 6 )
  112. ),
  113. TP_fast_assign(
  114. __entry->dev = bio->bi_bdev->bd_dev;
  115. __entry->sector = bio->bi_sector;
  116. __entry->nr_sector = bio->bi_size >> 9;
  117. blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
  118. ),
  119. TP_printk("%d,%d %s %llu + %u [%d]",
  120. MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
  121. (unsigned long long)__entry->sector,
  122. __entry->nr_sector, __entry->error)
  123. );
  124. DECLARE_EVENT_CLASS(block_bio,
  125. TP_PROTO(struct request_queue *q, struct bio *bio),
  126. TP_ARGS(q, bio),
  127. TP_STRUCT__entry(
  128. __field( dev_t, dev )
  129. __field( sector_t, sector )
  130. __field( unsigned int, nr_sector )
  131. __array( char, rwbs, 6 )
  132. __array( char, comm, TASK_COMM_LEN )
  133. ),
  134. TP_fast_assign(
  135. __entry->dev = bio->bi_bdev->bd_dev;
  136. __entry->sector = bio->bi_sector;
  137. __entry->nr_sector = bio->bi_size >> 9;
  138. blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
  139. memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
  140. ),
  141. TP_printk("%d,%d %s %llu + %u [%s]",
  142. MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
  143. (unsigned long long)__entry->sector,
  144. __entry->nr_sector, __entry->comm)
  145. );
  146. DEFINE_EVENT(block_bio, block_bio_backmerge,
  147. TP_PROTO(struct request_queue *q, struct bio *bio),
  148. TP_ARGS(q, bio)
  149. );
  150. DEFINE_EVENT(block_bio, block_bio_frontmerge,
  151. TP_PROTO(struct request_queue *q, struct bio *bio),
  152. TP_ARGS(q, bio)
  153. );
  154. DEFINE_EVENT(block_bio, block_bio_queue,
  155. TP_PROTO(struct request_queue *q, struct bio *bio),
  156. TP_ARGS(q, bio)
  157. );
  158. DECLARE_EVENT_CLASS(block_get_rq,
  159. TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
  160. TP_ARGS(q, bio, rw),
  161. TP_STRUCT__entry(
  162. __field( dev_t, dev )
  163. __field( sector_t, sector )
  164. __field( unsigned int, nr_sector )
  165. __array( char, rwbs, 6 )
  166. __array( char, comm, TASK_COMM_LEN )
  167. ),
  168. TP_fast_assign(
  169. __entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
  170. __entry->sector = bio ? bio->bi_sector : 0;
  171. __entry->nr_sector = bio ? bio->bi_size >> 9 : 0;
  172. blk_fill_rwbs(__entry->rwbs,
  173. bio ? bio->bi_rw : 0, __entry->nr_sector);
  174. memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
  175. ),
  176. TP_printk("%d,%d %s %llu + %u [%s]",
  177. MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
  178. (unsigned long long)__entry->sector,
  179. __entry->nr_sector, __entry->comm)
  180. );
  181. DEFINE_EVENT(block_get_rq, block_getrq,
  182. TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
  183. TP_ARGS(q, bio, rw)
  184. );
  185. DEFINE_EVENT(block_get_rq, block_sleeprq,
  186. TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
  187. TP_ARGS(q, bio, rw)
  188. );
  189. TRACE_EVENT(block_plug,
  190. TP_PROTO(struct request_queue *q),
  191. TP_ARGS(q),
  192. TP_STRUCT__entry(
  193. __array( char, comm, TASK_COMM_LEN )
  194. ),
  195. TP_fast_assign(
  196. memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
  197. ),
  198. TP_printk("[%s]", __entry->comm)
  199. );
  200. DECLARE_EVENT_CLASS(block_unplug,
  201. TP_PROTO(struct request_queue *q),
  202. TP_ARGS(q),
  203. TP_STRUCT__entry(
  204. __field( int, nr_rq )
  205. __array( char, comm, TASK_COMM_LEN )
  206. ),
  207. TP_fast_assign(
  208. __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE];
  209. memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
  210. ),
  211. TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
  212. );
  213. DEFINE_EVENT(block_unplug, block_unplug_timer,
  214. TP_PROTO(struct request_queue *q),
  215. TP_ARGS(q)
  216. );
  217. DEFINE_EVENT(block_unplug, block_unplug_io,
  218. TP_PROTO(struct request_queue *q),
  219. TP_ARGS(q)
  220. );
  221. TRACE_EVENT(block_split,
  222. TP_PROTO(struct request_queue *q, struct bio *bio,
  223. unsigned int new_sector),
  224. TP_ARGS(q, bio, new_sector),
  225. TP_STRUCT__entry(
  226. __field( dev_t, dev )
  227. __field( sector_t, sector )
  228. __field( sector_t, new_sector )
  229. __array( char, rwbs, 6 )
  230. __array( char, comm, TASK_COMM_LEN )
  231. ),
  232. TP_fast_assign(
  233. __entry->dev = bio->bi_bdev->bd_dev;
  234. __entry->sector = bio->bi_sector;
  235. __entry->new_sector = new_sector;
  236. blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
  237. memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
  238. ),
  239. TP_printk("%d,%d %s %llu / %llu [%s]",
  240. MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
  241. (unsigned long long)__entry->sector,
  242. (unsigned long long)__entry->new_sector,
  243. __entry->comm)
  244. );
  245. TRACE_EVENT(block_remap,
  246. TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
  247. sector_t from),
  248. TP_ARGS(q, bio, dev, from),
  249. TP_STRUCT__entry(
  250. __field( dev_t, dev )
  251. __field( sector_t, sector )
  252. __field( unsigned int, nr_sector )
  253. __field( dev_t, old_dev )
  254. __field( sector_t, old_sector )
  255. __array( char, rwbs, 6 )
  256. ),
  257. TP_fast_assign(
  258. __entry->dev = bio->bi_bdev->bd_dev;
  259. __entry->sector = bio->bi_sector;
  260. __entry->nr_sector = bio->bi_size >> 9;
  261. __entry->old_dev = dev;
  262. __entry->old_sector = from;
  263. blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
  264. ),
  265. TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
  266. MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
  267. (unsigned long long)__entry->sector,
  268. __entry->nr_sector,
  269. MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
  270. (unsigned long long)__entry->old_sector)
  271. );
  272. TRACE_EVENT(block_rq_remap,
  273. TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
  274. sector_t from),
  275. TP_ARGS(q, rq, dev, from),
  276. TP_STRUCT__entry(
  277. __field( dev_t, dev )
  278. __field( sector_t, sector )
  279. __field( unsigned int, nr_sector )
  280. __field( dev_t, old_dev )
  281. __field( sector_t, old_sector )
  282. __array( char, rwbs, 6 )
  283. ),
  284. TP_fast_assign(
  285. __entry->dev = disk_devt(rq->rq_disk);
  286. __entry->sector = blk_rq_pos(rq);
  287. __entry->nr_sector = blk_rq_sectors(rq);
  288. __entry->old_dev = dev;
  289. __entry->old_sector = from;
  290. blk_fill_rwbs_rq(__entry->rwbs, rq);
  291. ),
  292. TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
  293. MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
  294. (unsigned long long)__entry->sector,
  295. __entry->nr_sector,
  296. MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
  297. (unsigned long long)__entry->old_sector)
  298. );
  299. #endif /* _TRACE_BLOCK_H */
  300. /* This part must be outside protection */
  301. #include <trace/define_trace.h>