blktrace.c

/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/time.h>
#include <trace/block.h>
#include <asm/uaccess.h>

static unsigned int blktrace_seq __read_mostly = 1;

/* Global reference count of probes */
static DEFINE_MUTEX(blk_probe_mutex);
static atomic_t blk_probes_ref = ATOMIC_INIT(0);

static int blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);
/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
                       const void *data, size_t len)
{
        struct blk_io_trace *t;

        t = relay_reserve(bt->rchan, sizeof(*t) + len);
        if (t) {
                const int cpu = smp_processor_id();

                t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
                t->time = ktime_to_ns(ktime_get());
                t->device = bt->dev;
                t->action = action;
                t->pid = pid;
                t->cpu = cpu;
                t->pdu_len = len;
                memcpy((void *) t + sizeof(*t), data, len);
        }
}
/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
        tsk->btrace_seq = blktrace_seq;
        trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}
static void trace_note_time(struct blk_trace *bt)
{
        struct timespec now;
        unsigned long flags;
        u32 words[2];

        getnstimeofday(&now);
        words[0] = now.tv_sec;
        words[1] = now.tv_nsec;

        local_irq_save(flags);
        trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
        local_irq_restore(flags);
}
void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
        int n;
        va_list args;
        unsigned long flags;
        char *buf;

        local_irq_save(flags);
        buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
        va_start(args, fmt);
        n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
        va_end(args);
        trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);
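
/*
 * Example (illustrative, not part of this file): drivers normally reach
 * __trace_note_message() through the blk_add_trace_msg() wrapper from
 * <linux/blktrace_api.h>, which first checks that a trace is attached
 * to the queue:
 *
 *      blk_add_trace_msg(q, "retrying sector %llu, attempt %d",
 *                        (unsigned long long) sector, tries);
 *
 * blkparse shows such messages interleaved with the ordinary I/O events.
 */

/*
 * act_log_check() returns 1 when an event should be dropped: the action is
 * masked out, the sector falls outside the traced LBA range, or the event
 * comes from a pid other than the one being traced.
 */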
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
                         pid_t pid)
{
        if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
                return 1;
        if (sector < bt->start_lba || sector > bt->end_lba)
                return 1;
        if (bt->pid && pid != bt->pid)
                return 1;

        return 0;
}
/*
 * Data direction bit lookup
 */
static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ),
                                         BLK_TC_ACT(BLK_TC_WRITE) };

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \
          (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name))
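
/*
 * Worked example (illustrative): MASK_TC_BIT(rw, SYNC) takes bit
 * BIO_RW_SYNC of @rw and shifts it left by
 * (ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT - BIO_RW_SYNC), so it lands at bit
 * (ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT) -- exactly the
 * BLK_TC_ACT(BLK_TC_SYNC) position in the action mask. The same holds
 * for BARRIER, AHEAD, META and DISCARD.
 */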
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
                            int rw, u32 what, int error, int pdu_len,
                            void *pdu_data)
{
        struct task_struct *tsk = current;
        struct blk_io_trace *t;
        unsigned long flags;
        unsigned long *sequence;
        pid_t pid;
        int cpu;

        if (unlikely(bt->trace_state != Blktrace_running))
                return;

        what |= ddir_act[rw & WRITE];
        what |= MASK_TC_BIT(rw, BARRIER);
        what |= MASK_TC_BIT(rw, SYNC);
        what |= MASK_TC_BIT(rw, AHEAD);
        what |= MASK_TC_BIT(rw, META);
        what |= MASK_TC_BIT(rw, DISCARD);

        pid = tsk->pid;
        if (unlikely(act_log_check(bt, what, sector, pid)))
                return;

        /*
         * A word about the locking here - we disable interrupts to reserve
         * some space in the relay per-cpu buffer, to prevent an irq
         * from coming in and stepping on our toes.
         */
        local_irq_save(flags);

        if (unlikely(tsk->btrace_seq != blktrace_seq))
                trace_note_tsk(bt, tsk);

        t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
        if (t) {
                cpu = smp_processor_id();
                sequence = per_cpu_ptr(bt->sequence, cpu);

                t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
                t->sequence = ++(*sequence);
                t->time = ktime_to_ns(ktime_get());
                t->sector = sector;
                t->bytes = bytes;
                t->action = what;
                t->pid = pid;
                t->device = bt->dev;
                t->cpu = cpu;
                t->error = error;
                t->pdu_len = pdu_len;

                if (pdu_len)
                        memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
        }

        local_irq_restore(flags);
}
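
/*
 * Note: the sequence counter is per cpu, so it only orders events emitted on
 * the same cpu. Userspace (blkparse) merges the per-cpu streams back into a
 * single timeline using the cpu id, sequence number and timestamp recorded
 * in each event.
 */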
static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);

static void blk_trace_cleanup(struct blk_trace *bt)
{
        debugfs_remove(bt->msg_file);
        debugfs_remove(bt->dropped_file);
        relay_close(bt->rchan);
        free_percpu(bt->sequence);
        free_percpu(bt->msg_data);
        kfree(bt);
        mutex_lock(&blk_probe_mutex);
        if (atomic_dec_and_test(&blk_probes_ref))
                blk_unregister_tracepoints();
        mutex_unlock(&blk_probe_mutex);
}
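
/*
 * Note that a trace still in the Blktrace_running state is only detached
 * from the queue here, not freed: callers are expected to stop the trace
 * first, as blk_trace_shutdown() below does.
 */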
int blk_trace_remove(struct request_queue *q)
{
        struct blk_trace *bt;

        bt = xchg(&q->blk_trace, NULL);
        if (!bt)
                return -EINVAL;

        if (bt->trace_state == Blktrace_setup ||
            bt->trace_state == Blktrace_stopped)
                blk_trace_cleanup(bt);

        return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);
static int blk_dropped_open(struct inode *inode, struct file *filp)
{
        filp->private_data = inode->i_private;

        return 0;
}

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
                                size_t count, loff_t *ppos)
{
        struct blk_trace *bt = filp->private_data;
        char buf[16];

        snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

        return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
        .owner = THIS_MODULE,
        .open  = blk_dropped_open,
        .read  = blk_dropped_read,
};
static int blk_msg_open(struct inode *inode, struct file *filp)
{
        filp->private_data = inode->i_private;

        return 0;
}

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
                             size_t count, loff_t *ppos)
{
        char *msg;
        struct blk_trace *bt;

        if (count > BLK_TN_MAX_MSG)
                return -EINVAL;

        /*
         * Allocate one extra byte and NUL-terminate: the user buffer is not
         * guaranteed to be a C string, but it is handed to a "%s" format.
         */
        msg = kmalloc(count + 1, GFP_KERNEL);
        if (msg == NULL)
                return -ENOMEM;

        if (copy_from_user(msg, buffer, count)) {
                kfree(msg);
                return -EFAULT;
        }

        msg[count] = '\0';
        bt = filp->private_data;
        __trace_note_message(bt, "%s", msg);
        kfree(msg);

        return count;
}

static const struct file_operations blk_msg_fops = {
        .owner = THIS_MODULE,
        .open  = blk_msg_open,
        .write = blk_msg_write,
};
/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
                                     void *prev_subbuf, size_t prev_padding)
{
        struct blk_trace *bt;

        if (!relay_buf_full(buf))
                return 1;

        bt = buf->chan->private_data;
        atomic_inc(&bt->dropped);
        return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
        struct dentry *parent = dentry->d_parent;
        debugfs_remove(dentry);

        /*
         * this will fail for all but the last file, but that is ok. what we
         * care about is the top level buts->name directory going away, when
         * the last trace file is gone. Then we don't have to rmdir() that
         * manually on trace stop, so it nicely solves the issue with
         * force killing of running traces.
         */
        debugfs_remove(parent);
        return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
                                                   struct dentry *parent,
                                                   int mode,
                                                   struct rchan_buf *buf,
                                                   int *is_global)
{
        return debugfs_create_file(filename, mode, parent, buf,
                                   &relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
        .subbuf_start    = blk_subbuf_start_callback,
        .create_buf_file = blk_create_buf_file_callback,
        .remove_buf_file = blk_remove_buf_file_callback,
};
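
/*
 * With debugfs mounted at its usual /sys/kernel/debug, the files created
 * below appear under /sys/kernel/debug/block/<name>/: one relay file per
 * cpu ("trace0", "trace1", ...), plus "dropped" and "msg".
 */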
/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
                       struct blk_user_trace_setup *buts)
{
        struct blk_trace *old_bt, *bt = NULL;
        struct dentry *dir = NULL;
        int ret, i;

        if (!buts->buf_size || !buts->buf_nr)
                return -EINVAL;

        strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
        buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

        /*
         * some device names have larger paths - convert the slashes
         * to underscores for this to work as expected
         */
        for (i = 0; i < strlen(buts->name); i++)
                if (buts->name[i] == '/')
                        buts->name[i] = '_';

        ret = -ENOMEM;
        bt = kzalloc(sizeof(*bt), GFP_KERNEL);
        if (!bt)
                goto err;

        bt->sequence = alloc_percpu(unsigned long);
        if (!bt->sequence)
                goto err;

        bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG);
        if (!bt->msg_data)
                goto err;

        ret = -ENOENT;
        if (!blk_tree_root) {
                blk_tree_root = debugfs_create_dir("block", NULL);
                if (!blk_tree_root) {
                        /* don't leak bt and its per-cpu data on failure */
                        ret = -ENOMEM;
                        goto err;
                }
        }
        dir = debugfs_create_dir(buts->name, blk_tree_root);
        if (!dir)
                goto err;

        bt->dir = dir;
        bt->dev = dev;
        atomic_set(&bt->dropped, 0);

        ret = -EIO;
        bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
                                               &blk_dropped_fops);
        if (!bt->dropped_file)
                goto err;

        bt->msg_file = debugfs_create_file("msg", 0222, dir, bt,
                                           &blk_msg_fops);
        if (!bt->msg_file)
                goto err;

        bt->rchan = relay_open("trace", dir, buts->buf_size,
                               buts->buf_nr, &blk_relay_callbacks, bt);
        if (!bt->rchan)
                goto err;

        bt->act_mask = buts->act_mask;
        if (!bt->act_mask)
                bt->act_mask = (u16) -1;

        bt->start_lba = buts->start_lba;
        bt->end_lba = buts->end_lba;
        if (!bt->end_lba)
                bt->end_lba = -1ULL;

        bt->pid = buts->pid;
        bt->trace_state = Blktrace_setup;

        mutex_lock(&blk_probe_mutex);
        if (atomic_add_return(1, &blk_probes_ref) == 1) {
                ret = blk_register_tracepoints();
                if (ret)
                        goto probe_err;
        }
        mutex_unlock(&blk_probe_mutex);

        ret = -EBUSY;
        old_bt = xchg(&q->blk_trace, bt);
        if (old_bt) {
                (void) xchg(&q->blk_trace, old_bt);
                goto err;
        }

        return 0;
probe_err:
        atomic_dec(&blk_probes_ref);
        mutex_unlock(&blk_probe_mutex);
err:
        if (bt) {
                if (bt->msg_file)
                        debugfs_remove(bt->msg_file);
                if (bt->dropped_file)
                        debugfs_remove(bt->dropped_file);
                free_percpu(bt->sequence);
                free_percpu(bt->msg_data);
                if (bt->rchan)
                        relay_close(bt->rchan);
                kfree(bt);
        }
        return ret;
}
int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
                    char __user *arg)
{
        struct blk_user_trace_setup buts;
        int ret;

        ret = copy_from_user(&buts, arg, sizeof(buts));
        if (ret)
                return -EFAULT;

        ret = do_blk_trace_setup(q, name, dev, &buts);
        if (ret)
                return ret;

        if (copy_to_user(arg, &buts, sizeof(buts)))
                return -EFAULT;

        return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);
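
/*
 * Userspace example (illustrative, not part of this file): the blktrace
 * utility drives this path roughly like so:
 *
 *      struct blk_user_trace_setup buts = {
 *              .buf_size = 512 * 1024,         // bytes per subbuffer
 *              .buf_nr   = 4,                  // number of subbuffers
 *      };
 *      int fd = open("/dev/sda", O_RDONLY);
 *
 *      ioctl(fd, BLKTRACESETUP, &buts);        // allocate, create debugfs files
 *      ioctl(fd, BLKTRACESTART);               // state -> Blktrace_running
 *      ... read the per-cpu relay files ...
 *      ioctl(fd, BLKTRACESTOP);
 *      ioctl(fd, BLKTRACETEARDOWN);
 *
 * On return from BLKTRACESETUP, buts.name holds the debugfs directory name.
 */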
int blk_trace_startstop(struct request_queue *q, int start)
{
        struct blk_trace *bt;
        int ret;

        if ((bt = q->blk_trace) == NULL)
                return -EINVAL;

        /*
         * For starting a trace, we can transition from a setup or stopped
         * trace. For stopping a trace, the state must be running
         */
        ret = -EINVAL;
        if (start) {
                if (bt->trace_state == Blktrace_setup ||
                    bt->trace_state == Blktrace_stopped) {
                        blktrace_seq++;
                        smp_mb();
                        bt->trace_state = Blktrace_running;

                        trace_note_time(bt);
                        ret = 0;
                }
        } else {
                if (bt->trace_state == Blktrace_running) {
                        bt->trace_state = Blktrace_stopped;
                        relay_flush(bt->rchan);
                        ret = 0;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);
/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
        struct request_queue *q;
        int ret, start = 0;
        char b[BDEVNAME_SIZE];

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        mutex_lock(&bdev->bd_mutex);

        switch (cmd) {
        case BLKTRACESETUP:
                bdevname(bdev, b);
                ret = blk_trace_setup(q, b, bdev->bd_dev, arg);
                break;
        case BLKTRACESTART:
                start = 1;
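                /* fall through: start and stop share blk_trace_startstop() */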
        case BLKTRACESTOP:
                ret = blk_trace_startstop(q, start);
                break;
        case BLKTRACETEARDOWN:
                ret = blk_trace_remove(q);
                break;
        default:
                ret = -ENOTTY;
                break;
        }

        mutex_unlock(&bdev->bd_mutex);
        return ret;
}
/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
        if (q->blk_trace) {
                blk_trace_startstop(q, 0);
                blk_trace_remove(q);
        }
}
/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:		queue the io is for
 * @rq:		the source request
 * @what:	the action
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
                             u32 what)
{
        struct blk_trace *bt = q->blk_trace;
        int rw = rq->cmd_flags & 0x03;

        if (likely(!bt))
                return;

        if (blk_discard_rq(rq))
                rw |= (1 << BIO_RW_DISCARD);

        if (blk_pc_request(rq)) {
                what |= BLK_TC_ACT(BLK_TC_PC);
                __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
                                sizeof(rq->cmd), rq->cmd);
        } else {
                what |= BLK_TC_ACT(BLK_TC_FS);
                __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
                                rw, what, rq->errors, 0, NULL);
        }
}
static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_ABORT);
}

static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_INSERT);
}

static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
}

static void blk_add_trace_rq_requeue(struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
}

static void blk_add_trace_rq_complete(struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
}
/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
                              u32 what)
{
        struct blk_trace *bt = q->blk_trace;

        if (likely(!bt))
                return;

        __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
                        !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
}
static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
}

static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
}

static void blk_add_trace_bio_backmerge(struct request_queue *q, struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
}

static void blk_add_trace_bio_frontmerge(struct request_queue *q, struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
}

static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
}
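
/*
 * For getrq/sleeprq the bio may be NULL (e.g. a driver allocating a request
 * directly); in that case only the data direction is known, so a bare event
 * with no sector or size is logged.
 */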
static void blk_add_trace_getrq(struct request_queue *q, struct bio *bio, int rw)
{
        if (bio)
                blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
        else {
                struct blk_trace *bt = q->blk_trace;

                if (bt)
                        __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ,
                                        0, 0, NULL);
        }
}

static void blk_add_trace_sleeprq(struct request_queue *q, struct bio *bio, int rw)
{
        if (bio)
                blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
        else {
                struct blk_trace *bt = q->blk_trace;

                if (bt)
                        __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
                                        0, 0, NULL);
        }
}
static void blk_add_trace_plug(struct request_queue *q)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt)
                __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
}

static void blk_add_trace_unplug_io(struct request_queue *q)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt) {
                unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
                __be64 rpdu = cpu_to_be64(pdu);

                __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
                                sizeof(rpdu), &rpdu);
        }
}

static void blk_add_trace_unplug_timer(struct request_queue *q)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt) {
                unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
                __be64 rpdu = cpu_to_be64(pdu);

                __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
                                sizeof(rpdu), &rpdu);
        }
}
static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
                                unsigned int pdu)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt) {
                __be64 rpdu = cpu_to_be64(pdu);

                __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
                                BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
                                sizeof(rpdu), &rpdu);
        }
}
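
/*
 * The unplug and split payloads above, like the remap payload below, are
 * stored big-endian (cpu_to_be64()): the PDU encoding is fixed so that
 * userspace can decode it regardless of which host produced the trace.
 */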
/**
 * blk_add_trace_remap - Add a trace for a remap operation
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 * @to:		target sector
 *
 * Description:
 *     Device mapper or raid target sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
                                dev_t dev, sector_t from, sector_t to)
{
        struct blk_trace *bt = q->blk_trace;
        struct blk_io_trace_remap r;

        if (likely(!bt))
                return;

        r.device = cpu_to_be32(dev);
        r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
        r.sector = cpu_to_be64(to);

        __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP,
                        !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
}
/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:		queue the io is for
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
                         struct request *rq,
                         void *data, size_t len)
{
        struct blk_trace *bt = q->blk_trace;

        if (likely(!bt))
                return;

        if (blk_pc_request(rq))
                __blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
                                rq->errors, len, data);
        else
                __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
                                0, BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
static int blk_register_tracepoints(void)
{
        int ret;

        ret = register_trace_block_rq_abort(blk_add_trace_rq_abort);
        WARN_ON(ret);
        ret = register_trace_block_rq_insert(blk_add_trace_rq_insert);
        WARN_ON(ret);
        ret = register_trace_block_rq_issue(blk_add_trace_rq_issue);
        WARN_ON(ret);
        ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue);
        WARN_ON(ret);
        ret = register_trace_block_rq_complete(blk_add_trace_rq_complete);
        WARN_ON(ret);
        ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce);
        WARN_ON(ret);
        ret = register_trace_block_bio_complete(blk_add_trace_bio_complete);
        WARN_ON(ret);
        ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
        WARN_ON(ret);
        ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
        WARN_ON(ret);
        ret = register_trace_block_bio_queue(blk_add_trace_bio_queue);
        WARN_ON(ret);
        ret = register_trace_block_getrq(blk_add_trace_getrq);
        WARN_ON(ret);
        ret = register_trace_block_sleeprq(blk_add_trace_sleeprq);
        WARN_ON(ret);
        ret = register_trace_block_plug(blk_add_trace_plug);
        WARN_ON(ret);
        ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer);
        WARN_ON(ret);
        ret = register_trace_block_unplug_io(blk_add_trace_unplug_io);
        WARN_ON(ret);
        ret = register_trace_block_split(blk_add_trace_split);
        WARN_ON(ret);
        ret = register_trace_block_remap(blk_add_trace_remap);
        WARN_ON(ret);

        return 0;
}
static void blk_unregister_tracepoints(void)
{
        unregister_trace_block_remap(blk_add_trace_remap);
        unregister_trace_block_split(blk_add_trace_split);
        unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
        unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer);
        unregister_trace_block_plug(blk_add_trace_plug);
        unregister_trace_block_sleeprq(blk_add_trace_sleeprq);
        unregister_trace_block_getrq(blk_add_trace_getrq);
        unregister_trace_block_bio_queue(blk_add_trace_bio_queue);
        unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
        unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
        unregister_trace_block_bio_complete(blk_add_trace_bio_complete);
        unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce);
        unregister_trace_block_rq_complete(blk_add_trace_rq_complete);
        unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue);
        unregister_trace_block_rq_issue(blk_add_trace_rq_issue);
        unregister_trace_block_rq_insert(blk_add_trace_rq_insert);
        unregister_trace_block_rq_abort(blk_add_trace_rq_abort);
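
        /*
         * Wait for any probe invocations still in flight to complete, so
         * that no callback is running once unregistration returns.
         */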
        tracepoint_synchronize_unregister();
}