blktrace.c

/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/time.h>
#include <trace/block.h>
#include <asm/uaccess.h>

static unsigned int blktrace_seq __read_mostly = 1;

/* Global reference count of probes */
static DEFINE_MUTEX(blk_probe_mutex);
static atomic_t blk_probes_ref = ATOMIC_INIT(0);

static int blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);
/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
                       const void *data, size_t len)
{
        struct blk_io_trace *t;

        t = relay_reserve(bt->rchan, sizeof(*t) + len);
        if (t) {
                const int cpu = smp_processor_id();

                t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
                t->time = ktime_to_ns(ktime_get());
                t->device = bt->dev;
                t->action = action;
                t->pid = pid;
                t->cpu = cpu;
                t->pdu_len = len;
                memcpy((void *) t + sizeof(*t), data, len);
        }
}

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
        tsk->btrace_seq = blktrace_seq;
        trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}

static void trace_note_time(struct blk_trace *bt)
{
        struct timespec now;
        unsigned long flags;
        u32 words[2];

        getnstimeofday(&now);
        words[0] = now.tv_sec;
        words[1] = now.tv_nsec;

        local_irq_save(flags);
        trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
        local_irq_restore(flags);
}

void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
        int n;
        va_list args;
        unsigned long flags;
        char *buf;

        local_irq_save(flags);
        buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
        va_start(args, fmt);
        n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
        va_end(args);

        trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);
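
/*
 * Usage sketch (informational, not part of this file): in-kernel callers
 * normally reach __trace_note_message() through the blk_add_trace_msg()
 * wrapper in <linux/blktrace_api.h>, which checks q->blk_trace first,
 * e.g.:
 *
 *        blk_add_trace_msg(q, "queue depth now %d", depth);
 *
 * The formatted string lands in the trace stream as a BLK_TN_MESSAGE note.
 */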
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
                         pid_t pid)
{
        if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
                return 1;
        if (sector < bt->start_lba || sector > bt->end_lba)
                return 1;
        if (bt->pid && pid != bt->pid)
                return 1;

        return 0;
}

/*
 * Data direction bit lookup
 */
static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE) };

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ( (rw & (1 << BIO_RW_ ## __name)) << \
          (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name) )
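
/*
 * Worked example (illustrative; the exact bit positions come from the
 * bio.h/blktrace_api.h headers of this era): with BIO_RW_SYNC == 4 and
 * BLK_TC_SYNC == (1 << 3), MASK_TC_BIT(rw, SYNC) takes bit 4 of @rw and
 * shifts it left by 3 + 16 - 4 = 15 positions, landing it on bit 19 --
 * exactly BLK_TC_ACT(BLK_TC_SYNC). Each request-flag bit is thus moved
 * into its action-mask slot with a single constant shift.
 */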
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
                            int rw, u32 what, int error, int pdu_len,
                            void *pdu_data)
{
        struct task_struct *tsk = current;
        struct blk_io_trace *t;
        unsigned long flags;
        unsigned long *sequence;
        pid_t pid;
        int cpu;

        if (unlikely(bt->trace_state != Blktrace_running))
                return;

        what |= ddir_act[rw & WRITE];
        what |= MASK_TC_BIT(rw, BARRIER);
        what |= MASK_TC_BIT(rw, SYNC);
        what |= MASK_TC_BIT(rw, AHEAD);
        what |= MASK_TC_BIT(rw, META);
        what |= MASK_TC_BIT(rw, DISCARD);

        pid = tsk->pid;
        if (unlikely(act_log_check(bt, what, sector, pid)))
                return;

        /*
         * A word about the locking here - we disable interrupts to reserve
         * some space in the relay per-cpu buffer, to prevent an irq
         * from coming in and stepping on our toes.
         */
        local_irq_save(flags);

        if (unlikely(tsk->btrace_seq != blktrace_seq))
                trace_note_tsk(bt, tsk);

        t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
        if (t) {
                cpu = smp_processor_id();
                sequence = per_cpu_ptr(bt->sequence, cpu);

                t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
                t->sequence = ++(*sequence);
                t->time = ktime_to_ns(ktime_get());
                t->sector = sector;
                t->bytes = bytes;
                t->action = what;
                t->pid = pid;
                t->device = bt->dev;
                t->cpu = cpu;
                t->error = error;
                t->pdu_len = pdu_len;

                if (pdu_len)
                        memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
        }

        local_irq_restore(flags);
}
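
/*
 * Record layout, for reference: every event emitted above is a fixed-size
 * struct blk_io_trace immediately followed by pdu_len bytes of payload,
 *
 *        | struct blk_io_trace | pdu_len bytes of pdu_data |
 *
 * which is the framing that user-space tools such as blkparse(1) expect
 * when consuming the relay files.
 */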
static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);
static unsigned int root_users;

static inline void blk_remove_root(void)
{
        if (blk_tree_root) {
                debugfs_remove(blk_tree_root);
                blk_tree_root = NULL;
        }
}

static void blk_remove_tree(struct dentry *dir)
{
        mutex_lock(&blk_tree_mutex);
        debugfs_remove(dir);
        if (--root_users == 0)
                blk_remove_root();
        mutex_unlock(&blk_tree_mutex);
}

static struct dentry *blk_create_tree(const char *blk_name)
{
        struct dentry *dir = NULL;
        int created = 0;

        mutex_lock(&blk_tree_mutex);

        if (!blk_tree_root) {
                blk_tree_root = debugfs_create_dir("block", NULL);
                if (!blk_tree_root)
                        goto err;
                created = 1;
        }

        dir = debugfs_create_dir(blk_name, blk_tree_root);
        if (dir)
                root_users++;
        else {
                /* Delete root only if we created it */
                if (created)
                        blk_remove_root();
        }

err:
        mutex_unlock(&blk_tree_mutex);
        return dir;
}
static void blk_trace_cleanup(struct blk_trace *bt)
{
        relay_close(bt->rchan);
        debugfs_remove(bt->msg_file);
        debugfs_remove(bt->dropped_file);
        blk_remove_tree(bt->dir);
        free_percpu(bt->sequence);
        free_percpu(bt->msg_data);
        kfree(bt);
        mutex_lock(&blk_probe_mutex);
        if (atomic_dec_and_test(&blk_probes_ref))
                blk_unregister_tracepoints();
        mutex_unlock(&blk_probe_mutex);
}

int blk_trace_remove(struct request_queue *q)
{
        struct blk_trace *bt;

        bt = xchg(&q->blk_trace, NULL);
        if (!bt)
                return -EINVAL;

        if (bt->trace_state == Blktrace_setup ||
            bt->trace_state == Blktrace_stopped)
                blk_trace_cleanup(bt);

        return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);
static int blk_dropped_open(struct inode *inode, struct file *filp)
{
        filp->private_data = inode->i_private;

        return 0;
}

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
                                size_t count, loff_t *ppos)
{
        struct blk_trace *bt = filp->private_data;
        char buf[16];

        snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

        return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
        .owner  = THIS_MODULE,
        .open   = blk_dropped_open,
        .read   = blk_dropped_read,
};
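
/*
 * From user space (assuming debugfs is mounted at /sys/kernel/debug and
 * the traced device is, say, sda), the drop counter is read with:
 *
 *        $ cat /sys/kernel/debug/block/sda/dropped
 *
 * A non-zero value means events were lost to full subbuffers; see
 * blk_subbuf_start_callback() below for where it is incremented.
 */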
static int blk_msg_open(struct inode *inode, struct file *filp)
{
        filp->private_data = inode->i_private;

        return 0;
}

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
                             size_t count, loff_t *ppos)
{
        char *msg;
        struct blk_trace *bt;

        if (count > BLK_TN_MAX_MSG)
                return -EINVAL;

        /*
         * Allocate one extra byte so the user-supplied message is always
         * NUL-terminated before being handed to the "%s" format below.
         */
        msg = kmalloc(count + 1, GFP_KERNEL);
        if (msg == NULL)
                return -ENOMEM;

        if (copy_from_user(msg, buffer, count)) {
                kfree(msg);
                return -EFAULT;
        }

        msg[count] = '\0';
        bt = filp->private_data;
        __trace_note_message(bt, "%s", msg);
        kfree(msg);

        return count;
}

static const struct file_operations blk_msg_fops = {
        .owner  = THIS_MODULE,
        .open   = blk_msg_open,
        .write  = blk_msg_write,
};
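
/*
 * User-space side of the msg file (write-only, mode 0222): echoing a line
 * into it injects a BLK_TN_MESSAGE marker into the running trace, e.g.:
 *
 *        $ echo "fsck start" > /sys/kernel/debug/block/sda/msg
 *
 * which is handy for correlating application phases with I/O activity.
 */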
/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
                                     void *prev_subbuf, size_t prev_padding)
{
        struct blk_trace *bt;

        if (!relay_buf_full(buf))
                return 1;

        bt = buf->chan->private_data;
        atomic_inc(&bt->dropped);
        return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
        debugfs_remove(dentry);
        return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
                                                   struct dentry *parent,
                                                   int mode,
                                                   struct rchan_buf *buf,
                                                   int *is_global)
{
        return debugfs_create_file(filename, mode, parent, buf,
                                   &relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
        .subbuf_start           = blk_subbuf_start_callback,
        .create_buf_file        = blk_create_buf_file_callback,
        .remove_buf_file        = blk_remove_buf_file_callback,
};
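
/*
 * With these callbacks, the relay_open("trace", dir, ...) call below
 * materializes one per-cpu buffer file per online CPU under the device's
 * debugfs directory -- trace0, trace1, ... -- each served by the generic
 * relay_file_operations. User-space blktrace(8) reads those files
 * directly; nothing in this file copies event data a second time.
 */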
/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
                       struct blk_user_trace_setup *buts)
{
        struct blk_trace *old_bt, *bt = NULL;
        struct dentry *dir = NULL;
        int ret, i;

        if (!buts->buf_size || !buts->buf_nr)
                return -EINVAL;

        strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
        buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

        /*
         * some device names have larger paths - convert the slashes
         * to underscores for this to work as expected
         */
        for (i = 0; i < strlen(buts->name); i++)
                if (buts->name[i] == '/')
                        buts->name[i] = '_';

        ret = -ENOMEM;
        bt = kzalloc(sizeof(*bt), GFP_KERNEL);
        if (!bt)
                goto err;

        bt->sequence = alloc_percpu(unsigned long);
        if (!bt->sequence)
                goto err;

        bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG);
        if (!bt->msg_data)
                goto err;

        ret = -ENOENT;
        dir = blk_create_tree(buts->name);
        if (!dir)
                goto err;

        bt->dir = dir;
        bt->dev = dev;
        atomic_set(&bt->dropped, 0);

        ret = -EIO;
        bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
                                               &blk_dropped_fops);
        if (!bt->dropped_file)
                goto err;

        bt->msg_file = debugfs_create_file("msg", 0222, dir, bt,
                                           &blk_msg_fops);
        if (!bt->msg_file)
                goto err;

        bt->rchan = relay_open("trace", dir, buts->buf_size,
                               buts->buf_nr, &blk_relay_callbacks, bt);
        if (!bt->rchan)
                goto err;

        bt->act_mask = buts->act_mask;
        if (!bt->act_mask)
                bt->act_mask = (u16) -1;

        bt->start_lba = buts->start_lba;
        bt->end_lba = buts->end_lba;
        if (!bt->end_lba)
                bt->end_lba = -1ULL;

        bt->pid = buts->pid;
        bt->trace_state = Blktrace_setup;

        mutex_lock(&blk_probe_mutex);
        if (atomic_add_return(1, &blk_probes_ref) == 1) {
                ret = blk_register_tracepoints();
                if (ret)
                        goto probe_err;
        }
        mutex_unlock(&blk_probe_mutex);

        ret = -EBUSY;
        old_bt = xchg(&q->blk_trace, bt);
        if (old_bt) {
                (void) xchg(&q->blk_trace, old_bt);
                goto err;
        }

        return 0;
probe_err:
        atomic_dec(&blk_probes_ref);
        mutex_unlock(&blk_probe_mutex);
err:
        if (dir)
                blk_remove_tree(dir);
        if (bt) {
                if (bt->msg_file)
                        debugfs_remove(bt->msg_file);
                if (bt->dropped_file)
                        debugfs_remove(bt->dropped_file);
                free_percpu(bt->sequence);
                free_percpu(bt->msg_data);
                if (bt->rchan)
                        relay_close(bt->rchan);
                kfree(bt);
        }
        return ret;
}
int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
                    char __user *arg)
{
        struct blk_user_trace_setup buts;
        int ret;

        ret = copy_from_user(&buts, arg, sizeof(buts));
        if (ret)
                return -EFAULT;

        ret = do_blk_trace_setup(q, name, dev, &buts);
        if (ret)
                return ret;

        if (copy_to_user(arg, &buts, sizeof(buts)))
                return -EFAULT;

        return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);
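
/*
 * Minimal user-space sketch of the full ioctl lifecycle (illustrative
 * only, error handling omitted; the BLKTRACE* ioctl numbers come from
 * <linux/fs.h>):
 *
 *        struct blk_user_trace_setup buts = {
 *                .buf_size = 512 * 1024,  // bytes per relay subbuffer
 *                .buf_nr   = 4,           // subbuffers per CPU
 *                .act_mask = 0,           // 0 means "trace everything"
 *        };
 *        int fd = open("/dev/sda", O_RDONLY);
 *
 *        ioctl(fd, BLKTRACESETUP, &buts);  // kernel fills in buts.name
 *        ioctl(fd, BLKTRACESTART);
 *        // ... consume /sys/kernel/debug/block/<buts.name>/trace<N> ...
 *        ioctl(fd, BLKTRACESTOP);
 *        ioctl(fd, BLKTRACETEARDOWN);
 */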
int blk_trace_startstop(struct request_queue *q, int start)
{
        struct blk_trace *bt;
        int ret;

        bt = q->blk_trace;
        if (bt == NULL)
                return -EINVAL;

        /*
         * For starting a trace, we can transition from a setup or stopped
         * trace. For stopping a trace, the state must be running
         */
        ret = -EINVAL;
        if (start) {
                if (bt->trace_state == Blktrace_setup ||
                    bt->trace_state == Blktrace_stopped) {
                        blktrace_seq++;
                        smp_mb();
                        bt->trace_state = Blktrace_running;

                        trace_note_time(bt);
                        ret = 0;
                }
        } else {
                if (bt->trace_state == Blktrace_running) {
                        bt->trace_state = Blktrace_stopped;
                        relay_flush(bt->rchan);
                        ret = 0;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);
/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:       the block device
 * @cmd:        the ioctl cmd
 * @arg:        the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
        struct request_queue *q;
        int ret, start = 0;
        char b[BDEVNAME_SIZE];

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        mutex_lock(&bdev->bd_mutex);

        switch (cmd) {
        case BLKTRACESETUP:
                bdevname(bdev, b);
                ret = blk_trace_setup(q, b, bdev->bd_dev, arg);
                break;
        case BLKTRACESTART:
                start = 1;
                /* fall through: START and STOP share blk_trace_startstop() */
        case BLKTRACESTOP:
                ret = blk_trace_startstop(q, start);
                break;
        case BLKTRACETEARDOWN:
                ret = blk_trace_remove(q);
                break;
        default:
                ret = -ENOTTY;
                break;
        }

        mutex_unlock(&bdev->bd_mutex);
        return ret;
}
/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
        if (q->blk_trace) {
                blk_trace_startstop(q, 0);
                blk_trace_remove(q);
        }
}

/*
 * blktrace probes
 */
/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:          queue the io is for
 * @rq:         the source request
 * @what:       the action
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
                             u32 what)
{
        struct blk_trace *bt = q->blk_trace;
        int rw = rq->cmd_flags & 0x03;

        if (likely(!bt))
                return;

        if (blk_discard_rq(rq))
                rw |= (1 << BIO_RW_DISCARD);

        if (blk_pc_request(rq)) {
                what |= BLK_TC_ACT(BLK_TC_PC);
                __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
                                sizeof(rq->cmd), rq->cmd);
        } else {
                what |= BLK_TC_ACT(BLK_TC_FS);
                __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
                                rw, what, rq->errors, 0, NULL);
        }
}

static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_ABORT);
}

static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_INSERT);
}

static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
}

static void blk_add_trace_rq_requeue(struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
}

static void blk_add_trace_rq_complete(struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
}
/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:          queue the io is for
 * @bio:        the source bio
 * @what:       the action
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
                              u32 what)
{
        struct blk_trace *bt = q->blk_trace;

        if (likely(!bt))
                return;

        __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
                        !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
}

static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
}

static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
}

static void blk_add_trace_bio_backmerge(struct request_queue *q, struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
}

static void blk_add_trace_bio_frontmerge(struct request_queue *q, struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
}

static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
}

static void blk_add_trace_getrq(struct request_queue *q, struct bio *bio, int rw)
{
        if (bio)
                blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
        else {
                struct blk_trace *bt = q->blk_trace;

                if (bt)
                        __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
        }
}

static void blk_add_trace_sleeprq(struct request_queue *q, struct bio *bio, int rw)
{
        if (bio)
                blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
        else {
                struct blk_trace *bt = q->blk_trace;

                if (bt)
                        __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ, 0, 0, NULL);
        }
}
static void blk_add_trace_plug(struct request_queue *q)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt)
                __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
}

static void blk_add_trace_unplug_io(struct request_queue *q)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt) {
                unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
                __be64 rpdu = cpu_to_be64(pdu);

                __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
                                sizeof(rpdu), &rpdu);
        }
}

static void blk_add_trace_unplug_timer(struct request_queue *q)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt) {
                unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
                __be64 rpdu = cpu_to_be64(pdu);

                __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
                                sizeof(rpdu), &rpdu);
        }
}

static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
                                unsigned int pdu)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt) {
                __be64 rpdu = cpu_to_be64(pdu);

                __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
                                BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
                                sizeof(rpdu), &rpdu);
        }
}
/**
 * blk_add_trace_remap - Add a trace for a remap operation
 * @q:          queue the io is for
 * @bio:        the source bio
 * @dev:        target device
 * @from:       source sector
 * @to:         target sector
 *
 * Description:
 *     Device mapper or raid targets sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
                                dev_t dev, sector_t from, sector_t to)
{
        struct blk_trace *bt = q->blk_trace;
        struct blk_io_trace_remap r;

        if (likely(!bt))
                return;

        r.device = cpu_to_be32(dev);
        r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
        r.sector = cpu_to_be64(to);

        __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP,
                        !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
}
/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:          queue the io is for
 * @rq:         io request
 * @data:       driver-specific data
 * @len:        length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
                         struct request *rq,
                         void *data, size_t len)
{
        struct blk_trace *bt = q->blk_trace;

        if (likely(!bt))
                return;

        if (blk_pc_request(rq))
                __blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
                                rq->errors, len, data);
        else
                __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
                                0, BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
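
/*
 * Hypothetical driver-side sketch: a controller driver could attach a
 * hardware status word to a request's trace stream like so (the register
 * name and hba layout are made up for illustration):
 *
 *        u32 csr = readl(hba->mmio + SOME_STATUS_REG);
 *        blk_add_driver_data(q, rq, &csr, sizeof(csr));
 *
 * blkparse(1) then presents the payload as a raw per-event pdu.
 */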
static int blk_register_tracepoints(void)
{
        int ret;

        ret = register_trace_block_rq_abort(blk_add_trace_rq_abort);
        WARN_ON(ret);
        ret = register_trace_block_rq_insert(blk_add_trace_rq_insert);
        WARN_ON(ret);
        ret = register_trace_block_rq_issue(blk_add_trace_rq_issue);
        WARN_ON(ret);
        ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue);
        WARN_ON(ret);
        ret = register_trace_block_rq_complete(blk_add_trace_rq_complete);
        WARN_ON(ret);
        ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce);
        WARN_ON(ret);
        ret = register_trace_block_bio_complete(blk_add_trace_bio_complete);
        WARN_ON(ret);
        ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
        WARN_ON(ret);
        ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
        WARN_ON(ret);
        ret = register_trace_block_bio_queue(blk_add_trace_bio_queue);
        WARN_ON(ret);
        ret = register_trace_block_getrq(blk_add_trace_getrq);
        WARN_ON(ret);
        ret = register_trace_block_sleeprq(blk_add_trace_sleeprq);
        WARN_ON(ret);
        ret = register_trace_block_plug(blk_add_trace_plug);
        WARN_ON(ret);
        ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer);
        WARN_ON(ret);
        ret = register_trace_block_unplug_io(blk_add_trace_unplug_io);
        WARN_ON(ret);
        ret = register_trace_block_split(blk_add_trace_split);
        WARN_ON(ret);
        ret = register_trace_block_remap(blk_add_trace_remap);
        WARN_ON(ret);
        return 0;
}

static void blk_unregister_tracepoints(void)
{
        unregister_trace_block_remap(blk_add_trace_remap);
        unregister_trace_block_split(blk_add_trace_split);
        unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
        unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer);
        unregister_trace_block_plug(blk_add_trace_plug);
        unregister_trace_block_sleeprq(blk_add_trace_sleeprq);
        unregister_trace_block_getrq(blk_add_trace_getrq);
        unregister_trace_block_bio_queue(blk_add_trace_bio_queue);
        unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
        unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
        unregister_trace_block_bio_complete(blk_add_trace_bio_complete);
        unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce);
        unregister_trace_block_rq_complete(blk_add_trace_rq_complete);
        unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue);
        unregister_trace_block_rq_issue(blk_add_trace_rq_issue);
        unregister_trace_block_rq_insert(blk_add_trace_rq_insert);
        unregister_trace_block_rq_abort(blk_add_trace_rq_abort);

        tracepoint_synchronize_unregister();
}