blktrace.c
/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/time.h>
#include <trace/block.h>
#include <linux/uaccess.h>
#include "trace_output.h"

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;
/* Select an alternative, minimalistic output format rather than the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1

static struct tracer_opt blk_tracer_opts[] = {
	/* The minimalistic (classic) output is disabled by default */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};
/* Global reference count of probes */
static atomic_t blk_probes_ref = ATOMIC_INIT(0);

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len)
{
	struct blk_io_trace *t;
	struct ring_buffer_event *event = NULL;
	int pc = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;

	if (blk_tracer) {
		pc = preempt_count();
		event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
						  sizeof(*t) + len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len);
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * Common tail: fills the fields shared by both the ftrace
		 * ring-buffer path (reached via the goto above) and the
		 * relay path.
		 */
		t->device = bt->dev;
		t->action = action;
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len;
		memcpy((void *) t + sizeof(*t), data, len);

		if (blk_tracer)
			trace_buffer_unlock_commit(blk_tr, event, 0, pc);
	}
}
/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
	tsk->btrace_seq = blktrace_seq;
	trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}

static void trace_note_time(struct blk_trace *bt)
{
	struct timespec now;
	unsigned long flags;
	u32 words[2];

	getnstimeofday(&now);
	words[0] = now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
	local_irq_restore(flags);
}

void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	local_irq_save(flags);
	buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);

static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}
/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name))
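/*
 * Illustration of MASK_TC_BIT: it shifts the BIO_RW_<name> request-flag
 * bit into position as the BLK_TC_<name> bit within the action's
 * category field.  For example, if BIO_RW_META were bit 5 and
 * BLK_TC_META were (1 << 9), then for a meta request
 * MASK_TC_BIT(rw, META) would compute
 *	(rw & (1 << 5)) << (9 + BLK_TC_SHIFT - 5)
 * which lands the flag exactly on BLK_TC_ACT(BLK_TC_META).  (The bit
 * numbers here are illustrative only; the real values come from the
 * BIO_RW_* and BLK_TC_* definitions.)
 */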
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    int rw, u32 what, int error, int pdu_len,
			    void *pdu_data)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[rw & WRITE];
	what |= MASK_TC_BIT(rw, BARRIER);
	what |= MASK_TC_BIT(rw, SYNCIO);
	what |= MASK_TC_BIT(rw, AHEAD);
	what |= MASK_TC_BIT(rw, META);
	what |= MASK_TC_BIT(rw, DISCARD);

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		pc = preempt_count();
		event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
						  sizeof(*t) + pdu_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(blk_tr, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}
static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);

static void blk_trace_free(struct blk_trace *bt)
{
	debugfs_remove(bt->msg_file);
	debugfs_remove(bt->dropped_file);
	relay_close(bt->rchan);
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}

static void blk_trace_cleanup(struct blk_trace *bt)
{
	blk_trace_free(bt);
	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();
}

int blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (!bt)
		return -EINVAL;

	if (bt->trace_state != Blktrace_running)
		blk_trace_cleanup(bt);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static int blk_dropped_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		blk_dropped_open,
	.read =		blk_dropped_read,
};

static int blk_msg_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count >= BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = kmalloc(count + 1, GFP_KERNEL);
	if (msg == NULL)
		return -ENOMEM;

	if (copy_from_user(msg, buffer, count)) {
		kfree(msg);
		return -EFAULT;
	}

	msg[count] = '\0';
	bt = filp->private_data;
	__trace_note_message(bt, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner =	THIS_MODULE,
	.open =		blk_msg_open,
	.write =	blk_msg_write,
};
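/*
 * Example (illustrative): with a trace set up on sda, user space can
 * inject a marker into the trace stream by writing to the msg file
 * created below in do_blk_trace_setup():
 *
 *	echo "hello blktrace" > /sys/kernel/debug/block/sda/msg
 *
 * The text shows up as a BLK_TN_MESSAGE note via __trace_note_message().
 */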
/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	debugfs_remove(dentry);

	/*
	 * this will fail for all but the last file, but that is ok. what we
	 * care about is the top level buts->name directory going away, when
	 * the last trace file is gone. Then we don't have to rmdir() that
	 * manually on trace stop, so it nicely solves the issue with
	 * force killing of running traces.
	 */

	debugfs_remove(parent);
	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   int mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
					&relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};

static void blk_trace_setup_lba(struct blk_trace *bt,
				struct block_device *bdev)
{
	struct hd_struct *part = NULL;

	if (bdev)
		part = bdev->bd_part;

	if (part) {
		bt->start_lba = part->start_sect;
		bt->end_lba = part->start_sect + part->nr_sects;
	} else {
		bt->start_lba = 0;
		bt->end_lba = -1ULL;
	}
}
/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		       struct block_device *bdev,
		       struct blk_user_trace_setup *buts)
{
	struct blk_trace *old_bt, *bt = NULL;
	struct dentry *dir = NULL;
	int ret, i;

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	for (i = 0; i < strlen(buts->name); i++)
		if (buts->name[i] == '/')
			buts->name[i] = '_';

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	ret = -ENOMEM;
	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	ret = -ENOENT;

	mutex_lock(&blk_tree_mutex);
	if (!blk_tree_root) {
		blk_tree_root = debugfs_create_dir("block", NULL);
		if (!blk_tree_root) {
			mutex_unlock(&blk_tree_mutex);
			goto err;
		}
	}
	mutex_unlock(&blk_tree_mutex);

	dir = debugfs_create_dir(buts->name, blk_tree_root);
	if (!dir)
		goto err;

	bt->dir = dir;
	bt->dev = dev;
	atomic_set(&bt->dropped, 0);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);
	if (!bt->dropped_file)
		goto err;

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
	if (!bt->msg_file)
		goto err;

	bt->rchan = relay_open("trace", dir, buts->buf_size,
				buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	blk_trace_setup_lba(bt, bdev);

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	ret = -EBUSY;
	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt) {
		(void) xchg(&q->blk_trace, old_bt);
		goto err;
	}

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();

	return 0;
err:
	blk_trace_free(bt);
	return ret;
}
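/*
 * The setup above results in a debugfs layout along these lines
 * (illustrative, for a device named "sda" on a 4-cpu machine):
 *
 *	/sys/kernel/debug/block/sda/dropped	   (0444, lost-event count)
 *	/sys/kernel/debug/block/sda/msg		   (0222, message injection)
 *	/sys/kernel/debug/block/sda/trace0..trace3 (per-cpu relay buffers)
 *
 * The per-cpu trace<N> files are created by relay_open() through
 * blk_create_buf_file_callback() above.
 */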
int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    struct block_device *bdev,
		    char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts)))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);
int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;
	struct blk_trace *bt = q->blk_trace;

	if (bt == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running.
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;
			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);
/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&bdev->bd_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
	case BLKTRACESTART:
		start = 1;
		/* fall through - start and stop share blk_trace_startstop() */
	case BLKTRACESTOP:
		ret = blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&bdev->bd_mutex);
	return ret;
}
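/*
 * Typical user-space lifecycle (illustrative pseudocode; this is what
 * the blktrace utility drives through the ioctls handled above):
 *
 *	fd = open("/dev/sda", O_RDONLY);
 *	ioctl(fd, BLKTRACESETUP, &buts);    - creates the debugfs files
 *	ioctl(fd, BLKTRACESTART);           - state -> Blktrace_running
 *	... read the per-cpu trace<N> relay files ...
 *	ioctl(fd, BLKTRACESTOP);            - state -> Blktrace_stopped
 *	ioctl(fd, BLKTRACETEARDOWN);        - tears everything down
 */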
/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	if (q->blk_trace) {
		blk_trace_startstop(q, 0);
		blk_trace_remove(q);
	}
}

/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:		queue the io is for
 * @rq:		the source request
 * @what:	the action
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
			     u32 what)
{
	struct blk_trace *bt = q->blk_trace;
	int rw = rq->cmd_flags & 0x03;

	if (likely(!bt))
		return;

	if (blk_discard_rq(rq))
		rw |= (1 << BIO_RW_DISCARD);

	if (blk_pc_request(rq)) {
		what |= BLK_TC_ACT(BLK_TC_PC);
		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
				rq->cmd_len, rq->cmd);
	} else {
		what |= BLK_TC_ACT(BLK_TC_FS);
		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
				rw, what, rq->errors, 0, NULL);
	}
}
static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_ABORT);
}

static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
}

static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
}

static void blk_add_trace_rq_requeue(struct request_queue *q,
				     struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
}

static void blk_add_trace_rq_complete(struct request_queue *q,
				      struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
			!bio_flagged(bio, BIO_UPTODATE), 0, NULL);
}

static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
}

static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
}

static void blk_add_trace_bio_backmerge(struct request_queue *q,
					struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
}

static void blk_add_trace_bio_frontmerge(struct request_queue *q,
					 struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
}

static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
}

static void blk_add_trace_getrq(struct request_queue *q,
				struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
	}
}

static void blk_add_trace_sleeprq(struct request_queue *q,
				  struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
					0, 0, NULL);
	}
}

static void blk_add_trace_plug(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt)
		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
}

static void blk_add_trace_unplug_io(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
				sizeof(rpdu), &rpdu);
	}
}

static void blk_add_trace_unplug_timer(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
				sizeof(rpdu), &rpdu);
	}
}

static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
				unsigned int pdu)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
				BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
				sizeof(rpdu), &rpdu);
	}
}
/**
 * blk_add_trace_remap - Add a trace for a remap operation
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 * @to:		target sector
 *
 * Description:
 *     A device mapper or raid target sometimes needs to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
				dev_t dev, sector_t from, sector_t to)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device = cpu_to_be32(dev);
	r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
	r.sector = cpu_to_be64(to);

	__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP,
			!bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
}
/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:		queue the io is for
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
			 struct request *rq,
			 void *data, size_t len)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	if (blk_pc_request(rq))
		__blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
				rq->errors, len, data);
	else
		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
				0, BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);

static void blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_abort(blk_add_trace_rq_abort);
	WARN_ON(ret);
	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete);
	WARN_ON(ret);
	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq);
	WARN_ON(ret);
	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug);
	WARN_ON(ret);
	ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer);
	WARN_ON(ret);
	ret = register_trace_block_unplug_io(blk_add_trace_unplug_io);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split);
	WARN_ON(ret);
	ret = register_trace_block_remap(blk_add_trace_remap);
	WARN_ON(ret);
}

static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_remap(blk_add_trace_remap);
	unregister_trace_block_split(blk_add_trace_split);
	unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
	unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer);
	unregister_trace_block_plug(blk_add_trace_plug);
	unregister_trace_block_sleeprq(blk_add_trace_sleeprq);
	unregister_trace_block_getrq(blk_add_trace_getrq);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete);
	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert);
	unregister_trace_block_rq_abort(blk_add_trace_rq_abort);

	tracepoint_synchronize_unregister();
}
/*
 * struct blk_io_tracer formatting routines
 */
static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
	int i = 0;
	int tc = t->action >> BLK_TC_SHIFT;

	if (t->action == BLK_TN_MESSAGE) {
		rwbs[i++] = 'N';
		goto out;
	}

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_BARRIER)
		rwbs[i++] = 'B';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';
out:
	rwbs[i] = '\0';
}
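/*
 * Examples of rwbs strings produced above (illustrative): "W" for a
 * plain write, "WS" for a synchronous write, "RA" for a readahead,
 * "DB" for a discard barrier, and "N" for an action with no data
 * transfer (or a BLK_TN_MESSAGE note).
 */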
static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent) + 1;
}

static inline u32 t_action(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent)
{
	const __u64 *val = pdu_start(ent);
	return be64_to_cpu(*val);
}

static void get_pdu_remap(const struct trace_entry *ent,
			  struct blk_io_trace_remap *r)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent);
	__u64 sector = __r->sector;

	r->device = be32_to_cpu(__r->device);
	r->device_from = be32_to_cpu(__r->device_from);
	r->sector = be64_to_cpu(sector);
}
typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act);

static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
{
	char rwbs[6];
	unsigned long long ts  = iter->ts;
	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
	unsigned secs	       = (unsigned long)ts;
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);

	return trace_seq_printf(&iter->seq,
				"%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
				MAJOR(t->device), MINOR(t->device), iter->cpu,
				secs, nsec_rem, iter->ent->pid, act, rwbs);
}

static int blk_log_action(struct trace_iterator *iter, const char *act)
{
	char rwbs[6];
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	return trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
				MAJOR(t->device), MINOR(t->device), act, rwbs);
}
static int blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent)
{
	const char *pdu_buf;
	int pdu_len;
	int i, end, ret;

	pdu_buf = pdu_start(ent);
	pdu_len = te_blk_io_trace(ent)->pdu_len;

	if (!pdu_len)
		return 1;

	/* find the last zero that needs to be printed */
	for (end = pdu_len - 1; end >= 0; end--)
		if (pdu_buf[end])
			break;
	end++;

	if (!trace_seq_putc(s, '('))
		return 0;

	for (i = 0; i < pdu_len; i++) {

		ret = trace_seq_printf(s, "%s%02x",
				       i == 0 ? "" : " ", pdu_buf[i]);
		if (!ret)
			return ret;

		/*
		 * stop when the rest is just zeroes and indicate so
		 * with a ".." appended
		 */
		if (i == end && end != pdu_len - 1)
			return trace_seq_puts(s, " ..) ");
	}

	return trace_seq_puts(s, ") ");
}
static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		int ret;

		ret = trace_seq_printf(s, "%u ", t_bytes(ent));
		if (!ret)
			return 0;
		ret = blk_log_dump_pdu(s, ent);
		if (!ret)
			return 0;
		return trace_seq_printf(s, "[%s]\n", cmd);
	} else {
		if (t_sec(ent))
			return trace_seq_printf(s, "%llu + %u [%s]\n",
						t_sector(ent), t_sec(ent), cmd);
		return trace_seq_printf(s, "[%s]\n", cmd);
	}
}

static int blk_log_with_error(struct trace_seq *s,
			      const struct trace_entry *ent)
{
	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		int ret;

		ret = blk_log_dump_pdu(s, ent);
		if (ret)
			return trace_seq_printf(s, "[%d]\n", t_error(ent));
		return 0;
	} else {
		if (t_sec(ent))
			return trace_seq_printf(s, "%llu + %u [%d]\n",
						t_sector(ent),
						t_sec(ent), t_error(ent));
		return trace_seq_printf(s, "%llu [%d]\n",
					t_sector(ent), t_error(ent));
	}
}

static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
{
	struct blk_io_trace_remap r = { .device = 0, };

	get_pdu_remap(ent, &r);
	return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
				t_sector(ent),
				t_sec(ent), MAJOR(r.device), MINOR(r.device),
				(unsigned long long)r.sector);
}

static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	return trace_seq_printf(s, "[%s]\n", cmd);
}

static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	return trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent));
}

static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
				get_pdu_int(ent), cmd);
}

static int blk_log_msg(struct trace_seq *s, const struct trace_entry *ent)
{
	int ret;
	const struct blk_io_trace *t = te_blk_io_trace(ent);

	ret = trace_seq_putmem(s, t + 1, t->pdu_len);
	if (ret)
		return trace_seq_putc(s, '\n');
	return ret;
}
/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
	blk_tracer_enabled = true;
}

static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
	blk_tracer_enabled = false;
}

static void blk_tracer_reset(struct trace_array *tr)
{
	blk_tracer_stop(tr);
}

static const struct {
	const char *act[2];
	int	   (*print)(struct trace_seq *s, const struct trace_entry *ent);
} what2act[] = {
	[__BLK_TA_QUEUE]	= {{  "Q", "queue" },	   blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{  "M", "backmerge" },  blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{  "F", "frontmerge" }, blk_log_generic },
	[__BLK_TA_GETRQ]	= {{  "G", "getrq" },	   blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{  "S", "sleeprq" },	   blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{  "R", "requeue" },	   blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{  "D", "issue" },	   blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{  "C", "complete" },   blk_log_with_error },
	[__BLK_TA_PLUG]		= {{  "P", "plug" },	   blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{  "U", "unplug_io" },  blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{  "I", "insert" },	   blk_log_generic },
	[__BLK_TA_SPLIT]	= {{  "X", "split" },	   blk_log_split },
	[__BLK_TA_BOUNCE]	= {{  "B", "bounce" },	   blk_log_generic },
	[__BLK_TA_REMAP]	= {{  "A", "remap" },	   blk_log_remap },
};
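/*
 * Put together by the handlers above, a classic-mode line looks roughly
 * like this (illustrative values):
 *
 *	  8,0    1     0.000123456  4508  Q   W 3273216 + 8 [dd]
 *
 * i.e. blk_log_action_classic() prints the "dev cpu timestamp pid act
 * rwbs" prefix and what2act[].print() appends "sector + nr_sectors
 * [comm]".
 */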
static enum print_line_t print_one_line(struct trace_iterator *iter,
					bool classic)
{
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t;
	u16 what;
	int ret;
	bool long_act;
	blk_log_action_t *log_action;

	t	   = te_blk_io_trace(iter->ent);
	what	   = t->action & ((1 << BLK_TC_SHIFT) - 1);
	long_act   = !!(trace_flags & TRACE_ITER_VERBOSE);
	log_action = classic ? &blk_log_action_classic : &blk_log_action;

	if (t->action == BLK_TN_MESSAGE) {
		ret = log_action(iter, long_act ? "message" : "m");
		if (ret)
			ret = blk_log_msg(s, iter->ent);
		goto out;
	}

	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		ret = trace_seq_printf(s, "Unknown action %x\n", what);
	else {
		ret = log_action(iter, what2act[what].act[long_act]);
		if (ret)
			ret = what2act[what].print(s, iter->ent);
	}
out:
	return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}
static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
					       int flags)
{
	return print_one_line(iter, false);
}

static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time     = iter->ts,
	};

	if (!trace_seq_putmem(s, &old, offset))
		return 0;
	return trace_seq_putmem(s, &t->sector,
				sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags)
{
	return blk_trace_synthesize_old_trace(iter) ?
			TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}

static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set)
{
	/* don't output context-info for blk_classic output */
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
			trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
		else
			trace_flags |= TRACE_ITER_CONTEXT_INFO;
	}
	return 0;
}

static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
	.set_flag	= blk_tracer_set_flag,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};

static int __init init_blk_tracer(void)
{
	if (!register_ftrace_event(&trace_blk_event)) {
		pr_warning("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warning("Warning: could not register the block tracer\n");
		unregister_ftrace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);
static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (bt == NULL)
		return -EINVAL;

	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();

	blk_trace_free(bt);
	return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *old_bt, *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt != NULL) {
		(void)xchg(&q->blk_trace, old_bt);
		ret = -EBUSY;
		goto free_bt;
	}

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();
	return 0;

free_bt:
	blk_trace_free(bt);
	return ret;
}
/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};
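/*
 * These attributes appear in a "trace" subdirectory under the
 * partition's sysfs directory. Example (illustrative device names):
 *
 *	echo read,write,issue,complete > /sys/block/sda/sda1/trace/act_mask
 *	echo 1 > /sys/block/sda/sda1/trace/enable
 *
 * Writing to enable calls blk_trace_setup_queue()/blk_trace_remove_queue()
 * above; the other attributes tweak the fields of q->blk_trace directly.
 */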
static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ,		"read"		},
	{ BLK_TC_WRITE,		"write"		},
	{ BLK_TC_BARRIER,	"barrier"	},
	{ BLK_TC_SYNC,		"sync"		},
	{ BLK_TC_QUEUE,		"queue"		},
	{ BLK_TC_REQUEUE,	"requeue"	},
	{ BLK_TC_ISSUE,		"issue"		},
	{ BLK_TC_COMPLETE,	"complete"	},
	{ BLK_TC_FS,		"fs"		},
	{ BLK_TC_PC,		"pc"		},
	{ BLK_TC_AHEAD,		"ahead"		},
	{ BLK_TC_META,		"meta"		},
	{ BLK_TC_DISCARD,	"discard"	},
	{ BLK_TC_DRV_DATA,	"drv_data"	},
};
static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}
		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}
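/*
 * For example (illustrative), blk_trace_str2mask("read,write,barrier")
 * returns BLK_TC_READ | BLK_TC_WRITE | BLK_TC_BARRIER.  Matching is
 * case-insensitive, empty tokens are skipped, and any unknown token
 * makes the whole string parse as -EINVAL.
 */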
static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				    (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}

static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
	if (bdev->bd_disk == NULL)
		return NULL;

	return bdev_get_queue(bdev);
}
static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct request_queue *q;
	struct block_device *bdev;
	ssize_t ret = -ENXIO;

	lock_kernel();
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!q->blk_trace);
		goto out_unlock_bdev;
	}

	if (q->blk_trace == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", q->blk_trace->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
	return ret;
}
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (sscanf(buf, "%llx", &value) != 1) {
			/* Assume it is a list of trace category names */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else if (sscanf(buf, "%llu", &value) != 1)
		goto out;

	ret = -ENXIO;

	lock_kernel();
	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		if (value)
			ret = blk_trace_setup_queue(q, bdev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (q->blk_trace == NULL)
		ret = blk_trace_setup_queue(q, bdev);

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			q->blk_trace->act_mask = value;
		else if (attr == &dev_attr_pid)
			q->blk_trace->pid = value;
		else if (attr == &dev_attr_start_lba)
			q->blk_trace->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			q->blk_trace->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
out:
	return ret ? ret : count;
}

int blk_trace_init_sysfs(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}