/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/smp_lock.h>
#include <linux/time.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "trace_output.h"

#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1

static struct tracer_opt blk_tracer_opts[] = {
	/* Disable the minimalistic output by default */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};

/* Global reference count of probes */
static atomic_t blk_probes_ref = ATOMIC_INIT(0);

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len)
{
	struct blk_io_trace *t;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	int pc = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;

	if (blk_tracer) {
		buffer = blk_tr->buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len);
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
record_it:
		t->device = bt->dev;
		t->action = action;
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len;
		memcpy((void *) t + sizeof(*t), data, len);

		if (blk_tracer)
			trace_buffer_unlock_commit(buffer, event, 0, pc);
	}
}

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
	tsk->btrace_seq = blktrace_seq;
	trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}

static void trace_note_time(struct blk_trace *bt)
{
	struct timespec now;
	unsigned long flags;
	u32 words[2];

	getnstimeofday(&now);
	words[0] = now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
	local_irq_restore(flags);
}

void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	local_irq_save(flags);
	buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);

static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}

/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name))
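/*
 * MASK_TC_BIT() relocates a single request flag bit into the trace
 * category part of the action word: if bit BIO_RW_##__name is set in
 * @rw, shifting it left by
 * (ilog2(BLK_TC_##__name) + BLK_TC_SHIFT - BIO_RW_##__name) lands it
 * exactly on bit (ilog2(BLK_TC_##__name) + BLK_TC_SHIFT), which is
 * BLK_TC_ACT(BLK_TC_##__name).  A worked example with illustrative
 * values (not taken from the headers): if BIO_RW_META were 5,
 * BLK_TC_META were 1 << 6 and BLK_TC_SHIFT were 16, the shift count
 * would be 6 + 16 - 5 = 17, moving bit 5 of @rw to bit 22, i.e. onto
 * BLK_TC_META within the action field.
 */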
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    int rw, u32 what, int error, int pdu_len,
			    void *pdu_data)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[rw & WRITE];
	what |= MASK_TC_BIT(rw, BARRIER);
	what |= MASK_TC_BIT(rw, SYNCIO);
	what |= MASK_TC_BIT(rw, AHEAD);
	what |= MASK_TC_BIT(rw, META);
	what |= MASK_TC_BIT(rw, DISCARD);

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(buffer, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}
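/*
 * On-buffer layout produced above (a sketch; the field list lives in
 * struct blk_io_trace in blktrace_api.h):
 *
 *   +---------------------------+------------------------+
 *   | struct blk_io_trace       | pdu_len payload bytes  |
 *   | magic/sequence/time/...   | (e.g. cmd, remap pdu)  |
 *   +---------------------------+------------------------+
 *
 * Userspace recognises a record by the magic word
 * BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION and then consumes pdu_len
 * trailing bytes before the next record starts.
 */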
static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);

static void blk_trace_free(struct blk_trace *bt)
{
	debugfs_remove(bt->msg_file);
	debugfs_remove(bt->dropped_file);
	relay_close(bt->rchan);
	debugfs_remove(bt->dir);
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}

static void blk_trace_cleanup(struct blk_trace *bt)
{
	blk_trace_free(bt);
	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();
}

int blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (!bt)
		return -EINVAL;

	if (bt->trace_state != Blktrace_running)
		blk_trace_cleanup(bt);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static int blk_dropped_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner	= THIS_MODULE,
	.open	= blk_dropped_open,
	.read	= blk_dropped_read,
};

static int blk_msg_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count >= BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = kmalloc(count + 1, GFP_KERNEL);
	if (msg == NULL)
		return -ENOMEM;

	if (copy_from_user(msg, buffer, count)) {
		kfree(msg);
		return -EFAULT;
	}

	msg[count] = '\0';
	bt = filp->private_data;
	__trace_note_message(bt, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner	= THIS_MODULE,
	.open	= blk_msg_open,
	.write	= blk_msg_write,
};
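/*
 * Usage sketch (the path assumes the default debugfs mount point and
 * "sda" is an example device name): while a trace runs, userspace can
 * inject a marker that appears in the stream as a BLK_TN_MESSAGE note:
 *
 *   echo "fsck pass 2 starts" > /sys/kernel/debug/block/sda/msg
 *
 * The "dropped" file next to it reports how many events were lost to
 * full subbuffers.
 */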
/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   int mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};

static void blk_trace_setup_lba(struct blk_trace *bt,
				struct block_device *bdev)
{
	struct hd_struct *part = NULL;

	if (bdev)
		part = bdev->bd_part;

	if (part) {
		bt->start_lba = part->start_sect;
		bt->end_lba = part->start_sect + part->nr_sects;
	} else {
		bt->start_lba = 0;
		bt->end_lba = -1ULL;
	}
}

/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		       struct block_device *bdev,
		       struct blk_user_trace_setup *buts)
{
	struct blk_trace *old_bt, *bt = NULL;
	struct dentry *dir = NULL;
	int ret, i;

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names contain slashes - convert them to underscores
	 * so the name is usable as a debugfs directory name
	 */
	for (i = 0; i < strlen(buts->name); i++)
		if (buts->name[i] == '/')
			buts->name[i] = '_';

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	ret = -ENOMEM;
	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	ret = -ENOENT;

	mutex_lock(&blk_tree_mutex);
	if (!blk_tree_root) {
		blk_tree_root = debugfs_create_dir("block", NULL);
		if (!blk_tree_root) {
			mutex_unlock(&blk_tree_mutex);
			goto err;
		}
	}
	mutex_unlock(&blk_tree_mutex);

	dir = debugfs_create_dir(buts->name, blk_tree_root);
	if (!dir)
		goto err;

	bt->dir = dir;
	bt->dev = dev;
	atomic_set(&bt->dropped, 0);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);
	if (!bt->dropped_file)
		goto err;

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
	if (!bt->msg_file)
		goto err;

	bt->rchan = relay_open("trace", dir, buts->buf_size,
			       buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	blk_trace_setup_lba(bt, bdev);

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	ret = -EBUSY;
	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt) {
		(void) xchg(&q->blk_trace, old_bt);
		goto err;
	}

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();

	return 0;
err:
	blk_trace_free(bt);
	return ret;
}

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    struct block_device *bdev,
		    char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts))) {
		blk_trace_remove(q);
		return -EFAULT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);
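/*
 * Userspace usage sketch (hedged: error handling omitted, fd is assumed
 * to be an open block device such as /dev/sda, and the buffer sizes are
 * example values):
 *
 *   struct blk_user_trace_setup buts = {
 *           .buf_size = 512 * 1024,    (bytes per relay subbuffer)
 *           .buf_nr   = 4,             (subbuffers per cpu)
 *   };
 *   ioctl(fd, BLKTRACESETUP, &buts);   (allocates bt, fills buts.name)
 *   ioctl(fd, BLKTRACESTART);          (state -> Blktrace_running)
 *   ...
 *   ioctl(fd, BLKTRACESTOP);
 *   ioctl(fd, BLKTRACETEARDOWN);
 */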
int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;
	struct blk_trace *bt = q->blk_trace;

	if (bt == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running.
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;

			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

/**
 * blk_trace_ioctl - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&bdev->bd_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
	case BLKTRACESTART:
		start = 1;
		/* fall through */
	case BLKTRACESTOP:
		ret = blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&bdev->bd_mutex);
	return ret;
}

/**
 * blk_trace_shutdown - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	if (q->blk_trace) {
		blk_trace_startstop(q, 0);
		blk_trace_remove(q);
	}
}

/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:		queue the io is for
 * @rq:		the source request
 * @what:	the action
 *
 * Description:
 *     Records an action against a request. Will log the request offset
 *     and size.
 *
 **/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
			     u32 what)
{
	struct blk_trace *bt = q->blk_trace;
	int rw = rq->cmd_flags & 0x03;

	if (likely(!bt))
		return;

	if (blk_discard_rq(rq))
		rw |= (1 << BIO_RW_DISCARD);

	if (blk_pc_request(rq)) {
		what |= BLK_TC_ACT(BLK_TC_PC);
		__blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
				what, rq->errors, rq->cmd_len, rq->cmd);
	} else  {
		what |= BLK_TC_ACT(BLK_TC_FS);
		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rw,
				what, rq->errors, 0, NULL);
	}
}

static void blk_add_trace_rq_abort(void *ignore,
				   struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_ABORT);
}

static void blk_add_trace_rq_insert(void *ignore,
				    struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
}

static void blk_add_trace_rq_issue(void *ignore,
				   struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
}

static void blk_add_trace_rq_requeue(void *ignore,
				     struct request_queue *q,
				     struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
}

static void blk_add_trace_rq_complete(void *ignore,
				      struct request_queue *q,
				      struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
			!bio_flagged(bio, BIO_UPTODATE), 0, NULL);
}

static void blk_add_trace_bio_bounce(void *ignore,
				     struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
}

static void blk_add_trace_bio_complete(void *ignore,
				       struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
}

static void blk_add_trace_bio_backmerge(void *ignore,
					struct request_queue *q,
					struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
}

static void blk_add_trace_bio_frontmerge(void *ignore,
					 struct request_queue *q,
					 struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
}

static void blk_add_trace_bio_queue(void *ignore,
				    struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
}

static void blk_add_trace_getrq(void *ignore,
				struct request_queue *q,
				struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
	}
}

static void blk_add_trace_sleeprq(void *ignore,
				  struct request_queue *q,
				  struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
					0, 0, NULL);
	}
}

static void blk_add_trace_plug(void *ignore, struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt)
		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
}

static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
				sizeof(rpdu), &rpdu);
	}
}

static void blk_add_trace_unplug_timer(void *ignore, struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
				sizeof(rpdu), &rpdu);
	}
}

static void blk_add_trace_split(void *ignore,
				struct request_queue *q, struct bio *bio,
				unsigned int pdu)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
				BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
				sizeof(rpdu), &rpdu);
	}
}

/**
 * blk_add_trace_remap - Add a trace for a remap operation
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper or raid targets sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_remap(void *ignore,
				struct request_queue *q, struct bio *bio,
				dev_t dev, sector_t from)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
			BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE),
			sizeof(r), &r);
}

/**
 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
 * @q:		queue the io is for
 * @rq:		the source request
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper remaps request to other devices.
 *     Add a trace for that action.
 *
 **/
static void blk_add_trace_rq_remap(void *ignore,
				   struct request_queue *q,
				   struct request *rq, dev_t dev,
				   sector_t from)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
			rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
			sizeof(r), &r);
}

/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:		queue the io is for
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
			 struct request *rq,
			 void *data, size_t len)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	if (blk_pc_request(rq))
		__blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
				BLK_TA_DRV_DATA, rq->errors, len, data);
	else
		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
				BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
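/*
 * Usage sketch for a driver (hedged: "my_completion_info" is a made-up
 * example structure, not part of any real driver):
 *
 *   struct my_completion_info info = { ... };
 *   blk_add_driver_data(q, rq, &info, sizeof(info));
 *
 * The payload is copied verbatim into the trace record as the pdu of a
 * BLK_TA_DRV_DATA event and handed to userspace unformatted.
 */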
static void blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_abort(blk_add_trace_rq_abort, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
	WARN_ON(ret);
	ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split, NULL);
	WARN_ON(ret);
	ret = register_trace_block_remap(blk_add_trace_remap, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	WARN_ON(ret);
}

static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	unregister_trace_block_remap(blk_add_trace_remap, NULL);
	unregister_trace_block_split(blk_add_trace_split, NULL);
	unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
	unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
	unregister_trace_block_plug(blk_add_trace_plug, NULL);
	unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
	unregister_trace_block_rq_abort(blk_add_trace_rq_abort, NULL);

	tracepoint_synchronize_unregister();
}

/*
 * struct blk_io_tracer formatting routines
 */
static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
	int i = 0;
	int tc = t->action >> BLK_TC_SHIFT;

	if (t->action == BLK_TN_MESSAGE) {
		rwbs[i++] = 'N';
		goto out;
	}

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_BARRIER)
		rwbs[i++] = 'B';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';
out:
	rwbs[i] = '\0';
}
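/*
 * Example decodings (derived from the branches above): a synchronous
 * write trace yields "WS", a discard yields "D", a plain read yields
 * "R", and a zero-byte, non-write event yields "N".  The result is at
 * most five flag characters plus the terminating NUL, which is why
 * callers size their buffers as rwbs[6].
 */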
static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent) + 1;
}

static inline u32 t_action(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent)
{
	const __u64 *val = pdu_start(ent);
	return be64_to_cpu(*val);
}

static void get_pdu_remap(const struct trace_entry *ent,
			  struct blk_io_trace_remap *r)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent);
	__u64 sector_from = __r->sector_from;

	r->device_from = be32_to_cpu(__r->device_from);
	r->device_to   = be32_to_cpu(__r->device_to);
	r->sector_from = be64_to_cpu(sector_from);
}
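/*
 * Note on byte order: the remap and unplug PDUs are stored big-endian
 * in the trace stream (see the cpu_to_be*() calls at the probe sites),
 * so the accessors above undo that with be*_to_cpu() before the values
 * reach the formatting code.  t_sec() converts bytes to 512-byte
 * sectors, the unit the output routines print.
 */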
typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act);

static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
{
	char rwbs[6];
	unsigned long long ts  = iter->ts;
	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
	unsigned secs	       = (unsigned long)ts;
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);

	return trace_seq_printf(&iter->seq,
				"%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
				MAJOR(t->device), MINOR(t->device), iter->cpu,
				secs, nsec_rem, iter->ent->pid, act, rwbs);
}

static int blk_log_action(struct trace_iterator *iter, const char *act)
{
	char rwbs[6];
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	return trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
				MAJOR(t->device), MINOR(t->device), act, rwbs);
}

static int blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent)
{
	const unsigned char *pdu_buf;
	int pdu_len;
	int i, end, ret;

	pdu_buf = pdu_start(ent);
	pdu_len = te_blk_io_trace(ent)->pdu_len;

	if (!pdu_len)
		return 1;

	/* find the last non-zero byte that needs to be printed */
	for (end = pdu_len - 1; end >= 0; end--)
		if (pdu_buf[end])
			break;
	end++;

	if (!trace_seq_putc(s, '('))
		return 0;

	for (i = 0; i < pdu_len; i++) {

		ret = trace_seq_printf(s, "%s%02x",
				       i == 0 ? "" : " ", pdu_buf[i]);
		if (!ret)
			return ret;

		/*
		 * stop when the rest is just zeroes and indicate so
		 * with a ".." appended
		 */
		if (i == end && end != pdu_len - 1)
			return trace_seq_puts(s, " ..) ");
	}

	return trace_seq_puts(s, ") ");
}

static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		int ret;

		ret = trace_seq_printf(s, "%u ", t_bytes(ent));
		if (!ret)
			return 0;
		ret = blk_log_dump_pdu(s, ent);
		if (!ret)
			return 0;
		return trace_seq_printf(s, "[%s]\n", cmd);
	} else {
		if (t_sec(ent))
			return trace_seq_printf(s, "%llu + %u [%s]\n",
						t_sector(ent), t_sec(ent), cmd);
		return trace_seq_printf(s, "[%s]\n", cmd);
	}
}

static int blk_log_with_error(struct trace_seq *s,
			      const struct trace_entry *ent)
{
	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		int ret;

		ret = blk_log_dump_pdu(s, ent);
		if (ret)
			return trace_seq_printf(s, "[%d]\n", t_error(ent));
		return 0;
	} else {
		if (t_sec(ent))
			return trace_seq_printf(s, "%llu + %u [%d]\n",
						t_sector(ent),
						t_sec(ent), t_error(ent));
		return trace_seq_printf(s, "%llu [%d]\n",
					t_sector(ent), t_error(ent));
	}
}

static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
{
	struct blk_io_trace_remap r = { .device_from = 0, };

	get_pdu_remap(ent, &r);
	return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
				t_sector(ent), t_sec(ent),
				MAJOR(r.device_from), MINOR(r.device_from),
				(unsigned long long)r.sector_from);
}

static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	return trace_seq_printf(s, "[%s]\n", cmd);
}

static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	return trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent));
}

static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
				get_pdu_int(ent), cmd);
}

static int blk_log_msg(struct trace_seq *s, const struct trace_entry *ent)
{
	int ret;

	const struct blk_io_trace *t = te_blk_io_trace(ent);

	ret = trace_seq_putmem(s, t + 1, t->pdu_len);
	if (ret)
		return trace_seq_putc(s, '\n');
	return ret;
}

/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
	blk_tracer_enabled = true;
}

static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
	blk_tracer_enabled = false;
}

static void blk_tracer_reset(struct trace_array *tr)
{
	blk_tracer_stop(tr);
}

static const struct {
	const char *act[2];
	int (*print)(struct trace_seq *s, const struct trace_entry *ent);
} what2act[] = {
	[__BLK_TA_QUEUE]	= {{ "Q",  "queue" },	     blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{ "M",  "backmerge" },    blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{ "F",  "frontmerge" },   blk_log_generic },
	[__BLK_TA_GETRQ]	= {{ "G",  "getrq" },	     blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{ "S",  "sleeprq" },	     blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{ "R",  "requeue" },	     blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{ "D",  "issue" },	     blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{ "C",  "complete" },     blk_log_with_error },
	[__BLK_TA_PLUG]		= {{ "P",  "plug" },	     blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{ "U",  "unplug_io" },    blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{ "I",  "insert" },	     blk_log_generic },
	[__BLK_TA_SPLIT]	= {{ "X",  "split" },	     blk_log_split },
	[__BLK_TA_BOUNCE]	= {{ "B",  "bounce" },	     blk_log_generic },
	[__BLK_TA_REMAP]	= {{ "A",  "remap" },	     blk_log_remap },
};

static enum print_line_t print_one_line(struct trace_iterator *iter,
					bool classic)
{
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t;
	u16 what;
	int ret;
	bool long_act;
	blk_log_action_t *log_action;

	t	   = te_blk_io_trace(iter->ent);
	what	   = t->action & ((1 << BLK_TC_SHIFT) - 1);
	long_act   = !!(trace_flags & TRACE_ITER_VERBOSE);
	log_action = classic ? &blk_log_action_classic : &blk_log_action;

	if (t->action == BLK_TN_MESSAGE) {
		ret = log_action(iter, long_act ? "message" : "m");
		if (ret)
			ret = blk_log_msg(s, iter->ent);
		goto out;
	}

	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		ret = trace_seq_printf(s, "Unknown action %x\n", what);
	else {
		ret = log_action(iter, what2act[what].act[long_act]);
		if (ret)
			ret = what2act[what].print(s, iter->ent);
	}
out:
	return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
					       int flags,
					       struct trace_event *event)
{
	return print_one_line(iter, false);
}

static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	= BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time	= iter->ts,
	};

	if (!trace_seq_putmem(s, &old, offset))
		return 0;
	return trace_seq_putmem(s, &t->sector,
				sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
			     struct trace_event *event)
{
	return blk_trace_synthesize_old_trace(iter) ?
			TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}

static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set)
{
	/* don't output context-info for blk_classic output */
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
			trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
		else
			trace_flags |= TRACE_ITER_CONTEXT_INFO;
	}
	return 0;
}

static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
	.set_flag	= blk_tracer_set_flag,
};

static struct trace_event_functions trace_blk_event_funcs = {
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.funcs		= &trace_blk_event_funcs,
};

static int __init init_blk_tracer(void)
{
	if (!register_ftrace_event(&trace_blk_event)) {
		pr_warning("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warning("Warning: could not register the block tracer\n");
		unregister_ftrace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);

static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (bt == NULL)
		return -EINVAL;

	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();

	blk_trace_free(bt);
	return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *old_bt, *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt != NULL) {
		(void)xchg(&q->blk_trace, old_bt);
		ret = -EBUSY;
		goto free_bt;
	}

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();
	return 0;

free_bt:
	blk_trace_free(bt);
	return ret;
}

/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};
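/*
 * Usage sketch (hedged: "sdb" is an example device name): the group
 * above appears under /sys/block/<dev>/trace/, so tracing can be driven
 * without the ioctl interface, e.g.
 *
 *   echo 1            > /sys/block/sdb/trace/enable
 *   echo "read,write" > /sys/block/sdb/trace/act_mask
 *   echo 0            > /sys/block/sdb/trace/enable
 *
 * Tracing enabled this way sets up no relay channel (see
 * blk_trace_setup_queue() above); the events are consumed through the
 * ftrace "blk" tracer rather than through the BLKTRACESETUP debugfs
 * files.
 */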
static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ,		"read"		},
	{ BLK_TC_WRITE,		"write"		},
	{ BLK_TC_BARRIER,	"barrier"	},
	{ BLK_TC_SYNC,		"sync"		},
	{ BLK_TC_QUEUE,		"queue"		},
	{ BLK_TC_REQUEUE,	"requeue"	},
	{ BLK_TC_ISSUE,		"issue"		},
	{ BLK_TC_COMPLETE,	"complete"	},
	{ BLK_TC_FS,		"fs"		},
	{ BLK_TC_PC,		"pc"		},
	{ BLK_TC_AHEAD,		"ahead"		},
	{ BLK_TC_META,		"meta"		},
	{ BLK_TC_DISCARD,	"discard"	},
	{ BLK_TC_DRV_DATA,	"drv_data"	},
};

static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}

		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}
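/*
 * Example: blk_trace_str2mask("read,Sync") returns
 * BLK_TC_READ | BLK_TC_SYNC - tokens are comma-separated and matched
 * case-insensitively - while an unknown token such as "bogus" makes the
 * whole parse fail with -EINVAL.
 */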
static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				    (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}

static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
	if (bdev->bd_disk == NULL)
		return NULL;

	return bdev_get_queue(bdev);
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct request_queue *q;
	struct block_device *bdev;
	ssize_t ret = -ENXIO;

	lock_kernel();
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!q->blk_trace);
		goto out_unlock_bdev;
	}

	if (q->blk_trace == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", q->blk_trace->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
	return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (sscanf(buf, "%llx", &value) != 1) {
			/* Assume it is a list of trace category names */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else if (sscanf(buf, "%llu", &value) != 1)
		goto out;

	ret = -ENXIO;

	lock_kernel();
	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		if (value)
			ret = blk_trace_setup_queue(q, bdev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (q->blk_trace == NULL)
		ret = blk_trace_setup_queue(q, bdev);

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			q->blk_trace->act_mask = value;
		else if (attr == &dev_attr_pid)
			q->blk_trace->pid = value;
		else if (attr == &dev_attr_start_lba)
			q->blk_trace->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			q->blk_trace->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
out:
	return ret ? ret : count;
}

int blk_trace_init_sysfs(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}

void blk_trace_remove_sysfs(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
}

#endif /* CONFIG_BLK_DEV_IO_TRACE */

#ifdef CONFIG_EVENT_TRACING

void blk_dump_cmd(char *buf, struct request *rq)
{
	int i, end;
	int len = rq->cmd_len;
	unsigned char *cmd = rq->cmd;

	if (!blk_pc_request(rq)) {
		buf[0] = '\0';
		return;
	}

	for (end = len - 1; end >= 0; end--)
		if (cmd[end])
			break;
	end++;

	for (i = 0; i < len; i++) {
		buf += sprintf(buf, "%s%02x", i == 0 ? "" : " ", cmd[i]);
		if (i == end && end != len - 1) {
			sprintf(buf, " ..");
			break;
		}
	}
}

void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
{
	int i = 0;

	if (rw & WRITE)
		rwbs[i++] = 'W';
	else if (rw & 1 << BIO_RW_DISCARD)
		rwbs[i++] = 'D';
	else if (bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (rw & 1 << BIO_RW_AHEAD)
		rwbs[i++] = 'A';
	if (rw & 1 << BIO_RW_BARRIER)
		rwbs[i++] = 'B';
	if (rw & 1 << BIO_RW_SYNCIO)
		rwbs[i++] = 'S';
	if (rw & 1 << BIO_RW_META)
		rwbs[i++] = 'M';

	rwbs[i] = '\0';
}
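/*
 * Unlike fill_rwbs() above, which decodes the BLK_TC_* bits of a
 * recorded action, blk_fill_rwbs() decodes live bio/request flag bits
 * (WRITE and the BIO_RW_* flags).  Example: a request with WRITE and
 * BIO_RW_SYNCIO set produces "WS"; a plain readahead read produces "RA".
 */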
void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
{
	int rw = rq->cmd_flags & 0x03;
	int bytes;

	if (blk_discard_rq(rq))
		rw |= (1 << BIO_RW_DISCARD);

	bytes = blk_rq_bytes(rq);

	blk_fill_rwbs(rwbs, rw, bytes);
}

#endif /* CONFIG_EVENT_TRACING */