blktrace.c

/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/time.h>
#include <trace/block.h>
#include <linux/uaccess.h>
#include "trace_output.h"

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1

static struct tracer_opt blk_tracer_opts[] = {
	/* Default: disable the minimalistic output */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};

/* Global reference count of probes */
static atomic_t blk_probes_ref = ATOMIC_INIT(0);

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len)
{
	struct blk_io_trace *t;

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len);
	if (t) {
		const int cpu = smp_processor_id();

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
		t->device = bt->dev;
		t->action = action;
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len;
		memcpy((void *) t + sizeof(*t), data, len);
	}
}
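
/*
 * Layout of the record reserved above, as user space (blktrace/blkparse)
 * sees it in the relay sub-buffer: a struct blk_io_trace header followed
 * immediately by the payload bytes:
 *
 *	[ struct blk_io_trace ][ payload: len bytes ]
 *
 * The reader uses t->pdu_len to step from one record header to the next.
 */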

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
	tsk->btrace_seq = blktrace_seq;
	trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}

static void trace_note_time(struct blk_trace *bt)
{
	struct timespec now;
	unsigned long flags;
	u32 words[2];

	getnstimeofday(&now);
	words[0] = now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
	local_irq_restore(flags);
}

void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	if (blk_tr) {
		va_start(args, fmt);
		ftrace_vprintk(fmt, args);
		va_end(args);
		return;
	}

	if (!bt->msg_data)
		return;

	local_irq_save(flags);
	buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);
	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);

static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector < bt->start_lba || sector > bt->end_lba)
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}
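
/*
 * Note the inverted return convention above: non-zero means "filter this
 * event out". For example, with act_mask set to BLK_TC_READ only, a pure
 * write event has ((act_mask << BLK_TC_SHIFT) & what) == 0 and is dropped.
 */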

/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \
	(ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name))
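
/*
 * Worked example: MASK_TC_BIT(rw, BARRIER) isolates bit BIO_RW_BARRIER of
 * 'rw' and shifts it left so that, when set, it lands exactly on bit
 * (ilog2(BLK_TC_BARRIER) + BLK_TC_SHIFT), i.e. the BLK_TC_BARRIER position
 * in the action-category field of 'what', with no per-flag branch.
 */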

/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    int rw, u32 what, int error, int pdu_len,
			    void *pdu_data)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;

	if (unlikely(bt->trace_state != Blktrace_running ||
		     !blk_tracer_enabled))
		return;

	what |= ddir_act[rw & WRITE];
	what |= MASK_TC_BIT(rw, BARRIER);
	what |= MASK_TC_BIT(rw, SYNCIO);
	what |= MASK_TC_BIT(rw, AHEAD);
	what |= MASK_TC_BIT(rw, META);
	what |= MASK_TC_BIT(rw, DISCARD);

	pid = tsk->pid;
	if (unlikely(act_log_check(bt, what, sector, pid)))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tr) {
		tracing_record_cmdline(current);

		pc = preempt_count();
		event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
						  sizeof(*t) + pdu_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

		if (blk_tr) {
			trace_buffer_unlock_commit(blk_tr, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}
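
/*
 * Two sinks share the tail of the function above: when the "blk" ftrace
 * plugin is active (blk_tr is set), the record is written into the ftrace
 * ring buffer via the record_it label; otherwise it goes into the per-cpu
 * relay sub-buffer opened by do_blk_trace_setup().
 */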

static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);

static void blk_trace_cleanup(struct blk_trace *bt)
{
	debugfs_remove(bt->msg_file);
	debugfs_remove(bt->dropped_file);
	relay_close(bt->rchan);
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();
}

int blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (!bt)
		return -EINVAL;

	if (bt->trace_state == Blktrace_setup ||
	    bt->trace_state == Blktrace_stopped)
		blk_trace_cleanup(bt);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static int blk_dropped_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		blk_dropped_open,
	.read =		blk_dropped_read,
};

static int blk_msg_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count > BLK_TN_MAX_MSG)
		return -EINVAL;

	/*
	 * Allocate one extra byte and NUL-terminate: the buffer is passed
	 * to a "%s" format below and user space need not terminate it.
	 */
	msg = kmalloc(count + 1, GFP_KERNEL);
	if (msg == NULL)
		return -ENOMEM;

	if (copy_from_user(msg, buffer, count)) {
		kfree(msg);
		return -EFAULT;
	}
	msg[count] = '\0';

	bt = filp->private_data;
	__trace_note_message(bt, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner =	THIS_MODULE,
	.open =		blk_msg_open,
	.write =	blk_msg_write,
};
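
/*
 * Usage sketch (hedged): with debugfs mounted in the usual place, user
 * space can inject a marker into a running trace, e.g.
 *
 *	# echo "fsck start" > /sys/kernel/debug/block/sda/msg
 *
 * The "sda" component is illustrative; the directory name comes from
 * buts->name in do_blk_trace_setup() below.
 */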

/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	debugfs_remove(dentry);

	/*
	 * This will fail for all but the last file, but that is ok: what we
	 * care about is the top level buts->name directory going away, when
	 * the last trace file is gone. Then we don't have to rmdir() that
	 * manually on trace stop, so it nicely solves the issue with
	 * force killing of running traces.
	 */
	debugfs_remove(parent);
	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   int mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};

/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		       struct blk_user_trace_setup *buts)
{
	struct blk_trace *old_bt, *bt = NULL;
	struct dentry *dir = NULL;
	int ret, i;

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	for (i = 0; i < strlen(buts->name); i++)
		if (buts->name[i] == '/')
			buts->name[i] = '_';

	ret = -ENOMEM;
	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		goto err;

	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	ret = -ENOENT;

	if (!blk_tree_root) {
		blk_tree_root = debugfs_create_dir("block", NULL);
		if (!blk_tree_root)
			goto err;
	}

	dir = debugfs_create_dir(buts->name, blk_tree_root);
	if (!dir)
		goto err;

	bt->dir = dir;
	bt->dev = dev;
	atomic_set(&bt->dropped, 0);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);
	if (!bt->dropped_file)
		goto err;

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
	if (!bt->msg_file)
		goto err;

	bt->rchan = relay_open("trace", dir, buts->buf_size,
			       buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	bt->start_lba = buts->start_lba;
	bt->end_lba = buts->end_lba;
	if (!bt->end_lba)
		bt->end_lba = -1ULL;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	ret = -EBUSY;
	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt) {
		(void) xchg(&q->blk_trace, old_bt);
		goto err;
	}

	if (atomic_add_return(1, &blk_probes_ref) == 1)
		blk_register_tracepoints();

	return 0;
err:
	if (bt) {
		if (bt->msg_file)
			debugfs_remove(bt->msg_file);
		if (bt->dropped_file)
			debugfs_remove(bt->dropped_file);
		free_percpu(bt->sequence);
		free_percpu(bt->msg_data);
		if (bt->rchan)
			relay_close(bt->rchan);
		kfree(bt);
	}
	return ret;
}
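
/*
 * On success, the debugfs tree looks roughly like this (assuming the
 * default mount point; the per-cpu file names come from relay):
 *
 *	/sys/kernel/debug/block/<name>/dropped	lost-event count (0444)
 *	/sys/kernel/debug/block/<name>/msg	message injection (0222)
 *	/sys/kernel/debug/block/<name>/trace0..	one relay file per cpu
 *
 * The per-cpu trace files are created through
 * blk_create_buf_file_callback() when relay_open() runs.
 */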

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts)))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);
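
/*
 * User-space sketch (hedged; the values are illustrative), driving this
 * through the ioctl path below on an open block device fd:
 *
 *	struct blk_user_trace_setup buts = { 0 };
 *
 *	buts.buf_size = 512 * 1024;	// bytes per relay sub-buffer
 *	buts.buf_nr = 4;		// sub-buffers per cpu
 *	ioctl(fd, BLKTRACESETUP, &buts);
 *	ioctl(fd, BLKTRACESTART);
 */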

int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;
	struct blk_trace *bt = q->blk_trace;

	if (bt == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;
			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&bdev->bd_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = blk_trace_setup(q, b, bdev->bd_dev, arg);
		break;
	case BLKTRACESTART:
		start = 1;
		/* fall through: START and STOP share blk_trace_startstop() */
	case BLKTRACESTOP:
		ret = blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&bdev->bd_mutex);
	return ret;
}

/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	if (q->blk_trace) {
		blk_trace_startstop(q, 0);
		blk_trace_remove(q);
	}
}

/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:		queue the io is for
 * @rq:		the source request
 * @what:	the action
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
			     u32 what)
{
	struct blk_trace *bt = q->blk_trace;
	int rw = rq->cmd_flags & 0x03;

	if (likely(!bt))
		return;

	if (blk_discard_rq(rq))
		rw |= (1 << BIO_RW_DISCARD);

	if (blk_pc_request(rq)) {
		what |= BLK_TC_ACT(BLK_TC_PC);
		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
				sizeof(rq->cmd), rq->cmd);
	} else {
		what |= BLK_TC_ACT(BLK_TC_FS);
		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
				rw, what, rq->errors, 0, NULL);
	}
}

static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_ABORT);
}

static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
}

static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
}

static void blk_add_trace_rq_requeue(struct request_queue *q,
				     struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
}

static void blk_add_trace_rq_complete(struct request_queue *q,
				      struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
			!bio_flagged(bio, BIO_UPTODATE), 0, NULL);
}

static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
}

static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
}

static void blk_add_trace_bio_backmerge(struct request_queue *q,
					struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
}

static void blk_add_trace_bio_frontmerge(struct request_queue *q,
					 struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
}

static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
}

static void blk_add_trace_getrq(struct request_queue *q,
				struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
	}
}

static void blk_add_trace_sleeprq(struct request_queue *q,
				  struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
					0, 0, NULL);
	}
}

static void blk_add_trace_plug(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt)
		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
}

static void blk_add_trace_unplug_io(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
				sizeof(rpdu), &rpdu);
	}
}

static void blk_add_trace_unplug_timer(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
				sizeof(rpdu), &rpdu);
	}
}

static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
				unsigned int pdu)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
				BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
				sizeof(rpdu), &rpdu);
	}
}

/**
 * blk_add_trace_remap - Add a trace for a remap operation
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 * @to:		target sector
 *
 * Description:
 *     Device mapper or raid targets sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
				dev_t dev, sector_t from, sector_t to)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device = cpu_to_be32(dev);
	r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
	r.sector = cpu_to_be64(to);

	__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP,
			!bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
}

/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:		queue the io is for
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
			 struct request *rq,
			 void *data, size_t len)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	if (blk_pc_request(rq))
		__blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
				rq->errors, len, data);
	else
		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
				0, BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
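
/*
 * Driver-side sketch (hedged; the names are illustrative, not from this
 * file): a completion path could attach its own status words to the trace
 * stream like so:
 *
 *	u32 words[2] = { cmd->status, cmd->resid };
 *
 *	blk_add_driver_data(cmd->request->q, cmd->request, words,
 *			    sizeof(words));
 */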

static void blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_abort(blk_add_trace_rq_abort);
	WARN_ON(ret);
	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete);
	WARN_ON(ret);
	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq);
	WARN_ON(ret);
	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug);
	WARN_ON(ret);
	ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer);
	WARN_ON(ret);
	ret = register_trace_block_unplug_io(blk_add_trace_unplug_io);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split);
	WARN_ON(ret);
	ret = register_trace_block_remap(blk_add_trace_remap);
	WARN_ON(ret);
}

static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_remap(blk_add_trace_remap);
	unregister_trace_block_split(blk_add_trace_split);
	unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
	unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer);
	unregister_trace_block_plug(blk_add_trace_plug);
	unregister_trace_block_sleeprq(blk_add_trace_sleeprq);
	unregister_trace_block_getrq(blk_add_trace_getrq);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete);
	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert);
	unregister_trace_block_rq_abort(blk_add_trace_rq_abort);

	tracepoint_synchronize_unregister();
}

/*
 * struct blk_io_tracer formatting routines
 */
static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
	int i = 0;
	int tc = t->action >> BLK_TC_SHIFT;

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_BARRIER)
		rwbs[i++] = 'B';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';

	rwbs[i] = '\0';
}
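
/*
 * Example: a sync write with the barrier bit set renders as "WBS"; an
 * event that moved no data and is neither a write nor a discard renders
 * as "N" (none).
 */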

static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent) + 1;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent)
{
	const __u64 *val = pdu_start(ent);
	return be64_to_cpu(*val);
}

static void get_pdu_remap(const struct trace_entry *ent,
			  struct blk_io_trace_remap *r)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent);
	__u64 sector = __r->sector;

	r->device = be32_to_cpu(__r->device);
	r->device_from = be32_to_cpu(__r->device_from);
	r->sector = be64_to_cpu(sector);
}

static int blk_log_action_iter(struct trace_iterator *iter, const char *act)
{
	char rwbs[6];
	unsigned long long ts  = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(ts, USEC_PER_SEC);
	unsigned secs	       = (unsigned long)ts;
	const struct trace_entry *ent = iter->ent;
	const struct blk_io_trace *t = (const struct blk_io_trace *)ent;

	fill_rwbs(rwbs, t);

	return trace_seq_printf(&iter->seq,
				"%3d,%-3d %2d %5d.%06lu %5u %2s %3s ",
				MAJOR(t->device), MINOR(t->device), iter->cpu,
				secs, usec_rem, ent->pid, act, rwbs);
}

static int blk_log_action_seq(struct trace_seq *s, const struct blk_io_trace *t,
			      const char *act)
{
	char rwbs[6];

	fill_rwbs(rwbs, t);
	return trace_seq_printf(s, "%3d,%-3d %2s %3s ",
				MAJOR(t->device), MINOR(t->device), act, rwbs);
}

static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	if (t_sec(ent))
		return trace_seq_printf(s, "%llu + %u [%s]\n",
					t_sector(ent), t_sec(ent), cmd);
	return trace_seq_printf(s, "[%s]\n", cmd);
}

static int blk_log_with_error(struct trace_seq *s,
			      const struct trace_entry *ent)
{
	if (t_sec(ent))
		return trace_seq_printf(s, "%llu + %u [%d]\n", t_sector(ent),
					t_sec(ent), t_error(ent));
	return trace_seq_printf(s, "%llu [%d]\n", t_sector(ent), t_error(ent));
}

static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
{
	struct blk_io_trace_remap r = { .device = 0, };

	get_pdu_remap(ent, &r);
	return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
				t_sector(ent),
				t_sec(ent), MAJOR(r.device), MINOR(r.device),
				(unsigned long long)r.sector);
}

static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	return trace_seq_printf(s, "[%s]\n", cmd);
}

static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	return trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent));
}

static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
				get_pdu_int(ent), cmd);
}

/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
	if (atomic_add_return(1, &blk_probes_ref) == 1)
		blk_register_tracepoints();
	trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
}

static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	blk_tracer_enabled = true;
	return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
	trace_flags |= TRACE_ITER_CONTEXT_INFO;
	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();
}

static void blk_tracer_reset(struct trace_array *tr)
{
	if (!atomic_read(&blk_probes_ref))
		return;

	blk_tracer_enabled = false;
	blk_tracer_stop(tr);
}

static const struct {
	const char *act[2];
	int	   (*print)(struct trace_seq *s, const struct trace_entry *ent);
} what2act[] = {
	[__BLK_TA_QUEUE]	= {{  "Q", "queue" },	   blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{  "M", "backmerge" },  blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{  "F", "frontmerge" }, blk_log_generic },
	[__BLK_TA_GETRQ]	= {{  "G", "getrq" },	   blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{  "S", "sleeprq" },	   blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{  "R", "requeue" },	   blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{  "D", "issue" },	   blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{  "C", "complete" },   blk_log_with_error },
	[__BLK_TA_PLUG]		= {{  "P", "plug" },	   blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{  "U", "unplug_io" },  blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{  "I", "insert" },	   blk_log_generic },
	[__BLK_TA_SPLIT]	= {{  "X", "split" },	   blk_log_split },
	[__BLK_TA_BOUNCE]	= {{  "B", "bounce" },	   blk_log_generic },
	[__BLK_TA_REMAP]	= {{  "A", "remap" },	   blk_log_remap },
};
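
/*
 * The action code in the low bits of t->action indexes this table; the
 * short form ("Q") is printed by default, the long form ("queue") when
 * TRACE_ITER_VERBOSE is set. A classic-mode line then looks roughly like
 * this (illustrative):
 *
 *	  8,0    3     0.000000   4711  Q   R 1024 + 8 [dd]
 */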

static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
					       int flags)
{
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const u16 what = t->action & ((1 << BLK_TC_SHIFT) - 1);
	int ret;

	if (!trace_print_context(iter))
		return TRACE_TYPE_PARTIAL_LINE;

	/* >= (not >): what == ARRAY_SIZE would index one past the table */
	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		ret = trace_seq_printf(s, "Bad pc action %x\n", what);
	else {
		const bool long_act = !!(trace_flags & TRACE_ITER_VERBOSE);
		ret = blk_log_action_seq(s, t, what2act[what].act[long_act]);
		if (ret)
			ret = what2act[what].print(s, iter->ent);
	}

	return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time     = ns2usecs(iter->ts),
	};

	if (!trace_seq_putmem(s, &old, offset))
		return 0;
	return trace_seq_putmem(s, &t->sector,
				sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags)
{
	return blk_trace_synthesize_old_trace(iter) ?
			TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	const struct blk_io_trace *t;
	u16 what;
	int ret;

	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	t = (const struct blk_io_trace *)iter->ent;
	what = t->action & ((1 << BLK_TC_SHIFT) - 1);

	/* same off-by-one fix as in blk_trace_event_print() above */
	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		ret = trace_seq_printf(&iter->seq, "Bad pc action %x\n", what);
	else {
		const bool long_act = !!(trace_flags & TRACE_ITER_VERBOSE);
		ret = blk_log_action_iter(iter, what2act[what].act[long_act]);
		if (ret)
			ret = what2act[what].print(&iter->seq, iter->ent);
	}

	return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};

static int __init init_blk_tracer(void)
{
	if (!register_ftrace_event(&trace_blk_event)) {
		pr_warning("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warning("Warning: could not register the block tracer\n");
		unregister_ftrace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);

static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (bt == NULL)
		return -EINVAL;

	kfree(bt);
	return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q, dev_t dev)
{
	struct blk_trace *old_bt, *bt = NULL;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->dev = dev;
	bt->act_mask = (u16)-1;
	bt->end_lba = -1ULL;
	bt->trace_state = Blktrace_running;

	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt != NULL) {
		(void)xchg(&q->blk_trace, old_bt);
		kfree(bt);
		return -EBUSY;
	}

	return 0;
}

/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};
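
/*
 * Sysfs usage sketch (hedged; paths are illustrative). Since the group is
 * named "trace", the attributes show up in a trace/ subdirectory of each
 * block device or partition:
 *
 *	# echo read,write > /sys/block/sda/sda1/trace/act_mask
 *	# echo 1          > /sys/block/sda/sda1/trace/enable
 */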

static int blk_str2act_mask(const char *str)
{
	int mask = 0;
	char *copy = kstrdup(str, GFP_KERNEL), *s;

	if (copy == NULL)
		return -ENOMEM;

	s = strstrip(copy);

	while (1) {
		char *sep = strchr(s, ',');

		if (sep != NULL)
			*sep = '\0';

		if (strcasecmp(s, "barrier") == 0)
			mask |= BLK_TC_BARRIER;
		else if (strcasecmp(s, "complete") == 0)
			mask |= BLK_TC_COMPLETE;
		else if (strcasecmp(s, "fs") == 0)
			mask |= BLK_TC_FS;
		else if (strcasecmp(s, "issue") == 0)
			mask |= BLK_TC_ISSUE;
		else if (strcasecmp(s, "pc") == 0)
			mask |= BLK_TC_PC;
		else if (strcasecmp(s, "queue") == 0)
			mask |= BLK_TC_QUEUE;
		else if (strcasecmp(s, "read") == 0)
			mask |= BLK_TC_READ;
		else if (strcasecmp(s, "requeue") == 0)
			mask |= BLK_TC_REQUEUE;
		else if (strcasecmp(s, "sync") == 0)
			mask |= BLK_TC_SYNC;
		else if (strcasecmp(s, "write") == 0)
			mask |= BLK_TC_WRITE;

		if (sep == NULL)
			break;

		s = sep + 1;
	}
	kfree(copy);

	return mask;
}
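
/*
 * Example: blk_str2act_mask("read,write") yields BLK_TC_READ | BLK_TC_WRITE.
 * Unrecognized names are silently ignored, and a failed kstrdup() is
 * reported as -ENOMEM.
 */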

static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
	if (bdev->bd_disk == NULL)
		return NULL;

	return bdev_get_queue(bdev);
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct request_queue *q;
	struct block_device *bdev;
	ssize_t ret = -ENXIO;

	lock_kernel();
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!q->blk_trace);
		goto out_unlock_bdev;
	}

	if (q->blk_trace == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = sprintf(buf, "%#x\n", q->blk_trace->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", q->blk_trace->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
	return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	u64 value;
	ssize_t ret = -ENXIO;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (sscanf(buf, "%llx", &value) != 1) {
			/*
			 * Assume it is a list of trace category names. Use
			 * a signed temporary for the error check: 'value'
			 * is u64, so a negative error code assigned to it
			 * would never compare < 0.
			 */
			int mask = blk_str2act_mask(buf);

			if (mask < 0)
				goto out;
			value = mask;
		}
	} else if (sscanf(buf, "%llu", &value) != 1)
		goto out;

	lock_kernel();
	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);
	if (attr == &dev_attr_enable) {
		if (value)
			ret = blk_trace_setup_queue(q, bdev->bd_dev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (q->blk_trace == NULL)
		ret = blk_trace_setup_queue(q, bdev->bd_dev);

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			q->blk_trace->act_mask = value;
		else if (attr == &dev_attr_pid)
			q->blk_trace->pid = value;
		else if (attr == &dev_attr_start_lba)
			q->blk_trace->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			q->blk_trace->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
out:
	return ret ? ret : count;
}