blktrace.c

/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/time.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "trace_output.h"

#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1

static struct tracer_opt blk_tracer_opts[] = {
	/* Disable the minimalistic output by default */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};

/* Global reference count of probes */
static atomic_t blk_probes_ref = ATOMIC_INIT(0);

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len)
{
	struct blk_io_trace *t;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	int pc = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;

	if (blk_tracer) {
		buffer = blk_tr->buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len);
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
record_it:
		t->device = bt->dev;
		t->action = action;
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len;
		memcpy((void *) t + sizeof(*t), data, len);

		if (blk_tracer)
			trace_buffer_unlock_commit(buffer, event, 0, pc);
	}
}

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
	tsk->btrace_seq = blktrace_seq;
	trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}

static void trace_note_time(struct blk_trace *bt)
{
	struct timespec now;
	unsigned long flags;
	u32 words[2];

	getnstimeofday(&now);
	words[0] = now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
	local_irq_restore(flags);
}

void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	/*
	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
	 * message to the trace.
	 */
	if (!(bt->act_mask & BLK_TC_NOTIFY))
		return;

	local_irq_save(flags);
	buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);
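
/*
 * Illustrative use (a sketch, not part of this file): drivers normally
 * reach __trace_note_message() through the blk_add_trace_msg() wrapper
 * from <linux/blktrace_api.h>, e.g.
 *
 *	blk_add_trace_msg(q, "%s: queue stopped", dev_name);
 *
 * The message then shows up in blkparse output as an 'N' (notify) event,
 * provided BLK_TC_NOTIFY is set in the action mask.
 */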
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}

/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

#define BLK_TC_RAHEAD		BLK_TC_AHEAD

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
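
/*
 * Worked example of the relocation MASK_TC_BIT performs: for SYNC it
 * takes the REQ_SYNC bit out of @rw and shifts it so that it lands on
 * BLK_TC_SYNC within the upper 16 "category" bits of the action word,
 * i.e. (rw & REQ_SYNC) becomes BLK_TC_ACT(BLK_TC_SYNC). The shift
 * distance is built purely from constants, so it folds away at compile
 * time.
 */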
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    int rw, u32 what, int error, int pdu_len,
			    void *pdu_data)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[rw & WRITE];
	what |= MASK_TC_BIT(rw, SYNC);
	what |= MASK_TC_BIT(rw, RAHEAD);
	what |= MASK_TC_BIT(rw, META);
	what |= MASK_TC_BIT(rw, DISCARD);

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(buffer, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}
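
/*
 * Note on the two sinks above: when the "blk" ftrace plugin is active,
 * the record goes straight into the ftrace ring buffer (the goto
 * record_it path); otherwise it is written to the per-cpu relay channel
 * that blktrace(8) reads from debugfs. Only the relay path fills in the
 * magic, per-cpu sequence number and timestamp here - the ring buffer
 * does its own timestamping.
 */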
static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);

static void blk_trace_free(struct blk_trace *bt)
{
	debugfs_remove(bt->msg_file);
	debugfs_remove(bt->dropped_file);
	relay_close(bt->rchan);
	debugfs_remove(bt->dir);
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}

static void blk_trace_cleanup(struct blk_trace *bt)
{
	blk_trace_free(bt);
	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();
}

int blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (!bt)
		return -EINVAL;

	if (bt->trace_state != Blktrace_running)
		blk_trace_cleanup(bt);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static int blk_dropped_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		blk_dropped_open,
	.read =		blk_dropped_read,
	.llseek =	default_llseek,
};

static int blk_msg_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count >= BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = kmalloc(count + 1, GFP_KERNEL);
	if (msg == NULL)
		return -ENOMEM;

	if (copy_from_user(msg, buffer, count)) {
		kfree(msg);
		return -EFAULT;
	}

	msg[count] = '\0';
	bt = filp->private_data;
	__trace_note_message(bt, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner =	THIS_MODULE,
	.open =		blk_msg_open,
	.write =	blk_msg_write,
	.llseek =	noop_llseek,
};
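
/*
 * Example (a sketch): user space can inject a marker into a running trace
 * by writing to the per-device debugfs msg file created below, e.g.
 *
 *	echo "benchmark phase 2" > /sys/kernel/debug/block/sda/msg
 *
 * assuming debugfs is mounted at /sys/kernel/debug. The marker then
 * appears inline with the I/O events in blkparse output.
 */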
/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   int mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};

static void blk_trace_setup_lba(struct blk_trace *bt,
				struct block_device *bdev)
{
	struct hd_struct *part = NULL;

	if (bdev)
		part = bdev->bd_part;

	if (part) {
		bt->start_lba = part->start_sect;
		bt->end_lba = part->start_sect + part->nr_sects;
	} else {
		bt->start_lba = 0;
		bt->end_lba = -1ULL;
	}
}

/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		       struct block_device *bdev,
		       struct blk_user_trace_setup *buts)
{
	struct blk_trace *old_bt, *bt = NULL;
	struct dentry *dir = NULL;
	int ret, i;

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	for (i = 0; i < strlen(buts->name); i++)
		if (buts->name[i] == '/')
			buts->name[i] = '_';

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	ret = -ENOMEM;
	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	ret = -ENOENT;

	mutex_lock(&blk_tree_mutex);
	if (!blk_tree_root) {
		blk_tree_root = debugfs_create_dir("block", NULL);
		if (!blk_tree_root) {
			mutex_unlock(&blk_tree_mutex);
			goto err;
		}
	}
	mutex_unlock(&blk_tree_mutex);

	dir = debugfs_create_dir(buts->name, blk_tree_root);
	if (!dir)
		goto err;

	bt->dir = dir;
	bt->dev = dev;
	atomic_set(&bt->dropped, 0);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);
	if (!bt->dropped_file)
		goto err;

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
	if (!bt->msg_file)
		goto err;

	bt->rchan = relay_open("trace", dir, buts->buf_size,
			       buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	blk_trace_setup_lba(bt, bdev);

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	ret = -EBUSY;
	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt) {
		(void) xchg(&q->blk_trace, old_bt);
		goto err;
	}

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();

	return 0;
err:
	blk_trace_free(bt);
	return ret;
}

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    struct block_device *bdev,
		    char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts))) {
		blk_trace_remove(q);
		return -EFAULT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);
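
/*
 * Illustrative user-space sequence (a sketch of what blktrace(8) does;
 * the buffer sizes are arbitrary):
 *
 *	struct blk_user_trace_setup buts = {
 *		.buf_size = 512 * 1024,
 *		.buf_nr   = 4,
 *	};
 *	int fd = open("/dev/sda", O_RDONLY);
 *
 *	ioctl(fd, BLKTRACESETUP, &buts);
 *	ioctl(fd, BLKTRACESTART);
 *	... read the per-cpu relay files /sys/kernel/debug/block/sda/trace<N> ...
 *	ioctl(fd, BLKTRACESTOP);
 *	ioctl(fd, BLKTRACETEARDOWN);
 *
 * A zero act_mask is widened to "everything" by do_blk_trace_setup()
 * above.
 */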
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
static int compat_blk_trace_setup(struct request_queue *q, char *name,
				  dev_t dev, struct block_device *bdev,
				  char __user *arg)
{
	struct blk_user_trace_setup buts;
	struct compat_blk_user_trace_setup cbuts;
	int ret;

	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
		return -EFAULT;

	buts = (struct blk_user_trace_setup) {
		.act_mask = cbuts.act_mask,
		.buf_size = cbuts.buf_size,
		.buf_nr = cbuts.buf_nr,
		.start_lba = cbuts.start_lba,
		.end_lba = cbuts.end_lba,
		.pid = cbuts.pid,
	};
	memcpy(&buts.name, &cbuts.name, 32);

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts.name, 32)) {
		blk_trace_remove(q);
		return -EFAULT;
	}

	return 0;
}
#endif

int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;
	struct blk_trace *bt = q->blk_trace;

	if (bt == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running.
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;

			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&bdev->bd_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
	case BLKTRACESETUP32:
		bdevname(bdev, b);
		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#endif
	case BLKTRACESTART:
		start = 1;
		/* fall through */
	case BLKTRACESTOP:
		ret = blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&bdev->bd_mutex);
	return ret;
}

/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	if (q->blk_trace) {
		blk_trace_startstop(q, 0);
		blk_trace_remove(q);
	}
}

/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:		queue the io is for
 * @rq:		the source request
 * @what:	the action
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
			     u32 what)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		what |= BLK_TC_ACT(BLK_TC_PC);
		__blk_add_trace(bt, 0, blk_rq_bytes(rq), rq->cmd_flags,
				what, rq->errors, rq->cmd_len, rq->cmd);
	} else {
		what |= BLK_TC_ACT(BLK_TC_FS);
		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
				rq->cmd_flags, what, rq->errors, 0, NULL);
	}
}
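
/*
 * SCSI pass-through requests (REQ_TYPE_BLOCK_PC) carry no meaningful
 * sector, so blk_add_trace_rq() above logs the raw command bytes as the
 * PDU instead; filesystem requests log position and size with no PDU.
 */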
static void blk_add_trace_rq_abort(void *ignore,
				   struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_ABORT);
}

static void blk_add_trace_rq_insert(void *ignore,
				    struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
}

static void blk_add_trace_rq_issue(void *ignore,
				   struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
}

static void blk_add_trace_rq_requeue(void *ignore,
				     struct request_queue *q,
				     struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
}

static void blk_add_trace_rq_complete(void *ignore,
				      struct request_queue *q,
				      struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 * @error:	error, if any
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what, int error)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	if (!error && !bio_flagged(bio, BIO_UPTODATE))
		error = EIO;

	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
			error, 0, NULL);
}

static void blk_add_trace_bio_bounce(void *ignore,
				     struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
}

static void blk_add_trace_bio_complete(void *ignore,
				       struct request_queue *q, struct bio *bio,
				       int error)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
}

static void blk_add_trace_bio_backmerge(void *ignore,
					struct request_queue *q,
					struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
}

static void blk_add_trace_bio_frontmerge(void *ignore,
					 struct request_queue *q,
					 struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
}

static void blk_add_trace_bio_queue(void *ignore,
				    struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
}

static void blk_add_trace_getrq(void *ignore,
				struct request_queue *q,
				struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
	}
}

static void blk_add_trace_sleeprq(void *ignore,
				  struct request_queue *q,
				  struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
					0, 0, NULL);
	}
}

static void blk_add_trace_plug(void *ignore, struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt)
		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
}

static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
				sizeof(rpdu), &rpdu);
	}
}

static void blk_add_trace_unplug_timer(void *ignore, struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
				sizeof(rpdu), &rpdu);
	}
}

static void blk_add_trace_split(void *ignore,
				struct request_queue *q, struct bio *bio,
				unsigned int pdu)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
				BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
				sizeof(rpdu), &rpdu);
	}
}

/**
 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper or raid targets sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_bio_remap(void *ignore,
				    struct request_queue *q, struct bio *bio,
				    dev_t dev, sector_t from)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
			BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE),
			sizeof(r), &r);
}

/**
 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @q:		queue the io is for
 * @rq:		the source request
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper remaps requests to other devices.
 *     Add a trace for that action.
 *
 **/
static void blk_add_trace_rq_remap(void *ignore,
				   struct request_queue *q,
				   struct request *rq, dev_t dev,
				   sector_t from)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
			rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
			sizeof(r), &r);
}

/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:		queue the io is for
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
			 struct request *rq,
			 void *data, size_t len)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
		__blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
				BLK_TA_DRV_DATA, rq->errors, len, data);
	else
		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
				BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);

static void blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_abort(blk_add_trace_rq_abort, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
	WARN_ON(ret);
	ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	WARN_ON(ret);
}

static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	unregister_trace_block_split(blk_add_trace_split, NULL);
	unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
	unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
	unregister_trace_block_plug(blk_add_trace_plug, NULL);
	unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
	unregister_trace_block_rq_abort(blk_add_trace_rq_abort, NULL);

	tracepoint_synchronize_unregister();
}
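
/*
 * tracepoint_synchronize_unregister() waits for any probe callback that
 * is still executing to finish before returning; callers such as
 * blk_trace_remove_queue() rely on this by unregistering before they
 * free their struct blk_trace.
 */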
/*
 * struct blk_io_tracer formatting routines
 */
static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
	int i = 0;
	int tc = t->action >> BLK_TC_SHIFT;

	if (t->action == BLK_TN_MESSAGE) {
		rwbs[i++] = 'N';
		goto out;
	}

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_BARRIER)
		rwbs[i++] = 'B';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';
out:
	rwbs[i] = '\0';
}
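
/*
 * Examples of the resulting string: a synchronous write fills in "WS",
 * a metadata read "RM", a discard "D", and a notify message plain "N".
 */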
static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent) + 1;
}

static inline u32 t_action(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent)
{
	const __u64 *val = pdu_start(ent);
	return be64_to_cpu(*val);
}

static void get_pdu_remap(const struct trace_entry *ent,
			  struct blk_io_trace_remap *r)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent);
	__u64 sector_from = __r->sector_from;

	r->device_from = be32_to_cpu(__r->device_from);
	r->device_to   = be32_to_cpu(__r->device_to);
	r->sector_from = be64_to_cpu(sector_from);
}
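
/*
 * PDU payloads are emitted big-endian (note the cpu_to_be32()/cpu_to_be64()
 * calls at the trace sites above), so a binary trace taken on one machine
 * can be decoded on a host of either byte order; the two helpers above
 * undo that conversion for in-kernel formatting.
 */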
typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act);

static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
{
	char rwbs[6];
	unsigned long long ts  = iter->ts;
	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
	unsigned secs	       = (unsigned long)ts;
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);

	return trace_seq_printf(&iter->seq,
				"%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
				MAJOR(t->device), MINOR(t->device), iter->cpu,
				secs, nsec_rem, iter->ent->pid, act, rwbs);
}

static int blk_log_action(struct trace_iterator *iter, const char *act)
{
	char rwbs[6];
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	return trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
				MAJOR(t->device), MINOR(t->device), act, rwbs);
}

static int blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent)
{
	const unsigned char *pdu_buf;
	int pdu_len;
	int i, end, ret;

	pdu_buf = pdu_start(ent);
	pdu_len = te_blk_io_trace(ent)->pdu_len;

	if (!pdu_len)
		return 1;

	/* find the last zero that needs to be printed */
	for (end = pdu_len - 1; end >= 0; end--)
		if (pdu_buf[end])
			break;
	end++;

	if (!trace_seq_putc(s, '('))
		return 0;

	for (i = 0; i < pdu_len; i++) {
		ret = trace_seq_printf(s, "%s%02x",
				       i == 0 ? "" : " ", pdu_buf[i]);
		if (!ret)
			return ret;

		/*
		 * stop when the rest is just zeroes and indicate so
		 * with a ".." appended
		 */
		if (i == end && end != pdu_len - 1)
			return trace_seq_puts(s, " ..) ");
	}

	return trace_seq_puts(s, ") ");
}

static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		int ret;

		ret = trace_seq_printf(s, "%u ", t_bytes(ent));
		if (!ret)
			return 0;
		ret = blk_log_dump_pdu(s, ent);
		if (!ret)
			return 0;
		return trace_seq_printf(s, "[%s]\n", cmd);
	} else {
		if (t_sec(ent))
			return trace_seq_printf(s, "%llu + %u [%s]\n",
						t_sector(ent), t_sec(ent), cmd);
		return trace_seq_printf(s, "[%s]\n", cmd);
	}
}

static int blk_log_with_error(struct trace_seq *s,
			      const struct trace_entry *ent)
{
	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		int ret;

		ret = blk_log_dump_pdu(s, ent);
		if (ret)
			return trace_seq_printf(s, "[%d]\n", t_error(ent));
		return 0;
	} else {
		if (t_sec(ent))
			return trace_seq_printf(s, "%llu + %u [%d]\n",
						t_sector(ent),
						t_sec(ent), t_error(ent));
		return trace_seq_printf(s, "%llu [%d]\n",
					t_sector(ent), t_error(ent));
	}
}

static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
{
	struct blk_io_trace_remap r = { .device_from = 0, };

	get_pdu_remap(ent, &r);
	return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
				t_sector(ent), t_sec(ent),
				MAJOR(r.device_from), MINOR(r.device_from),
				(unsigned long long)r.sector_from);
}

static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	return trace_seq_printf(s, "[%s]\n", cmd);
}

static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	return trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent));
}

static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
				get_pdu_int(ent), cmd);
}

static int blk_log_msg(struct trace_seq *s, const struct trace_entry *ent)
{
	int ret;
	const struct blk_io_trace *t = te_blk_io_trace(ent);

	ret = trace_seq_putmem(s, t + 1, t->pdu_len);
	if (ret)
		return trace_seq_putc(s, '\n');
	return ret;
}

/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
	blk_tracer_enabled = true;
}

static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
	blk_tracer_enabled = false;
}

static void blk_tracer_reset(struct trace_array *tr)
{
	blk_tracer_stop(tr);
}

static const struct {
	const char *act[2];
	int	   (*print)(struct trace_seq *s, const struct trace_entry *ent);
} what2act[] = {
	[__BLK_TA_QUEUE]	= {{ "Q",  "queue" },	    blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{ "M",  "backmerge" },   blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{ "F",  "frontmerge" },  blk_log_generic },
	[__BLK_TA_GETRQ]	= {{ "G",  "getrq" },	    blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{ "S",  "sleeprq" },	    blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{ "R",  "requeue" },	    blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{ "D",  "issue" },	    blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{ "C",  "complete" },    blk_log_with_error },
	[__BLK_TA_PLUG]		= {{ "P",  "plug" },	    blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{ "U",  "unplug_io" },   blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{ "I",  "insert" },	    blk_log_generic },
	[__BLK_TA_SPLIT]	= {{ "X",  "split" },	    blk_log_split },
	[__BLK_TA_BOUNCE]	= {{ "B",  "bounce" },	    blk_log_generic },
	[__BLK_TA_REMAP]	= {{ "A",  "remap" },	    blk_log_remap },
};
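
/*
 * act[0] is the terse, blkparse-style token and act[1] the long name
 * used when the "verbose" trace option is set; e.g. a queue event
 * prints as "Q" or as "queue".
 */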
static enum print_line_t print_one_line(struct trace_iterator *iter,
					bool classic)
{
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t;
	u16 what;
	int ret;
	bool long_act;
	blk_log_action_t *log_action;

	t	   = te_blk_io_trace(iter->ent);
	what	   = t->action & ((1 << BLK_TC_SHIFT) - 1);
	long_act   = !!(trace_flags & TRACE_ITER_VERBOSE);
	log_action = classic ? &blk_log_action_classic : &blk_log_action;

	if (t->action == BLK_TN_MESSAGE) {
		ret = log_action(iter, long_act ? "message" : "m");
		if (ret)
			ret = blk_log_msg(s, iter->ent);
		goto out;
	}

	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		ret = trace_seq_printf(s, "Unknown action %x\n", what);
	else {
		ret = log_action(iter, what2act[what].act[long_act]);
		if (ret)
			ret = what2act[what].print(s, iter->ent);
	}
out:
	return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
					       int flags,
					       struct trace_event *event)
{
	return print_one_line(iter, false);
}

static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time     = iter->ts,
	};

	if (!trace_seq_putmem(s, &old, offset))
		return 0;
	return trace_seq_putmem(s, &t->sector,
				sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
			     struct trace_event *event)
{
	return blk_trace_synthesize_old_trace(iter) ?
			TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}

static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set)
{
	/* don't output context-info for blk_classic output */
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
			trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
		else
			trace_flags |= TRACE_ITER_CONTEXT_INFO;
	}
	return 0;
}

static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
	.set_flag	= blk_tracer_set_flag,
};

static struct trace_event_functions trace_blk_event_funcs = {
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.funcs		= &trace_blk_event_funcs,
};

static int __init init_blk_tracer(void)
{
	if (!register_ftrace_event(&trace_blk_event)) {
		pr_warning("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warning("Warning: could not register the block tracer\n");
		unregister_ftrace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);

static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (bt == NULL)
		return -EINVAL;

	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();

	blk_trace_free(bt);
	return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *old_bt, *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt != NULL) {
		(void)xchg(&q->blk_trace, old_bt);
		ret = -EBUSY;
		goto free_bt;
	}

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();
	return 0;

free_bt:
	blk_trace_free(bt);
	return ret;
}

/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};
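
/*
 * Example (a sketch): together with the "blk" ftrace plugin, tracing a
 * single partition can be driven entirely from this sysfs group:
 *
 *	echo blk > /sys/kernel/debug/tracing/current_tracer
 *	echo 1 > /sys/block/sda/sda1/trace/enable
 *	cat /sys/kernel/debug/tracing/trace_pipe
 *
 * act_mask, pid, start_lba and end_lba can then be written to narrow
 * what gets logged.
 */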
static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ,		"read"		},
	{ BLK_TC_WRITE,		"write"		},
	{ BLK_TC_BARRIER,	"barrier"	},
	{ BLK_TC_SYNC,		"sync"		},
	{ BLK_TC_QUEUE,		"queue"		},
	{ BLK_TC_REQUEUE,	"requeue"	},
	{ BLK_TC_ISSUE,		"issue"		},
	{ BLK_TC_COMPLETE,	"complete"	},
	{ BLK_TC_FS,		"fs"		},
	{ BLK_TC_PC,		"pc"		},
	{ BLK_TC_AHEAD,		"ahead"		},
	{ BLK_TC_META,		"meta"		},
	{ BLK_TC_DISCARD,	"discard"	},
	{ BLK_TC_DRV_DATA,	"drv_data"	},
};

static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}
		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}
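
/*
 * Example: "read,sync" parses to BLK_TC_READ | BLK_TC_SYNC; any token
 * not found in mask_maps[] makes the whole string fail with -EINVAL.
 */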
static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				    (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}

static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
	if (bdev->bd_disk == NULL)
		return NULL;

	return bdev_get_queue(bdev);
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct request_queue *q;
	struct block_device *bdev;
	ssize_t ret = -ENXIO;

	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!q->blk_trace);
		goto out_unlock_bdev;
	}

	if (q->blk_trace == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", q->blk_trace->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out:
	return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (sscanf(buf, "%llx", &value) != 1) {
			/* Assume it is a list of trace category names */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else if (sscanf(buf, "%llu", &value) != 1)
		goto out;

	ret = -ENXIO;

	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		if (value)
			ret = blk_trace_setup_queue(q, bdev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (q->blk_trace == NULL)
		ret = blk_trace_setup_queue(q, bdev);

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			q->blk_trace->act_mask = value;
		else if (attr == &dev_attr_pid)
			q->blk_trace->pid = value;
		else if (attr == &dev_attr_start_lba)
			q->blk_trace->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			q->blk_trace->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out:
	return ret ? ret : count;
}

int blk_trace_init_sysfs(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}

void blk_trace_remove_sysfs(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
}

#endif /* CONFIG_BLK_DEV_IO_TRACE */

#ifdef CONFIG_EVENT_TRACING

/*
 * Format the command bytes of a SCSI pass-through request as hex,
 * eliding a run of trailing zero bytes with " ..".
 */
void blk_dump_cmd(char *buf, struct request *rq)
{
	int i, end;
	int len = rq->cmd_len;
	unsigned char *cmd = rq->cmd;

	if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
		buf[0] = '\0';
		return;
	}

	for (end = len - 1; end >= 0; end--)
		if (cmd[end])
			break;
	end++;

	for (i = 0; i < len; i++) {
		buf += sprintf(buf, "%s%02x", i == 0 ? "" : " ", cmd[i]);
		if (i == end && end != len - 1) {
			sprintf(buf, " ..");
			break;
		}
	}
}

void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
{
	int i = 0;

	if (rw & WRITE)
		rwbs[i++] = 'W';
	else if (rw & REQ_DISCARD)
		rwbs[i++] = 'D';
	else if (bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (rw & REQ_RAHEAD)
		rwbs[i++] = 'A';
	if (rw & REQ_SYNC)
		rwbs[i++] = 'S';
	if (rw & REQ_META)
		rwbs[i++] = 'M';
	if (rw & REQ_SECURE)
		rwbs[i++] = 'E';

	rwbs[i] = '\0';
}

#endif /* CONFIG_EVENT_TRACING */