/* fs/xfs/linux-2.6/xfs_trace.h — XFS tracepoint definitions */
/*
 * Copyright (c) 2009, Christoph Hellwig
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
  18. #undef TRACE_SYSTEM
  19. #define TRACE_SYSTEM xfs
  20. #if !defined(_TRACE_XFS_H) || defined(TRACE_HEADER_MULTI_READ)
  21. #define _TRACE_XFS_H
  22. #include <linux/tracepoint.h>
  23. struct xfs_agf;
  24. struct xfs_alloc_arg;
  25. struct xfs_attr_list_context;
  26. struct xfs_buf_log_item;
  27. struct xfs_da_args;
  28. struct xfs_da_node_entry;
  29. struct xfs_dquot;
  30. struct xlog_ticket;
  31. struct log;
  32. #define DEFINE_ATTR_LIST_EVENT(name) \
  33. TRACE_EVENT(name, \
  34. TP_PROTO(struct xfs_attr_list_context *ctx), \
  35. TP_ARGS(ctx), \
  36. TP_STRUCT__entry( \
  37. __field(dev_t, dev) \
  38. __field(xfs_ino_t, ino) \
  39. __field(u32, hashval) \
  40. __field(u32, blkno) \
  41. __field(u32, offset) \
  42. __field(void *, alist) \
  43. __field(int, bufsize) \
  44. __field(int, count) \
  45. __field(int, firstu) \
  46. __field(int, dupcnt) \
  47. __field(int, flags) \
  48. ), \
  49. TP_fast_assign( \
  50. __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev; \
  51. __entry->ino = ctx->dp->i_ino; \
  52. __entry->hashval = ctx->cursor->hashval; \
  53. __entry->blkno = ctx->cursor->blkno; \
  54. __entry->offset = ctx->cursor->offset; \
  55. __entry->alist = ctx->alist; \
  56. __entry->bufsize = ctx->bufsize; \
  57. __entry->count = ctx->count; \
  58. __entry->firstu = ctx->firstu; \
  59. __entry->flags = ctx->flags; \
  60. ), \
  61. TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u " \
  62. "alist 0x%p size %u count %u firstu %u flags %d %s", \
  63. MAJOR(__entry->dev), MINOR(__entry->dev), \
  64. __entry->ino, \
  65. __entry->hashval, \
  66. __entry->blkno, \
  67. __entry->offset, \
  68. __entry->dupcnt, \
  69. __entry->alist, \
  70. __entry->bufsize, \
  71. __entry->count, \
  72. __entry->firstu, \
  73. __entry->flags, \
  74. __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS) \
  75. ) \
  76. )
  77. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf);
  78. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf_all);
  79. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf);
  80. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf_end);
  81. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_full);
  82. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_add);
  83. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_wrong_blk);
  84. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_notfound);
  85. TRACE_EVENT(xfs_attr_list_node_descend,
  86. TP_PROTO(struct xfs_attr_list_context *ctx,
  87. struct xfs_da_node_entry *btree),
  88. TP_ARGS(ctx, btree),
  89. TP_STRUCT__entry(
  90. __field(dev_t, dev)
  91. __field(xfs_ino_t, ino)
  92. __field(u32, hashval)
  93. __field(u32, blkno)
  94. __field(u32, offset)
  95. __field(void *, alist)
  96. __field(int, bufsize)
  97. __field(int, count)
  98. __field(int, firstu)
  99. __field(int, dupcnt)
  100. __field(int, flags)
  101. __field(u32, bt_hashval)
  102. __field(u32, bt_before)
  103. ),
  104. TP_fast_assign(
  105. __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
  106. __entry->ino = ctx->dp->i_ino;
  107. __entry->hashval = ctx->cursor->hashval;
  108. __entry->blkno = ctx->cursor->blkno;
  109. __entry->offset = ctx->cursor->offset;
  110. __entry->alist = ctx->alist;
  111. __entry->bufsize = ctx->bufsize;
  112. __entry->count = ctx->count;
  113. __entry->firstu = ctx->firstu;
  114. __entry->flags = ctx->flags;
  115. __entry->bt_hashval = be32_to_cpu(btree->hashval);
  116. __entry->bt_before = be32_to_cpu(btree->before);
  117. ),
  118. TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
  119. "alist 0x%p size %u count %u firstu %u flags %d %s "
  120. "node hashval %u, node before %u",
  121. MAJOR(__entry->dev), MINOR(__entry->dev),
  122. __entry->ino,
  123. __entry->hashval,
  124. __entry->blkno,
  125. __entry->offset,
  126. __entry->dupcnt,
  127. __entry->alist,
  128. __entry->bufsize,
  129. __entry->count,
  130. __entry->firstu,
  131. __entry->flags,
  132. __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS),
  133. __entry->bt_hashval,
  134. __entry->bt_before)
  135. );
  136. TRACE_EVENT(xfs_iext_insert,
  137. TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx,
  138. struct xfs_bmbt_irec *r, int state, unsigned long caller_ip),
  139. TP_ARGS(ip, idx, r, state, caller_ip),
  140. TP_STRUCT__entry(
  141. __field(dev_t, dev)
  142. __field(xfs_ino_t, ino)
  143. __field(xfs_extnum_t, idx)
  144. __field(xfs_fileoff_t, startoff)
  145. __field(xfs_fsblock_t, startblock)
  146. __field(xfs_filblks_t, blockcount)
  147. __field(xfs_exntst_t, state)
  148. __field(int, bmap_state)
  149. __field(unsigned long, caller_ip)
  150. ),
  151. TP_fast_assign(
  152. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  153. __entry->ino = ip->i_ino;
  154. __entry->idx = idx;
  155. __entry->startoff = r->br_startoff;
  156. __entry->startblock = r->br_startblock;
  157. __entry->blockcount = r->br_blockcount;
  158. __entry->state = r->br_state;
  159. __entry->bmap_state = state;
  160. __entry->caller_ip = caller_ip;
  161. ),
  162. TP_printk("dev %d:%d ino 0x%llx state %s idx %ld "
  163. "offset %lld block %s count %lld flag %d caller %pf",
  164. MAJOR(__entry->dev), MINOR(__entry->dev),
  165. __entry->ino,
  166. __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
  167. (long)__entry->idx,
  168. __entry->startoff,
  169. xfs_fmtfsblock(__entry->startblock),
  170. __entry->blockcount,
  171. __entry->state,
  172. (char *)__entry->caller_ip)
  173. );
  174. #define DEFINE_BMAP_EVENT(name) \
  175. TRACE_EVENT(name, \
  176. TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state, \
  177. unsigned long caller_ip), \
  178. TP_ARGS(ip, idx, state, caller_ip), \
  179. TP_STRUCT__entry( \
  180. __field(dev_t, dev) \
  181. __field(xfs_ino_t, ino) \
  182. __field(xfs_extnum_t, idx) \
  183. __field(xfs_fileoff_t, startoff) \
  184. __field(xfs_fsblock_t, startblock) \
  185. __field(xfs_filblks_t, blockcount) \
  186. __field(xfs_exntst_t, state) \
  187. __field(int, bmap_state) \
  188. __field(unsigned long, caller_ip) \
  189. ), \
  190. TP_fast_assign( \
  191. struct xfs_ifork *ifp = (state & BMAP_ATTRFORK) ? \
  192. ip->i_afp : &ip->i_df; \
  193. struct xfs_bmbt_irec r; \
  194. \
  195. xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &r); \
  196. __entry->dev = VFS_I(ip)->i_sb->s_dev; \
  197. __entry->ino = ip->i_ino; \
  198. __entry->idx = idx; \
  199. __entry->startoff = r.br_startoff; \
  200. __entry->startblock = r.br_startblock; \
  201. __entry->blockcount = r.br_blockcount; \
  202. __entry->state = r.br_state; \
  203. __entry->bmap_state = state; \
  204. __entry->caller_ip = caller_ip; \
  205. ), \
  206. TP_printk("dev %d:%d ino 0x%llx state %s idx %ld " \
  207. "offset %lld block %s count %lld flag %d caller %pf", \
  208. MAJOR(__entry->dev), MINOR(__entry->dev), \
  209. __entry->ino, \
  210. __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS), \
  211. (long)__entry->idx, \
  212. __entry->startoff, \
  213. xfs_fmtfsblock(__entry->startblock), \
  214. __entry->blockcount, \
  215. __entry->state, \
  216. (char *)__entry->caller_ip) \
  217. )
  218. DEFINE_BMAP_EVENT(xfs_iext_remove);
  219. DEFINE_BMAP_EVENT(xfs_bmap_pre_update);
  220. DEFINE_BMAP_EVENT(xfs_bmap_post_update);
  221. DEFINE_BMAP_EVENT(xfs_extlist);
  222. #define DEFINE_BUF_EVENT(tname) \
  223. TRACE_EVENT(tname, \
  224. TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \
  225. TP_ARGS(bp, caller_ip), \
  226. TP_STRUCT__entry( \
  227. __field(dev_t, dev) \
  228. __field(xfs_daddr_t, bno) \
  229. __field(size_t, buffer_length) \
  230. __field(int, hold) \
  231. __field(int, pincount) \
  232. __field(unsigned, lockval) \
  233. __field(unsigned, flags) \
  234. __field(unsigned long, caller_ip) \
  235. ), \
  236. TP_fast_assign( \
  237. __entry->dev = bp->b_target->bt_dev; \
  238. __entry->bno = bp->b_bn; \
  239. __entry->buffer_length = bp->b_buffer_length; \
  240. __entry->hold = atomic_read(&bp->b_hold); \
  241. __entry->pincount = atomic_read(&bp->b_pin_count); \
  242. __entry->lockval = xfs_buf_lock_value(bp); \
  243. __entry->flags = bp->b_flags; \
  244. __entry->caller_ip = caller_ip; \
  245. ), \
  246. TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \
  247. "lock %d flags %s caller %pf", \
  248. MAJOR(__entry->dev), MINOR(__entry->dev), \
  249. (unsigned long long)__entry->bno, \
  250. __entry->buffer_length, \
  251. __entry->hold, \
  252. __entry->pincount, \
  253. __entry->lockval, \
  254. __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), \
  255. (void *)__entry->caller_ip) \
  256. )
  257. DEFINE_BUF_EVENT(xfs_buf_init);
  258. DEFINE_BUF_EVENT(xfs_buf_free);
  259. DEFINE_BUF_EVENT(xfs_buf_hold);
  260. DEFINE_BUF_EVENT(xfs_buf_rele);
  261. DEFINE_BUF_EVENT(xfs_buf_pin);
  262. DEFINE_BUF_EVENT(xfs_buf_unpin);
  263. DEFINE_BUF_EVENT(xfs_buf_iodone);
  264. DEFINE_BUF_EVENT(xfs_buf_iorequest);
  265. DEFINE_BUF_EVENT(xfs_buf_bawrite);
  266. DEFINE_BUF_EVENT(xfs_buf_bdwrite);
  267. DEFINE_BUF_EVENT(xfs_buf_lock);
  268. DEFINE_BUF_EVENT(xfs_buf_lock_done);
  269. DEFINE_BUF_EVENT(xfs_buf_cond_lock);
  270. DEFINE_BUF_EVENT(xfs_buf_unlock);
  271. DEFINE_BUF_EVENT(xfs_buf_ordered_retry);
  272. DEFINE_BUF_EVENT(xfs_buf_iowait);
  273. DEFINE_BUF_EVENT(xfs_buf_iowait_done);
  274. DEFINE_BUF_EVENT(xfs_buf_delwri_queue);
  275. DEFINE_BUF_EVENT(xfs_buf_delwri_dequeue);
  276. DEFINE_BUF_EVENT(xfs_buf_delwri_split);
  277. DEFINE_BUF_EVENT(xfs_buf_get_noaddr);
  278. DEFINE_BUF_EVENT(xfs_bdstrat_shut);
  279. DEFINE_BUF_EVENT(xfs_buf_item_relse);
  280. DEFINE_BUF_EVENT(xfs_buf_item_iodone);
  281. DEFINE_BUF_EVENT(xfs_buf_item_iodone_async);
  282. DEFINE_BUF_EVENT(xfs_buf_error_relse);
  283. DEFINE_BUF_EVENT(xfs_trans_read_buf_io);
  284. DEFINE_BUF_EVENT(xfs_trans_read_buf_shut);
  285. /* not really buffer traces, but the buf provides useful information */
  286. DEFINE_BUF_EVENT(xfs_btree_corrupt);
  287. DEFINE_BUF_EVENT(xfs_da_btree_corrupt);
  288. DEFINE_BUF_EVENT(xfs_reset_dqcounts);
  289. DEFINE_BUF_EVENT(xfs_inode_item_push);
  290. /* pass flags explicitly */
  291. #define DEFINE_BUF_FLAGS_EVENT(tname) \
  292. TRACE_EVENT(tname, \
  293. TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \
  294. TP_ARGS(bp, flags, caller_ip), \
  295. TP_STRUCT__entry( \
  296. __field(dev_t, dev) \
  297. __field(xfs_daddr_t, bno) \
  298. __field(size_t, buffer_length) \
  299. __field(int, hold) \
  300. __field(int, pincount) \
  301. __field(unsigned, lockval) \
  302. __field(unsigned, flags) \
  303. __field(unsigned long, caller_ip) \
  304. ), \
  305. TP_fast_assign( \
  306. __entry->dev = bp->b_target->bt_dev; \
  307. __entry->bno = bp->b_bn; \
  308. __entry->buffer_length = bp->b_buffer_length; \
  309. __entry->flags = flags; \
  310. __entry->hold = atomic_read(&bp->b_hold); \
  311. __entry->pincount = atomic_read(&bp->b_pin_count); \
  312. __entry->lockval = xfs_buf_lock_value(bp); \
  313. __entry->caller_ip = caller_ip; \
  314. ), \
  315. TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \
  316. "lock %d flags %s caller %pf", \
  317. MAJOR(__entry->dev), MINOR(__entry->dev), \
  318. (unsigned long long)__entry->bno, \
  319. __entry->buffer_length, \
  320. __entry->hold, \
  321. __entry->pincount, \
  322. __entry->lockval, \
  323. __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), \
  324. (void *)__entry->caller_ip) \
  325. )
  326. DEFINE_BUF_FLAGS_EVENT(xfs_buf_find);
  327. DEFINE_BUF_FLAGS_EVENT(xfs_buf_get);
  328. DEFINE_BUF_FLAGS_EVENT(xfs_buf_read);
  329. TRACE_EVENT(xfs_buf_ioerror,
  330. TP_PROTO(struct xfs_buf *bp, int error, unsigned long caller_ip),
  331. TP_ARGS(bp, error, caller_ip),
  332. TP_STRUCT__entry(
  333. __field(dev_t, dev)
  334. __field(xfs_daddr_t, bno)
  335. __field(size_t, buffer_length)
  336. __field(unsigned, flags)
  337. __field(int, hold)
  338. __field(int, pincount)
  339. __field(unsigned, lockval)
  340. __field(int, error)
  341. __field(unsigned long, caller_ip)
  342. ),
  343. TP_fast_assign(
  344. __entry->dev = bp->b_target->bt_dev;
  345. __entry->bno = bp->b_bn;
  346. __entry->buffer_length = bp->b_buffer_length;
  347. __entry->hold = atomic_read(&bp->b_hold);
  348. __entry->pincount = atomic_read(&bp->b_pin_count);
  349. __entry->lockval = xfs_buf_lock_value(bp);
  350. __entry->error = error;
  351. __entry->flags = bp->b_flags;
  352. __entry->caller_ip = caller_ip;
  353. ),
  354. TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
  355. "lock %d error %d flags %s caller %pf",
  356. MAJOR(__entry->dev), MINOR(__entry->dev),
  357. (unsigned long long)__entry->bno,
  358. __entry->buffer_length,
  359. __entry->hold,
  360. __entry->pincount,
  361. __entry->lockval,
  362. __entry->error,
  363. __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
  364. (void *)__entry->caller_ip)
  365. );
  366. #define DEFINE_BUF_ITEM_EVENT(tname) \
  367. TRACE_EVENT(tname, \
  368. TP_PROTO(struct xfs_buf_log_item *bip), \
  369. TP_ARGS(bip), \
  370. TP_STRUCT__entry( \
  371. __field(dev_t, dev) \
  372. __field(xfs_daddr_t, buf_bno) \
  373. __field(size_t, buf_len) \
  374. __field(int, buf_hold) \
  375. __field(int, buf_pincount) \
  376. __field(int, buf_lockval) \
  377. __field(unsigned, buf_flags) \
  378. __field(unsigned, bli_recur) \
  379. __field(int, bli_refcount) \
  380. __field(unsigned, bli_flags) \
  381. __field(void *, li_desc) \
  382. __field(unsigned, li_flags) \
  383. ), \
  384. TP_fast_assign( \
  385. __entry->dev = bip->bli_buf->b_target->bt_dev; \
  386. __entry->bli_flags = bip->bli_flags; \
  387. __entry->bli_recur = bip->bli_recur; \
  388. __entry->bli_refcount = atomic_read(&bip->bli_refcount); \
  389. __entry->buf_bno = bip->bli_buf->b_bn; \
  390. __entry->buf_len = bip->bli_buf->b_buffer_length; \
  391. __entry->buf_flags = bip->bli_buf->b_flags; \
  392. __entry->buf_hold = atomic_read(&bip->bli_buf->b_hold); \
  393. __entry->buf_pincount = \
  394. atomic_read(&bip->bli_buf->b_pin_count); \
  395. __entry->buf_lockval = xfs_buf_lock_value(bip->bli_buf); \
  396. __entry->li_desc = bip->bli_item.li_desc; \
  397. __entry->li_flags = bip->bli_item.li_flags; \
  398. ), \
  399. TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \
  400. "lock %d flags %s recur %d refcount %d bliflags %s " \
  401. "lidesc 0x%p liflags %s", \
  402. MAJOR(__entry->dev), MINOR(__entry->dev), \
  403. (unsigned long long)__entry->buf_bno, \
  404. __entry->buf_len, \
  405. __entry->buf_hold, \
  406. __entry->buf_pincount, \
  407. __entry->buf_lockval, \
  408. __print_flags(__entry->buf_flags, "|", XFS_BUF_FLAGS), \
  409. __entry->bli_recur, \
  410. __entry->bli_refcount, \
  411. __print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS), \
  412. __entry->li_desc, \
  413. __print_flags(__entry->li_flags, "|", XFS_LI_FLAGS)) \
  414. )
  415. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size);
  416. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_stale);
  417. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format);
  418. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_stale);
  419. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin);
  420. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin);
  421. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale);
  422. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_trylock);
  423. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock);
  424. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale);
  425. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed);
  426. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push);
  427. DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);
  428. DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf_recur);
  429. DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb);
  430. DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb_recur);
  431. DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf);
  432. DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf_recur);
  433. DEFINE_BUF_ITEM_EVENT(xfs_trans_log_buf);
  434. DEFINE_BUF_ITEM_EVENT(xfs_trans_brelse);
  435. DEFINE_BUF_ITEM_EVENT(xfs_trans_bjoin);
  436. DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold);
  437. DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release);
  438. DEFINE_BUF_ITEM_EVENT(xfs_trans_binval);
  439. #define DEFINE_LOCK_EVENT(name) \
  440. TRACE_EVENT(name, \
  441. TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, \
  442. unsigned long caller_ip), \
  443. TP_ARGS(ip, lock_flags, caller_ip), \
  444. TP_STRUCT__entry( \
  445. __field(dev_t, dev) \
  446. __field(xfs_ino_t, ino) \
  447. __field(int, lock_flags) \
  448. __field(unsigned long, caller_ip) \
  449. ), \
  450. TP_fast_assign( \
  451. __entry->dev = VFS_I(ip)->i_sb->s_dev; \
  452. __entry->ino = ip->i_ino; \
  453. __entry->lock_flags = lock_flags; \
  454. __entry->caller_ip = caller_ip; \
  455. ), \
  456. TP_printk("dev %d:%d ino 0x%llx flags %s caller %pf", \
  457. MAJOR(__entry->dev), MINOR(__entry->dev), \
  458. __entry->ino, \
  459. __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS), \
  460. (void *)__entry->caller_ip) \
  461. )
  462. DEFINE_LOCK_EVENT(xfs_ilock);
  463. DEFINE_LOCK_EVENT(xfs_ilock_nowait);
  464. DEFINE_LOCK_EVENT(xfs_ilock_demote);
  465. DEFINE_LOCK_EVENT(xfs_iunlock);
  466. #define DEFINE_IGET_EVENT(name) \
  467. TRACE_EVENT(name, \
  468. TP_PROTO(struct xfs_inode *ip), \
  469. TP_ARGS(ip), \
  470. TP_STRUCT__entry( \
  471. __field(dev_t, dev) \
  472. __field(xfs_ino_t, ino) \
  473. ), \
  474. TP_fast_assign( \
  475. __entry->dev = VFS_I(ip)->i_sb->s_dev; \
  476. __entry->ino = ip->i_ino; \
  477. ), \
  478. TP_printk("dev %d:%d ino 0x%llx", \
  479. MAJOR(__entry->dev), MINOR(__entry->dev), \
  480. __entry->ino) \
  481. )
  482. DEFINE_IGET_EVENT(xfs_iget_skip);
  483. DEFINE_IGET_EVENT(xfs_iget_reclaim);
  484. DEFINE_IGET_EVENT(xfs_iget_found);
  485. DEFINE_IGET_EVENT(xfs_iget_alloc);
  486. #define DEFINE_INODE_EVENT(name) \
  487. TRACE_EVENT(name, \
  488. TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \
  489. TP_ARGS(ip, caller_ip), \
  490. TP_STRUCT__entry( \
  491. __field(dev_t, dev) \
  492. __field(xfs_ino_t, ino) \
  493. __field(int, count) \
  494. __field(unsigned long, caller_ip) \
  495. ), \
  496. TP_fast_assign( \
  497. __entry->dev = VFS_I(ip)->i_sb->s_dev; \
  498. __entry->ino = ip->i_ino; \
  499. __entry->count = atomic_read(&VFS_I(ip)->i_count); \
  500. __entry->caller_ip = caller_ip; \
  501. ), \
  502. TP_printk("dev %d:%d ino 0x%llx count %d caller %pf", \
  503. MAJOR(__entry->dev), MINOR(__entry->dev), \
  504. __entry->ino, \
  505. __entry->count, \
  506. (char *)__entry->caller_ip) \
  507. )
  508. DEFINE_INODE_EVENT(xfs_ihold);
  509. DEFINE_INODE_EVENT(xfs_irele);
  510. /* the old xfs_itrace_entry tracer - to be replaced by s.th. in the VFS */
  511. DEFINE_INODE_EVENT(xfs_inode);
  512. #define xfs_itrace_entry(ip) \
  513. trace_xfs_inode(ip, _THIS_IP_)
  514. #define DEFINE_DQUOT_EVENT(tname) \
  515. TRACE_EVENT(tname, \
  516. TP_PROTO(struct xfs_dquot *dqp), \
  517. TP_ARGS(dqp), \
  518. TP_STRUCT__entry( \
  519. __field(dev_t, dev) \
  520. __field(__be32, id) \
  521. __field(unsigned, flags) \
  522. __field(unsigned, nrefs) \
  523. __field(unsigned long long, res_bcount) \
  524. __field(unsigned long long, bcount) \
  525. __field(unsigned long long, icount) \
  526. __field(unsigned long long, blk_hardlimit) \
  527. __field(unsigned long long, blk_softlimit) \
  528. __field(unsigned long long, ino_hardlimit) \
  529. __field(unsigned long long, ino_softlimit) \
  530. ), \
  531. TP_fast_assign( \
  532. __entry->dev = dqp->q_mount->m_super->s_dev; \
  533. __entry->id = dqp->q_core.d_id; \
  534. __entry->flags = dqp->dq_flags; \
  535. __entry->nrefs = dqp->q_nrefs; \
  536. __entry->res_bcount = dqp->q_res_bcount; \
  537. __entry->bcount = be64_to_cpu(dqp->q_core.d_bcount); \
  538. __entry->icount = be64_to_cpu(dqp->q_core.d_icount); \
  539. __entry->blk_hardlimit = \
  540. be64_to_cpu(dqp->q_core.d_blk_hardlimit); \
  541. __entry->blk_softlimit = \
  542. be64_to_cpu(dqp->q_core.d_blk_softlimit); \
  543. __entry->ino_hardlimit = \
  544. be64_to_cpu(dqp->q_core.d_ino_hardlimit); \
  545. __entry->ino_softlimit = \
  546. be64_to_cpu(dqp->q_core.d_ino_softlimit); \
  547. ), \
  548. TP_printk("dev %d:%d id 0x%x flags %s nrefs %u res_bc 0x%llx " \
  549. "bcnt 0x%llx [hard 0x%llx | soft 0x%llx] " \
  550. "icnt 0x%llx [hard 0x%llx | soft 0x%llx]", \
  551. MAJOR(__entry->dev), MINOR(__entry->dev), \
  552. be32_to_cpu(__entry->id), \
  553. __print_flags(__entry->flags, "|", XFS_DQ_FLAGS), \
  554. __entry->nrefs, \
  555. __entry->res_bcount, \
  556. __entry->bcount, \
  557. __entry->blk_hardlimit, \
  558. __entry->blk_softlimit, \
  559. __entry->icount, \
  560. __entry->ino_hardlimit, \
  561. __entry->ino_softlimit) \
  562. )
  563. DEFINE_DQUOT_EVENT(xfs_dqadjust);
  564. DEFINE_DQUOT_EVENT(xfs_dqshake_dirty);
  565. DEFINE_DQUOT_EVENT(xfs_dqshake_unlink);
  566. DEFINE_DQUOT_EVENT(xfs_dqreclaim_want);
  567. DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty);
  568. DEFINE_DQUOT_EVENT(xfs_dqreclaim_unlink);
  569. DEFINE_DQUOT_EVENT(xfs_dqattach_found);
  570. DEFINE_DQUOT_EVENT(xfs_dqattach_get);
  571. DEFINE_DQUOT_EVENT(xfs_dqinit);
  572. DEFINE_DQUOT_EVENT(xfs_dqreuse);
  573. DEFINE_DQUOT_EVENT(xfs_dqalloc);
  574. DEFINE_DQUOT_EVENT(xfs_dqtobp_read);
  575. DEFINE_DQUOT_EVENT(xfs_dqread);
  576. DEFINE_DQUOT_EVENT(xfs_dqread_fail);
  577. DEFINE_DQUOT_EVENT(xfs_dqlookup_found);
  578. DEFINE_DQUOT_EVENT(xfs_dqlookup_want);
  579. DEFINE_DQUOT_EVENT(xfs_dqlookup_freelist);
  580. DEFINE_DQUOT_EVENT(xfs_dqlookup_move);
  581. DEFINE_DQUOT_EVENT(xfs_dqlookup_done);
  582. DEFINE_DQUOT_EVENT(xfs_dqget_hit);
  583. DEFINE_DQUOT_EVENT(xfs_dqget_miss);
  584. DEFINE_DQUOT_EVENT(xfs_dqput);
  585. DEFINE_DQUOT_EVENT(xfs_dqput_wait);
  586. DEFINE_DQUOT_EVENT(xfs_dqput_free);
  587. DEFINE_DQUOT_EVENT(xfs_dqrele);
  588. DEFINE_DQUOT_EVENT(xfs_dqflush);
  589. DEFINE_DQUOT_EVENT(xfs_dqflush_force);
  590. DEFINE_DQUOT_EVENT(xfs_dqflush_done);
  591. /* not really iget events, but we re-use the format */
  592. DEFINE_IGET_EVENT(xfs_dquot_dqalloc);
  593. DEFINE_IGET_EVENT(xfs_dquot_dqdetach);
  594. #define DEFINE_LOGGRANT_EVENT(tname) \
  595. TRACE_EVENT(tname, \
  596. TP_PROTO(struct log *log, struct xlog_ticket *tic), \
  597. TP_ARGS(log, tic), \
  598. TP_STRUCT__entry( \
  599. __field(dev_t, dev) \
  600. __field(unsigned, trans_type) \
  601. __field(char, ocnt) \
  602. __field(char, cnt) \
  603. __field(int, curr_res) \
  604. __field(int, unit_res) \
  605. __field(unsigned int, flags) \
  606. __field(void *, reserve_headq) \
  607. __field(void *, write_headq) \
  608. __field(int, grant_reserve_cycle) \
  609. __field(int, grant_reserve_bytes) \
  610. __field(int, grant_write_cycle) \
  611. __field(int, grant_write_bytes) \
  612. __field(int, curr_cycle) \
  613. __field(int, curr_block) \
  614. __field(xfs_lsn_t, tail_lsn) \
  615. ), \
  616. TP_fast_assign( \
  617. __entry->dev = log->l_mp->m_super->s_dev; \
  618. __entry->trans_type = tic->t_trans_type; \
  619. __entry->ocnt = tic->t_ocnt; \
  620. __entry->cnt = tic->t_cnt; \
  621. __entry->curr_res = tic->t_curr_res; \
  622. __entry->unit_res = tic->t_unit_res; \
  623. __entry->flags = tic->t_flags; \
  624. __entry->reserve_headq = log->l_reserve_headq; \
  625. __entry->write_headq = log->l_write_headq; \
  626. __entry->grant_reserve_cycle = log->l_grant_reserve_cycle; \
  627. __entry->grant_reserve_bytes = log->l_grant_reserve_bytes; \
  628. __entry->grant_write_cycle = log->l_grant_write_cycle; \
  629. __entry->grant_write_bytes = log->l_grant_write_bytes; \
  630. __entry->curr_cycle = log->l_curr_cycle; \
  631. __entry->curr_block = log->l_curr_block; \
  632. __entry->tail_lsn = log->l_tail_lsn; \
  633. ), \
  634. TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u " \
  635. "t_unit_res %u t_flags %s reserve_headq 0x%p " \
  636. "write_headq 0x%p grant_reserve_cycle %d " \
  637. "grant_reserve_bytes %d grant_write_cycle %d " \
  638. "grant_write_bytes %d curr_cycle %d curr_block %d " \
  639. "tail_cycle %d tail_block %d", \
  640. MAJOR(__entry->dev), MINOR(__entry->dev), \
  641. __print_symbolic(__entry->trans_type, XFS_TRANS_TYPES), \
  642. __entry->ocnt, \
  643. __entry->cnt, \
  644. __entry->curr_res, \
  645. __entry->unit_res, \
  646. __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS), \
  647. __entry->reserve_headq, \
  648. __entry->write_headq, \
  649. __entry->grant_reserve_cycle, \
  650. __entry->grant_reserve_bytes, \
  651. __entry->grant_write_cycle, \
  652. __entry->grant_write_bytes, \
  653. __entry->curr_cycle, \
  654. __entry->curr_block, \
  655. CYCLE_LSN(__entry->tail_lsn), \
  656. BLOCK_LSN(__entry->tail_lsn) \
  657. ) \
  658. )
  659. DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm);
  660. DEFINE_LOGGRANT_EVENT(xfs_log_done_perm);
  661. DEFINE_LOGGRANT_EVENT(xfs_log_reserve);
  662. DEFINE_LOGGRANT_EVENT(xfs_log_umount_write);
  663. DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter);
  664. DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit);
  665. DEFINE_LOGGRANT_EVENT(xfs_log_grant_error);
  666. DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1);
  667. DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1);
  668. DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2);
  669. DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2);
  670. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
  671. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
  672. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
  673. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1);
  674. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1);
  675. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2);
  676. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2);
  677. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
  678. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
  679. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_sub);
  680. DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_enter);
  681. DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_exit);
  682. DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_sub);
/*
 * Event class for the file read/write paths.  Records the inode, its
 * on-disk size (i_d.di_size) and speculative new size (i_new_size),
 * plus the offset, byte count and ioflags of the request.
 */
#define DEFINE_RW_EVENT(name) \
TRACE_EVENT(name, \
	TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags), \
	TP_ARGS(ip, count, offset, flags), \
	TP_STRUCT__entry( \
		__field(dev_t, dev) \
		__field(xfs_ino_t, ino) \
		__field(xfs_fsize_t, size) \
		__field(xfs_fsize_t, new_size) \
		__field(loff_t, offset) \
		__field(size_t, count) \
		__field(int, flags) \
	), \
	TP_fast_assign( \
		__entry->dev = VFS_I(ip)->i_sb->s_dev; \
		__entry->ino = ip->i_ino; \
		__entry->size = ip->i_d.di_size; \
		__entry->new_size = ip->i_new_size; \
		__entry->offset = offset; \
		__entry->count = count; \
		__entry->flags = flags; \
	), \
	TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \
		  "offset 0x%llx count 0x%zx ioflags %s", \
		  MAJOR(__entry->dev), MINOR(__entry->dev), \
		  __entry->ino, \
		  __entry->size, \
		  __entry->new_size, \
		  __entry->offset, \
		  __entry->count, \
		  __print_flags(__entry->flags, "|", XFS_IO_FLAGS)) \
)
DEFINE_RW_EVENT(xfs_file_read);
DEFINE_RW_EVENT(xfs_file_buffered_write);
DEFINE_RW_EVENT(xfs_file_direct_write);
DEFINE_RW_EVENT(xfs_file_splice_read);
DEFINE_RW_EVENT(xfs_file_splice_write);
/*
 * Event class for per-page operations (writepage/releasepage/
 * invalidatepage).  If the page has buffer heads attached, the
 * delalloc/unmapped/unwritten buffer counts are gathered via
 * xfs_count_page_state(); otherwise they are reported as -1.
 */
#define DEFINE_PAGE_EVENT(name) \
TRACE_EVENT(name, \
	TP_PROTO(struct inode *inode, struct page *page, unsigned long off), \
	TP_ARGS(inode, page, off), \
	TP_STRUCT__entry( \
		__field(dev_t, dev) \
		__field(xfs_ino_t, ino) \
		__field(pgoff_t, pgoff) \
		__field(loff_t, size) \
		__field(unsigned long, offset) \
		__field(int, delalloc) \
		__field(int, unmapped) \
		__field(int, unwritten) \
	), \
	TP_fast_assign( \
		int delalloc = -1, unmapped = -1, unwritten = -1; \
		\
		if (page_has_buffers(page)) \
			xfs_count_page_state(page, &delalloc, \
					     &unmapped, &unwritten); \
		__entry->dev = inode->i_sb->s_dev; \
		__entry->ino = XFS_I(inode)->i_ino; \
		__entry->pgoff = page_offset(page); \
		__entry->size = i_size_read(inode); \
		__entry->offset = off; \
		__entry->delalloc = delalloc; \
		__entry->unmapped = unmapped; \
		__entry->unwritten = unwritten; \
	), \
	TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx " \
		  "delalloc %d unmapped %d unwritten %d", \
		  MAJOR(__entry->dev), MINOR(__entry->dev), \
		  __entry->ino, \
		  __entry->pgoff, \
		  __entry->size, \
		  __entry->offset, \
		  __entry->delalloc, \
		  __entry->unmapped, \
		  __entry->unwritten) \
)
DEFINE_PAGE_EVENT(xfs_writepage);
DEFINE_PAGE_EVENT(xfs_releasepage);
DEFINE_PAGE_EVENT(xfs_invalidatepage);
/*
 * Event class for block mapping (xfs_iomap) calls.  Records the request
 * (offset/count/flags) plus the resulting extent record; irec may be
 * NULL (e.g. on entry), in which case the extent fields are logged as 0.
 */
#define DEFINE_IOMAP_EVENT(name) \
TRACE_EVENT(name, \
	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \
		 int flags, struct xfs_bmbt_irec *irec), \
	TP_ARGS(ip, offset, count, flags, irec), \
	TP_STRUCT__entry( \
		__field(dev_t, dev) \
		__field(xfs_ino_t, ino) \
		__field(loff_t, size) \
		__field(loff_t, new_size) \
		__field(loff_t, offset) \
		__field(size_t, count) \
		__field(int, flags) \
		__field(xfs_fileoff_t, startoff) \
		__field(xfs_fsblock_t, startblock) \
		__field(xfs_filblks_t, blockcount) \
	), \
	TP_fast_assign( \
		__entry->dev = VFS_I(ip)->i_sb->s_dev; \
		__entry->ino = ip->i_ino; \
		__entry->size = ip->i_d.di_size; \
		__entry->new_size = ip->i_new_size; \
		__entry->offset = offset; \
		__entry->count = count; \
		__entry->flags = flags; \
		__entry->startoff = irec ? irec->br_startoff : 0; \
		__entry->startblock = irec ? irec->br_startblock : 0; \
		__entry->blockcount = irec ? irec->br_blockcount : 0; \
	), \
	TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \
		  "offset 0x%llx count %zd flags %s " \
		  "startoff 0x%llx startblock 0x%llx blockcount 0x%llx", \
		  MAJOR(__entry->dev), MINOR(__entry->dev), \
		  __entry->ino, \
		  __entry->size, \
		  __entry->new_size, \
		  __entry->offset, \
		  __entry->count, \
		  __print_flags(__entry->flags, "|", BMAPI_FLAGS), \
		  __entry->startoff, \
		  __entry->startblock, \
		  __entry->blockcount) \
)
DEFINE_IOMAP_EVENT(xfs_iomap_enter);
DEFINE_IOMAP_EVENT(xfs_iomap_found);
DEFINE_IOMAP_EVENT(xfs_iomap_alloc);
  809. #define DEFINE_SIMPLE_IO_EVENT(name) \
  810. TRACE_EVENT(name, \
  811. TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), \
  812. TP_ARGS(ip, offset, count), \
  813. TP_STRUCT__entry( \
  814. __field(dev_t, dev) \
  815. __field(xfs_ino_t, ino) \
  816. __field(loff_t, size) \
  817. __field(loff_t, new_size) \
  818. __field(loff_t, offset) \
  819. __field(size_t, count) \
  820. ), \
  821. TP_fast_assign( \
  822. __entry->dev = VFS_I(ip)->i_sb->s_dev; \
  823. __entry->ino = ip->i_ino; \
  824. __entry->size = ip->i_d.di_size; \
  825. __entry->new_size = ip->i_new_size; \
  826. __entry->offset = offset; \
  827. __entry->count = count; \
  828. ), \
  829. TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \
  830. "offset 0x%llx count %zd", \
  831. MAJOR(__entry->dev), MINOR(__entry->dev), \
  832. __entry->ino, \
  833. __entry->size, \
  834. __entry->new_size, \
  835. __entry->offset, \
  836. __entry->count) \
  837. );
  838. DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
  839. DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
/*
 * Trace the start of an inode truncate: the target size, the truncate
 * flags, and the page cache range [toss_start, toss_finish] that will
 * be tossed.
 */
TRACE_EVENT(xfs_itruncate_start,
	TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size, int flag,
		 xfs_off_t toss_start, xfs_off_t toss_finish),
	TP_ARGS(ip, new_size, flag, toss_start, toss_finish),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_fsize_t, size)
		__field(xfs_fsize_t, new_size)
		__field(xfs_off_t, toss_start)
		__field(xfs_off_t, toss_finish)
		__field(int, flag)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->size = ip->i_d.di_size;
		__entry->new_size = new_size;
		__entry->toss_start = toss_start;
		__entry->toss_finish = toss_finish;
		__entry->flag = flag;
	),
	TP_printk("dev %d:%d ino 0x%llx %s size 0x%llx new_size 0x%llx "
		  "toss start 0x%llx toss finish 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_flags(__entry->flag, "|", XFS_ITRUNC_FLAGS),
		  __entry->size,
		  __entry->new_size,
		  __entry->toss_start,
		  __entry->toss_finish)
);
/*
 * Event class bracketing xfs_itruncate_finish: logs the inode's current
 * on-disk size and the requested new size.
 */
#define DEFINE_ITRUNC_EVENT(name) \
TRACE_EVENT(name, \
	TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \
	TP_ARGS(ip, new_size), \
	TP_STRUCT__entry( \
		__field(dev_t, dev) \
		__field(xfs_ino_t, ino) \
		__field(xfs_fsize_t, size) \
		__field(xfs_fsize_t, new_size) \
	), \
	TP_fast_assign( \
		__entry->dev = VFS_I(ip)->i_sb->s_dev; \
		__entry->ino = ip->i_ino; \
		__entry->size = ip->i_d.di_size; \
		__entry->new_size = new_size; \
	), \
	TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx", \
		  MAJOR(__entry->dev), MINOR(__entry->dev), \
		  __entry->ino, \
		  __entry->size, \
		  __entry->new_size) \
)
DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_start);
DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_end);
/*
 * Trace a page cache invalidation over the byte range [start, finish]
 * of the given inode.
 */
TRACE_EVENT(xfs_pagecache_inval,
	TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish),
	TP_ARGS(ip, start, finish),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_fsize_t, size)
		__field(xfs_off_t, start)
		__field(xfs_off_t, finish)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->size = ip->i_d.di_size;
		__entry->start = start;
		__entry->finish = finish;
	),
	TP_printk("dev %d:%d ino 0x%llx size 0x%llx start 0x%llx finish 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->size,
		  __entry->start,
		  __entry->finish)
);
  920. TRACE_EVENT(xfs_bunmap,
  921. TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t bno, xfs_filblks_t len,
  922. int flags, unsigned long caller_ip),
  923. TP_ARGS(ip, bno, len, flags, caller_ip),
  924. TP_STRUCT__entry(
  925. __field(dev_t, dev)
  926. __field(xfs_ino_t, ino)
  927. __field(xfs_fsize_t, size)
  928. __field(xfs_fileoff_t, bno)
  929. __field(xfs_filblks_t, len)
  930. __field(unsigned long, caller_ip)
  931. __field(int, flags)
  932. ),
  933. TP_fast_assign(
  934. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  935. __entry->ino = ip->i_ino;
  936. __entry->size = ip->i_d.di_size;
  937. __entry->bno = bno;
  938. __entry->len = len;
  939. __entry->caller_ip = caller_ip;
  940. __entry->flags = flags;
  941. ),
  942. TP_printk("dev %d:%d ino 0x%llx size 0x%llx bno 0x%llx len 0x%llx"
  943. "flags %s caller %pf",
  944. MAJOR(__entry->dev), MINOR(__entry->dev),
  945. __entry->ino,
  946. __entry->size,
  947. __entry->bno,
  948. __entry->len,
  949. __print_flags(__entry->flags, "|", XFS_BMAPI_FLAGS),
  950. (void *)__entry->caller_ip)
  951. );
/*
 * Trace insertion of a freed extent into the per-AG busy-extent array
 * at the given slot.
 */
TRACE_EVENT(xfs_alloc_busy,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
		 xfs_extlen_t len, int slot),
	TP_ARGS(mp, agno, agbno, len, slot),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(xfs_extlen_t, len)
		__field(int, slot)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = agno;
		__entry->agbno = agbno;
		__entry->len = len;
		__entry->slot = slot;
	),
	TP_printk("dev %d:%d agno %u agbno %u len %u slot %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agbno,
		  __entry->len,
		  __entry->slot)
);
/* Symbolic names for the "found" flag printed by the busy-extent events. */
#define XFS_BUSY_STATES \
	{ 0,	"found" }, \
	{ 1,	"missing" }
/*
 * Trace removal of a busy-extent array slot; "found" reports whether the
 * slot was occupied (see XFS_BUSY_STATES).
 */
TRACE_EVENT(xfs_alloc_unbusy,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
		 int slot, int found),
	TP_ARGS(mp, agno, slot, found),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(int, slot)
		__field(int, found)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = agno;
		__entry->slot = slot;
		__entry->found = found;
	),
	TP_printk("dev %d:%d agno %u slot %d %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->slot,
		  __print_symbolic(__entry->found, XFS_BUSY_STATES))
);
/*
 * Trace a search of the busy-extent array for an overlap with the given
 * AG extent; "found" reports the result (see XFS_BUSY_STATES).
 */
TRACE_EVENT(xfs_alloc_busysearch,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
		 xfs_extlen_t len, int found),
	TP_ARGS(mp, agno, agbno, len, found),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(xfs_extlen_t, len)
		__field(int, found)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = agno;
		__entry->agbno = agbno;
		__entry->len = len;
		__entry->found = found;
	),
	TP_printk("dev %d:%d agno %u agbno %u len %u %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agbno,
		  __entry->len,
		  __print_symbolic(__entry->found, XFS_BUSY_STATES))
);
  1027. TRACE_EVENT(xfs_agf,
  1028. TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags,
  1029. unsigned long caller_ip),
  1030. TP_ARGS(mp, agf, flags, caller_ip),
  1031. TP_STRUCT__entry(
  1032. __field(dev_t, dev)
  1033. __field(xfs_agnumber_t, agno)
  1034. __field(int, flags)
  1035. __field(__u32, length)
  1036. __field(__u32, bno_root)
  1037. __field(__u32, cnt_root)
  1038. __field(__u32, bno_level)
  1039. __field(__u32, cnt_level)
  1040. __field(__u32, flfirst)
  1041. __field(__u32, fllast)
  1042. __field(__u32, flcount)
  1043. __field(__u32, freeblks)
  1044. __field(__u32, longest)
  1045. __field(unsigned long, caller_ip)
  1046. ),
  1047. TP_fast_assign(
  1048. __entry->dev = mp->m_super->s_dev;
  1049. __entry->agno = be32_to_cpu(agf->agf_seqno),
  1050. __entry->flags = flags;
  1051. __entry->length = be32_to_cpu(agf->agf_length),
  1052. __entry->bno_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
  1053. __entry->cnt_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
  1054. __entry->bno_level =
  1055. be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]),
  1056. __entry->cnt_level =
  1057. be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]),
  1058. __entry->flfirst = be32_to_cpu(agf->agf_flfirst),
  1059. __entry->fllast = be32_to_cpu(agf->agf_fllast),
  1060. __entry->flcount = be32_to_cpu(agf->agf_flcount),
  1061. __entry->freeblks = be32_to_cpu(agf->agf_freeblks),
  1062. __entry->longest = be32_to_cpu(agf->agf_longest);
  1063. __entry->caller_ip = caller_ip;
  1064. ),
  1065. TP_printk("dev %d:%d agno %u flags %s length %u roots b %u c %u "
  1066. "levels b %u c %u flfirst %u fllast %u flcount %u "
  1067. "freeblks %u longest %u caller %pf",
  1068. MAJOR(__entry->dev), MINOR(__entry->dev),
  1069. __entry->agno,
  1070. __print_flags(__entry->flags, "|", XFS_AGF_FLAGS),
  1071. __entry->length,
  1072. __entry->bno_root,
  1073. __entry->cnt_root,
  1074. __entry->bno_level,
  1075. __entry->cnt_level,
  1076. __entry->flfirst,
  1077. __entry->fllast,
  1078. __entry->flcount,
  1079. __entry->freeblks,
  1080. __entry->longest,
  1081. (void *)__entry->caller_ip)
  1082. );
/*
 * Trace freeing an extent back to an AG; haveleft/haveright indicate
 * whether the freed extent was merged with a free neighbour on either
 * side, and isfl whether it went onto the AG free list.
 */
TRACE_EVENT(xfs_free_extent,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
		 xfs_extlen_t len, bool isfl, int haveleft, int haveright),
	TP_ARGS(mp, agno, agbno, len, isfl, haveleft, haveright),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(xfs_extlen_t, len)
		__field(int, isfl)
		__field(int, haveleft)
		__field(int, haveright)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = agno;
		__entry->agbno = agbno;
		__entry->len = len;
		__entry->isfl = isfl;
		__entry->haveleft = haveleft;
		__entry->haveright = haveright;
	),
	TP_printk("dev %d:%d agno %u agbno %u len %u isfl %d %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agbno,
		  __entry->len,
		  __entry->isfl,
		  __entry->haveleft ?
			(__entry->haveright ? "both" : "left") :
			(__entry->haveright ? "right" : "none"))
);
/*
 * Event class for the extent allocator: dumps the complete
 * xfs_alloc_arg so each trace point shows the full allocation request
 * and (where applicable) its result.
 */
#define DEFINE_ALLOC_EVENT(name) \
TRACE_EVENT(name, \
	TP_PROTO(struct xfs_alloc_arg *args), \
	TP_ARGS(args), \
	TP_STRUCT__entry( \
		__field(dev_t, dev) \
		__field(xfs_agnumber_t, agno) \
		__field(xfs_agblock_t, agbno) \
		__field(xfs_extlen_t, minlen) \
		__field(xfs_extlen_t, maxlen) \
		__field(xfs_extlen_t, mod) \
		__field(xfs_extlen_t, prod) \
		__field(xfs_extlen_t, minleft) \
		__field(xfs_extlen_t, total) \
		__field(xfs_extlen_t, alignment) \
		__field(xfs_extlen_t, minalignslop) \
		__field(xfs_extlen_t, len) \
		__field(short, type) \
		__field(short, otype) \
		__field(char, wasdel) \
		__field(char, wasfromfl) \
		__field(char, isfl) \
		__field(char, userdata) \
		__field(xfs_fsblock_t, firstblock) \
	), \
	TP_fast_assign( \
		__entry->dev = args->mp->m_super->s_dev; \
		__entry->agno = args->agno; \
		__entry->agbno = args->agbno; \
		__entry->minlen = args->minlen; \
		__entry->maxlen = args->maxlen; \
		__entry->mod = args->mod; \
		__entry->prod = args->prod; \
		__entry->minleft = args->minleft; \
		__entry->total = args->total; \
		__entry->alignment = args->alignment; \
		__entry->minalignslop = args->minalignslop; \
		__entry->len = args->len; \
		__entry->type = args->type; \
		__entry->otype = args->otype; \
		__entry->wasdel = args->wasdel; \
		__entry->wasfromfl = args->wasfromfl; \
		__entry->isfl = args->isfl; \
		__entry->userdata = args->userdata; \
		__entry->firstblock = args->firstblock; \
	), \
	TP_printk("dev %d:%d agno %u agbno %u minlen %u maxlen %u mod %u " \
		  "prod %u minleft %u total %u alignment %u minalignslop %u " \
		  "len %u type %s otype %s wasdel %d wasfromfl %d isfl %d " \
		  "userdata %d firstblock 0x%llx", \
		  MAJOR(__entry->dev), MINOR(__entry->dev), \
		  __entry->agno, \
		  __entry->agbno, \
		  __entry->minlen, \
		  __entry->maxlen, \
		  __entry->mod, \
		  __entry->prod, \
		  __entry->minleft, \
		  __entry->total, \
		  __entry->alignment, \
		  __entry->minalignslop, \
		  __entry->len, \
		  __print_symbolic(__entry->type, XFS_ALLOC_TYPES), \
		  __print_symbolic(__entry->otype, XFS_ALLOC_TYPES), \
		  __entry->wasdel, \
		  __entry->wasfromfl, \
		  __entry->isfl, \
		  __entry->userdata, \
		  __entry->firstblock) \
)
DEFINE_ALLOC_EVENT(xfs_alloc_exact_done);
DEFINE_ALLOC_EVENT(xfs_alloc_exact_error);
DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft);
DEFINE_ALLOC_EVENT(xfs_alloc_near_first);
DEFINE_ALLOC_EVENT(xfs_alloc_near_greater);
DEFINE_ALLOC_EVENT(xfs_alloc_near_lesser);
DEFINE_ALLOC_EVENT(xfs_alloc_near_error);
DEFINE_ALLOC_EVENT(xfs_alloc_size_neither);
DEFINE_ALLOC_EVENT(xfs_alloc_size_noentry);
DEFINE_ALLOC_EVENT(xfs_alloc_size_nominleft);
DEFINE_ALLOC_EVENT(xfs_alloc_size_done);
DEFINE_ALLOC_EVENT(xfs_alloc_size_error);
DEFINE_ALLOC_EVENT(xfs_alloc_small_freelist);
DEFINE_ALLOC_EVENT(xfs_alloc_small_notenough);
DEFINE_ALLOC_EVENT(xfs_alloc_small_done);
DEFINE_ALLOC_EVENT(xfs_alloc_small_error);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_badargs);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_nofix);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_noagbp);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_allfailed);
/*
 * Event class for directory operations.  The entry name is copied into
 * a dynamic array sized by args->namelen (which may be 0, in which case
 * no copy is done and the name is printed as NULL via "%.*s").
 */
#define DEFINE_DIR2_TRACE(tname) \
TRACE_EVENT(tname, \
	TP_PROTO(struct xfs_da_args *args), \
	TP_ARGS(args), \
	TP_STRUCT__entry( \
		__field(dev_t, dev) \
		__field(xfs_ino_t, ino) \
		__dynamic_array(char, name, args->namelen) \
		__field(int, namelen) \
		__field(xfs_dahash_t, hashval) \
		__field(xfs_ino_t, inumber) \
		__field(int, op_flags) \
	), \
	TP_fast_assign( \
		__entry->dev = VFS_I(args->dp)->i_sb->s_dev; \
		__entry->ino = args->dp->i_ino; \
		if (args->namelen) \
			memcpy(__get_str(name), args->name, args->namelen); \
		__entry->namelen = args->namelen; \
		__entry->hashval = args->hashval; \
		__entry->inumber = args->inumber; \
		__entry->op_flags = args->op_flags; \
	), \
	TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d hashval 0x%x " \
		  "inumber 0x%llx op_flags %s", \
		  MAJOR(__entry->dev), MINOR(__entry->dev), \
		  __entry->ino, \
		  __entry->namelen, \
		  __entry->namelen ? __get_str(name) : NULL, \
		  __entry->namelen, \
		  __entry->hashval, \
		  __entry->inumber, \
		  __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS)) \
)
DEFINE_DIR2_TRACE(xfs_dir2_sf_addname);
DEFINE_DIR2_TRACE(xfs_dir2_sf_create);
DEFINE_DIR2_TRACE(xfs_dir2_sf_lookup);
DEFINE_DIR2_TRACE(xfs_dir2_sf_replace);
DEFINE_DIR2_TRACE(xfs_dir2_sf_removename);
DEFINE_DIR2_TRACE(xfs_dir2_sf_toino4);
DEFINE_DIR2_TRACE(xfs_dir2_sf_toino8);
DEFINE_DIR2_TRACE(xfs_dir2_sf_to_block);
DEFINE_DIR2_TRACE(xfs_dir2_block_addname);
DEFINE_DIR2_TRACE(xfs_dir2_block_lookup);
DEFINE_DIR2_TRACE(xfs_dir2_block_replace);
DEFINE_DIR2_TRACE(xfs_dir2_block_removename);
DEFINE_DIR2_TRACE(xfs_dir2_block_to_sf);
DEFINE_DIR2_TRACE(xfs_dir2_block_to_leaf);
DEFINE_DIR2_TRACE(xfs_dir2_leaf_addname);
DEFINE_DIR2_TRACE(xfs_dir2_leaf_lookup);
DEFINE_DIR2_TRACE(xfs_dir2_leaf_replace);
DEFINE_DIR2_TRACE(xfs_dir2_leaf_removename);
DEFINE_DIR2_TRACE(xfs_dir2_leaf_to_block);
DEFINE_DIR2_TRACE(xfs_dir2_leaf_to_node);
DEFINE_DIR2_TRACE(xfs_dir2_node_addname);
DEFINE_DIR2_TRACE(xfs_dir2_node_lookup);
DEFINE_DIR2_TRACE(xfs_dir2_node_replace);
DEFINE_DIR2_TRACE(xfs_dir2_node_removename);
DEFINE_DIR2_TRACE(xfs_dir2_node_to_leaf);
/*
 * Event class for directory space-management operations that carry an
 * index (leaf entry index or directory block number) alongside the
 * da_args operation flags.
 */
#define DEFINE_DIR2_SPACE_TRACE(tname) \
TRACE_EVENT(tname, \
	TP_PROTO(struct xfs_da_args *args, int idx), \
	TP_ARGS(args, idx), \
	TP_STRUCT__entry( \
		__field(dev_t, dev) \
		__field(xfs_ino_t, ino) \
		__field(int, op_flags) \
		__field(int, idx) \
	), \
	TP_fast_assign( \
		__entry->dev = VFS_I(args->dp)->i_sb->s_dev; \
		__entry->ino = args->dp->i_ino; \
		__entry->op_flags = args->op_flags; \
		__entry->idx = idx; \
	), \
	TP_printk("dev %d:%d ino 0x%llx op_flags %s index %d", \
		  MAJOR(__entry->dev), MINOR(__entry->dev), \
		  __entry->ino, \
		  __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS), \
		  __entry->idx) \
)
DEFINE_DIR2_SPACE_TRACE(xfs_dir2_leafn_add);
DEFINE_DIR2_SPACE_TRACE(xfs_dir2_leafn_remove);
DEFINE_DIR2_SPACE_TRACE(xfs_dir2_grow_inode);
DEFINE_DIR2_SPACE_TRACE(xfs_dir2_shrink_inode);
/*
 * Trace moving "count" leaf entries from src_idx to dst_idx during a
 * directory leaf rebalance.
 */
TRACE_EVENT(xfs_dir2_leafn_moveents,
	TP_PROTO(struct xfs_da_args *args, int src_idx, int dst_idx, int count),
	TP_ARGS(args, src_idx, dst_idx, count),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(int, op_flags)
		__field(int, src_idx)
		__field(int, dst_idx)
		__field(int, count)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(args->dp)->i_sb->s_dev;
		__entry->ino = args->dp->i_ino;
		__entry->op_flags = args->op_flags;
		__entry->src_idx = src_idx;
		__entry->dst_idx = dst_idx;
		__entry->count = count;
	),
	TP_printk("dev %d:%d ino 0x%llx op_flags %s "
		  "src_idx %d dst_idx %d count %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS),
		  __entry->src_idx,
		  __entry->dst_idx,
		  __entry->count)
);
#endif /* _TRACE_XFS_H */
/*
 * Standard tracepoint trailer: point TRACE_INCLUDE_PATH at this header's
 * own directory so define_trace.h can re-include xfs_trace.h to generate
 * the event implementations.
 */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE xfs_trace
#include <trace/define_trace.h>