/*
 * Copyright (c) 2009, Christoph Hellwig
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM xfs

#if !defined(_TRACE_XFS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_XFS_H

#include <linux/tracepoint.h>

struct xfs_agf;
struct xfs_alloc_arg;
struct xfs_attr_list_context;
struct xfs_buf_log_item;
struct xfs_da_args;
struct xfs_da_node_entry;
struct xfs_dquot;
struct xfs_log_item;
struct xlog_ticket;
struct log;
struct xlog_recover;
struct xlog_recover_item;
struct xfs_buf_log_format;
struct xfs_inode_log_format;

DECLARE_EVENT_CLASS(xfs_attr_list_class,
	TP_PROTO(struct xfs_attr_list_context *ctx),
	TP_ARGS(ctx),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(u32, hashval)
		__field(u32, blkno)
		__field(u32, offset)
		__field(void *, alist)
		__field(int, bufsize)
		__field(int, count)
		__field(int, firstu)
		__field(int, dupcnt)
		__field(int, flags)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
		__entry->ino = ctx->dp->i_ino;
		__entry->hashval = ctx->cursor->hashval;
		__entry->blkno = ctx->cursor->blkno;
		__entry->offset = ctx->cursor->offset;
		__entry->alist = ctx->alist;
		__entry->bufsize = ctx->bufsize;
		__entry->count = ctx->count;
		__entry->firstu = ctx->firstu;
		__entry->flags = ctx->flags;
	),
	TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
		  "alist 0x%p size %u count %u firstu %u flags %d %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->hashval,
		  __entry->blkno,
		  __entry->offset,
		  __entry->dupcnt,
		  __entry->alist,
		  __entry->bufsize,
		  __entry->count,
		  __entry->firstu,
		  __entry->flags,
		  __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS)
	)
)

#define DEFINE_ATTR_LIST_EVENT(name) \
DEFINE_EVENT(xfs_attr_list_class, name, \
	TP_PROTO(struct xfs_attr_list_context *ctx), \
	TP_ARGS(ctx))
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf_all);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf_end);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_full);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_add);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_wrong_blk);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_notfound);
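
/*
 * Usage sketch (editorial illustration, not part of the original header):
 * each DEFINE_EVENT() instance above generates a trace_<name>() function
 * taking the class's TP_PROTO arguments, so the attribute listing code
 * would emit the shortform event with a call of roughly this shape:
 *
 *	trace_xfs_attr_list_sf(context);
 *
 * where "context" is the struct xfs_attr_list_context being walked; the
 * actual call sites live in the attr listing code, not in this header.
 */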

DECLARE_EVENT_CLASS(xfs_perag_class,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount,
		 unsigned long caller_ip),
	TP_ARGS(mp, agno, refcount, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(int, refcount)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = agno;
		__entry->refcount = refcount;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d agno %u refcount %d caller %pf",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->refcount,
		  (char *)__entry->caller_ip)
);

#define DEFINE_PERAG_REF_EVENT(name) \
DEFINE_EVENT(xfs_perag_class, name, \
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount, \
		 unsigned long caller_ip), \
	TP_ARGS(mp, agno, refcount, caller_ip))
DEFINE_PERAG_REF_EVENT(xfs_perag_get);
DEFINE_PERAG_REF_EVENT(xfs_perag_get_tag);
DEFINE_PERAG_REF_EVENT(xfs_perag_put);
DEFINE_PERAG_REF_EVENT(xfs_perag_set_reclaim);
DEFINE_PERAG_REF_EVENT(xfs_perag_clear_reclaim);
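
/*
 * Illustrative call (an assumption about the call sites, which are in the
 * per-AG reference counting helpers rather than in this header): the
 * caller_ip argument is normally the caller's return address, so a typical
 * invocation would look like
 *
 *	trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
 *
 * which lets the "caller %pf" field in TP_printk resolve back to the
 * function that took or dropped the reference.
 */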

TRACE_EVENT(xfs_attr_list_node_descend,
	TP_PROTO(struct xfs_attr_list_context *ctx,
		 struct xfs_da_node_entry *btree),
	TP_ARGS(ctx, btree),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(u32, hashval)
		__field(u32, blkno)
		__field(u32, offset)
		__field(void *, alist)
		__field(int, bufsize)
		__field(int, count)
		__field(int, firstu)
		__field(int, dupcnt)
		__field(int, flags)
		__field(u32, bt_hashval)
		__field(u32, bt_before)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
		__entry->ino = ctx->dp->i_ino;
		__entry->hashval = ctx->cursor->hashval;
		__entry->blkno = ctx->cursor->blkno;
		__entry->offset = ctx->cursor->offset;
		__entry->alist = ctx->alist;
		__entry->bufsize = ctx->bufsize;
		__entry->count = ctx->count;
		__entry->firstu = ctx->firstu;
		__entry->flags = ctx->flags;
		__entry->bt_hashval = be32_to_cpu(btree->hashval);
		__entry->bt_before = be32_to_cpu(btree->before);
	),
	TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
		  "alist 0x%p size %u count %u firstu %u flags %d %s "
		  "node hashval %u, node before %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->hashval,
		  __entry->blkno,
		  __entry->offset,
		  __entry->dupcnt,
		  __entry->alist,
		  __entry->bufsize,
		  __entry->count,
		  __entry->firstu,
		  __entry->flags,
		  __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS),
		  __entry->bt_hashval,
		  __entry->bt_before)
);

TRACE_EVENT(xfs_iext_insert,
	TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx,
		 struct xfs_bmbt_irec *r, int state, unsigned long caller_ip),
	TP_ARGS(ip, idx, r, state, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_extnum_t, idx)
		__field(xfs_fileoff_t, startoff)
		__field(xfs_fsblock_t, startblock)
		__field(xfs_filblks_t, blockcount)
		__field(xfs_exntst_t, state)
		__field(int, bmap_state)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->idx = idx;
		__entry->startoff = r->br_startoff;
		__entry->startblock = r->br_startblock;
		__entry->blockcount = r->br_blockcount;
		__entry->state = r->br_state;
		__entry->bmap_state = state;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d ino 0x%llx state %s idx %ld "
		  "offset %lld block %lld count %lld flag %d caller %pf",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
		  (long)__entry->idx,
		  __entry->startoff,
		  (__int64_t)__entry->startblock,
		  __entry->blockcount,
		  __entry->state,
		  (char *)__entry->caller_ip)
);

DECLARE_EVENT_CLASS(xfs_bmap_class,
	TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state,
		 unsigned long caller_ip),
	TP_ARGS(ip, idx, state, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_extnum_t, idx)
		__field(xfs_fileoff_t, startoff)
		__field(xfs_fsblock_t, startblock)
		__field(xfs_filblks_t, blockcount)
		__field(xfs_exntst_t, state)
		__field(int, bmap_state)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		struct xfs_ifork *ifp = (state & BMAP_ATTRFORK) ?
						ip->i_afp : &ip->i_df;
		struct xfs_bmbt_irec r;

		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &r);
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->idx = idx;
		__entry->startoff = r.br_startoff;
		__entry->startblock = r.br_startblock;
		__entry->blockcount = r.br_blockcount;
		__entry->state = r.br_state;
		__entry->bmap_state = state;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d ino 0x%llx state %s idx %ld "
		  "offset %lld block %lld count %lld flag %d caller %pf",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
		  (long)__entry->idx,
		  __entry->startoff,
		  (__int64_t)__entry->startblock,
		  __entry->blockcount,
		  __entry->state,
		  (char *)__entry->caller_ip)
)

#define DEFINE_BMAP_EVENT(name) \
DEFINE_EVENT(xfs_bmap_class, name, \
	TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state, \
		 unsigned long caller_ip), \
	TP_ARGS(ip, idx, state, caller_ip))
DEFINE_BMAP_EVENT(xfs_iext_remove);
DEFINE_BMAP_EVENT(xfs_bmap_pre_update);
DEFINE_BMAP_EVENT(xfs_bmap_post_update);
DEFINE_BMAP_EVENT(xfs_extlist);

DECLARE_EVENT_CLASS(xfs_buf_class,
	TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip),
	TP_ARGS(bp, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_daddr_t, bno)
		__field(size_t, buffer_length)
		__field(int, hold)
		__field(int, pincount)
		__field(unsigned, lockval)
		__field(unsigned, flags)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = bp->b_target->bt_dev;
		__entry->bno = bp->b_bn;
		__entry->buffer_length = bp->b_buffer_length;
		__entry->hold = atomic_read(&bp->b_hold);
		__entry->pincount = atomic_read(&bp->b_pin_count);
		__entry->lockval = bp->b_sema.count;
		__entry->flags = bp->b_flags;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
		  "lock %d flags %s caller %pf",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->bno,
		  __entry->buffer_length,
		  __entry->hold,
		  __entry->pincount,
		  __entry->lockval,
		  __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
		  (void *)__entry->caller_ip)
)

#define DEFINE_BUF_EVENT(name) \
DEFINE_EVENT(xfs_buf_class, name, \
	TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \
	TP_ARGS(bp, caller_ip))
DEFINE_BUF_EVENT(xfs_buf_init);
DEFINE_BUF_EVENT(xfs_buf_free);
DEFINE_BUF_EVENT(xfs_buf_hold);
DEFINE_BUF_EVENT(xfs_buf_rele);
DEFINE_BUF_EVENT(xfs_buf_iodone);
DEFINE_BUF_EVENT(xfs_buf_iorequest);
DEFINE_BUF_EVENT(xfs_buf_bawrite);
DEFINE_BUF_EVENT(xfs_buf_lock);
DEFINE_BUF_EVENT(xfs_buf_lock_done);
DEFINE_BUF_EVENT(xfs_buf_trylock);
DEFINE_BUF_EVENT(xfs_buf_unlock);
DEFINE_BUF_EVENT(xfs_buf_iowait);
DEFINE_BUF_EVENT(xfs_buf_iowait_done);
DEFINE_BUF_EVENT(xfs_buf_delwri_queue);
DEFINE_BUF_EVENT(xfs_buf_delwri_dequeue);
DEFINE_BUF_EVENT(xfs_buf_delwri_split);
DEFINE_BUF_EVENT(xfs_buf_get_uncached);
DEFINE_BUF_EVENT(xfs_bdstrat_shut);
DEFINE_BUF_EVENT(xfs_buf_item_relse);
DEFINE_BUF_EVENT(xfs_buf_item_iodone);
DEFINE_BUF_EVENT(xfs_buf_item_iodone_async);
DEFINE_BUF_EVENT(xfs_buf_error_relse);
DEFINE_BUF_EVENT(xfs_trans_read_buf_io);
DEFINE_BUF_EVENT(xfs_trans_read_buf_shut);

/* not really buffer traces, but the buf provides useful information */
DEFINE_BUF_EVENT(xfs_btree_corrupt);
DEFINE_BUF_EVENT(xfs_da_btree_corrupt);
DEFINE_BUF_EVENT(xfs_reset_dqcounts);
DEFINE_BUF_EVENT(xfs_inode_item_push);
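
/*
 * Usage sketch (editorial, hedged): the buffer events take the buffer and
 * the caller's return address, so the buffer cache code would typically
 * emit them as, for example,
 *
 *	trace_xfs_buf_hold(bp, _RET_IP_);
 *
 * The caller_ip value only feeds the "caller %pf" annotation in the trace
 * output; it does not affect buffer state.
 */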

/* pass flags explicitly */
DECLARE_EVENT_CLASS(xfs_buf_flags_class,
	TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip),
	TP_ARGS(bp, flags, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_daddr_t, bno)
		__field(size_t, buffer_length)
		__field(int, hold)
		__field(int, pincount)
		__field(unsigned, lockval)
		__field(unsigned, flags)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = bp->b_target->bt_dev;
		__entry->bno = bp->b_bn;
		__entry->buffer_length = bp->b_buffer_length;
		__entry->flags = flags;
		__entry->hold = atomic_read(&bp->b_hold);
		__entry->pincount = atomic_read(&bp->b_pin_count);
		__entry->lockval = bp->b_sema.count;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
		  "lock %d flags %s caller %pf",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->bno,
		  __entry->buffer_length,
		  __entry->hold,
		  __entry->pincount,
		  __entry->lockval,
		  __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
		  (void *)__entry->caller_ip)
)

#define DEFINE_BUF_FLAGS_EVENT(name) \
DEFINE_EVENT(xfs_buf_flags_class, name, \
	TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \
	TP_ARGS(bp, flags, caller_ip))
DEFINE_BUF_FLAGS_EVENT(xfs_buf_find);
DEFINE_BUF_FLAGS_EVENT(xfs_buf_get);
DEFINE_BUF_FLAGS_EVENT(xfs_buf_read);

TRACE_EVENT(xfs_buf_ioerror,
	TP_PROTO(struct xfs_buf *bp, int error, unsigned long caller_ip),
	TP_ARGS(bp, error, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_daddr_t, bno)
		__field(size_t, buffer_length)
		__field(unsigned, flags)
		__field(int, hold)
		__field(int, pincount)
		__field(unsigned, lockval)
		__field(int, error)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = bp->b_target->bt_dev;
		__entry->bno = bp->b_bn;
		__entry->buffer_length = bp->b_buffer_length;
		__entry->hold = atomic_read(&bp->b_hold);
		__entry->pincount = atomic_read(&bp->b_pin_count);
		__entry->lockval = bp->b_sema.count;
		__entry->error = error;
		__entry->flags = bp->b_flags;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
		  "lock %d error %d flags %s caller %pf",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->bno,
		  __entry->buffer_length,
		  __entry->hold,
		  __entry->pincount,
		  __entry->lockval,
		  __entry->error,
		  __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
		  (void *)__entry->caller_ip)
);

DECLARE_EVENT_CLASS(xfs_buf_item_class,
	TP_PROTO(struct xfs_buf_log_item *bip),
	TP_ARGS(bip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_daddr_t, buf_bno)
		__field(size_t, buf_len)
		__field(int, buf_hold)
		__field(int, buf_pincount)
		__field(int, buf_lockval)
		__field(unsigned, buf_flags)
		__field(unsigned, bli_recur)
		__field(int, bli_refcount)
		__field(unsigned, bli_flags)
		__field(void *, li_desc)
		__field(unsigned, li_flags)
	),
	TP_fast_assign(
		__entry->dev = bip->bli_buf->b_target->bt_dev;
		__entry->bli_flags = bip->bli_flags;
		__entry->bli_recur = bip->bli_recur;
		__entry->bli_refcount = atomic_read(&bip->bli_refcount);
		__entry->buf_bno = bip->bli_buf->b_bn;
		__entry->buf_len = bip->bli_buf->b_buffer_length;
		__entry->buf_flags = bip->bli_buf->b_flags;
		__entry->buf_hold = atomic_read(&bip->bli_buf->b_hold);
		__entry->buf_pincount = atomic_read(&bip->bli_buf->b_pin_count);
		__entry->buf_lockval = bip->bli_buf->b_sema.count;
		__entry->li_desc = bip->bli_item.li_desc;
		__entry->li_flags = bip->bli_item.li_flags;
	),
	TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
		  "lock %d flags %s recur %d refcount %d bliflags %s "
		  "lidesc 0x%p liflags %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->buf_bno,
		  __entry->buf_len,
		  __entry->buf_hold,
		  __entry->buf_pincount,
		  __entry->buf_lockval,
		  __print_flags(__entry->buf_flags, "|", XFS_BUF_FLAGS),
		  __entry->bli_recur,
		  __entry->bli_refcount,
		  __print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS),
		  __entry->li_desc,
		  __print_flags(__entry->li_flags, "|", XFS_LI_FLAGS))
)

#define DEFINE_BUF_ITEM_EVENT(name) \
DEFINE_EVENT(xfs_buf_item_class, name, \
	TP_PROTO(struct xfs_buf_log_item *bip), \
	TP_ARGS(bip))
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_trylock);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pushbuf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf_recur);
DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb);
DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb_recur);
DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf_recur);
DEFINE_BUF_ITEM_EVENT(xfs_trans_log_buf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_brelse);
DEFINE_BUF_ITEM_EVENT(xfs_trans_bjoin);
DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold);
DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release);
DEFINE_BUF_ITEM_EVENT(xfs_trans_binval);

DECLARE_EVENT_CLASS(xfs_lock_class,
	TP_PROTO(struct xfs_inode *ip, unsigned lock_flags,
		 unsigned long caller_ip),
	TP_ARGS(ip, lock_flags, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(int, lock_flags)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->lock_flags = lock_flags;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d ino 0x%llx flags %s caller %pf",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS),
		  (void *)__entry->caller_ip)
)

#define DEFINE_LOCK_EVENT(name) \
DEFINE_EVENT(xfs_lock_class, name, \
	TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, \
		 unsigned long caller_ip), \
	TP_ARGS(ip, lock_flags, caller_ip))
DEFINE_LOCK_EVENT(xfs_ilock);
DEFINE_LOCK_EVENT(xfs_ilock_nowait);
DEFINE_LOCK_EVENT(xfs_ilock_demote);
DEFINE_LOCK_EVENT(xfs_iunlock);
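
/*
 * Illustrative call (hedged; the real call sites are in the inode locking
 * helpers): the lock events carry the lock flags being taken or dropped
 * plus the caller's return address, e.g.
 *
 *	trace_xfs_ilock(ip, XFS_ILOCK_EXCL, _RET_IP_);
 *
 * so a trace run can show which code paths contend for a given inode lock.
 */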

DECLARE_EVENT_CLASS(xfs_inode_class,
	TP_PROTO(struct xfs_inode *ip),
	TP_ARGS(ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
	),
	TP_printk("dev %d:%d ino 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino)
)

#define DEFINE_INODE_EVENT(name) \
DEFINE_EVENT(xfs_inode_class, name, \
	TP_PROTO(struct xfs_inode *ip), \
	TP_ARGS(ip))
DEFINE_INODE_EVENT(xfs_iget_skip);
DEFINE_INODE_EVENT(xfs_iget_reclaim);
DEFINE_INODE_EVENT(xfs_iget_reclaim_fail);
DEFINE_INODE_EVENT(xfs_iget_hit);
DEFINE_INODE_EVENT(xfs_iget_miss);
DEFINE_INODE_EVENT(xfs_getattr);
DEFINE_INODE_EVENT(xfs_setattr);
DEFINE_INODE_EVENT(xfs_readlink);
DEFINE_INODE_EVENT(xfs_alloc_file_space);
DEFINE_INODE_EVENT(xfs_free_file_space);
DEFINE_INODE_EVENT(xfs_readdir);
#ifdef CONFIG_XFS_POSIX_ACL
DEFINE_INODE_EVENT(xfs_get_acl);
#endif
DEFINE_INODE_EVENT(xfs_vm_bmap);
DEFINE_INODE_EVENT(xfs_file_ioctl);
DEFINE_INODE_EVENT(xfs_file_compat_ioctl);
DEFINE_INODE_EVENT(xfs_ioctl_setattr);
DEFINE_INODE_EVENT(xfs_dir_fsync);
DEFINE_INODE_EVENT(xfs_file_fsync);
DEFINE_INODE_EVENT(xfs_destroy_inode);
DEFINE_INODE_EVENT(xfs_write_inode);
DEFINE_INODE_EVENT(xfs_evict_inode);
DEFINE_INODE_EVENT(xfs_dquot_dqalloc);
DEFINE_INODE_EVENT(xfs_dquot_dqdetach);
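
/*
 * Usage sketch (editorial illustration): the single-argument inode events
 * are the simplest case; each is emitted by the corresponding VFS or ioctl
 * entry point with just the XFS inode, for example
 *
 *	trace_xfs_readlink(ip);
 *
 * Everything shown in the trace line (device and inode number) is derived
 * inside TP_fast_assign above.
 */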

DECLARE_EVENT_CLASS(xfs_iref_class,
	TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip),
	TP_ARGS(ip, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(int, count)
		__field(int, pincount)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->count = atomic_read(&VFS_I(ip)->i_count);
		__entry->pincount = atomic_read(&ip->i_pincount);
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d ino 0x%llx count %d pincount %d caller %pf",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->count,
		  __entry->pincount,
		  (char *)__entry->caller_ip)
)

#define DEFINE_IREF_EVENT(name) \
DEFINE_EVENT(xfs_iref_class, name, \
	TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \
	TP_ARGS(ip, caller_ip))
DEFINE_IREF_EVENT(xfs_ihold);
DEFINE_IREF_EVENT(xfs_irele);
DEFINE_IREF_EVENT(xfs_inode_pin);
DEFINE_IREF_EVENT(xfs_inode_unpin);
DEFINE_IREF_EVENT(xfs_inode_unpin_nowait);

DECLARE_EVENT_CLASS(xfs_namespace_class,
	TP_PROTO(struct xfs_inode *dp, struct xfs_name *name),
	TP_ARGS(dp, name),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, dp_ino)
		__dynamic_array(char, name, name->len)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(dp)->i_sb->s_dev;
		__entry->dp_ino = dp->i_ino;
		memcpy(__get_str(name), name->name, name->len);
	),
	TP_printk("dev %d:%d dp ino 0x%llx name %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dp_ino,
		  __get_str(name))
)

#define DEFINE_NAMESPACE_EVENT(name) \
DEFINE_EVENT(xfs_namespace_class, name, \
	TP_PROTO(struct xfs_inode *dp, struct xfs_name *name), \
	TP_ARGS(dp, name))
DEFINE_NAMESPACE_EVENT(xfs_remove);
DEFINE_NAMESPACE_EVENT(xfs_link);
DEFINE_NAMESPACE_EVENT(xfs_lookup);
DEFINE_NAMESPACE_EVENT(xfs_create);
DEFINE_NAMESPACE_EVENT(xfs_symlink);

TRACE_EVENT(xfs_rename,
	TP_PROTO(struct xfs_inode *src_dp, struct xfs_inode *target_dp,
		 struct xfs_name *src_name, struct xfs_name *target_name),
	TP_ARGS(src_dp, target_dp, src_name, target_name),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, src_dp_ino)
		__field(xfs_ino_t, target_dp_ino)
		__dynamic_array(char, src_name, src_name->len)
		__dynamic_array(char, target_name, target_name->len)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(src_dp)->i_sb->s_dev;
		__entry->src_dp_ino = src_dp->i_ino;
		__entry->target_dp_ino = target_dp->i_ino;
		memcpy(__get_str(src_name), src_name->name, src_name->len);
		memcpy(__get_str(target_name), target_name->name, target_name->len);
	),
	TP_printk("dev %d:%d src dp ino 0x%llx target dp ino 0x%llx"
		  " src name %s target name %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->src_dp_ino,
		  __entry->target_dp_ino,
		  __get_str(src_name),
		  __get_str(target_name))
)

DECLARE_EVENT_CLASS(xfs_dquot_class,
	TP_PROTO(struct xfs_dquot *dqp),
	TP_ARGS(dqp),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u32, id)
		__field(unsigned, flags)
		__field(unsigned, nrefs)
		__field(unsigned long long, res_bcount)
		__field(unsigned long long, bcount)
		__field(unsigned long long, icount)
		__field(unsigned long long, blk_hardlimit)
		__field(unsigned long long, blk_softlimit)
		__field(unsigned long long, ino_hardlimit)
		__field(unsigned long long, ino_softlimit)
	), \
	TP_fast_assign(
		__entry->dev = dqp->q_mount->m_super->s_dev;
		__entry->id = be32_to_cpu(dqp->q_core.d_id);
		__entry->flags = dqp->dq_flags;
		__entry->nrefs = dqp->q_nrefs;
		__entry->res_bcount = dqp->q_res_bcount;
		__entry->bcount = be64_to_cpu(dqp->q_core.d_bcount);
		__entry->icount = be64_to_cpu(dqp->q_core.d_icount);
		__entry->blk_hardlimit =
			be64_to_cpu(dqp->q_core.d_blk_hardlimit);
		__entry->blk_softlimit =
			be64_to_cpu(dqp->q_core.d_blk_softlimit);
		__entry->ino_hardlimit =
			be64_to_cpu(dqp->q_core.d_ino_hardlimit);
		__entry->ino_softlimit =
			be64_to_cpu(dqp->q_core.d_ino_softlimit);
	),
	TP_printk("dev %d:%d id 0x%x flags %s nrefs %u res_bc 0x%llx "
		  "bcnt 0x%llx bhardlimit 0x%llx bsoftlimit 0x%llx "
		  "icnt 0x%llx ihardlimit 0x%llx isoftlimit 0x%llx]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->id,
		  __print_flags(__entry->flags, "|", XFS_DQ_FLAGS),
		  __entry->nrefs,
		  __entry->res_bcount,
		  __entry->bcount,
		  __entry->blk_hardlimit,
		  __entry->blk_softlimit,
		  __entry->icount,
		  __entry->ino_hardlimit,
		  __entry->ino_softlimit)
)

#define DEFINE_DQUOT_EVENT(name) \
DEFINE_EVENT(xfs_dquot_class, name, \
	TP_PROTO(struct xfs_dquot *dqp), \
	TP_ARGS(dqp))
DEFINE_DQUOT_EVENT(xfs_dqadjust);
DEFINE_DQUOT_EVENT(xfs_dqreclaim_want);
DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty);
DEFINE_DQUOT_EVENT(xfs_dqreclaim_unlink);
DEFINE_DQUOT_EVENT(xfs_dqattach_found);
DEFINE_DQUOT_EVENT(xfs_dqattach_get);
DEFINE_DQUOT_EVENT(xfs_dqinit);
DEFINE_DQUOT_EVENT(xfs_dqreuse);
DEFINE_DQUOT_EVENT(xfs_dqalloc);
DEFINE_DQUOT_EVENT(xfs_dqtobp_read);
DEFINE_DQUOT_EVENT(xfs_dqread);
DEFINE_DQUOT_EVENT(xfs_dqread_fail);
DEFINE_DQUOT_EVENT(xfs_dqlookup_found);
DEFINE_DQUOT_EVENT(xfs_dqlookup_want);
DEFINE_DQUOT_EVENT(xfs_dqlookup_freelist);
DEFINE_DQUOT_EVENT(xfs_dqlookup_done);
DEFINE_DQUOT_EVENT(xfs_dqget_hit);
DEFINE_DQUOT_EVENT(xfs_dqget_miss);
DEFINE_DQUOT_EVENT(xfs_dqput);
DEFINE_DQUOT_EVENT(xfs_dqput_wait);
DEFINE_DQUOT_EVENT(xfs_dqput_free);
DEFINE_DQUOT_EVENT(xfs_dqrele);
DEFINE_DQUOT_EVENT(xfs_dqflush);
DEFINE_DQUOT_EVENT(xfs_dqflush_force);
DEFINE_DQUOT_EVENT(xfs_dqflush_done);
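
/*
 * Illustrative call (hedged): the dquot events are emitted from the quota
 * code wherever a dquot changes state, passing only the dquot itself, e.g.
 *
 *	trace_xfs_dqget_hit(dqp);
 *
 * All of the limit and count fields in the trace record are decoded from
 * dqp->q_core by TP_fast_assign at the moment the event fires.
 */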

DECLARE_EVENT_CLASS(xfs_loggrant_class,
	TP_PROTO(struct log *log, struct xlog_ticket *tic),
	TP_ARGS(log, tic),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned, trans_type)
		__field(char, ocnt)
		__field(char, cnt)
		__field(int, curr_res)
		__field(int, unit_res)
		__field(unsigned int, flags)
		__field(int, reserveq)
		__field(int, writeq)
		__field(int, grant_reserve_cycle)
		__field(int, grant_reserve_bytes)
		__field(int, grant_write_cycle)
		__field(int, grant_write_bytes)
		__field(int, curr_cycle)
		__field(int, curr_block)
		__field(xfs_lsn_t, tail_lsn)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		__entry->trans_type = tic->t_trans_type;
		__entry->ocnt = tic->t_ocnt;
		__entry->cnt = tic->t_cnt;
		__entry->curr_res = tic->t_curr_res;
		__entry->unit_res = tic->t_unit_res;
		__entry->flags = tic->t_flags;
		__entry->reserveq = list_empty(&log->l_reserveq);
		__entry->writeq = list_empty(&log->l_writeq);
		xlog_crack_grant_head(&log->l_grant_reserve_head,
				&__entry->grant_reserve_cycle,
				&__entry->grant_reserve_bytes);
		xlog_crack_grant_head(&log->l_grant_write_head,
				&__entry->grant_write_cycle,
				&__entry->grant_write_bytes);
		__entry->curr_cycle = log->l_curr_cycle;
		__entry->curr_block = log->l_curr_block;
		__entry->tail_lsn = atomic64_read(&log->l_tail_lsn);
	),
	TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u "
		  "t_unit_res %u t_flags %s reserveq %s "
		  "writeq %s grant_reserve_cycle %d "
		  "grant_reserve_bytes %d grant_write_cycle %d "
		  "grant_write_bytes %d curr_cycle %d curr_block %d "
		  "tail_cycle %d tail_block %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_symbolic(__entry->trans_type, XFS_TRANS_TYPES),
		  __entry->ocnt,
		  __entry->cnt,
		  __entry->curr_res,
		  __entry->unit_res,
		  __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS),
		  __entry->reserveq ? "empty" : "active",
		  __entry->writeq ? "empty" : "active",
		  __entry->grant_reserve_cycle,
		  __entry->grant_reserve_bytes,
		  __entry->grant_write_cycle,
		  __entry->grant_write_bytes,
		  __entry->curr_cycle,
		  __entry->curr_block,
		  CYCLE_LSN(__entry->tail_lsn),
		  BLOCK_LSN(__entry->tail_lsn)
	)
)

#define DEFINE_LOGGRANT_EVENT(name) \
DEFINE_EVENT(xfs_loggrant_class, name, \
	TP_PROTO(struct log *log, struct xlog_ticket *tic), \
	TP_ARGS(log, tic))
DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm);
DEFINE_LOGGRANT_EVENT(xfs_log_done_perm);
DEFINE_LOGGRANT_EVENT(xfs_log_reserve);
DEFINE_LOGGRANT_EVENT(xfs_log_umount_write);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_error);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_sub);
DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_sub);
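
/*
 * Usage sketch (editorial, hedged): the log grant events are emitted from
 * the log reservation paths with the log and the ticket being processed,
 * for example
 *
 *	trace_xfs_log_reserve(log, tic);
 *
 * The reserve and write grant heads are cracked into cycle/bytes pairs by
 * xlog_crack_grant_head() in TP_fast_assign, so the trace output shows the
 * grant state at the instant of the call.
 */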

DECLARE_EVENT_CLASS(xfs_log_item_class,
	TP_PROTO(struct xfs_log_item *lip),
	TP_ARGS(lip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(void *, lip)
		__field(uint, type)
		__field(uint, flags)
		__field(xfs_lsn_t, lsn)
	),
	TP_fast_assign(
		__entry->dev = lip->li_mountp->m_super->s_dev;
		__entry->lip = lip;
		__entry->type = lip->li_type;
		__entry->flags = lip->li_flags;
		__entry->lsn = lip->li_lsn;
	),
	TP_printk("dev %d:%d lip 0x%p lsn %d/%d type %s flags %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->lip,
		  CYCLE_LSN(__entry->lsn), BLOCK_LSN(__entry->lsn),
		  __print_symbolic(__entry->type, XFS_LI_TYPE_DESC),
		  __print_flags(__entry->flags, "|", XFS_LI_FLAGS))
)

#define DEFINE_LOG_ITEM_EVENT(name) \
DEFINE_EVENT(xfs_log_item_class, name, \
	TP_PROTO(struct xfs_log_item *lip), \
	TP_ARGS(lip))
DEFINE_LOG_ITEM_EVENT(xfs_ail_push);
DEFINE_LOG_ITEM_EVENT(xfs_ail_pushbuf);
DEFINE_LOG_ITEM_EVENT(xfs_ail_pushbuf_pinned);
DEFINE_LOG_ITEM_EVENT(xfs_ail_pinned);
DEFINE_LOG_ITEM_EVENT(xfs_ail_locked);

DECLARE_EVENT_CLASS(xfs_file_class,
	TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags),
	TP_ARGS(ip, count, offset, flags),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_fsize_t, size)
		__field(xfs_fsize_t, new_size)
		__field(loff_t, offset)
		__field(size_t, count)
		__field(int, flags)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->size = ip->i_d.di_size;
		__entry->new_size = ip->i_new_size;
		__entry->offset = offset;
		__entry->count = count;
		__entry->flags = flags;
	),
	TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx "
		  "offset 0x%llx count 0x%zx ioflags %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->size,
		  __entry->new_size,
		  __entry->offset,
		  __entry->count,
		  __print_flags(__entry->flags, "|", XFS_IO_FLAGS))
)

#define DEFINE_RW_EVENT(name) \
DEFINE_EVENT(xfs_file_class, name, \
	TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags), \
	TP_ARGS(ip, count, offset, flags))
DEFINE_RW_EVENT(xfs_file_read);
DEFINE_RW_EVENT(xfs_file_buffered_write);
DEFINE_RW_EVENT(xfs_file_direct_write);
DEFINE_RW_EVENT(xfs_file_splice_read);
DEFINE_RW_EVENT(xfs_file_splice_write);
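
/*
 * Illustrative call (an assumption about the file I/O paths, which are not
 * part of this header): the read/write events record the byte count and
 * file offset of the I/O along with the ioflags, roughly as
 *
 *	trace_xfs_file_buffered_write(ip, count, pos, ioflags);
 *
 * so buffered, direct and splice paths can be distinguished purely by the
 * event name while sharing the same record layout.
 */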
  894. DECLARE_EVENT_CLASS(xfs_page_class,
  895. TP_PROTO(struct inode *inode, struct page *page, unsigned long off),
  896. TP_ARGS(inode, page, off),
  897. TP_STRUCT__entry(
  898. __field(dev_t, dev)
  899. __field(xfs_ino_t, ino)
  900. __field(pgoff_t, pgoff)
  901. __field(loff_t, size)
  902. __field(unsigned long, offset)
  903. __field(int, delalloc)
  904. __field(int, unwritten)
  905. ),
  906. TP_fast_assign(
  907. int delalloc = -1, unwritten = -1;
  908. if (page_has_buffers(page))
  909. xfs_count_page_state(page, &delalloc, &unwritten);
  910. __entry->dev = inode->i_sb->s_dev;
  911. __entry->ino = XFS_I(inode)->i_ino;
  912. __entry->pgoff = page_offset(page);
  913. __entry->size = i_size_read(inode);
  914. __entry->offset = off;
  915. __entry->delalloc = delalloc;
  916. __entry->unwritten = unwritten;
  917. ),
  918. TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx "
  919. "delalloc %d unwritten %d",
  920. MAJOR(__entry->dev), MINOR(__entry->dev),
  921. __entry->ino,
  922. __entry->pgoff,
  923. __entry->size,
  924. __entry->offset,
  925. __entry->delalloc,
  926. __entry->unwritten)
  927. )
  928. #define DEFINE_PAGE_EVENT(name) \
  929. DEFINE_EVENT(xfs_page_class, name, \
  930. TP_PROTO(struct inode *inode, struct page *page, unsigned long off), \
  931. TP_ARGS(inode, page, off))
  932. DEFINE_PAGE_EVENT(xfs_writepage);
  933. DEFINE_PAGE_EVENT(xfs_releasepage);
  934. DEFINE_PAGE_EVENT(xfs_invalidatepage);
  935. DECLARE_EVENT_CLASS(xfs_imap_class,
  936. TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
  937. int type, struct xfs_bmbt_irec *irec),
  938. TP_ARGS(ip, offset, count, type, irec),
  939. TP_STRUCT__entry(
  940. __field(dev_t, dev)
  941. __field(xfs_ino_t, ino)
  942. __field(loff_t, size)
  943. __field(loff_t, new_size)
  944. __field(loff_t, offset)
  945. __field(size_t, count)
  946. __field(int, type)
  947. __field(xfs_fileoff_t, startoff)
  948. __field(xfs_fsblock_t, startblock)
  949. __field(xfs_filblks_t, blockcount)
  950. ),
  951. TP_fast_assign(
  952. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  953. __entry->ino = ip->i_ino;
  954. __entry->size = ip->i_d.di_size;
  955. __entry->new_size = ip->i_new_size;
  956. __entry->offset = offset;
  957. __entry->count = count;
  958. __entry->type = type;
  959. __entry->startoff = irec ? irec->br_startoff : 0;
  960. __entry->startblock = irec ? irec->br_startblock : 0;
  961. __entry->blockcount = irec ? irec->br_blockcount : 0;
  962. ),
  963. TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx "
  964. "offset 0x%llx count %zd type %s "
  965. "startoff 0x%llx startblock %lld blockcount 0x%llx",
  966. MAJOR(__entry->dev), MINOR(__entry->dev),
  967. __entry->ino,
  968. __entry->size,
  969. __entry->new_size,
  970. __entry->offset,
  971. __entry->count,
  972. __print_symbolic(__entry->type, XFS_IO_TYPES),
  973. __entry->startoff,
  974. (__int64_t)__entry->startblock,
  975. __entry->blockcount)
  976. )
  977. #define DEFINE_IOMAP_EVENT(name) \
  978. DEFINE_EVENT(xfs_imap_class, name, \
  979. TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \
  980. int type, struct xfs_bmbt_irec *irec), \
  981. TP_ARGS(ip, offset, count, type, irec))
  982. DEFINE_IOMAP_EVENT(xfs_map_blocks_found);
  983. DEFINE_IOMAP_EVENT(xfs_map_blocks_alloc);
  984. DEFINE_IOMAP_EVENT(xfs_get_blocks_found);
  985. DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc);
  986. DECLARE_EVENT_CLASS(xfs_simple_io_class,
  987. TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
  988. TP_ARGS(ip, offset, count),
  989. TP_STRUCT__entry(
  990. __field(dev_t, dev)
  991. __field(xfs_ino_t, ino)
  992. __field(loff_t, isize)
  993. __field(loff_t, disize)
  994. __field(loff_t, new_size)
  995. __field(loff_t, offset)
  996. __field(size_t, count)
  997. ),
  998. TP_fast_assign(
  999. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  1000. __entry->ino = ip->i_ino;
  1001. __entry->isize = ip->i_size;
  1002. __entry->disize = ip->i_d.di_size;
  1003. __entry->new_size = ip->i_new_size;
  1004. __entry->offset = offset;
  1005. __entry->count = count;
  1006. ),
  1007. TP_printk("dev %d:%d ino 0x%llx isize 0x%llx disize 0x%llx new_size 0x%llx "
  1008. "offset 0x%llx count %zd",
  1009. MAJOR(__entry->dev), MINOR(__entry->dev),
  1010. __entry->ino,
  1011. __entry->isize,
  1012. __entry->disize,
  1013. __entry->new_size,
  1014. __entry->offset,
  1015. __entry->count)
  1016. );
  1017. #define DEFINE_SIMPLE_IO_EVENT(name) \
  1018. DEFINE_EVENT(xfs_simple_io_class, name, \
  1019. TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), \
  1020. TP_ARGS(ip, offset, count))
  1021. DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
  1022. DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
  1023. DEFINE_SIMPLE_IO_EVENT(xfs_get_blocks_notfound);
  1024. DEFINE_SIMPLE_IO_EVENT(xfs_setfilesize);
  1025. DECLARE_EVENT_CLASS(xfs_itrunc_class,
  1026. TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size),
  1027. TP_ARGS(ip, new_size),
  1028. TP_STRUCT__entry(
  1029. __field(dev_t, dev)
  1030. __field(xfs_ino_t, ino)
  1031. __field(xfs_fsize_t, size)
  1032. __field(xfs_fsize_t, new_size)
  1033. ),
  1034. TP_fast_assign(
  1035. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  1036. __entry->ino = ip->i_ino;
  1037. __entry->size = ip->i_d.di_size;
  1038. __entry->new_size = new_size;
  1039. ),
  1040. TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx",
  1041. MAJOR(__entry->dev), MINOR(__entry->dev),
  1042. __entry->ino,
  1043. __entry->size,
  1044. __entry->new_size)
  1045. )
  1046. #define DEFINE_ITRUNC_EVENT(name) \
  1047. DEFINE_EVENT(xfs_itrunc_class, name, \
  1048. TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \
  1049. TP_ARGS(ip, new_size))
  1050. DEFINE_ITRUNC_EVENT(xfs_itruncate_data_start);
  1051. DEFINE_ITRUNC_EVENT(xfs_itruncate_data_end);
  1052. TRACE_EVENT(xfs_pagecache_inval,
  1053. TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish),
  1054. TP_ARGS(ip, start, finish),
  1055. TP_STRUCT__entry(
  1056. __field(dev_t, dev)
  1057. __field(xfs_ino_t, ino)
  1058. __field(xfs_fsize_t, size)
  1059. __field(xfs_off_t, start)
  1060. __field(xfs_off_t, finish)
  1061. ),
  1062. TP_fast_assign(
  1063. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  1064. __entry->ino = ip->i_ino;
  1065. __entry->size = ip->i_d.di_size;
  1066. __entry->start = start;
  1067. __entry->finish = finish;
  1068. ),
  1069. TP_printk("dev %d:%d ino 0x%llx size 0x%llx start 0x%llx finish 0x%llx",
  1070. MAJOR(__entry->dev), MINOR(__entry->dev),
  1071. __entry->ino,
  1072. __entry->size,
  1073. __entry->start,
  1074. __entry->finish)
  1075. );
  1076. TRACE_EVENT(xfs_bunmap,
  1077. TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t bno, xfs_filblks_t len,
  1078. int flags, unsigned long caller_ip),
  1079. TP_ARGS(ip, bno, len, flags, caller_ip),
  1080. TP_STRUCT__entry(
  1081. __field(dev_t, dev)
  1082. __field(xfs_ino_t, ino)
  1083. __field(xfs_fsize_t, size)
  1084. __field(xfs_fileoff_t, bno)
  1085. __field(xfs_filblks_t, len)
  1086. __field(unsigned long, caller_ip)
  1087. __field(int, flags)
  1088. ),
  1089. TP_fast_assign(
  1090. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  1091. __entry->ino = ip->i_ino;
  1092. __entry->size = ip->i_d.di_size;
  1093. __entry->bno = bno;
  1094. __entry->len = len;
  1095. __entry->caller_ip = caller_ip;
  1096. __entry->flags = flags;
  1097. ),
  1098. TP_printk("dev %d:%d ino 0x%llx size 0x%llx bno 0x%llx len 0x%llx"
  1099. "flags %s caller %pf",
  1100. MAJOR(__entry->dev), MINOR(__entry->dev),
  1101. __entry->ino,
  1102. __entry->size,
  1103. __entry->bno,
  1104. __entry->len,
  1105. __print_flags(__entry->flags, "|", XFS_BMAPI_FLAGS),
  1106. (void *)__entry->caller_ip)
  1107. );
  1108. DECLARE_EVENT_CLASS(xfs_busy_class,
  1109. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
  1110. xfs_agblock_t agbno, xfs_extlen_t len),
  1111. TP_ARGS(mp, agno, agbno, len),
  1112. TP_STRUCT__entry(
  1113. __field(dev_t, dev)
  1114. __field(xfs_agnumber_t, agno)
  1115. __field(xfs_agblock_t, agbno)
  1116. __field(xfs_extlen_t, len)
  1117. ),
  1118. TP_fast_assign(
  1119. __entry->dev = mp->m_super->s_dev;
  1120. __entry->agno = agno;
  1121. __entry->agbno = agbno;
  1122. __entry->len = len;
  1123. ),
  1124. TP_printk("dev %d:%d agno %u agbno %u len %u",
  1125. MAJOR(__entry->dev), MINOR(__entry->dev),
  1126. __entry->agno,
  1127. __entry->agbno,
  1128. __entry->len)
  1129. );
  1130. #define DEFINE_BUSY_EVENT(name) \
  1131. DEFINE_EVENT(xfs_busy_class, name, \
  1132. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
  1133. xfs_agblock_t agbno, xfs_extlen_t len), \
  1134. TP_ARGS(mp, agno, agbno, len))
  1135. DEFINE_BUSY_EVENT(xfs_alloc_busy);
  1136. DEFINE_BUSY_EVENT(xfs_alloc_busy_enomem);
  1137. DEFINE_BUSY_EVENT(xfs_alloc_busy_force);
  1138. DEFINE_BUSY_EVENT(xfs_alloc_busy_reuse);
  1139. DEFINE_BUSY_EVENT(xfs_alloc_busy_clear);
  1140. TRACE_EVENT(xfs_alloc_busy_trim,
  1141. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
  1142. xfs_agblock_t agbno, xfs_extlen_t len,
  1143. xfs_agblock_t tbno, xfs_extlen_t tlen),
  1144. TP_ARGS(mp, agno, agbno, len, tbno, tlen),
  1145. TP_STRUCT__entry(
  1146. __field(dev_t, dev)
  1147. __field(xfs_agnumber_t, agno)
  1148. __field(xfs_agblock_t, agbno)
  1149. __field(xfs_extlen_t, len)
  1150. __field(xfs_agblock_t, tbno)
  1151. __field(xfs_extlen_t, tlen)
  1152. ),
  1153. TP_fast_assign(
  1154. __entry->dev = mp->m_super->s_dev;
  1155. __entry->agno = agno;
  1156. __entry->agbno = agbno;
  1157. __entry->len = len;
  1158. __entry->tbno = tbno;
  1159. __entry->tlen = tlen;
  1160. ),
  1161. TP_printk("dev %d:%d agno %u agbno %u len %u tbno %u tlen %u",
  1162. MAJOR(__entry->dev), MINOR(__entry->dev),
  1163. __entry->agno,
  1164. __entry->agbno,
  1165. __entry->len,
  1166. __entry->tbno,
  1167. __entry->tlen)
  1168. );
  1169. TRACE_EVENT(xfs_trans_commit_lsn,
  1170. TP_PROTO(struct xfs_trans *trans),
  1171. TP_ARGS(trans),
  1172. TP_STRUCT__entry(
  1173. __field(dev_t, dev)
  1174. __field(struct xfs_trans *, tp)
  1175. __field(xfs_lsn_t, lsn)
  1176. ),
  1177. TP_fast_assign(
  1178. __entry->dev = trans->t_mountp->m_super->s_dev;
  1179. __entry->tp = trans;
  1180. __entry->lsn = trans->t_commit_lsn;
  1181. ),
  1182. TP_printk("dev %d:%d trans 0x%p commit_lsn 0x%llx",
  1183. MAJOR(__entry->dev), MINOR(__entry->dev),
  1184. __entry->tp,
  1185. __entry->lsn)
  1186. );
  1187. TRACE_EVENT(xfs_agf,
  1188. TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags,
  1189. unsigned long caller_ip),
  1190. TP_ARGS(mp, agf, flags, caller_ip),
  1191. TP_STRUCT__entry(
  1192. __field(dev_t, dev)
  1193. __field(xfs_agnumber_t, agno)
  1194. __field(int, flags)
  1195. __field(__u32, length)
  1196. __field(__u32, bno_root)
  1197. __field(__u32, cnt_root)
  1198. __field(__u32, bno_level)
  1199. __field(__u32, cnt_level)
  1200. __field(__u32, flfirst)
  1201. __field(__u32, fllast)
  1202. __field(__u32, flcount)
  1203. __field(__u32, freeblks)
  1204. __field(__u32, longest)
  1205. __field(unsigned long, caller_ip)
  1206. ),
  1207. TP_fast_assign(
  1208. __entry->dev = mp->m_super->s_dev;
  1209. __entry->agno = be32_to_cpu(agf->agf_seqno),
  1210. __entry->flags = flags;
  1211. __entry->length = be32_to_cpu(agf->agf_length),
  1212. __entry->bno_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
  1213. __entry->cnt_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
  1214. __entry->bno_level =
  1215. be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]),
  1216. __entry->cnt_level =
  1217. be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]),
  1218. __entry->flfirst = be32_to_cpu(agf->agf_flfirst),
  1219. __entry->fllast = be32_to_cpu(agf->agf_fllast),
  1220. __entry->flcount = be32_to_cpu(agf->agf_flcount),
  1221. __entry->freeblks = be32_to_cpu(agf->agf_freeblks),
  1222. __entry->longest = be32_to_cpu(agf->agf_longest);
  1223. __entry->caller_ip = caller_ip;
  1224. ),
  1225. TP_printk("dev %d:%d agno %u flags %s length %u roots b %u c %u "
  1226. "levels b %u c %u flfirst %u fllast %u flcount %u "
  1227. "freeblks %u longest %u caller %pf",
  1228. MAJOR(__entry->dev), MINOR(__entry->dev),
  1229. __entry->agno,
  1230. __print_flags(__entry->flags, "|", XFS_AGF_FLAGS),
  1231. __entry->length,
  1232. __entry->bno_root,
  1233. __entry->cnt_root,
  1234. __entry->bno_level,
  1235. __entry->cnt_level,
  1236. __entry->flfirst,
  1237. __entry->fllast,
  1238. __entry->flcount,
  1239. __entry->freeblks,
  1240. __entry->longest,
  1241. (void *)__entry->caller_ip)
  1242. );
  1243. TRACE_EVENT(xfs_free_extent,
  1244. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
  1245. xfs_extlen_t len, bool isfl, int haveleft, int haveright),
  1246. TP_ARGS(mp, agno, agbno, len, isfl, haveleft, haveright),
  1247. TP_STRUCT__entry(
  1248. __field(dev_t, dev)
  1249. __field(xfs_agnumber_t, agno)
  1250. __field(xfs_agblock_t, agbno)
  1251. __field(xfs_extlen_t, len)
  1252. __field(int, isfl)
  1253. __field(int, haveleft)
  1254. __field(int, haveright)
  1255. ),
  1256. TP_fast_assign(
  1257. __entry->dev = mp->m_super->s_dev;
  1258. __entry->agno = agno;
  1259. __entry->agbno = agbno;
  1260. __entry->len = len;
  1261. __entry->isfl = isfl;
  1262. __entry->haveleft = haveleft;
  1263. __entry->haveright = haveright;
  1264. ),
  1265. TP_printk("dev %d:%d agno %u agbno %u len %u isfl %d %s",
  1266. MAJOR(__entry->dev), MINOR(__entry->dev),
  1267. __entry->agno,
  1268. __entry->agbno,
  1269. __entry->len,
  1270. __entry->isfl,
  1271. __entry->haveleft ?
  1272. (__entry->haveright ? "both" : "left") :
  1273. (__entry->haveright ? "right" : "none"))
  1274. );
DECLARE_EVENT_CLASS(xfs_alloc_class,
	TP_PROTO(struct xfs_alloc_arg *args),
	TP_ARGS(args),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(xfs_extlen_t, minlen)
		__field(xfs_extlen_t, maxlen)
		__field(xfs_extlen_t, mod)
		__field(xfs_extlen_t, prod)
		__field(xfs_extlen_t, minleft)
		__field(xfs_extlen_t, total)
		__field(xfs_extlen_t, alignment)
		__field(xfs_extlen_t, minalignslop)
		__field(xfs_extlen_t, len)
		__field(short, type)
		__field(short, otype)
		__field(char, wasdel)
		__field(char, wasfromfl)
		__field(char, isfl)
		__field(char, userdata)
		__field(xfs_fsblock_t, firstblock)
	),
	TP_fast_assign(
		__entry->dev = args->mp->m_super->s_dev;
		__entry->agno = args->agno;
		__entry->agbno = args->agbno;
		__entry->minlen = args->minlen;
		__entry->maxlen = args->maxlen;
		__entry->mod = args->mod;
		__entry->prod = args->prod;
		__entry->minleft = args->minleft;
		__entry->total = args->total;
		__entry->alignment = args->alignment;
		__entry->minalignslop = args->minalignslop;
		__entry->len = args->len;
		__entry->type = args->type;
		__entry->otype = args->otype;
		__entry->wasdel = args->wasdel;
		__entry->wasfromfl = args->wasfromfl;
		__entry->isfl = args->isfl;
		__entry->userdata = args->userdata;
		__entry->firstblock = args->firstblock;
	),
	TP_printk("dev %d:%d agno %u agbno %u minlen %u maxlen %u mod %u "
		  "prod %u minleft %u total %u alignment %u minalignslop %u "
		  "len %u type %s otype %s wasdel %d wasfromfl %d isfl %d "
		  "userdata %d firstblock 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agbno,
		  __entry->minlen,
		  __entry->maxlen,
		  __entry->mod,
		  __entry->prod,
		  __entry->minleft,
		  __entry->total,
		  __entry->alignment,
		  __entry->minalignslop,
		  __entry->len,
		  __print_symbolic(__entry->type, XFS_ALLOC_TYPES),
		  __print_symbolic(__entry->otype, XFS_ALLOC_TYPES),
		  __entry->wasdel,
		  __entry->wasfromfl,
		  __entry->isfl,
		  __entry->userdata,
		  (unsigned long long)__entry->firstblock)
)
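
/*
 * Each DEFINE_ALLOC_EVENT(name) expands to a trace_<name>(args) hook that
 * reuses the xfs_alloc_class format above.  A caller in the allocator would
 * fire one roughly as (illustrative only):
 *
 *	trace_xfs_alloc_exact_done(args);
 */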
#define DEFINE_ALLOC_EVENT(name) \
DEFINE_EVENT(xfs_alloc_class, name, \
	TP_PROTO(struct xfs_alloc_arg *args), \
	TP_ARGS(args))
DEFINE_ALLOC_EVENT(xfs_alloc_exact_done);
DEFINE_ALLOC_EVENT(xfs_alloc_exact_notfound);
DEFINE_ALLOC_EVENT(xfs_alloc_exact_error);
DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft);
DEFINE_ALLOC_EVENT(xfs_alloc_near_first);
DEFINE_ALLOC_EVENT(xfs_alloc_near_greater);
DEFINE_ALLOC_EVENT(xfs_alloc_near_lesser);
DEFINE_ALLOC_EVENT(xfs_alloc_near_error);
DEFINE_ALLOC_EVENT(xfs_alloc_near_noentry);
DEFINE_ALLOC_EVENT(xfs_alloc_near_busy);
DEFINE_ALLOC_EVENT(xfs_alloc_size_neither);
DEFINE_ALLOC_EVENT(xfs_alloc_size_noentry);
DEFINE_ALLOC_EVENT(xfs_alloc_size_nominleft);
DEFINE_ALLOC_EVENT(xfs_alloc_size_done);
DEFINE_ALLOC_EVENT(xfs_alloc_size_error);
DEFINE_ALLOC_EVENT(xfs_alloc_size_busy);
DEFINE_ALLOC_EVENT(xfs_alloc_small_freelist);
DEFINE_ALLOC_EVENT(xfs_alloc_small_notenough);
DEFINE_ALLOC_EVENT(xfs_alloc_small_done);
DEFINE_ALLOC_EVENT(xfs_alloc_small_error);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_badargs);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_nofix);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_noagbp);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_allfailed);
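
/*
 * Directory operation event class: records the directory inode, the entry
 * name (copied into a __dynamic_array sized by args->namelen so arbitrary
 * name lengths fit in the ring buffer), its hash value, the target inode
 * number, and the da_args operation flags decoded via XFS_DA_OP_FLAGS.
 */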
DECLARE_EVENT_CLASS(xfs_dir2_class,
	TP_PROTO(struct xfs_da_args *args),
	TP_ARGS(args),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__dynamic_array(char, name, args->namelen)
		__field(int, namelen)
		__field(xfs_dahash_t, hashval)
		__field(xfs_ino_t, inumber)
		__field(int, op_flags)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(args->dp)->i_sb->s_dev;
		__entry->ino = args->dp->i_ino;
		if (args->namelen)
			memcpy(__get_str(name), args->name, args->namelen);
		__entry->namelen = args->namelen;
		__entry->hashval = args->hashval;
		__entry->inumber = args->inumber;
		__entry->op_flags = args->op_flags;
	),
	TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d hashval 0x%x "
		  "inumber 0x%llx op_flags %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->namelen,
		  __entry->namelen ? __get_str(name) : NULL,
		  __entry->namelen,
		  __entry->hashval,
		  __entry->inumber,
		  __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS))
)

#define DEFINE_DIR2_EVENT(name) \
DEFINE_EVENT(xfs_dir2_class, name, \
	TP_PROTO(struct xfs_da_args *args), \
	TP_ARGS(args))
DEFINE_DIR2_EVENT(xfs_dir2_sf_addname);
DEFINE_DIR2_EVENT(xfs_dir2_sf_create);
DEFINE_DIR2_EVENT(xfs_dir2_sf_lookup);
DEFINE_DIR2_EVENT(xfs_dir2_sf_replace);
DEFINE_DIR2_EVENT(xfs_dir2_sf_removename);
DEFINE_DIR2_EVENT(xfs_dir2_sf_toino4);
DEFINE_DIR2_EVENT(xfs_dir2_sf_toino8);
DEFINE_DIR2_EVENT(xfs_dir2_sf_to_block);
DEFINE_DIR2_EVENT(xfs_dir2_block_addname);
DEFINE_DIR2_EVENT(xfs_dir2_block_lookup);
DEFINE_DIR2_EVENT(xfs_dir2_block_replace);
DEFINE_DIR2_EVENT(xfs_dir2_block_removename);
DEFINE_DIR2_EVENT(xfs_dir2_block_to_sf);
DEFINE_DIR2_EVENT(xfs_dir2_block_to_leaf);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_addname);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_lookup);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_replace);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_removename);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_to_block);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_to_node);
DEFINE_DIR2_EVENT(xfs_dir2_node_addname);
DEFINE_DIR2_EVENT(xfs_dir2_node_lookup);
DEFINE_DIR2_EVENT(xfs_dir2_node_replace);
DEFINE_DIR2_EVENT(xfs_dir2_node_removename);
DEFINE_DIR2_EVENT(xfs_dir2_node_to_leaf);
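
/*
 * Directory space management events: record only the directory inode, the
 * operation flags, and an index (a leaf entry index or a directory block
 * number, depending on the event) for leaf add/remove and directory
 * grow/shrink operations.
 */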
DECLARE_EVENT_CLASS(xfs_dir2_space_class,
	TP_PROTO(struct xfs_da_args *args, int idx),
	TP_ARGS(args, idx),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(int, op_flags)
		__field(int, idx)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(args->dp)->i_sb->s_dev;
		__entry->ino = args->dp->i_ino;
		__entry->op_flags = args->op_flags;
		__entry->idx = idx;
	),
	TP_printk("dev %d:%d ino 0x%llx op_flags %s index %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS),
		  __entry->idx)
)

#define DEFINE_DIR2_SPACE_EVENT(name) \
DEFINE_EVENT(xfs_dir2_space_class, name, \
	TP_PROTO(struct xfs_da_args *args, int idx), \
	TP_ARGS(args, idx))
DEFINE_DIR2_SPACE_EVENT(xfs_dir2_leafn_add);
DEFINE_DIR2_SPACE_EVENT(xfs_dir2_leafn_remove);
DEFINE_DIR2_SPACE_EVENT(xfs_dir2_grow_inode);
DEFINE_DIR2_SPACE_EVENT(xfs_dir2_shrink_inode);
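
/*
 * Traces entries being moved between directory leaf blocks during a
 * rebalance: the source index, destination index, and the number of
 * entries moved.
 */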
TRACE_EVENT(xfs_dir2_leafn_moveents,
	TP_PROTO(struct xfs_da_args *args, int src_idx, int dst_idx, int count),
	TP_ARGS(args, src_idx, dst_idx, count),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(int, op_flags)
		__field(int, src_idx)
		__field(int, dst_idx)
		__field(int, count)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(args->dp)->i_sb->s_dev;
		__entry->ino = args->dp->i_ino;
		__entry->op_flags = args->op_flags;
		__entry->src_idx = src_idx;
		__entry->dst_idx = dst_idx;
		__entry->count = count;
	),
	TP_printk("dev %d:%d ino 0x%llx op_flags %s "
		  "src_idx %d dst_idx %d count %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS),
		  __entry->src_idx,
		  __entry->dst_idx,
		  __entry->count)
);
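
/*
 * Value/name tables consumed by __print_symbolic() in the swap-extent
 * events below: which of the two inodes is being reported (the swap target
 * or the temporary inode) and the inode fork format.
 */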
#define XFS_SWAPEXT_INODES \
	{ 0,	"target" }, \
	{ 1,	"temp" }

#define XFS_INODE_FORMAT_STR \
	{ 0,	"invalid" }, \
	{ 1,	"local" }, \
	{ 2,	"extent" }, \
	{ 3,	"btree" }

DECLARE_EVENT_CLASS(xfs_swap_extent_class,
	TP_PROTO(struct xfs_inode *ip, int which),
	TP_ARGS(ip, which),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(int, which)
		__field(xfs_ino_t, ino)
		__field(int, format)
		__field(int, nex)
		__field(int, max_nex)
		__field(int, broot_size)
		__field(int, fork_off)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->which = which;
		__entry->ino = ip->i_ino;
		__entry->format = ip->i_d.di_format;
		__entry->nex = ip->i_d.di_nextents;
		__entry->max_nex = ip->i_df.if_ext_max;
		__entry->broot_size = ip->i_df.if_broot_bytes;
		__entry->fork_off = XFS_IFORK_BOFF(ip);
	),
	TP_printk("dev %d:%d ino 0x%llx (%s), %s format, num_extents %d, "
		  "Max in-fork extents %d, broot size %d, fork offset %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_symbolic(__entry->which, XFS_SWAPEXT_INODES),
		  __print_symbolic(__entry->format, XFS_INODE_FORMAT_STR),
		  __entry->nex,
		  __entry->max_nex,
		  __entry->broot_size,
		  __entry->fork_off)
)

#define DEFINE_SWAPEXT_EVENT(name) \
DEFINE_EVENT(xfs_swap_extent_class, name, \
	TP_PROTO(struct xfs_inode *ip, int which), \
	TP_ARGS(ip, which))
DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before);
DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after);
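
/*
 * Log recovery transaction item events: identify the recovery transaction
 * by its log tid, the item being processed (its address and log item type,
 * decoded via XFS_LI_TYPE_DESC), the recovery pass, and how many of the
 * item's regions have been gathered so far (count/total).
 */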
DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
	TP_PROTO(struct log *log, struct xlog_recover *trans,
		struct xlog_recover_item *item, int pass),
	TP_ARGS(log, trans, item, pass),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned long, item)
		__field(xlog_tid_t, tid)
		__field(int, type)
		__field(int, pass)
		__field(int, count)
		__field(int, total)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		__entry->item = (unsigned long)item;
		__entry->tid = trans->r_log_tid;
		__entry->type = ITEM_TYPE(item);
		__entry->pass = pass;
		__entry->count = item->ri_cnt;
		__entry->total = item->ri_total;
	),
	TP_printk("dev %d:%d trans 0x%x, pass %d, item 0x%p, item type %s "
		  "item region count/total %d/%d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->tid,
		  __entry->pass,
		  (void *)__entry->item,
		  __print_symbolic(__entry->type, XFS_LI_TYPE_DESC),
		  __entry->count,
		  __entry->total)
)

#define DEFINE_LOG_RECOVER_ITEM(name) \
DEFINE_EVENT(xfs_log_recover_item_class, name, \
	TP_PROTO(struct log *log, struct xlog_recover *trans, \
		struct xlog_recover_item *item, int pass), \
	TP_ARGS(log, trans, item, pass))
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_add);
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_add_cont);
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_head);
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_tail);
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_recover);
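
/*
 * Log recovery buffer item events: dump the xfs_buf_log_format header of a
 * logged buffer (disk block number, length, flags, size, and dirty map size)
 * as cancelled and recovered buffers are processed.
 */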
DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class,
	TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f),
	TP_ARGS(log, buf_f),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(__int64_t, blkno)
		__field(unsigned short, len)
		__field(unsigned short, flags)
		__field(unsigned short, size)
		__field(unsigned int, map_size)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		__entry->blkno = buf_f->blf_blkno;
		__entry->len = buf_f->blf_len;
		__entry->flags = buf_f->blf_flags;
		__entry->size = buf_f->blf_size;
		__entry->map_size = buf_f->blf_map_size;
	),
	TP_printk("dev %d:%d blkno 0x%llx, len %u, flags 0x%x, size %d, "
		  "map_size %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->blkno,
		  __entry->len,
		  __entry->flags,
		  __entry->size,
		  __entry->map_size)
)

#define DEFINE_LOG_RECOVER_BUF_ITEM(name) \
DEFINE_EVENT(xfs_log_recover_buf_item_class, name, \
	TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f), \
	TP_ARGS(log, buf_f))
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_not_cancel);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_add);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_ref_inc);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_recover);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_inode_buf);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_reg_buf);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf);
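
/*
 * Log recovery inode item events: dump the xfs_inode_log_format header
 * (inode number, region count, logged fields mask, attr/data fork sizes,
 * and the backing buffer's block number, length, and inode offset).
 */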
DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class,
	TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f),
	TP_ARGS(log, in_f),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(unsigned short, size)
		__field(int, fields)
		__field(unsigned short, asize)
		__field(unsigned short, dsize)
		__field(__int64_t, blkno)
		__field(int, len)
		__field(int, boffset)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		__entry->ino = in_f->ilf_ino;
		__entry->size = in_f->ilf_size;
		__entry->fields = in_f->ilf_fields;
		__entry->asize = in_f->ilf_asize;
		__entry->dsize = in_f->ilf_dsize;
		__entry->blkno = in_f->ilf_blkno;
		__entry->len = in_f->ilf_len;
		__entry->boffset = in_f->ilf_boffset;
	),
	TP_printk("dev %d:%d ino 0x%llx, size %u, fields 0x%x, asize %d, "
		  "dsize %d, blkno 0x%llx, len %d, boffset %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->size,
		  __entry->fields,
		  __entry->asize,
		  __entry->dsize,
		  __entry->blkno,
		  __entry->len,
		  __entry->boffset)
)

#define DEFINE_LOG_RECOVER_INO_ITEM(name) \
DEFINE_EVENT(xfs_log_recover_ino_item_class, name, \
	TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f), \
	TP_ARGS(log, in_f))
DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover);
DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_cancel);
DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_skip);
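
/*
 * Discard (TRIM) events: record the AG extent (agno, agbno, len) that is
 * being discarded, or why a candidate extent was skipped (too small,
 * excluded, or busy).
 */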
DECLARE_EVENT_CLASS(xfs_discard_class,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
		 xfs_agblock_t agbno, xfs_extlen_t len),
	TP_ARGS(mp, agno, agbno, len),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(xfs_extlen_t, len)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = agno;
		__entry->agbno = agbno;
		__entry->len = len;
	),
	TP_printk("dev %d:%d agno %u agbno %u len %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agbno,
		  __entry->len)
)

#define DEFINE_DISCARD_EVENT(name) \
DEFINE_EVENT(xfs_discard_class, name, \
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
		 xfs_agblock_t agbno, xfs_extlen_t len), \
	TP_ARGS(mp, agno, agbno, len))
DEFINE_DISCARD_EVENT(xfs_discard_extent);
DEFINE_DISCARD_EVENT(xfs_discard_toosmall);
DEFINE_DISCARD_EVENT(xfs_discard_exclude);
DEFINE_DISCARD_EVENT(xfs_discard_busy);

#endif /* _TRACE_XFS_H */
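
/*
 * Standard trailer for a tracepoint header: point TRACE_INCLUDE_PATH at the
 * current directory, name the include file, and pull in define_trace.h,
 * which re-includes this header to generate the event definitions when
 * CREATE_TRACE_POINTS is defined.
 */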
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE xfs_trace
#include <trace/define_trace.h>