xfs_trace.h

  1. /*
  2. * Copyright (c) 2009, Christoph Hellwig
  3. * All Rights Reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it would be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write the Free Software Foundation,
  16. * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  17. */
  18. #undef TRACE_SYSTEM
  19. #define TRACE_SYSTEM xfs
  20. #if !defined(_TRACE_XFS_H) || defined(TRACE_HEADER_MULTI_READ)
  21. #define _TRACE_XFS_H
  22. #include <linux/tracepoint.h>
  23. struct xfs_agf;
  24. struct xfs_alloc_arg;
  25. struct xfs_attr_list_context;
  26. struct xfs_buf_log_item;
  27. struct xfs_da_args;
  28. struct xfs_da_node_entry;
  29. struct xfs_dquot;
  30. struct xfs_log_item;
  31. struct xlog_ticket;
  32. struct xlog;
  33. struct xlog_recover;
  34. struct xlog_recover_item;
  35. struct xfs_buf_log_format;
  36. struct xfs_inode_log_format;
  37. struct xfs_bmbt_irec;
  38. DECLARE_EVENT_CLASS(xfs_attr_list_class,
  39. TP_PROTO(struct xfs_attr_list_context *ctx),
  40. TP_ARGS(ctx),
  41. TP_STRUCT__entry(
  42. __field(dev_t, dev)
  43. __field(xfs_ino_t, ino)
  44. __field(u32, hashval)
  45. __field(u32, blkno)
  46. __field(u32, offset)
  47. __field(void *, alist)
  48. __field(int, bufsize)
  49. __field(int, count)
  50. __field(int, firstu)
  51. __field(int, dupcnt)
  52. __field(int, flags)
  53. ),
  54. TP_fast_assign(
  55. __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
  56. __entry->ino = ctx->dp->i_ino;
  57. __entry->hashval = ctx->cursor->hashval;
  58. __entry->blkno = ctx->cursor->blkno;
  59. __entry->offset = ctx->cursor->offset;
  60. __entry->alist = ctx->alist;
  61. __entry->bufsize = ctx->bufsize;
  62. __entry->count = ctx->count;
  63. __entry->firstu = ctx->firstu;
  64. __entry->flags = ctx->flags;
  65. ),
  66. TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
  67. "alist 0x%p size %u count %u firstu %u flags %d %s",
  68. MAJOR(__entry->dev), MINOR(__entry->dev),
  69. __entry->ino,
  70. __entry->hashval,
  71. __entry->blkno,
  72. __entry->offset,
  73. __entry->dupcnt,
  74. __entry->alist,
  75. __entry->bufsize,
  76. __entry->count,
  77. __entry->firstu,
  78. __entry->flags,
  79. __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS)
  80. )
  81. )
  82. #define DEFINE_ATTR_LIST_EVENT(name) \
  83. DEFINE_EVENT(xfs_attr_list_class, name, \
  84. TP_PROTO(struct xfs_attr_list_context *ctx), \
  85. TP_ARGS(ctx))
  86. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf);
  87. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf_all);
  88. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf);
  89. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf_end);
  90. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_full);
  91. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_add);
  92. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_wrong_blk);
  93. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_notfound);
  94. DEFINE_ATTR_LIST_EVENT(xfs_attr_leaf_list);
  95. DEFINE_ATTR_LIST_EVENT(xfs_attr_node_list);
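/*
 * Usage sketch (illustrative, not from the upstream header): each
 * DEFINE_ATTR_LIST_EVENT(name) above expands to DEFINE_EVENT() against
 * xfs_attr_list_class, creating a trace_<name>() hook with the class
 * prototype. A caller in the attr list code would look roughly like:
 *
 *	trace_xfs_attr_list_sf(context);	(context is a struct xfs_attr_list_context *)
 */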
  96. DECLARE_EVENT_CLASS(xfs_perag_class,
  97. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount,
  98. unsigned long caller_ip),
  99. TP_ARGS(mp, agno, refcount, caller_ip),
  100. TP_STRUCT__entry(
  101. __field(dev_t, dev)
  102. __field(xfs_agnumber_t, agno)
  103. __field(int, refcount)
  104. __field(unsigned long, caller_ip)
  105. ),
  106. TP_fast_assign(
  107. __entry->dev = mp->m_super->s_dev;
  108. __entry->agno = agno;
  109. __entry->refcount = refcount;
  110. __entry->caller_ip = caller_ip;
  111. ),
  112. TP_printk("dev %d:%d agno %u refcount %d caller %pf",
  113. MAJOR(__entry->dev), MINOR(__entry->dev),
  114. __entry->agno,
  115. __entry->refcount,
  116. (char *)__entry->caller_ip)
  117. );
  118. #define DEFINE_PERAG_REF_EVENT(name) \
  119. DEFINE_EVENT(xfs_perag_class, name, \
  120. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount, \
  121. unsigned long caller_ip), \
  122. TP_ARGS(mp, agno, refcount, caller_ip))
  123. DEFINE_PERAG_REF_EVENT(xfs_perag_get);
  124. DEFINE_PERAG_REF_EVENT(xfs_perag_get_tag);
  125. DEFINE_PERAG_REF_EVENT(xfs_perag_put);
  126. DEFINE_PERAG_REF_EVENT(xfs_perag_set_reclaim);
  127. DEFINE_PERAG_REF_EVENT(xfs_perag_clear_reclaim);
  128. DEFINE_PERAG_REF_EVENT(xfs_perag_set_eofblocks);
  129. DEFINE_PERAG_REF_EVENT(xfs_perag_clear_eofblocks);
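/*
 * Usage sketch (illustrative): the per-AG reference events take the mount,
 * the AG number, the updated reference count ('ref' below is an assumed
 * local) and the caller address:
 *
 *	trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
 */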
  130. TRACE_EVENT(xfs_attr_list_node_descend,
  131. TP_PROTO(struct xfs_attr_list_context *ctx,
  132. struct xfs_da_node_entry *btree),
  133. TP_ARGS(ctx, btree),
  134. TP_STRUCT__entry(
  135. __field(dev_t, dev)
  136. __field(xfs_ino_t, ino)
  137. __field(u32, hashval)
  138. __field(u32, blkno)
  139. __field(u32, offset)
  140. __field(void *, alist)
  141. __field(int, bufsize)
  142. __field(int, count)
  143. __field(int, firstu)
  144. __field(int, dupcnt)
  145. __field(int, flags)
  146. __field(u32, bt_hashval)
  147. __field(u32, bt_before)
  148. ),
  149. TP_fast_assign(
  150. __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
  151. __entry->ino = ctx->dp->i_ino;
  152. __entry->hashval = ctx->cursor->hashval;
  153. __entry->blkno = ctx->cursor->blkno;
  154. __entry->offset = ctx->cursor->offset;
  155. __entry->alist = ctx->alist;
  156. __entry->bufsize = ctx->bufsize;
  157. __entry->count = ctx->count;
  158. __entry->firstu = ctx->firstu;
  159. __entry->flags = ctx->flags;
  160. __entry->bt_hashval = be32_to_cpu(btree->hashval);
  161. __entry->bt_before = be32_to_cpu(btree->before);
  162. ),
  163. TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
  164. "alist 0x%p size %u count %u firstu %u flags %d %s "
  165. "node hashval %u, node before %u",
  166. MAJOR(__entry->dev), MINOR(__entry->dev),
  167. __entry->ino,
  168. __entry->hashval,
  169. __entry->blkno,
  170. __entry->offset,
  171. __entry->dupcnt,
  172. __entry->alist,
  173. __entry->bufsize,
  174. __entry->count,
  175. __entry->firstu,
  176. __entry->flags,
  177. __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS),
  178. __entry->bt_hashval,
  179. __entry->bt_before)
  180. );
  181. TRACE_EVENT(xfs_iext_insert,
  182. TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx,
  183. struct xfs_bmbt_irec *r, int state, unsigned long caller_ip),
  184. TP_ARGS(ip, idx, r, state, caller_ip),
  185. TP_STRUCT__entry(
  186. __field(dev_t, dev)
  187. __field(xfs_ino_t, ino)
  188. __field(xfs_extnum_t, idx)
  189. __field(xfs_fileoff_t, startoff)
  190. __field(xfs_fsblock_t, startblock)
  191. __field(xfs_filblks_t, blockcount)
  192. __field(xfs_exntst_t, state)
  193. __field(int, bmap_state)
  194. __field(unsigned long, caller_ip)
  195. ),
  196. TP_fast_assign(
  197. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  198. __entry->ino = ip->i_ino;
  199. __entry->idx = idx;
  200. __entry->startoff = r->br_startoff;
  201. __entry->startblock = r->br_startblock;
  202. __entry->blockcount = r->br_blockcount;
  203. __entry->state = r->br_state;
  204. __entry->bmap_state = state;
  205. __entry->caller_ip = caller_ip;
  206. ),
  207. TP_printk("dev %d:%d ino 0x%llx state %s idx %ld "
  208. "offset %lld block %lld count %lld flag %d caller %pf",
  209. MAJOR(__entry->dev), MINOR(__entry->dev),
  210. __entry->ino,
  211. __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
  212. (long)__entry->idx,
  213. __entry->startoff,
  214. (__int64_t)__entry->startblock,
  215. __entry->blockcount,
  216. __entry->state,
  217. (char *)__entry->caller_ip)
  218. );
  219. DECLARE_EVENT_CLASS(xfs_bmap_class,
  220. TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state,
  221. unsigned long caller_ip),
  222. TP_ARGS(ip, idx, state, caller_ip),
  223. TP_STRUCT__entry(
  224. __field(dev_t, dev)
  225. __field(xfs_ino_t, ino)
  226. __field(xfs_extnum_t, idx)
  227. __field(xfs_fileoff_t, startoff)
  228. __field(xfs_fsblock_t, startblock)
  229. __field(xfs_filblks_t, blockcount)
  230. __field(xfs_exntst_t, state)
  231. __field(int, bmap_state)
  232. __field(unsigned long, caller_ip)
  233. ),
  234. TP_fast_assign(
  235. struct xfs_ifork *ifp = (state & BMAP_ATTRFORK) ?
  236. ip->i_afp : &ip->i_df;
  237. struct xfs_bmbt_irec r;
  238. xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &r);
  239. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  240. __entry->ino = ip->i_ino;
  241. __entry->idx = idx;
  242. __entry->startoff = r.br_startoff;
  243. __entry->startblock = r.br_startblock;
  244. __entry->blockcount = r.br_blockcount;
  245. __entry->state = r.br_state;
  246. __entry->bmap_state = state;
  247. __entry->caller_ip = caller_ip;
  248. ),
  249. TP_printk("dev %d:%d ino 0x%llx state %s idx %ld "
  250. "offset %lld block %lld count %lld flag %d caller %pf",
  251. MAJOR(__entry->dev), MINOR(__entry->dev),
  252. __entry->ino,
  253. __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
  254. (long)__entry->idx,
  255. __entry->startoff,
  256. (__int64_t)__entry->startblock,
  257. __entry->blockcount,
  258. __entry->state,
  259. (char *)__entry->caller_ip)
  260. )
  261. #define DEFINE_BMAP_EVENT(name) \
  262. DEFINE_EVENT(xfs_bmap_class, name, \
  263. TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state, \
  264. unsigned long caller_ip), \
  265. TP_ARGS(ip, idx, state, caller_ip))
  266. DEFINE_BMAP_EVENT(xfs_iext_remove);
  267. DEFINE_BMAP_EVENT(xfs_bmap_pre_update);
  268. DEFINE_BMAP_EVENT(xfs_bmap_post_update);
  269. DEFINE_BMAP_EVENT(xfs_extlist);
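/*
 * Usage sketch (illustrative): xfs_bmap_class events name an in-core extent
 * by inode and extent index and let TP_fast_assign() read the record itself,
 * so a call before updating extent 'idx' might be:
 *
 *	trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
 */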
  270. DECLARE_EVENT_CLASS(xfs_buf_class,
  271. TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip),
  272. TP_ARGS(bp, caller_ip),
  273. TP_STRUCT__entry(
  274. __field(dev_t, dev)
  275. __field(xfs_daddr_t, bno)
  276. __field(int, nblks)
  277. __field(int, hold)
  278. __field(int, pincount)
  279. __field(unsigned, lockval)
  280. __field(unsigned, flags)
  281. __field(unsigned long, caller_ip)
  282. ),
  283. TP_fast_assign(
  284. __entry->dev = bp->b_target->bt_dev;
  285. __entry->bno = bp->b_bn;
  286. __entry->nblks = bp->b_length;
  287. __entry->hold = atomic_read(&bp->b_hold);
  288. __entry->pincount = atomic_read(&bp->b_pin_count);
  289. __entry->lockval = bp->b_sema.count;
  290. __entry->flags = bp->b_flags;
  291. __entry->caller_ip = caller_ip;
  292. ),
  293. TP_printk("dev %d:%d bno 0x%llx nblks 0x%x hold %d pincount %d "
  294. "lock %d flags %s caller %pf",
  295. MAJOR(__entry->dev), MINOR(__entry->dev),
  296. (unsigned long long)__entry->bno,
  297. __entry->nblks,
  298. __entry->hold,
  299. __entry->pincount,
  300. __entry->lockval,
  301. __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
  302. (void *)__entry->caller_ip)
  303. )
  304. #define DEFINE_BUF_EVENT(name) \
  305. DEFINE_EVENT(xfs_buf_class, name, \
  306. TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \
  307. TP_ARGS(bp, caller_ip))
  308. DEFINE_BUF_EVENT(xfs_buf_init);
  309. DEFINE_BUF_EVENT(xfs_buf_free);
  310. DEFINE_BUF_EVENT(xfs_buf_hold);
  311. DEFINE_BUF_EVENT(xfs_buf_rele);
  312. DEFINE_BUF_EVENT(xfs_buf_iodone);
  313. DEFINE_BUF_EVENT(xfs_buf_iorequest);
  314. DEFINE_BUF_EVENT(xfs_buf_bawrite);
  315. DEFINE_BUF_EVENT(xfs_buf_lock);
  316. DEFINE_BUF_EVENT(xfs_buf_lock_done);
  317. DEFINE_BUF_EVENT(xfs_buf_trylock);
  318. DEFINE_BUF_EVENT(xfs_buf_unlock);
  319. DEFINE_BUF_EVENT(xfs_buf_iowait);
  320. DEFINE_BUF_EVENT(xfs_buf_iowait_done);
  321. DEFINE_BUF_EVENT(xfs_buf_delwri_queue);
  322. DEFINE_BUF_EVENT(xfs_buf_delwri_queued);
  323. DEFINE_BUF_EVENT(xfs_buf_delwri_split);
  324. DEFINE_BUF_EVENT(xfs_buf_get_uncached);
  325. DEFINE_BUF_EVENT(xfs_bdstrat_shut);
  326. DEFINE_BUF_EVENT(xfs_buf_item_relse);
  327. DEFINE_BUF_EVENT(xfs_buf_item_iodone);
  328. DEFINE_BUF_EVENT(xfs_buf_item_iodone_async);
  329. DEFINE_BUF_EVENT(xfs_buf_error_relse);
  330. DEFINE_BUF_EVENT(xfs_buf_wait_buftarg);
  331. DEFINE_BUF_EVENT(xfs_trans_read_buf_io);
  332. DEFINE_BUF_EVENT(xfs_trans_read_buf_shut);
  333. /* not really buffer traces, but the buf provides useful information */
  334. DEFINE_BUF_EVENT(xfs_btree_corrupt);
  335. DEFINE_BUF_EVENT(xfs_da_btree_corrupt);
  336. DEFINE_BUF_EVENT(xfs_reset_dqcounts);
  337. DEFINE_BUF_EVENT(xfs_inode_item_push);
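/*
 * Usage sketch (illustrative): plain buffer events need only the buffer and
 * the caller address, e.g. on taking a hold reference:
 *
 *	trace_xfs_buf_hold(bp, _RET_IP_);
 */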
  338. /* pass flags explicitly */
  339. DECLARE_EVENT_CLASS(xfs_buf_flags_class,
  340. TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip),
  341. TP_ARGS(bp, flags, caller_ip),
  342. TP_STRUCT__entry(
  343. __field(dev_t, dev)
  344. __field(xfs_daddr_t, bno)
  345. __field(size_t, buffer_length)
  346. __field(int, hold)
  347. __field(int, pincount)
  348. __field(unsigned, lockval)
  349. __field(unsigned, flags)
  350. __field(unsigned long, caller_ip)
  351. ),
  352. TP_fast_assign(
  353. __entry->dev = bp->b_target->bt_dev;
  354. __entry->bno = bp->b_bn;
  355. __entry->buffer_length = BBTOB(bp->b_length);
  356. __entry->flags = flags;
  357. __entry->hold = atomic_read(&bp->b_hold);
  358. __entry->pincount = atomic_read(&bp->b_pin_count);
  359. __entry->lockval = bp->b_sema.count;
  360. __entry->caller_ip = caller_ip;
  361. ),
  362. TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
  363. "lock %d flags %s caller %pf",
  364. MAJOR(__entry->dev), MINOR(__entry->dev),
  365. (unsigned long long)__entry->bno,
  366. __entry->buffer_length,
  367. __entry->hold,
  368. __entry->pincount,
  369. __entry->lockval,
  370. __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
  371. (void *)__entry->caller_ip)
  372. )
  373. #define DEFINE_BUF_FLAGS_EVENT(name) \
  374. DEFINE_EVENT(xfs_buf_flags_class, name, \
  375. TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \
  376. TP_ARGS(bp, flags, caller_ip))
  377. DEFINE_BUF_FLAGS_EVENT(xfs_buf_find);
  378. DEFINE_BUF_FLAGS_EVENT(xfs_buf_get);
  379. DEFINE_BUF_FLAGS_EVENT(xfs_buf_read);
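/*
 * Usage sketch (illustrative): the *_flags variants are for lookup paths
 * where the search flags are not yet reflected in bp->b_flags, hence they
 * are passed explicitly:
 *
 *	trace_xfs_buf_get(bp, flags, _RET_IP_);
 */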
  380. TRACE_EVENT(xfs_buf_ioerror,
  381. TP_PROTO(struct xfs_buf *bp, int error, unsigned long caller_ip),
  382. TP_ARGS(bp, error, caller_ip),
  383. TP_STRUCT__entry(
  384. __field(dev_t, dev)
  385. __field(xfs_daddr_t, bno)
  386. __field(size_t, buffer_length)
  387. __field(unsigned, flags)
  388. __field(int, hold)
  389. __field(int, pincount)
  390. __field(unsigned, lockval)
  391. __field(int, error)
  392. __field(unsigned long, caller_ip)
  393. ),
  394. TP_fast_assign(
  395. __entry->dev = bp->b_target->bt_dev;
  396. __entry->bno = bp->b_bn;
  397. __entry->buffer_length = BBTOB(bp->b_length);
  398. __entry->hold = atomic_read(&bp->b_hold);
  399. __entry->pincount = atomic_read(&bp->b_pin_count);
  400. __entry->lockval = bp->b_sema.count;
  401. __entry->error = error;
  402. __entry->flags = bp->b_flags;
  403. __entry->caller_ip = caller_ip;
  404. ),
  405. TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
  406. "lock %d error %d flags %s caller %pf",
  407. MAJOR(__entry->dev), MINOR(__entry->dev),
  408. (unsigned long long)__entry->bno,
  409. __entry->buffer_length,
  410. __entry->hold,
  411. __entry->pincount,
  412. __entry->lockval,
  413. __entry->error,
  414. __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
  415. (void *)__entry->caller_ip)
  416. );
  417. DECLARE_EVENT_CLASS(xfs_buf_item_class,
  418. TP_PROTO(struct xfs_buf_log_item *bip),
  419. TP_ARGS(bip),
  420. TP_STRUCT__entry(
  421. __field(dev_t, dev)
  422. __field(xfs_daddr_t, buf_bno)
  423. __field(size_t, buf_len)
  424. __field(int, buf_hold)
  425. __field(int, buf_pincount)
  426. __field(int, buf_lockval)
  427. __field(unsigned, buf_flags)
  428. __field(unsigned, bli_recur)
  429. __field(int, bli_refcount)
  430. __field(unsigned, bli_flags)
  431. __field(void *, li_desc)
  432. __field(unsigned, li_flags)
  433. ),
  434. TP_fast_assign(
  435. __entry->dev = bip->bli_buf->b_target->bt_dev;
  436. __entry->bli_flags = bip->bli_flags;
  437. __entry->bli_recur = bip->bli_recur;
  438. __entry->bli_refcount = atomic_read(&bip->bli_refcount);
  439. __entry->buf_bno = bip->bli_buf->b_bn;
  440. __entry->buf_len = BBTOB(bip->bli_buf->b_length);
  441. __entry->buf_flags = bip->bli_buf->b_flags;
  442. __entry->buf_hold = atomic_read(&bip->bli_buf->b_hold);
  443. __entry->buf_pincount = atomic_read(&bip->bli_buf->b_pin_count);
  444. __entry->buf_lockval = bip->bli_buf->b_sema.count;
  445. __entry->li_desc = bip->bli_item.li_desc;
  446. __entry->li_flags = bip->bli_item.li_flags;
  447. ),
  448. TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
  449. "lock %d flags %s recur %d refcount %d bliflags %s "
  450. "lidesc 0x%p liflags %s",
  451. MAJOR(__entry->dev), MINOR(__entry->dev),
  452. (unsigned long long)__entry->buf_bno,
  453. __entry->buf_len,
  454. __entry->buf_hold,
  455. __entry->buf_pincount,
  456. __entry->buf_lockval,
  457. __print_flags(__entry->buf_flags, "|", XFS_BUF_FLAGS),
  458. __entry->bli_recur,
  459. __entry->bli_refcount,
  460. __print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS),
  461. __entry->li_desc,
  462. __print_flags(__entry->li_flags, "|", XFS_LI_FLAGS))
  463. )
  464. #define DEFINE_BUF_ITEM_EVENT(name) \
  465. DEFINE_EVENT(xfs_buf_item_class, name, \
  466. TP_PROTO(struct xfs_buf_log_item *bip), \
  467. TP_ARGS(bip))
  468. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size);
  469. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_stale);
  470. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format);
  471. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_stale);
  472. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin);
  473. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin);
  474. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale);
  475. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock);
  476. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale);
  477. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed);
  478. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push);
  479. DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);
  480. DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf_recur);
  481. DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb);
  482. DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb_recur);
  483. DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf);
  484. DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf_recur);
  485. DEFINE_BUF_ITEM_EVENT(xfs_trans_log_buf);
  486. DEFINE_BUF_ITEM_EVENT(xfs_trans_brelse);
  487. DEFINE_BUF_ITEM_EVENT(xfs_trans_bjoin);
  488. DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold);
  489. DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release);
  490. DEFINE_BUF_ITEM_EVENT(xfs_trans_binval);
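/*
 * Usage sketch (illustrative): buf log item events take only the item; the
 * class pulls the buffer state out of bip->bli_buf itself:
 *
 *	trace_xfs_buf_item_pin(bip);
 */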
  491. DECLARE_EVENT_CLASS(xfs_lock_class,
  492. TP_PROTO(struct xfs_inode *ip, unsigned lock_flags,
  493. unsigned long caller_ip),
  494. TP_ARGS(ip, lock_flags, caller_ip),
  495. TP_STRUCT__entry(
  496. __field(dev_t, dev)
  497. __field(xfs_ino_t, ino)
  498. __field(int, lock_flags)
  499. __field(unsigned long, caller_ip)
  500. ),
  501. TP_fast_assign(
  502. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  503. __entry->ino = ip->i_ino;
  504. __entry->lock_flags = lock_flags;
  505. __entry->caller_ip = caller_ip;
  506. ),
  507. TP_printk("dev %d:%d ino 0x%llx flags %s caller %pf",
  508. MAJOR(__entry->dev), MINOR(__entry->dev),
  509. __entry->ino,
  510. __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS),
  511. (void *)__entry->caller_ip)
  512. )
  513. #define DEFINE_LOCK_EVENT(name) \
  514. DEFINE_EVENT(xfs_lock_class, name, \
  515. TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, \
  516. unsigned long caller_ip), \
  517. TP_ARGS(ip, lock_flags, caller_ip))
  518. DEFINE_LOCK_EVENT(xfs_ilock);
  519. DEFINE_LOCK_EVENT(xfs_ilock_nowait);
  520. DEFINE_LOCK_EVENT(xfs_ilock_demote);
  521. DEFINE_LOCK_EVENT(xfs_iunlock);
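/*
 * Usage sketch (illustrative): the inode locking paths pass the lock flags
 * and their return address:
 *
 *	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
 */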
  522. DECLARE_EVENT_CLASS(xfs_inode_class,
  523. TP_PROTO(struct xfs_inode *ip),
  524. TP_ARGS(ip),
  525. TP_STRUCT__entry(
  526. __field(dev_t, dev)
  527. __field(xfs_ino_t, ino)
  528. ),
  529. TP_fast_assign(
  530. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  531. __entry->ino = ip->i_ino;
  532. ),
  533. TP_printk("dev %d:%d ino 0x%llx",
  534. MAJOR(__entry->dev), MINOR(__entry->dev),
  535. __entry->ino)
  536. )
  537. #define DEFINE_INODE_EVENT(name) \
  538. DEFINE_EVENT(xfs_inode_class, name, \
  539. TP_PROTO(struct xfs_inode *ip), \
  540. TP_ARGS(ip))
  541. DEFINE_INODE_EVENT(xfs_iget_skip);
  542. DEFINE_INODE_EVENT(xfs_iget_reclaim);
  543. DEFINE_INODE_EVENT(xfs_iget_reclaim_fail);
  544. DEFINE_INODE_EVENT(xfs_iget_hit);
  545. DEFINE_INODE_EVENT(xfs_iget_miss);
  546. DEFINE_INODE_EVENT(xfs_getattr);
  547. DEFINE_INODE_EVENT(xfs_setattr);
  548. DEFINE_INODE_EVENT(xfs_readlink);
  549. DEFINE_INODE_EVENT(xfs_inactive_symlink);
  550. DEFINE_INODE_EVENT(xfs_alloc_file_space);
  551. DEFINE_INODE_EVENT(xfs_free_file_space);
  552. DEFINE_INODE_EVENT(xfs_readdir);
  553. #ifdef CONFIG_XFS_POSIX_ACL
  554. DEFINE_INODE_EVENT(xfs_get_acl);
  555. #endif
  556. DEFINE_INODE_EVENT(xfs_vm_bmap);
  557. DEFINE_INODE_EVENT(xfs_file_ioctl);
  558. DEFINE_INODE_EVENT(xfs_file_compat_ioctl);
  559. DEFINE_INODE_EVENT(xfs_ioctl_setattr);
  560. DEFINE_INODE_EVENT(xfs_dir_fsync);
  561. DEFINE_INODE_EVENT(xfs_file_fsync);
  562. DEFINE_INODE_EVENT(xfs_destroy_inode);
  563. DEFINE_INODE_EVENT(xfs_evict_inode);
  564. DEFINE_INODE_EVENT(xfs_update_time);
  565. DEFINE_INODE_EVENT(xfs_dquot_dqalloc);
  566. DEFINE_INODE_EVENT(xfs_dquot_dqdetach);
  567. DEFINE_INODE_EVENT(xfs_inode_set_eofblocks_tag);
  568. DEFINE_INODE_EVENT(xfs_inode_clear_eofblocks_tag);
  569. DEFINE_INODE_EVENT(xfs_inode_free_eofblocks_invalid);
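/*
 * Usage sketch (illustrative): plain inode events carry nothing but the
 * inode, so a call site is a single line:
 *
 *	trace_xfs_destroy_inode(ip);
 */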
  570. DECLARE_EVENT_CLASS(xfs_iref_class,
  571. TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip),
  572. TP_ARGS(ip, caller_ip),
  573. TP_STRUCT__entry(
  574. __field(dev_t, dev)
  575. __field(xfs_ino_t, ino)
  576. __field(int, count)
  577. __field(int, pincount)
  578. __field(unsigned long, caller_ip)
  579. ),
  580. TP_fast_assign(
  581. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  582. __entry->ino = ip->i_ino;
  583. __entry->count = atomic_read(&VFS_I(ip)->i_count);
  584. __entry->pincount = atomic_read(&ip->i_pincount);
  585. __entry->caller_ip = caller_ip;
  586. ),
  587. TP_printk("dev %d:%d ino 0x%llx count %d pincount %d caller %pf",
  588. MAJOR(__entry->dev), MINOR(__entry->dev),
  589. __entry->ino,
  590. __entry->count,
  591. __entry->pincount,
  592. (char *)__entry->caller_ip)
  593. )
  594. TRACE_EVENT(xfs_iomap_prealloc_size,
  595. TP_PROTO(struct xfs_inode *ip, xfs_fsblock_t blocks, int shift,
  596. unsigned int writeio_blocks),
  597. TP_ARGS(ip, blocks, shift, writeio_blocks),
  598. TP_STRUCT__entry(
  599. __field(dev_t, dev)
  600. __field(xfs_ino_t, ino)
  601. __field(xfs_fsblock_t, blocks)
  602. __field(int, shift)
  603. __field(unsigned int, writeio_blocks)
  604. ),
  605. TP_fast_assign(
  606. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  607. __entry->ino = ip->i_ino;
  608. __entry->blocks = blocks;
  609. __entry->shift = shift;
  610. __entry->writeio_blocks = writeio_blocks;
  611. ),
  612. TP_printk("dev %d:%d ino 0x%llx prealloc blocks %llu shift %d "
  613. "m_writeio_blocks %u",
  614. MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino,
  615. __entry->blocks, __entry->shift, __entry->writeio_blocks)
  616. )
  617. #define DEFINE_IREF_EVENT(name) \
  618. DEFINE_EVENT(xfs_iref_class, name, \
  619. TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \
  620. TP_ARGS(ip, caller_ip))
  621. DEFINE_IREF_EVENT(xfs_ihold);
  622. DEFINE_IREF_EVENT(xfs_irele);
  623. DEFINE_IREF_EVENT(xfs_inode_pin);
  624. DEFINE_IREF_EVENT(xfs_inode_unpin);
  625. DEFINE_IREF_EVENT(xfs_inode_unpin_nowait);
  626. DECLARE_EVENT_CLASS(xfs_namespace_class,
  627. TP_PROTO(struct xfs_inode *dp, struct xfs_name *name),
  628. TP_ARGS(dp, name),
  629. TP_STRUCT__entry(
  630. __field(dev_t, dev)
  631. __field(xfs_ino_t, dp_ino)
  632. __field(int, namelen)
  633. __dynamic_array(char, name, name->len)
  634. ),
  635. TP_fast_assign(
  636. __entry->dev = VFS_I(dp)->i_sb->s_dev;
  637. __entry->dp_ino = dp->i_ino;
  638. __entry->namelen = name->len;
  639. memcpy(__get_str(name), name->name, name->len);
  640. ),
  641. TP_printk("dev %d:%d dp ino 0x%llx name %.*s",
  642. MAJOR(__entry->dev), MINOR(__entry->dev),
  643. __entry->dp_ino,
  644. __entry->namelen,
  645. __get_str(name))
  646. )
  647. #define DEFINE_NAMESPACE_EVENT(name) \
  648. DEFINE_EVENT(xfs_namespace_class, name, \
  649. TP_PROTO(struct xfs_inode *dp, struct xfs_name *name), \
  650. TP_ARGS(dp, name))
  651. DEFINE_NAMESPACE_EVENT(xfs_remove);
  652. DEFINE_NAMESPACE_EVENT(xfs_link);
  653. DEFINE_NAMESPACE_EVENT(xfs_lookup);
  654. DEFINE_NAMESPACE_EVENT(xfs_create);
  655. DEFINE_NAMESPACE_EVENT(xfs_symlink);
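/*
 * Usage sketch (illustrative): namespace events copy the (unterminated) name
 * into a dynamic array, so callers pass just the directory inode and the
 * struct xfs_name:
 *
 *	trace_xfs_lookup(dp, name);
 */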
  656. TRACE_EVENT(xfs_rename,
  657. TP_PROTO(struct xfs_inode *src_dp, struct xfs_inode *target_dp,
  658. struct xfs_name *src_name, struct xfs_name *target_name),
  659. TP_ARGS(src_dp, target_dp, src_name, target_name),
  660. TP_STRUCT__entry(
  661. __field(dev_t, dev)
  662. __field(xfs_ino_t, src_dp_ino)
  663. __field(xfs_ino_t, target_dp_ino)
  664. __field(int, src_namelen)
  665. __field(int, target_namelen)
  666. __dynamic_array(char, src_name, src_name->len)
  667. __dynamic_array(char, target_name, target_name->len)
  668. ),
  669. TP_fast_assign(
  670. __entry->dev = VFS_I(src_dp)->i_sb->s_dev;
  671. __entry->src_dp_ino = src_dp->i_ino;
  672. __entry->target_dp_ino = target_dp->i_ino;
  673. __entry->src_namelen = src_name->len;
  674. __entry->target_namelen = target_name->len;
  675. memcpy(__get_str(src_name), src_name->name, src_name->len);
  676. memcpy(__get_str(target_name), target_name->name,
  677. target_name->len);
  678. ),
  679. TP_printk("dev %d:%d src dp ino 0x%llx target dp ino 0x%llx"
  680. " src name %.*s target name %.*s",
  681. MAJOR(__entry->dev), MINOR(__entry->dev),
  682. __entry->src_dp_ino,
  683. __entry->target_dp_ino,
  684. __entry->src_namelen,
  685. __get_str(src_name),
  686. __entry->target_namelen,
  687. __get_str(target_name))
  688. )
  689. DECLARE_EVENT_CLASS(xfs_dquot_class,
  690. TP_PROTO(struct xfs_dquot *dqp),
  691. TP_ARGS(dqp),
  692. TP_STRUCT__entry(
  693. __field(dev_t, dev)
  694. __field(u32, id)
  695. __field(unsigned, flags)
  696. __field(unsigned, nrefs)
  697. __field(unsigned long long, res_bcount)
  698. __field(unsigned long long, bcount)
  699. __field(unsigned long long, icount)
  700. __field(unsigned long long, blk_hardlimit)
  701. __field(unsigned long long, blk_softlimit)
  702. __field(unsigned long long, ino_hardlimit)
  703. __field(unsigned long long, ino_softlimit)
  704. ), \
  705. TP_fast_assign(
  706. __entry->dev = dqp->q_mount->m_super->s_dev;
  707. __entry->id = be32_to_cpu(dqp->q_core.d_id);
  708. __entry->flags = dqp->dq_flags;
  709. __entry->nrefs = dqp->q_nrefs;
  710. __entry->res_bcount = dqp->q_res_bcount;
  711. __entry->bcount = be64_to_cpu(dqp->q_core.d_bcount);
  712. __entry->icount = be64_to_cpu(dqp->q_core.d_icount);
  713. __entry->blk_hardlimit =
  714. be64_to_cpu(dqp->q_core.d_blk_hardlimit);
  715. __entry->blk_softlimit =
  716. be64_to_cpu(dqp->q_core.d_blk_softlimit);
  717. __entry->ino_hardlimit =
  718. be64_to_cpu(dqp->q_core.d_ino_hardlimit);
  719. __entry->ino_softlimit =
  720. be64_to_cpu(dqp->q_core.d_ino_softlimit);
  721. ),
  722. TP_printk("dev %d:%d id 0x%x flags %s nrefs %u res_bc 0x%llx "
  723. "bcnt 0x%llx bhardlimit 0x%llx bsoftlimit 0x%llx "
  724. "icnt 0x%llx ihardlimit 0x%llx isoftlimit 0x%llx]",
  725. MAJOR(__entry->dev), MINOR(__entry->dev),
  726. __entry->id,
  727. __print_flags(__entry->flags, "|", XFS_DQ_FLAGS),
  728. __entry->nrefs,
  729. __entry->res_bcount,
  730. __entry->bcount,
  731. __entry->blk_hardlimit,
  732. __entry->blk_softlimit,
  733. __entry->icount,
  734. __entry->ino_hardlimit,
  735. __entry->ino_softlimit)
  736. )
  737. #define DEFINE_DQUOT_EVENT(name) \
  738. DEFINE_EVENT(xfs_dquot_class, name, \
  739. TP_PROTO(struct xfs_dquot *dqp), \
  740. TP_ARGS(dqp))
  741. DEFINE_DQUOT_EVENT(xfs_dqadjust);
  742. DEFINE_DQUOT_EVENT(xfs_dqreclaim_want);
  743. DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty);
  744. DEFINE_DQUOT_EVENT(xfs_dqreclaim_busy);
  745. DEFINE_DQUOT_EVENT(xfs_dqreclaim_done);
  746. DEFINE_DQUOT_EVENT(xfs_dqattach_found);
  747. DEFINE_DQUOT_EVENT(xfs_dqattach_get);
  748. DEFINE_DQUOT_EVENT(xfs_dqalloc);
  749. DEFINE_DQUOT_EVENT(xfs_dqtobp_read);
  750. DEFINE_DQUOT_EVENT(xfs_dqread);
  751. DEFINE_DQUOT_EVENT(xfs_dqread_fail);
  752. DEFINE_DQUOT_EVENT(xfs_dqget_hit);
  753. DEFINE_DQUOT_EVENT(xfs_dqget_miss);
  754. DEFINE_DQUOT_EVENT(xfs_dqget_freeing);
  755. DEFINE_DQUOT_EVENT(xfs_dqget_dup);
  756. DEFINE_DQUOT_EVENT(xfs_dqput);
  757. DEFINE_DQUOT_EVENT(xfs_dqput_wait);
  758. DEFINE_DQUOT_EVENT(xfs_dqput_free);
  759. DEFINE_DQUOT_EVENT(xfs_dqrele);
  760. DEFINE_DQUOT_EVENT(xfs_dqflush);
  761. DEFINE_DQUOT_EVENT(xfs_dqflush_force);
  762. DEFINE_DQUOT_EVENT(xfs_dqflush_done);
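/*
 * Usage sketch (illustrative): dquot events snapshot counters and limits
 * from dqp->q_core in TP_fast_assign(), keeping the call sites trivial:
 *
 *	trace_xfs_dqget_hit(dqp);
 */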
  763. DECLARE_EVENT_CLASS(xfs_loggrant_class,
  764. TP_PROTO(struct xlog *log, struct xlog_ticket *tic),
  765. TP_ARGS(log, tic),
  766. TP_STRUCT__entry(
  767. __field(dev_t, dev)
  768. __field(unsigned, trans_type)
  769. __field(char, ocnt)
  770. __field(char, cnt)
  771. __field(int, curr_res)
  772. __field(int, unit_res)
  773. __field(unsigned int, flags)
  774. __field(int, reserveq)
  775. __field(int, writeq)
  776. __field(int, grant_reserve_cycle)
  777. __field(int, grant_reserve_bytes)
  778. __field(int, grant_write_cycle)
  779. __field(int, grant_write_bytes)
  780. __field(int, curr_cycle)
  781. __field(int, curr_block)
  782. __field(xfs_lsn_t, tail_lsn)
  783. ),
  784. TP_fast_assign(
  785. __entry->dev = log->l_mp->m_super->s_dev;
  786. __entry->trans_type = tic->t_trans_type;
  787. __entry->ocnt = tic->t_ocnt;
  788. __entry->cnt = tic->t_cnt;
  789. __entry->curr_res = tic->t_curr_res;
  790. __entry->unit_res = tic->t_unit_res;
  791. __entry->flags = tic->t_flags;
  792. __entry->reserveq = list_empty(&log->l_reserve_head.waiters);
  793. __entry->writeq = list_empty(&log->l_write_head.waiters);
  794. xlog_crack_grant_head(&log->l_reserve_head.grant,
  795. &__entry->grant_reserve_cycle,
  796. &__entry->grant_reserve_bytes);
  797. xlog_crack_grant_head(&log->l_write_head.grant,
  798. &__entry->grant_write_cycle,
  799. &__entry->grant_write_bytes);
  800. __entry->curr_cycle = log->l_curr_cycle;
  801. __entry->curr_block = log->l_curr_block;
  802. __entry->tail_lsn = atomic64_read(&log->l_tail_lsn);
  803. ),
  804. TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u "
  805. "t_unit_res %u t_flags %s reserveq %s "
  806. "writeq %s grant_reserve_cycle %d "
  807. "grant_reserve_bytes %d grant_write_cycle %d "
  808. "grant_write_bytes %d curr_cycle %d curr_block %d "
  809. "tail_cycle %d tail_block %d",
  810. MAJOR(__entry->dev), MINOR(__entry->dev),
  811. __print_symbolic(__entry->trans_type, XFS_TRANS_TYPES),
  812. __entry->ocnt,
  813. __entry->cnt,
  814. __entry->curr_res,
  815. __entry->unit_res,
  816. __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS),
  817. __entry->reserveq ? "empty" : "active",
  818. __entry->writeq ? "empty" : "active",
  819. __entry->grant_reserve_cycle,
  820. __entry->grant_reserve_bytes,
  821. __entry->grant_write_cycle,
  822. __entry->grant_write_bytes,
  823. __entry->curr_cycle,
  824. __entry->curr_block,
  825. CYCLE_LSN(__entry->tail_lsn),
  826. BLOCK_LSN(__entry->tail_lsn)
  827. )
  828. )
  829. #define DEFINE_LOGGRANT_EVENT(name) \
  830. DEFINE_EVENT(xfs_loggrant_class, name, \
  831. TP_PROTO(struct xlog *log, struct xlog_ticket *tic), \
  832. TP_ARGS(log, tic))
  833. DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm);
  834. DEFINE_LOGGRANT_EVENT(xfs_log_done_perm);
  835. DEFINE_LOGGRANT_EVENT(xfs_log_umount_write);
  836. DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep);
  837. DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake);
  838. DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up);
  839. DEFINE_LOGGRANT_EVENT(xfs_log_reserve);
  840. DEFINE_LOGGRANT_EVENT(xfs_log_reserve_exit);
  841. DEFINE_LOGGRANT_EVENT(xfs_log_regrant);
  842. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_exit);
  843. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
  844. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
  845. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_sub);
  846. DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_enter);
  847. DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_exit);
  848. DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_sub);
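/*
 * Usage sketch (illustrative): log grant events crack both grant heads and
 * the tail lsn out of the xlog, so the reservation paths just pass the log
 * and the ticket:
 *
 *	trace_xfs_log_reserve(log, tic);
 */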
  849. DECLARE_EVENT_CLASS(xfs_log_item_class,
  850. TP_PROTO(struct xfs_log_item *lip),
  851. TP_ARGS(lip),
  852. TP_STRUCT__entry(
  853. __field(dev_t, dev)
  854. __field(void *, lip)
  855. __field(uint, type)
  856. __field(uint, flags)
  857. __field(xfs_lsn_t, lsn)
  858. ),
  859. TP_fast_assign(
  860. __entry->dev = lip->li_mountp->m_super->s_dev;
  861. __entry->lip = lip;
  862. __entry->type = lip->li_type;
  863. __entry->flags = lip->li_flags;
  864. __entry->lsn = lip->li_lsn;
  865. ),
  866. TP_printk("dev %d:%d lip 0x%p lsn %d/%d type %s flags %s",
  867. MAJOR(__entry->dev), MINOR(__entry->dev),
  868. __entry->lip,
  869. CYCLE_LSN(__entry->lsn), BLOCK_LSN(__entry->lsn),
  870. __print_symbolic(__entry->type, XFS_LI_TYPE_DESC),
  871. __print_flags(__entry->flags, "|", XFS_LI_FLAGS))
  872. )
  873. TRACE_EVENT(xfs_log_force,
  874. TP_PROTO(struct xfs_mount *mp, xfs_lsn_t lsn),
  875. TP_ARGS(mp, lsn),
  876. TP_STRUCT__entry(
  877. __field(dev_t, dev)
  878. __field(xfs_lsn_t, lsn)
  879. ),
  880. TP_fast_assign(
  881. __entry->dev = mp->m_super->s_dev;
  882. __entry->lsn = lsn;
  883. ),
  884. TP_printk("dev %d:%d lsn 0x%llx",
  885. MAJOR(__entry->dev), MINOR(__entry->dev),
  886. __entry->lsn)
  887. )
  888. #define DEFINE_LOG_ITEM_EVENT(name) \
  889. DEFINE_EVENT(xfs_log_item_class, name, \
  890. TP_PROTO(struct xfs_log_item *lip), \
  891. TP_ARGS(lip))
  892. DEFINE_LOG_ITEM_EVENT(xfs_ail_push);
  893. DEFINE_LOG_ITEM_EVENT(xfs_ail_pinned);
  894. DEFINE_LOG_ITEM_EVENT(xfs_ail_locked);
  895. DEFINE_LOG_ITEM_EVENT(xfs_ail_flushing);
  896. DECLARE_EVENT_CLASS(xfs_file_class,
  897. TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags),
  898. TP_ARGS(ip, count, offset, flags),
  899. TP_STRUCT__entry(
  900. __field(dev_t, dev)
  901. __field(xfs_ino_t, ino)
  902. __field(xfs_fsize_t, size)
  903. __field(loff_t, offset)
  904. __field(size_t, count)
  905. __field(int, flags)
  906. ),
  907. TP_fast_assign(
  908. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  909. __entry->ino = ip->i_ino;
  910. __entry->size = ip->i_d.di_size;
  911. __entry->offset = offset;
  912. __entry->count = count;
  913. __entry->flags = flags;
  914. ),
  915. TP_printk("dev %d:%d ino 0x%llx size 0x%llx "
  916. "offset 0x%llx count 0x%zx ioflags %s",
  917. MAJOR(__entry->dev), MINOR(__entry->dev),
  918. __entry->ino,
  919. __entry->size,
  920. __entry->offset,
  921. __entry->count,
  922. __print_flags(__entry->flags, "|", XFS_IO_FLAGS))
  923. )
  924. #define DEFINE_RW_EVENT(name) \
  925. DEFINE_EVENT(xfs_file_class, name, \
  926. TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags), \
  927. TP_ARGS(ip, count, offset, flags))
  928. DEFINE_RW_EVENT(xfs_file_read);
  929. DEFINE_RW_EVENT(xfs_file_buffered_write);
  930. DEFINE_RW_EVENT(xfs_file_direct_write);
  931. DEFINE_RW_EVENT(xfs_file_splice_read);
  932. DEFINE_RW_EVENT(xfs_file_splice_write);
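/*
 * Usage sketch (illustrative, local names assumed): the read/write events
 * record the request size, file offset and I/O flags:
 *
 *	trace_xfs_file_read(ip, count, pos, ioflags);
 */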
  933. DECLARE_EVENT_CLASS(xfs_page_class,
  934. TP_PROTO(struct inode *inode, struct page *page, unsigned long off),
  935. TP_ARGS(inode, page, off),
  936. TP_STRUCT__entry(
  937. __field(dev_t, dev)
  938. __field(xfs_ino_t, ino)
  939. __field(pgoff_t, pgoff)
  940. __field(loff_t, size)
  941. __field(unsigned long, offset)
  942. __field(int, delalloc)
  943. __field(int, unwritten)
  944. ),
  945. TP_fast_assign(
  946. int delalloc = -1, unwritten = -1;
  947. if (page_has_buffers(page))
  948. xfs_count_page_state(page, &delalloc, &unwritten);
  949. __entry->dev = inode->i_sb->s_dev;
  950. __entry->ino = XFS_I(inode)->i_ino;
  951. __entry->pgoff = page_offset(page);
  952. __entry->size = i_size_read(inode);
  953. __entry->offset = off;
  954. __entry->delalloc = delalloc;
  955. __entry->unwritten = unwritten;
  956. ),
  957. TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx "
  958. "delalloc %d unwritten %d",
  959. MAJOR(__entry->dev), MINOR(__entry->dev),
  960. __entry->ino,
  961. __entry->pgoff,
  962. __entry->size,
  963. __entry->offset,
  964. __entry->delalloc,
  965. __entry->unwritten)
  966. )
  967. #define DEFINE_PAGE_EVENT(name) \
  968. DEFINE_EVENT(xfs_page_class, name, \
  969. TP_PROTO(struct inode *inode, struct page *page, unsigned long off), \
  970. TP_ARGS(inode, page, off))
  971. DEFINE_PAGE_EVENT(xfs_writepage);
  972. DEFINE_PAGE_EVENT(xfs_releasepage);
  973. DEFINE_PAGE_EVENT(xfs_invalidatepage);
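/*
 * Usage sketch (illustrative): page events derive the delalloc/unwritten
 * state from the page's buffers inside TP_fast_assign(), so writeback only
 * passes the page:
 *
 *	trace_xfs_writepage(inode, page, 0);
 */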
  974. DECLARE_EVENT_CLASS(xfs_imap_class,
  975. TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
  976. int type, struct xfs_bmbt_irec *irec),
  977. TP_ARGS(ip, offset, count, type, irec),
  978. TP_STRUCT__entry(
  979. __field(dev_t, dev)
  980. __field(xfs_ino_t, ino)
  981. __field(loff_t, size)
  982. __field(loff_t, offset)
  983. __field(size_t, count)
  984. __field(int, type)
  985. __field(xfs_fileoff_t, startoff)
  986. __field(xfs_fsblock_t, startblock)
  987. __field(xfs_filblks_t, blockcount)
  988. ),
  989. TP_fast_assign(
  990. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  991. __entry->ino = ip->i_ino;
  992. __entry->size = ip->i_d.di_size;
  993. __entry->offset = offset;
  994. __entry->count = count;
  995. __entry->type = type;
  996. __entry->startoff = irec ? irec->br_startoff : 0;
  997. __entry->startblock = irec ? irec->br_startblock : 0;
  998. __entry->blockcount = irec ? irec->br_blockcount : 0;
  999. ),
  1000. TP_printk("dev %d:%d ino 0x%llx size 0x%llx offset 0x%llx count %zd "
  1001. "type %s startoff 0x%llx startblock %lld blockcount 0x%llx",
  1002. MAJOR(__entry->dev), MINOR(__entry->dev),
  1003. __entry->ino,
  1004. __entry->size,
  1005. __entry->offset,
  1006. __entry->count,
  1007. __print_symbolic(__entry->type, XFS_IO_TYPES),
  1008. __entry->startoff,
  1009. (__int64_t)__entry->startblock,
  1010. __entry->blockcount)
  1011. )
  1012. #define DEFINE_IOMAP_EVENT(name) \
  1013. DEFINE_EVENT(xfs_imap_class, name, \
  1014. TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \
  1015. int type, struct xfs_bmbt_irec *irec), \
  1016. TP_ARGS(ip, offset, count, type, irec))
  1017. DEFINE_IOMAP_EVENT(xfs_map_blocks_found);
  1018. DEFINE_IOMAP_EVENT(xfs_map_blocks_alloc);
  1019. DEFINE_IOMAP_EVENT(xfs_get_blocks_found);
  1020. DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc);
  1021. DECLARE_EVENT_CLASS(xfs_simple_io_class,
  1022. TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
  1023. TP_ARGS(ip, offset, count),
  1024. TP_STRUCT__entry(
  1025. __field(dev_t, dev)
  1026. __field(xfs_ino_t, ino)
  1027. __field(loff_t, isize)
  1028. __field(loff_t, disize)
  1029. __field(loff_t, offset)
  1030. __field(size_t, count)
  1031. ),
  1032. TP_fast_assign(
  1033. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  1034. __entry->ino = ip->i_ino;
  1035. __entry->isize = VFS_I(ip)->i_size;
  1036. __entry->disize = ip->i_d.di_size;
  1037. __entry->offset = offset;
  1038. __entry->count = count;
  1039. ),
  1040. TP_printk("dev %d:%d ino 0x%llx isize 0x%llx disize 0x%llx "
  1041. "offset 0x%llx count %zd",
  1042. MAJOR(__entry->dev), MINOR(__entry->dev),
  1043. __entry->ino,
  1044. __entry->isize,
  1045. __entry->disize,
  1046. __entry->offset,
  1047. __entry->count)
  1048. );
  1049. #define DEFINE_SIMPLE_IO_EVENT(name) \
  1050. DEFINE_EVENT(xfs_simple_io_class, name, \
  1051. TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), \
  1052. TP_ARGS(ip, offset, count))
  1053. DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
  1054. DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
  1055. DEFINE_SIMPLE_IO_EVENT(xfs_get_blocks_notfound);
  1056. DEFINE_SIMPLE_IO_EVENT(xfs_setfilesize);
  1057. DECLARE_EVENT_CLASS(xfs_itrunc_class,
  1058. TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size),
  1059. TP_ARGS(ip, new_size),
  1060. TP_STRUCT__entry(
  1061. __field(dev_t, dev)
  1062. __field(xfs_ino_t, ino)
  1063. __field(xfs_fsize_t, size)
  1064. __field(xfs_fsize_t, new_size)
  1065. ),
  1066. TP_fast_assign(
  1067. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  1068. __entry->ino = ip->i_ino;
  1069. __entry->size = ip->i_d.di_size;
  1070. __entry->new_size = new_size;
  1071. ),
  1072. TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx",
  1073. MAJOR(__entry->dev), MINOR(__entry->dev),
  1074. __entry->ino,
  1075. __entry->size,
  1076. __entry->new_size)
  1077. )
  1078. #define DEFINE_ITRUNC_EVENT(name) \
  1079. DEFINE_EVENT(xfs_itrunc_class, name, \
  1080. TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \
  1081. TP_ARGS(ip, new_size))
  1082. DEFINE_ITRUNC_EVENT(xfs_itruncate_extents_start);
  1083. DEFINE_ITRUNC_EVENT(xfs_itruncate_extents_end);
  1084. TRACE_EVENT(xfs_pagecache_inval,
  1085. TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish),
  1086. TP_ARGS(ip, start, finish),
  1087. TP_STRUCT__entry(
  1088. __field(dev_t, dev)
  1089. __field(xfs_ino_t, ino)
  1090. __field(xfs_fsize_t, size)
  1091. __field(xfs_off_t, start)
  1092. __field(xfs_off_t, finish)
  1093. ),
  1094. TP_fast_assign(
  1095. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  1096. __entry->ino = ip->i_ino;
  1097. __entry->size = ip->i_d.di_size;
  1098. __entry->start = start;
  1099. __entry->finish = finish;
  1100. ),
  1101. TP_printk("dev %d:%d ino 0x%llx size 0x%llx start 0x%llx finish 0x%llx",
  1102. MAJOR(__entry->dev), MINOR(__entry->dev),
  1103. __entry->ino,
  1104. __entry->size,
  1105. __entry->start,
  1106. __entry->finish)
  1107. );
  1108. TRACE_EVENT(xfs_bunmap,
  1109. TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t bno, xfs_filblks_t len,
  1110. int flags, unsigned long caller_ip),
  1111. TP_ARGS(ip, bno, len, flags, caller_ip),
  1112. TP_STRUCT__entry(
  1113. __field(dev_t, dev)
  1114. __field(xfs_ino_t, ino)
  1115. __field(xfs_fsize_t, size)
  1116. __field(xfs_fileoff_t, bno)
  1117. __field(xfs_filblks_t, len)
  1118. __field(unsigned long, caller_ip)
  1119. __field(int, flags)
  1120. ),
  1121. TP_fast_assign(
  1122. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  1123. __entry->ino = ip->i_ino;
  1124. __entry->size = ip->i_d.di_size;
  1125. __entry->bno = bno;
  1126. __entry->len = len;
  1127. __entry->caller_ip = caller_ip;
  1128. __entry->flags = flags;
  1129. ),
  1130. TP_printk("dev %d:%d ino 0x%llx size 0x%llx bno 0x%llx len 0x%llx"
  1131. "flags %s caller %pf",
  1132. MAJOR(__entry->dev), MINOR(__entry->dev),
  1133. __entry->ino,
  1134. __entry->size,
  1135. __entry->bno,
  1136. __entry->len,
  1137. __print_flags(__entry->flags, "|", XFS_BMAPI_FLAGS),
  1138. (void *)__entry->caller_ip)
  1139. );
  1140. DECLARE_EVENT_CLASS(xfs_extent_busy_class,
  1141. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
  1142. xfs_agblock_t agbno, xfs_extlen_t len),
  1143. TP_ARGS(mp, agno, agbno, len),
  1144. TP_STRUCT__entry(
  1145. __field(dev_t, dev)
  1146. __field(xfs_agnumber_t, agno)
  1147. __field(xfs_agblock_t, agbno)
  1148. __field(xfs_extlen_t, len)
  1149. ),
  1150. TP_fast_assign(
  1151. __entry->dev = mp->m_super->s_dev;
  1152. __entry->agno = agno;
  1153. __entry->agbno = agbno;
  1154. __entry->len = len;
  1155. ),
  1156. TP_printk("dev %d:%d agno %u agbno %u len %u",
  1157. MAJOR(__entry->dev), MINOR(__entry->dev),
  1158. __entry->agno,
  1159. __entry->agbno,
  1160. __entry->len)
  1161. );
  1162. #define DEFINE_BUSY_EVENT(name) \
  1163. DEFINE_EVENT(xfs_extent_busy_class, name, \
  1164. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
  1165. xfs_agblock_t agbno, xfs_extlen_t len), \
  1166. TP_ARGS(mp, agno, agbno, len))
  1167. DEFINE_BUSY_EVENT(xfs_extent_busy);
  1168. DEFINE_BUSY_EVENT(xfs_extent_busy_enomem);
  1169. DEFINE_BUSY_EVENT(xfs_extent_busy_force);
  1170. DEFINE_BUSY_EVENT(xfs_extent_busy_reuse);
  1171. DEFINE_BUSY_EVENT(xfs_extent_busy_clear);
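/*
 * Usage sketch (illustrative): busy extent events describe an extent within
 * an AG, e.g. when an extent is marked busy after being freed:
 *
 *	trace_xfs_extent_busy(mp, agno, agbno, len);
 */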
  1172. TRACE_EVENT(xfs_extent_busy_trim,
  1173. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
  1174. xfs_agblock_t agbno, xfs_extlen_t len,
  1175. xfs_agblock_t tbno, xfs_extlen_t tlen),
  1176. TP_ARGS(mp, agno, agbno, len, tbno, tlen),
  1177. TP_STRUCT__entry(
  1178. __field(dev_t, dev)
  1179. __field(xfs_agnumber_t, agno)
  1180. __field(xfs_agblock_t, agbno)
  1181. __field(xfs_extlen_t, len)
  1182. __field(xfs_agblock_t, tbno)
  1183. __field(xfs_extlen_t, tlen)
  1184. ),
  1185. TP_fast_assign(
  1186. __entry->dev = mp->m_super->s_dev;
  1187. __entry->agno = agno;
  1188. __entry->agbno = agbno;
  1189. __entry->len = len;
  1190. __entry->tbno = tbno;
  1191. __entry->tlen = tlen;
  1192. ),
  1193. TP_printk("dev %d:%d agno %u agbno %u len %u tbno %u tlen %u",
  1194. MAJOR(__entry->dev), MINOR(__entry->dev),
  1195. __entry->agno,
  1196. __entry->agbno,
  1197. __entry->len,
  1198. __entry->tbno,
  1199. __entry->tlen)
  1200. );
  1201. TRACE_EVENT(xfs_trans_commit_lsn,
  1202. TP_PROTO(struct xfs_trans *trans),
  1203. TP_ARGS(trans),
  1204. TP_STRUCT__entry(
  1205. __field(dev_t, dev)
  1206. __field(struct xfs_trans *, tp)
  1207. __field(xfs_lsn_t, lsn)
  1208. ),
  1209. TP_fast_assign(
  1210. __entry->dev = trans->t_mountp->m_super->s_dev;
  1211. __entry->tp = trans;
  1212. __entry->lsn = trans->t_commit_lsn;
  1213. ),
  1214. TP_printk("dev %d:%d trans 0x%p commit_lsn 0x%llx",
  1215. MAJOR(__entry->dev), MINOR(__entry->dev),
  1216. __entry->tp,
  1217. __entry->lsn)
  1218. );
  1219. TRACE_EVENT(xfs_agf,
  1220. TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags,
  1221. unsigned long caller_ip),
  1222. TP_ARGS(mp, agf, flags, caller_ip),
  1223. TP_STRUCT__entry(
  1224. __field(dev_t, dev)
  1225. __field(xfs_agnumber_t, agno)
  1226. __field(int, flags)
  1227. __field(__u32, length)
  1228. __field(__u32, bno_root)
  1229. __field(__u32, cnt_root)
  1230. __field(__u32, bno_level)
  1231. __field(__u32, cnt_level)
  1232. __field(__u32, flfirst)
  1233. __field(__u32, fllast)
  1234. __field(__u32, flcount)
  1235. __field(__u32, freeblks)
  1236. __field(__u32, longest)
  1237. __field(unsigned long, caller_ip)
  1238. ),
  1239. TP_fast_assign(
  1240. __entry->dev = mp->m_super->s_dev;
  1241. __entry->agno = be32_to_cpu(agf->agf_seqno),
  1242. __entry->flags = flags;
  1243. __entry->length = be32_to_cpu(agf->agf_length),
  1244. __entry->bno_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
  1245. __entry->cnt_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
  1246. __entry->bno_level =
  1247. be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]),
  1248. __entry->cnt_level =
  1249. be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]),
  1250. __entry->flfirst = be32_to_cpu(agf->agf_flfirst),
  1251. __entry->fllast = be32_to_cpu(agf->agf_fllast),
  1252. __entry->flcount = be32_to_cpu(agf->agf_flcount),
  1253. __entry->freeblks = be32_to_cpu(agf->agf_freeblks),
  1254. __entry->longest = be32_to_cpu(agf->agf_longest);
  1255. __entry->caller_ip = caller_ip;
  1256. ),
  1257. TP_printk("dev %d:%d agno %u flags %s length %u roots b %u c %u "
  1258. "levels b %u c %u flfirst %u fllast %u flcount %u "
  1259. "freeblks %u longest %u caller %pf",
  1260. MAJOR(__entry->dev), MINOR(__entry->dev),
  1261. __entry->agno,
  1262. __print_flags(__entry->flags, "|", XFS_AGF_FLAGS),
  1263. __entry->length,
  1264. __entry->bno_root,
  1265. __entry->cnt_root,
  1266. __entry->bno_level,
  1267. __entry->cnt_level,
  1268. __entry->flfirst,
  1269. __entry->fllast,
  1270. __entry->flcount,
  1271. __entry->freeblks,
  1272. __entry->longest,
  1273. (void *)__entry->caller_ip)
  1274. );
  1275. TRACE_EVENT(xfs_free_extent,
  1276. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
  1277. xfs_extlen_t len, bool isfl, int haveleft, int haveright),
  1278. TP_ARGS(mp, agno, agbno, len, isfl, haveleft, haveright),
  1279. TP_STRUCT__entry(
  1280. __field(dev_t, dev)
  1281. __field(xfs_agnumber_t, agno)
  1282. __field(xfs_agblock_t, agbno)
  1283. __field(xfs_extlen_t, len)
  1284. __field(int, isfl)
  1285. __field(int, haveleft)
  1286. __field(int, haveright)
  1287. ),
  1288. TP_fast_assign(
  1289. __entry->dev = mp->m_super->s_dev;
  1290. __entry->agno = agno;
  1291. __entry->agbno = agbno;
  1292. __entry->len = len;
  1293. __entry->isfl = isfl;
  1294. __entry->haveleft = haveleft;
  1295. __entry->haveright = haveright;
  1296. ),
  1297. TP_printk("dev %d:%d agno %u agbno %u len %u isfl %d %s",
  1298. MAJOR(__entry->dev), MINOR(__entry->dev),
  1299. __entry->agno,
  1300. __entry->agbno,
  1301. __entry->len,
  1302. __entry->isfl,
  1303. __entry->haveleft ?
  1304. (__entry->haveright ? "both" : "left") :
  1305. (__entry->haveright ? "right" : "none"))
  1306. );
DECLARE_EVENT_CLASS(xfs_alloc_class,
	TP_PROTO(struct xfs_alloc_arg *args),
	TP_ARGS(args),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(xfs_extlen_t, minlen)
		__field(xfs_extlen_t, maxlen)
		__field(xfs_extlen_t, mod)
		__field(xfs_extlen_t, prod)
		__field(xfs_extlen_t, minleft)
		__field(xfs_extlen_t, total)
		__field(xfs_extlen_t, alignment)
		__field(xfs_extlen_t, minalignslop)
		__field(xfs_extlen_t, len)
		__field(short, type)
		__field(short, otype)
		__field(char, wasdel)
		__field(char, wasfromfl)
		__field(char, isfl)
		__field(char, userdata)
		__field(xfs_fsblock_t, firstblock)
	),
	TP_fast_assign(
		__entry->dev = args->mp->m_super->s_dev;
		__entry->agno = args->agno;
		__entry->agbno = args->agbno;
		__entry->minlen = args->minlen;
		__entry->maxlen = args->maxlen;
		__entry->mod = args->mod;
		__entry->prod = args->prod;
		__entry->minleft = args->minleft;
		__entry->total = args->total;
		__entry->alignment = args->alignment;
		__entry->minalignslop = args->minalignslop;
		__entry->len = args->len;
		__entry->type = args->type;
		__entry->otype = args->otype;
		__entry->wasdel = args->wasdel;
		__entry->wasfromfl = args->wasfromfl;
		__entry->isfl = args->isfl;
		__entry->userdata = args->userdata;
		__entry->firstblock = args->firstblock;
	),
	TP_printk("dev %d:%d agno %u agbno %u minlen %u maxlen %u mod %u "
		  "prod %u minleft %u total %u alignment %u minalignslop %u "
		  "len %u type %s otype %s wasdel %d wasfromfl %d isfl %d "
		  "userdata %d firstblock 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agbno,
		  __entry->minlen,
		  __entry->maxlen,
		  __entry->mod,
		  __entry->prod,
		  __entry->minleft,
		  __entry->total,
		  __entry->alignment,
		  __entry->minalignslop,
		  __entry->len,
		  __print_symbolic(__entry->type, XFS_ALLOC_TYPES),
		  __print_symbolic(__entry->otype, XFS_ALLOC_TYPES),
		  __entry->wasdel,
		  __entry->wasfromfl,
		  __entry->isfl,
		  __entry->userdata,
		  (unsigned long long)__entry->firstblock)
)

#define DEFINE_ALLOC_EVENT(name) \
DEFINE_EVENT(xfs_alloc_class, name, \
	TP_PROTO(struct xfs_alloc_arg *args), \
	TP_ARGS(args))
DEFINE_ALLOC_EVENT(xfs_alloc_exact_done);
DEFINE_ALLOC_EVENT(xfs_alloc_exact_notfound);
DEFINE_ALLOC_EVENT(xfs_alloc_exact_error);
DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft);
DEFINE_ALLOC_EVENT(xfs_alloc_near_first);
DEFINE_ALLOC_EVENT(xfs_alloc_near_greater);
DEFINE_ALLOC_EVENT(xfs_alloc_near_lesser);
DEFINE_ALLOC_EVENT(xfs_alloc_near_error);
DEFINE_ALLOC_EVENT(xfs_alloc_near_noentry);
DEFINE_ALLOC_EVENT(xfs_alloc_near_busy);
DEFINE_ALLOC_EVENT(xfs_alloc_size_neither);
DEFINE_ALLOC_EVENT(xfs_alloc_size_noentry);
DEFINE_ALLOC_EVENT(xfs_alloc_size_nominleft);
DEFINE_ALLOC_EVENT(xfs_alloc_size_done);
DEFINE_ALLOC_EVENT(xfs_alloc_size_error);
DEFINE_ALLOC_EVENT(xfs_alloc_size_busy);
DEFINE_ALLOC_EVENT(xfs_alloc_small_freelist);
DEFINE_ALLOC_EVENT(xfs_alloc_small_notenough);
DEFINE_ALLOC_EVENT(xfs_alloc_small_done);
DEFINE_ALLOC_EVENT(xfs_alloc_small_error);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_badargs);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_nofix);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_noagbp);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_allfailed);
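
/*
 * Example of watching one of the allocator events above from userspace.
 * The path assumes tracefs is mounted at /sys/kernel/tracing; older
 * setups may expose it under /sys/kernel/debug/tracing instead:
 *
 *	echo 1 > /sys/kernel/tracing/events/xfs/xfs_alloc_size_busy/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */

/*
 * Directory operation tracing: directories and attributes share
 * struct xfs_da_args, so this class records the name, hash value and
 * target inode number for each directory format (sf/block/leaf/node).
 */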
DECLARE_EVENT_CLASS(xfs_da_class,
	TP_PROTO(struct xfs_da_args *args),
	TP_ARGS(args),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__dynamic_array(char, name, args->namelen)
		__field(int, namelen)
		__field(xfs_dahash_t, hashval)
		__field(xfs_ino_t, inumber)
		__field(int, op_flags)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(args->dp)->i_sb->s_dev;
		__entry->ino = args->dp->i_ino;
		if (args->namelen)
			memcpy(__get_str(name), args->name, args->namelen);
		__entry->namelen = args->namelen;
		__entry->hashval = args->hashval;
		__entry->inumber = args->inumber;
		__entry->op_flags = args->op_flags;
	),
	TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d hashval 0x%x "
		  "inumber 0x%llx op_flags %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->namelen,
		  __entry->namelen ? __get_str(name) : NULL,
		  __entry->namelen,
		  __entry->hashval,
		  __entry->inumber,
		  __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS))
)

#define DEFINE_DIR2_EVENT(name) \
DEFINE_EVENT(xfs_da_class, name, \
	TP_PROTO(struct xfs_da_args *args), \
	TP_ARGS(args))
DEFINE_DIR2_EVENT(xfs_dir2_sf_addname);
DEFINE_DIR2_EVENT(xfs_dir2_sf_create);
DEFINE_DIR2_EVENT(xfs_dir2_sf_lookup);
DEFINE_DIR2_EVENT(xfs_dir2_sf_replace);
DEFINE_DIR2_EVENT(xfs_dir2_sf_removename);
DEFINE_DIR2_EVENT(xfs_dir2_sf_toino4);
DEFINE_DIR2_EVENT(xfs_dir2_sf_toino8);
DEFINE_DIR2_EVENT(xfs_dir2_sf_to_block);
DEFINE_DIR2_EVENT(xfs_dir2_block_addname);
DEFINE_DIR2_EVENT(xfs_dir2_block_lookup);
DEFINE_DIR2_EVENT(xfs_dir2_block_replace);
DEFINE_DIR2_EVENT(xfs_dir2_block_removename);
DEFINE_DIR2_EVENT(xfs_dir2_block_to_sf);
DEFINE_DIR2_EVENT(xfs_dir2_block_to_leaf);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_addname);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_lookup);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_replace);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_removename);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_to_block);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_to_node);
DEFINE_DIR2_EVENT(xfs_dir2_node_addname);
DEFINE_DIR2_EVENT(xfs_dir2_node_lookup);
DEFINE_DIR2_EVENT(xfs_dir2_node_replace);
DEFINE_DIR2_EVENT(xfs_dir2_node_removename);
DEFINE_DIR2_EVENT(xfs_dir2_node_to_leaf);
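
/*
 * Extended attribute tracing: same xfs_da_args based format as the
 * directory events, but with the attribute value length recorded in
 * place of the target inode number.
 */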
DECLARE_EVENT_CLASS(xfs_attr_class,
	TP_PROTO(struct xfs_da_args *args),
	TP_ARGS(args),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__dynamic_array(char, name, args->namelen)
		__field(int, namelen)
		__field(int, valuelen)
		__field(xfs_dahash_t, hashval)
		__field(int, op_flags)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(args->dp)->i_sb->s_dev;
		__entry->ino = args->dp->i_ino;
		if (args->namelen)
			memcpy(__get_str(name), args->name, args->namelen);
		__entry->namelen = args->namelen;
		__entry->valuelen = args->valuelen;
		__entry->hashval = args->hashval;
		__entry->op_flags = args->op_flags;
	),
	TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d valuelen %d "
		  "hashval 0x%x op_flags %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->namelen,
		  __entry->namelen ? __get_str(name) : NULL,
		  __entry->namelen,
		  __entry->valuelen,
		  __entry->hashval,
		  __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS))
)

#define DEFINE_ATTR_EVENT(name) \
DEFINE_EVENT(xfs_attr_class, name, \
	TP_PROTO(struct xfs_da_args *args), \
	TP_ARGS(args))
DEFINE_ATTR_EVENT(xfs_attr_sf_add);
DEFINE_ATTR_EVENT(xfs_attr_sf_addname);
DEFINE_ATTR_EVENT(xfs_attr_sf_create);
DEFINE_ATTR_EVENT(xfs_attr_sf_lookup);
DEFINE_ATTR_EVENT(xfs_attr_sf_remove);
DEFINE_ATTR_EVENT(xfs_attr_sf_removename);
DEFINE_ATTR_EVENT(xfs_attr_sf_to_leaf);
DEFINE_ATTR_EVENT(xfs_attr_leaf_add);
DEFINE_ATTR_EVENT(xfs_attr_leaf_add_old);
DEFINE_ATTR_EVENT(xfs_attr_leaf_add_new);
DEFINE_ATTR_EVENT(xfs_attr_leaf_add_work);
DEFINE_ATTR_EVENT(xfs_attr_leaf_addname);
DEFINE_ATTR_EVENT(xfs_attr_leaf_create);
DEFINE_ATTR_EVENT(xfs_attr_leaf_compact);
DEFINE_ATTR_EVENT(xfs_attr_leaf_get);
DEFINE_ATTR_EVENT(xfs_attr_leaf_lookup);
DEFINE_ATTR_EVENT(xfs_attr_leaf_replace);
DEFINE_ATTR_EVENT(xfs_attr_leaf_remove);
DEFINE_ATTR_EVENT(xfs_attr_leaf_removename);
DEFINE_ATTR_EVENT(xfs_attr_leaf_split);
DEFINE_ATTR_EVENT(xfs_attr_leaf_split_before);
DEFINE_ATTR_EVENT(xfs_attr_leaf_split_after);
DEFINE_ATTR_EVENT(xfs_attr_leaf_clearflag);
DEFINE_ATTR_EVENT(xfs_attr_leaf_setflag);
DEFINE_ATTR_EVENT(xfs_attr_leaf_flipflags);
DEFINE_ATTR_EVENT(xfs_attr_leaf_to_sf);
DEFINE_ATTR_EVENT(xfs_attr_leaf_to_node);
DEFINE_ATTR_EVENT(xfs_attr_leaf_rebalance);
DEFINE_ATTR_EVENT(xfs_attr_leaf_unbalance);
DEFINE_ATTR_EVENT(xfs_attr_leaf_toosmall);
DEFINE_ATTR_EVENT(xfs_attr_node_addname);
DEFINE_ATTR_EVENT(xfs_attr_node_get);
DEFINE_ATTR_EVENT(xfs_attr_node_lookup);
DEFINE_ATTR_EVENT(xfs_attr_node_replace);
DEFINE_ATTR_EVENT(xfs_attr_node_removename);
DEFINE_ATTR_EVENT(xfs_attr_fillstate);
DEFINE_ATTR_EVENT(xfs_attr_refillstate);
DEFINE_ATTR_EVENT(xfs_attr_rmtval_get);
DEFINE_ATTR_EVENT(xfs_attr_rmtval_set);
DEFINE_ATTR_EVENT(xfs_attr_rmtval_remove);
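
/*
 * Generic da btree node management (split/join/rebalance and friends)
 * reuses xfs_da_class, since these paths are shared by the directory
 * and attribute code.
 */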
#define DEFINE_DA_EVENT(name) \
DEFINE_EVENT(xfs_da_class, name, \
	TP_PROTO(struct xfs_da_args *args), \
	TP_ARGS(args))
DEFINE_DA_EVENT(xfs_da_split);
DEFINE_DA_EVENT(xfs_da_join);
DEFINE_DA_EVENT(xfs_da_link_before);
DEFINE_DA_EVENT(xfs_da_link_after);
DEFINE_DA_EVENT(xfs_da_unlink_back);
DEFINE_DA_EVENT(xfs_da_unlink_forward);
DEFINE_DA_EVENT(xfs_da_root_split);
DEFINE_DA_EVENT(xfs_da_root_join);
DEFINE_DA_EVENT(xfs_da_node_add);
DEFINE_DA_EVENT(xfs_da_node_create);
DEFINE_DA_EVENT(xfs_da_node_split);
DEFINE_DA_EVENT(xfs_da_node_remove);
DEFINE_DA_EVENT(xfs_da_node_rebalance);
DEFINE_DA_EVENT(xfs_da_node_unbalance);
DEFINE_DA_EVENT(xfs_da_node_toosmall);
DEFINE_DA_EVENT(xfs_da_swap_lastblock);
DEFINE_DA_EVENT(xfs_da_grow_inode);
DEFINE_DA_EVENT(xfs_da_shrink_inode);
DEFINE_DA_EVENT(xfs_da_fixhashpath);
DEFINE_DA_EVENT(xfs_da_path_shift);
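
/*
 * Directory space management tracing: these events carry a single index
 * (leaf entry or directory block number, depending on the caller) on top
 * of the usual inode and operation flags.
 */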
DECLARE_EVENT_CLASS(xfs_dir2_space_class,
	TP_PROTO(struct xfs_da_args *args, int idx),
	TP_ARGS(args, idx),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(int, op_flags)
		__field(int, idx)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(args->dp)->i_sb->s_dev;
		__entry->ino = args->dp->i_ino;
		__entry->op_flags = args->op_flags;
		__entry->idx = idx;
	),
	TP_printk("dev %d:%d ino 0x%llx op_flags %s index %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS),
		  __entry->idx)
)

#define DEFINE_DIR2_SPACE_EVENT(name) \
DEFINE_EVENT(xfs_dir2_space_class, name, \
	TP_PROTO(struct xfs_da_args *args, int idx), \
	TP_ARGS(args, idx))
DEFINE_DIR2_SPACE_EVENT(xfs_dir2_leafn_add);
DEFINE_DIR2_SPACE_EVENT(xfs_dir2_leafn_remove);
DEFINE_DIR2_SPACE_EVENT(xfs_dir2_grow_inode);
DEFINE_DIR2_SPACE_EVENT(xfs_dir2_shrink_inode);
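
/*
 * Leaf entry migration between directory leaf blocks: records the source
 * index, destination index and the number of entries moved.
 */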
TRACE_EVENT(xfs_dir2_leafn_moveents,
	TP_PROTO(struct xfs_da_args *args, int src_idx, int dst_idx, int count),
	TP_ARGS(args, src_idx, dst_idx, count),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(int, op_flags)
		__field(int, src_idx)
		__field(int, dst_idx)
		__field(int, count)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(args->dp)->i_sb->s_dev;
		__entry->ino = args->dp->i_ino;
		__entry->op_flags = args->op_flags;
		__entry->src_idx = src_idx;
		__entry->dst_idx = dst_idx;
		__entry->count = count;
	),
	TP_printk("dev %d:%d ino 0x%llx op_flags %s "
		  "src_idx %d dst_idx %d count %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS),
		  __entry->src_idx,
		  __entry->dst_idx,
		  __entry->count)
);
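
/*
 * Extent swap (xfs_swapext) tracing: the symbolic tables below name the
 * two inodes involved and the possible inode fork formats, and the event
 * class captures the data fork state before and after the swap.
 */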
#define XFS_SWAPEXT_INODES \
	{ 0,	"target" }, \
	{ 1,	"temp" }

#define XFS_INODE_FORMAT_STR \
	{ 0,	"invalid" }, \
	{ 1,	"local" }, \
	{ 2,	"extent" }, \
	{ 3,	"btree" }

DECLARE_EVENT_CLASS(xfs_swap_extent_class,
	TP_PROTO(struct xfs_inode *ip, int which),
	TP_ARGS(ip, which),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(int, which)
		__field(xfs_ino_t, ino)
		__field(int, format)
		__field(int, nex)
		__field(int, broot_size)
		__field(int, fork_off)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->which = which;
		__entry->ino = ip->i_ino;
		__entry->format = ip->i_d.di_format;
		__entry->nex = ip->i_d.di_nextents;
		__entry->broot_size = ip->i_df.if_broot_bytes;
		__entry->fork_off = XFS_IFORK_BOFF(ip);
	),
	TP_printk("dev %d:%d ino 0x%llx (%s), %s format, num_extents %d, "
		  "broot size %d, fork offset %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_symbolic(__entry->which, XFS_SWAPEXT_INODES),
		  __print_symbolic(__entry->format, XFS_INODE_FORMAT_STR),
		  __entry->nex,
		  __entry->broot_size,
		  __entry->fork_off)
)

#define DEFINE_SWAPEXT_EVENT(name) \
DEFINE_EVENT(xfs_swap_extent_class, name, \
	TP_PROTO(struct xfs_inode *ip, int which), \
	TP_ARGS(ip, which))
DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before);
DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after);
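
/*
 * Log recovery tracing: per-item events as transaction items are added,
 * reordered and finally recovered during the recovery passes.
 */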
DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
	TP_PROTO(struct xlog *log, struct xlog_recover *trans,
		 struct xlog_recover_item *item, int pass),
	TP_ARGS(log, trans, item, pass),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned long, item)
		__field(xlog_tid_t, tid)
		__field(int, type)
		__field(int, pass)
		__field(int, count)
		__field(int, total)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		__entry->item = (unsigned long)item;
		__entry->tid = trans->r_log_tid;
		__entry->type = ITEM_TYPE(item);
		__entry->pass = pass;
		__entry->count = item->ri_cnt;
		__entry->total = item->ri_total;
	),
	TP_printk("dev %d:%d trans 0x%x, pass %d, item 0x%p, item type %s "
		  "item region count/total %d/%d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->tid,
		  __entry->pass,
		  (void *)__entry->item,
		  __print_symbolic(__entry->type, XFS_LI_TYPE_DESC),
		  __entry->count,
		  __entry->total)
)

#define DEFINE_LOG_RECOVER_ITEM(name) \
DEFINE_EVENT(xfs_log_recover_item_class, name, \
	TP_PROTO(struct xlog *log, struct xlog_recover *trans, \
		 struct xlog_recover_item *item, int pass), \
	TP_ARGS(log, trans, item, pass))
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_add);
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_add_cont);
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_head);
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_tail);
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_recover);
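
/*
 * Buffer log item recovery: records the xfs_buf_log_format header of
 * each buffer item seen during recovery (cancel, inode, regular and
 * dquot buffer variants).
 */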
DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class,
	TP_PROTO(struct xlog *log, struct xfs_buf_log_format *buf_f),
	TP_ARGS(log, buf_f),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(__int64_t, blkno)
		__field(unsigned short, len)
		__field(unsigned short, flags)
		__field(unsigned short, size)
		__field(unsigned int, map_size)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		__entry->blkno = buf_f->blf_blkno;
		__entry->len = buf_f->blf_len;
		__entry->flags = buf_f->blf_flags;
		__entry->size = buf_f->blf_size;
		__entry->map_size = buf_f->blf_map_size;
	),
	TP_printk("dev %d:%d blkno 0x%llx, len %u, flags 0x%x, size %d, "
		  "map_size %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->blkno,
		  __entry->len,
		  __entry->flags,
		  __entry->size,
		  __entry->map_size)
)

#define DEFINE_LOG_RECOVER_BUF_ITEM(name) \
DEFINE_EVENT(xfs_log_recover_buf_item_class, name, \
	TP_PROTO(struct xlog *log, struct xfs_buf_log_format *buf_f), \
	TP_ARGS(log, buf_f))
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_not_cancel);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_add);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_ref_inc);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_recover);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_inode_buf);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_reg_buf);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf);
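
/*
 * Inode log item recovery: records the xfs_inode_log_format header of
 * each inode item, including the on-disk buffer location it maps to.
 */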
DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class,
	TP_PROTO(struct xlog *log, struct xfs_inode_log_format *in_f),
	TP_ARGS(log, in_f),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(unsigned short, size)
		__field(int, fields)
		__field(unsigned short, asize)
		__field(unsigned short, dsize)
		__field(__int64_t, blkno)
		__field(int, len)
		__field(int, boffset)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		__entry->ino = in_f->ilf_ino;
		__entry->size = in_f->ilf_size;
		__entry->fields = in_f->ilf_fields;
		__entry->asize = in_f->ilf_asize;
		__entry->dsize = in_f->ilf_dsize;
		__entry->blkno = in_f->ilf_blkno;
		__entry->len = in_f->ilf_len;
		__entry->boffset = in_f->ilf_boffset;
	),
	TP_printk("dev %d:%d ino 0x%llx, size %u, fields 0x%x, asize %d, "
		  "dsize %d, blkno 0x%llx, len %d, boffset %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->size,
		  __entry->fields,
		  __entry->asize,
		  __entry->dsize,
		  __entry->blkno,
		  __entry->len,
		  __entry->boffset)
)

#define DEFINE_LOG_RECOVER_INO_ITEM(name) \
DEFINE_EVENT(xfs_log_recover_ino_item_class, name, \
	TP_PROTO(struct xlog *log, struct xfs_inode_log_format *in_f), \
	TP_ARGS(log, in_f))
DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover);
DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_cancel);
DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_skip);
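
/*
 * Online discard (FITRIM) tracing: one event per candidate AG extent,
 * noting whether it was discarded or skipped as too small, excluded or
 * busy.
 */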
DECLARE_EVENT_CLASS(xfs_discard_class,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
		 xfs_agblock_t agbno, xfs_extlen_t len),
	TP_ARGS(mp, agno, agbno, len),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(xfs_extlen_t, len)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = agno;
		__entry->agbno = agbno;
		__entry->len = len;
	),
	TP_printk("dev %d:%d agno %u agbno %u len %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agbno,
		  __entry->len)
)
#define DEFINE_DISCARD_EVENT(name) \
DEFINE_EVENT(xfs_discard_class, name, \
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
		 xfs_agblock_t agbno, xfs_extlen_t len), \
	TP_ARGS(mp, agno, agbno, len))
DEFINE_DISCARD_EVENT(xfs_discard_extent);
DEFINE_DISCARD_EVENT(xfs_discard_toosmall);
DEFINE_DISCARD_EVENT(xfs_discard_exclude);
DEFINE_DISCARD_EVENT(xfs_discard_busy);
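
/*
 * Example: watching discard decisions from userspace while running
 * fstrim (path assumes tracefs is mounted at /sys/kernel/tracing):
 *
 *	echo 1 > /sys/kernel/tracing/events/xfs/xfs_discard_extent/enable
 *	echo 1 > /sys/kernel/tracing/events/xfs/xfs_discard_toosmall/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */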
#endif /* _TRACE_XFS_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE xfs_trace
#include <trace/define_trace.h>