xfs_trace.h

  1. /*
  2. * Copyright (c) 2009, Christoph Hellwig
  3. * All Rights Reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it would be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write the Free Software Foundation,
  16. * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  17. */
  18. #undef TRACE_SYSTEM
  19. #define TRACE_SYSTEM xfs
  20. #if !defined(_TRACE_XFS_H) || defined(TRACE_HEADER_MULTI_READ)
  21. #define _TRACE_XFS_H
  22. #include <linux/tracepoint.h>
  23. struct xfs_agf;
  24. struct xfs_alloc_arg;
  25. struct xfs_attr_list_context;
  26. struct xfs_buf_log_item;
  27. struct xfs_da_args;
  28. struct xfs_da_node_entry;
  29. struct xfs_dquot;
  30. struct xlog_ticket;
  31. struct log;
  32. struct xlog_recover;
  33. struct xlog_recover_item;
  34. struct xfs_buf_log_format;
  35. struct xfs_inode_log_format;
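/*
 * A sketch of the usual TRACE_EVENT include pattern, for orientation: the
 * first pass over this header, guarded by _TRACE_XFS_H, only declares the
 * trace_xfs_*() hooks.  One .c file in fs/xfs is then expected to define
 * CREATE_TRACE_POINTS before including the header, so that
 * <trace/define_trace.h> can re-expand the same macros into the actual
 * tracepoint definitions; TRACE_HEADER_MULTI_READ, tested in the guard
 * above, is what permits that second read.
 *
 *	#define CREATE_TRACE_POINTS
 *	#include "xfs_trace.h"
 */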
  36. DECLARE_EVENT_CLASS(xfs_attr_list_class,
  37. TP_PROTO(struct xfs_attr_list_context *ctx),
  38. TP_ARGS(ctx),
  39. TP_STRUCT__entry(
  40. __field(dev_t, dev)
  41. __field(xfs_ino_t, ino)
  42. __field(u32, hashval)
  43. __field(u32, blkno)
  44. __field(u32, offset)
  45. __field(void *, alist)
  46. __field(int, bufsize)
  47. __field(int, count)
  48. __field(int, firstu)
  49. __field(int, dupcnt)
  50. __field(int, flags)
  51. ),
  52. TP_fast_assign(
  53. __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
  54. __entry->ino = ctx->dp->i_ino;
  55. __entry->hashval = ctx->cursor->hashval;
  56. __entry->blkno = ctx->cursor->blkno;
  57. __entry->offset = ctx->cursor->offset;
  58. __entry->alist = ctx->alist;
  59. __entry->bufsize = ctx->bufsize;
  60. __entry->count = ctx->count;
  61. __entry->firstu = ctx->firstu;
  62. __entry->flags = ctx->flags;
  63. ),
  64. TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
  65. "alist 0x%p size %u count %u firstu %u flags %d %s",
  66. MAJOR(__entry->dev), MINOR(__entry->dev),
  67. __entry->ino,
  68. __entry->hashval,
  69. __entry->blkno,
  70. __entry->offset,
  71. __entry->dupcnt,
  72. __entry->alist,
  73. __entry->bufsize,
  74. __entry->count,
  75. __entry->firstu,
  76. __entry->flags,
  77. __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS)
  78. )
  79. )
  80. #define DEFINE_ATTR_LIST_EVENT(name) \
  81. DEFINE_EVENT(xfs_attr_list_class, name, \
  82. TP_PROTO(struct xfs_attr_list_context *ctx), \
  83. TP_ARGS(ctx))
  84. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf);
  85. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf_all);
  86. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf);
  87. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf_end);
  88. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_full);
  89. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_add);
  90. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_wrong_blk);
  91. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_notfound);
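/*
 * Each DEFINE_ATTR_LIST_EVENT() above stamps out the xfs_attr_list_class
 * template under its own name, giving callers a trace_<name>() hook with
 * the class prototype.  An illustrative call site (the real ones live in
 * the attr listing code, not in this header):
 *
 *	trace_xfs_attr_list_sf(context);	// context: struct xfs_attr_list_context *
 */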
  92. DECLARE_EVENT_CLASS(xfs_perag_class,
  93. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount,
  94. unsigned long caller_ip),
  95. TP_ARGS(mp, agno, refcount, caller_ip),
  96. TP_STRUCT__entry(
  97. __field(dev_t, dev)
  98. __field(xfs_agnumber_t, agno)
  99. __field(int, refcount)
  100. __field(unsigned long, caller_ip)
  101. ),
  102. TP_fast_assign(
  103. __entry->dev = mp->m_super->s_dev;
  104. __entry->agno = agno;
  105. __entry->refcount = refcount;
  106. __entry->caller_ip = caller_ip;
  107. ),
  108. TP_printk("dev %d:%d agno %u refcount %d caller %pf",
  109. MAJOR(__entry->dev), MINOR(__entry->dev),
  110. __entry->agno,
  111. __entry->refcount,
  112. (char *)__entry->caller_ip)
  113. );
  114. #define DEFINE_PERAG_REF_EVENT(name) \
  115. DEFINE_EVENT(xfs_perag_class, name, \
  116. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount, \
  117. unsigned long caller_ip), \
  118. TP_ARGS(mp, agno, refcount, caller_ip))
  119. DEFINE_PERAG_REF_EVENT(xfs_perag_get);
  120. DEFINE_PERAG_REF_EVENT(xfs_perag_get_reclaim);
  121. DEFINE_PERAG_REF_EVENT(xfs_perag_put);
  122. DEFINE_PERAG_REF_EVENT(xfs_perag_set_reclaim);
  123. DEFINE_PERAG_REF_EVENT(xfs_perag_clear_reclaim);
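/*
 * All events in this file live under the "xfs" trace system (TRACE_SYSTEM
 * above), so each one can typically be enabled individually at runtime
 * through the tracing directory, e.g.
 *
 *	/sys/kernel/debug/tracing/events/xfs/<event name>/enable
 *
 * with the formatted output appearing in the "trace" file alongside it.
 */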
  124. TRACE_EVENT(xfs_attr_list_node_descend,
  125. TP_PROTO(struct xfs_attr_list_context *ctx,
  126. struct xfs_da_node_entry *btree),
  127. TP_ARGS(ctx, btree),
  128. TP_STRUCT__entry(
  129. __field(dev_t, dev)
  130. __field(xfs_ino_t, ino)
  131. __field(u32, hashval)
  132. __field(u32, blkno)
  133. __field(u32, offset)
  134. __field(void *, alist)
  135. __field(int, bufsize)
  136. __field(int, count)
  137. __field(int, firstu)
  138. __field(int, dupcnt)
  139. __field(int, flags)
  140. __field(u32, bt_hashval)
  141. __field(u32, bt_before)
  142. ),
  143. TP_fast_assign(
  144. __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
  145. __entry->ino = ctx->dp->i_ino;
  146. __entry->hashval = ctx->cursor->hashval;
  147. __entry->blkno = ctx->cursor->blkno;
  148. __entry->offset = ctx->cursor->offset;
  149. __entry->alist = ctx->alist;
  150. __entry->bufsize = ctx->bufsize;
  151. __entry->count = ctx->count;
  152. __entry->firstu = ctx->firstu;
  153. __entry->flags = ctx->flags;
  154. __entry->bt_hashval = be32_to_cpu(btree->hashval);
  155. __entry->bt_before = be32_to_cpu(btree->before);
  156. ),
  157. TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
  158. "alist 0x%p size %u count %u firstu %u flags %d %s "
  159. "node hashval %u, node before %u",
  160. MAJOR(__entry->dev), MINOR(__entry->dev),
  161. __entry->ino,
  162. __entry->hashval,
  163. __entry->blkno,
  164. __entry->offset,
  165. __entry->dupcnt,
  166. __entry->alist,
  167. __entry->bufsize,
  168. __entry->count,
  169. __entry->firstu,
  170. __entry->flags,
  171. __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS),
  172. __entry->bt_hashval,
  173. __entry->bt_before)
  174. );
  175. TRACE_EVENT(xfs_iext_insert,
  176. TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx,
  177. struct xfs_bmbt_irec *r, int state, unsigned long caller_ip),
  178. TP_ARGS(ip, idx, r, state, caller_ip),
  179. TP_STRUCT__entry(
  180. __field(dev_t, dev)
  181. __field(xfs_ino_t, ino)
  182. __field(xfs_extnum_t, idx)
  183. __field(xfs_fileoff_t, startoff)
  184. __field(xfs_fsblock_t, startblock)
  185. __field(xfs_filblks_t, blockcount)
  186. __field(xfs_exntst_t, state)
  187. __field(int, bmap_state)
  188. __field(unsigned long, caller_ip)
  189. ),
  190. TP_fast_assign(
  191. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  192. __entry->ino = ip->i_ino;
  193. __entry->idx = idx;
  194. __entry->startoff = r->br_startoff;
  195. __entry->startblock = r->br_startblock;
  196. __entry->blockcount = r->br_blockcount;
  197. __entry->state = r->br_state;
  198. __entry->bmap_state = state;
  199. __entry->caller_ip = caller_ip;
  200. ),
  201. TP_printk("dev %d:%d ino 0x%llx state %s idx %ld "
  202. "offset %lld block %lld count %lld flag %d caller %pf",
  203. MAJOR(__entry->dev), MINOR(__entry->dev),
  204. __entry->ino,
  205. __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
  206. (long)__entry->idx,
  207. __entry->startoff,
  208. (__int64_t)__entry->startblock,
  209. __entry->blockcount,
  210. __entry->state,
  211. (char *)__entry->caller_ip)
  212. );
  213. DECLARE_EVENT_CLASS(xfs_bmap_class,
  214. TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state,
  215. unsigned long caller_ip),
  216. TP_ARGS(ip, idx, state, caller_ip),
  217. TP_STRUCT__entry(
  218. __field(dev_t, dev)
  219. __field(xfs_ino_t, ino)
  220. __field(xfs_extnum_t, idx)
  221. __field(xfs_fileoff_t, startoff)
  222. __field(xfs_fsblock_t, startblock)
  223. __field(xfs_filblks_t, blockcount)
  224. __field(xfs_exntst_t, state)
  225. __field(int, bmap_state)
  226. __field(unsigned long, caller_ip)
  227. ),
  228. TP_fast_assign(
  229. struct xfs_ifork *ifp = (state & BMAP_ATTRFORK) ?
  230. ip->i_afp : &ip->i_df;
  231. struct xfs_bmbt_irec r;
  232. xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &r);
  233. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  234. __entry->ino = ip->i_ino;
  235. __entry->idx = idx;
  236. __entry->startoff = r.br_startoff;
  237. __entry->startblock = r.br_startblock;
  238. __entry->blockcount = r.br_blockcount;
  239. __entry->state = r.br_state;
  240. __entry->bmap_state = state;
  241. __entry->caller_ip = caller_ip;
  242. ),
  243. TP_printk("dev %d:%d ino 0x%llx state %s idx %ld "
  244. "offset %lld block %lld count %lld flag %d caller %pf",
  245. MAJOR(__entry->dev), MINOR(__entry->dev),
  246. __entry->ino,
  247. __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
  248. (long)__entry->idx,
  249. __entry->startoff,
  250. (__int64_t)__entry->startblock,
  251. __entry->blockcount,
  252. __entry->state,
  253. (char *)__entry->caller_ip)
  254. )
  255. #define DEFINE_BMAP_EVENT(name) \
  256. DEFINE_EVENT(xfs_bmap_class, name, \
  257. TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state, \
  258. unsigned long caller_ip), \
  259. TP_ARGS(ip, idx, state, caller_ip))
  260. DEFINE_BMAP_EVENT(xfs_iext_remove);
  261. DEFINE_BMAP_EVENT(xfs_bmap_pre_update);
  262. DEFINE_BMAP_EVENT(xfs_bmap_post_update);
  263. DEFINE_BMAP_EVENT(xfs_extlist);
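/*
 * Unlike xfs_iext_insert above, which is handed the new extent record
 * directly, the xfs_bmap_class events look the record up themselves from
 * the data or attribute fork (chosen by BMAP_ATTRFORK in 'state') at the
 * moment the event fires.  Illustrative call, assuming the extent at 'idx'
 * is valid when the tracepoint runs:
 *
 *	trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
 */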
  264. DECLARE_EVENT_CLASS(xfs_buf_class,
  265. TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip),
  266. TP_ARGS(bp, caller_ip),
  267. TP_STRUCT__entry(
  268. __field(dev_t, dev)
  269. __field(xfs_daddr_t, bno)
  270. __field(size_t, buffer_length)
  271. __field(int, hold)
  272. __field(int, pincount)
  273. __field(unsigned, lockval)
  274. __field(unsigned, flags)
  275. __field(unsigned long, caller_ip)
  276. ),
  277. TP_fast_assign(
  278. __entry->dev = bp->b_target->bt_dev;
  279. __entry->bno = bp->b_bn;
  280. __entry->buffer_length = bp->b_buffer_length;
  281. __entry->hold = atomic_read(&bp->b_hold);
  282. __entry->pincount = atomic_read(&bp->b_pin_count);
  283. __entry->lockval = xfs_buf_lock_value(bp);
  284. __entry->flags = bp->b_flags;
  285. __entry->caller_ip = caller_ip;
  286. ),
  287. TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
  288. "lock %d flags %s caller %pf",
  289. MAJOR(__entry->dev), MINOR(__entry->dev),
  290. (unsigned long long)__entry->bno,
  291. __entry->buffer_length,
  292. __entry->hold,
  293. __entry->pincount,
  294. __entry->lockval,
  295. __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
  296. (void *)__entry->caller_ip)
  297. )
  298. #define DEFINE_BUF_EVENT(name) \
  299. DEFINE_EVENT(xfs_buf_class, name, \
  300. TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \
  301. TP_ARGS(bp, caller_ip))
  302. DEFINE_BUF_EVENT(xfs_buf_init);
  303. DEFINE_BUF_EVENT(xfs_buf_free);
  304. DEFINE_BUF_EVENT(xfs_buf_hold);
  305. DEFINE_BUF_EVENT(xfs_buf_rele);
  306. DEFINE_BUF_EVENT(xfs_buf_pin);
  307. DEFINE_BUF_EVENT(xfs_buf_unpin);
  308. DEFINE_BUF_EVENT(xfs_buf_iodone);
  309. DEFINE_BUF_EVENT(xfs_buf_iorequest);
  310. DEFINE_BUF_EVENT(xfs_buf_bawrite);
  311. DEFINE_BUF_EVENT(xfs_buf_bdwrite);
  312. DEFINE_BUF_EVENT(xfs_buf_lock);
  313. DEFINE_BUF_EVENT(xfs_buf_lock_done);
  314. DEFINE_BUF_EVENT(xfs_buf_cond_lock);
  315. DEFINE_BUF_EVENT(xfs_buf_unlock);
  316. DEFINE_BUF_EVENT(xfs_buf_ordered_retry);
  317. DEFINE_BUF_EVENT(xfs_buf_iowait);
  318. DEFINE_BUF_EVENT(xfs_buf_iowait_done);
  319. DEFINE_BUF_EVENT(xfs_buf_delwri_queue);
  320. DEFINE_BUF_EVENT(xfs_buf_delwri_dequeue);
  321. DEFINE_BUF_EVENT(xfs_buf_delwri_split);
  322. DEFINE_BUF_EVENT(xfs_buf_get_noaddr);
  323. DEFINE_BUF_EVENT(xfs_bdstrat_shut);
  324. DEFINE_BUF_EVENT(xfs_buf_item_relse);
  325. DEFINE_BUF_EVENT(xfs_buf_item_iodone);
  326. DEFINE_BUF_EVENT(xfs_buf_item_iodone_async);
  327. DEFINE_BUF_EVENT(xfs_buf_error_relse);
  328. DEFINE_BUF_EVENT(xfs_trans_read_buf_io);
  329. DEFINE_BUF_EVENT(xfs_trans_read_buf_shut);
  330. /* not really buffer traces, but the buf provides useful information */
  331. DEFINE_BUF_EVENT(xfs_btree_corrupt);
  332. DEFINE_BUF_EVENT(xfs_da_btree_corrupt);
  333. DEFINE_BUF_EVENT(xfs_reset_dqcounts);
  334. DEFINE_BUF_EVENT(xfs_inode_item_push);
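/*
 * The buffer events snapshot the hold count, pin count and lock value at
 * the instant the tracepoint fires; by the time the record is read from
 * the ring buffer those values may already have moved on.  Illustrative
 * call site:
 *
 *	trace_xfs_buf_hold(bp, _RET_IP_);
 */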
  335. /* pass flags explicitly */
  336. DECLARE_EVENT_CLASS(xfs_buf_flags_class,
  337. TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip),
  338. TP_ARGS(bp, flags, caller_ip),
  339. TP_STRUCT__entry(
  340. __field(dev_t, dev)
  341. __field(xfs_daddr_t, bno)
  342. __field(size_t, buffer_length)
  343. __field(int, hold)
  344. __field(int, pincount)
  345. __field(unsigned, lockval)
  346. __field(unsigned, flags)
  347. __field(unsigned long, caller_ip)
  348. ),
  349. TP_fast_assign(
  350. __entry->dev = bp->b_target->bt_dev;
  351. __entry->bno = bp->b_bn;
  352. __entry->buffer_length = bp->b_buffer_length;
  353. __entry->flags = flags;
  354. __entry->hold = atomic_read(&bp->b_hold);
  355. __entry->pincount = atomic_read(&bp->b_pin_count);
  356. __entry->lockval = xfs_buf_lock_value(bp);
  357. __entry->caller_ip = caller_ip;
  358. ),
  359. TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
  360. "lock %d flags %s caller %pf",
  361. MAJOR(__entry->dev), MINOR(__entry->dev),
  362. (unsigned long long)__entry->bno,
  363. __entry->buffer_length,
  364. __entry->hold,
  365. __entry->pincount,
  366. __entry->lockval,
  367. __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
  368. (void *)__entry->caller_ip)
  369. )
  370. #define DEFINE_BUF_FLAGS_EVENT(name) \
  371. DEFINE_EVENT(xfs_buf_flags_class, name, \
  372. TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \
  373. TP_ARGS(bp, flags, caller_ip))
  374. DEFINE_BUF_FLAGS_EVENT(xfs_buf_find);
  375. DEFINE_BUF_FLAGS_EVENT(xfs_buf_get);
  376. DEFINE_BUF_FLAGS_EVENT(xfs_buf_read);
  377. TRACE_EVENT(xfs_buf_ioerror,
  378. TP_PROTO(struct xfs_buf *bp, int error, unsigned long caller_ip),
  379. TP_ARGS(bp, error, caller_ip),
  380. TP_STRUCT__entry(
  381. __field(dev_t, dev)
  382. __field(xfs_daddr_t, bno)
  383. __field(size_t, buffer_length)
  384. __field(unsigned, flags)
  385. __field(int, hold)
  386. __field(int, pincount)
  387. __field(unsigned, lockval)
  388. __field(int, error)
  389. __field(unsigned long, caller_ip)
  390. ),
  391. TP_fast_assign(
  392. __entry->dev = bp->b_target->bt_dev;
  393. __entry->bno = bp->b_bn;
  394. __entry->buffer_length = bp->b_buffer_length;
  395. __entry->hold = atomic_read(&bp->b_hold);
  396. __entry->pincount = atomic_read(&bp->b_pin_count);
  397. __entry->lockval = xfs_buf_lock_value(bp);
  398. __entry->error = error;
  399. __entry->flags = bp->b_flags;
  400. __entry->caller_ip = caller_ip;
  401. ),
  402. TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
  403. "lock %d error %d flags %s caller %pf",
  404. MAJOR(__entry->dev), MINOR(__entry->dev),
  405. (unsigned long long)__entry->bno,
  406. __entry->buffer_length,
  407. __entry->hold,
  408. __entry->pincount,
  409. __entry->lockval,
  410. __entry->error,
  411. __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
  412. (void *)__entry->caller_ip)
  413. );
  414. DECLARE_EVENT_CLASS(xfs_buf_item_class,
  415. TP_PROTO(struct xfs_buf_log_item *bip),
  416. TP_ARGS(bip),
  417. TP_STRUCT__entry(
  418. __field(dev_t, dev)
  419. __field(xfs_daddr_t, buf_bno)
  420. __field(size_t, buf_len)
  421. __field(int, buf_hold)
  422. __field(int, buf_pincount)
  423. __field(int, buf_lockval)
  424. __field(unsigned, buf_flags)
  425. __field(unsigned, bli_recur)
  426. __field(int, bli_refcount)
  427. __field(unsigned, bli_flags)
  428. __field(void *, li_desc)
  429. __field(unsigned, li_flags)
  430. ),
  431. TP_fast_assign(
  432. __entry->dev = bip->bli_buf->b_target->bt_dev;
  433. __entry->bli_flags = bip->bli_flags;
  434. __entry->bli_recur = bip->bli_recur;
  435. __entry->bli_refcount = atomic_read(&bip->bli_refcount);
  436. __entry->buf_bno = bip->bli_buf->b_bn;
  437. __entry->buf_len = bip->bli_buf->b_buffer_length;
  438. __entry->buf_flags = bip->bli_buf->b_flags;
  439. __entry->buf_hold = atomic_read(&bip->bli_buf->b_hold);
  440. __entry->buf_pincount = atomic_read(&bip->bli_buf->b_pin_count);
  441. __entry->buf_lockval = xfs_buf_lock_value(bip->bli_buf);
  442. __entry->li_desc = bip->bli_item.li_desc;
  443. __entry->li_flags = bip->bli_item.li_flags;
  444. ),
  445. TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
  446. "lock %d flags %s recur %d refcount %d bliflags %s "
  447. "lidesc 0x%p liflags %s",
  448. MAJOR(__entry->dev), MINOR(__entry->dev),
  449. (unsigned long long)__entry->buf_bno,
  450. __entry->buf_len,
  451. __entry->buf_hold,
  452. __entry->buf_pincount,
  453. __entry->buf_lockval,
  454. __print_flags(__entry->buf_flags, "|", XFS_BUF_FLAGS),
  455. __entry->bli_recur,
  456. __entry->bli_refcount,
  457. __print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS),
  458. __entry->li_desc,
  459. __print_flags(__entry->li_flags, "|", XFS_LI_FLAGS))
  460. )
  461. #define DEFINE_BUF_ITEM_EVENT(name) \
  462. DEFINE_EVENT(xfs_buf_item_class, name, \
  463. TP_PROTO(struct xfs_buf_log_item *bip), \
  464. TP_ARGS(bip))
  465. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size);
  466. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_stale);
  467. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format);
  468. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_stale);
  469. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin);
  470. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin);
  471. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale);
  472. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_trylock);
  473. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock);
  474. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale);
  475. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed);
  476. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push);
  477. DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pushbuf);
  478. DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);
  479. DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf_recur);
  480. DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb);
  481. DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb_recur);
  482. DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf);
  483. DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf_recur);
  484. DEFINE_BUF_ITEM_EVENT(xfs_trans_log_buf);
  485. DEFINE_BUF_ITEM_EVENT(xfs_trans_brelse);
  486. DEFINE_BUF_ITEM_EVENT(xfs_trans_bjoin);
  487. DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold);
  488. DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release);
  489. DEFINE_BUF_ITEM_EVENT(xfs_trans_binval);
  490. DECLARE_EVENT_CLASS(xfs_lock_class,
  491. TP_PROTO(struct xfs_inode *ip, unsigned lock_flags,
  492. unsigned long caller_ip),
  493. TP_ARGS(ip, lock_flags, caller_ip),
  494. TP_STRUCT__entry(
  495. __field(dev_t, dev)
  496. __field(xfs_ino_t, ino)
  497. __field(int, lock_flags)
  498. __field(unsigned long, caller_ip)
  499. ),
  500. TP_fast_assign(
  501. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  502. __entry->ino = ip->i_ino;
  503. __entry->lock_flags = lock_flags;
  504. __entry->caller_ip = caller_ip;
  505. ),
  506. TP_printk("dev %d:%d ino 0x%llx flags %s caller %pf",
  507. MAJOR(__entry->dev), MINOR(__entry->dev),
  508. __entry->ino,
  509. __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS),
  510. (void *)__entry->caller_ip)
  511. )
  512. #define DEFINE_LOCK_EVENT(name) \
  513. DEFINE_EVENT(xfs_lock_class, name, \
  514. TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, \
  515. unsigned long caller_ip), \
  516. TP_ARGS(ip, lock_flags, caller_ip))
  517. DEFINE_LOCK_EVENT(xfs_ilock);
  518. DEFINE_LOCK_EVENT(xfs_ilock_nowait);
  519. DEFINE_LOCK_EVENT(xfs_ilock_demote);
  520. DEFINE_LOCK_EVENT(xfs_iunlock);
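/*
 * The lock events follow the caller_ip convention used throughout this
 * file: the call site passes _RET_IP_ (or _THIS_IP_) and TP_printk renders
 * it with %pf so the trace shows the calling function by name.  Sketch of
 * a typical caller:
 *
 *	trace_xfs_ilock(ip, lock_flags, _RET_IP_);	// e.g. from xfs_ilock()
 */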
  521. DECLARE_EVENT_CLASS(xfs_iget_class,
  522. TP_PROTO(struct xfs_inode *ip),
  523. TP_ARGS(ip),
  524. TP_STRUCT__entry(
  525. __field(dev_t, dev)
  526. __field(xfs_ino_t, ino)
  527. ),
  528. TP_fast_assign(
  529. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  530. __entry->ino = ip->i_ino;
  531. ),
  532. TP_printk("dev %d:%d ino 0x%llx",
  533. MAJOR(__entry->dev), MINOR(__entry->dev),
  534. __entry->ino)
  535. )
  536. #define DEFINE_IGET_EVENT(name) \
  537. DEFINE_EVENT(xfs_iget_class, name, \
  538. TP_PROTO(struct xfs_inode *ip), \
  539. TP_ARGS(ip))
  540. DEFINE_IGET_EVENT(xfs_iget_skip);
  541. DEFINE_IGET_EVENT(xfs_iget_reclaim);
  542. DEFINE_IGET_EVENT(xfs_iget_found);
  543. DEFINE_IGET_EVENT(xfs_iget_alloc);
  544. DECLARE_EVENT_CLASS(xfs_inode_class,
  545. TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip),
  546. TP_ARGS(ip, caller_ip),
  547. TP_STRUCT__entry(
  548. __field(dev_t, dev)
  549. __field(xfs_ino_t, ino)
  550. __field(int, count)
  551. __field(int, pincount)
  552. __field(unsigned long, caller_ip)
  553. ),
  554. TP_fast_assign(
  555. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  556. __entry->ino = ip->i_ino;
  557. __entry->count = atomic_read(&VFS_I(ip)->i_count);
  558. __entry->pincount = atomic_read(&ip->i_pincount);
  559. __entry->caller_ip = caller_ip;
  560. ),
  561. TP_printk("dev %d:%d ino 0x%llx count %d pincount %d caller %pf",
  562. MAJOR(__entry->dev), MINOR(__entry->dev),
  563. __entry->ino,
  564. __entry->count,
  565. __entry->pincount,
  566. (char *)__entry->caller_ip)
  567. )
  568. #define DEFINE_INODE_EVENT(name) \
  569. DEFINE_EVENT(xfs_inode_class, name, \
  570. TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \
  571. TP_ARGS(ip, caller_ip))
  572. DEFINE_INODE_EVENT(xfs_ihold);
  573. DEFINE_INODE_EVENT(xfs_irele);
  574. DEFINE_INODE_EVENT(xfs_inode_pin);
  575. DEFINE_INODE_EVENT(xfs_inode_unpin);
  576. DEFINE_INODE_EVENT(xfs_inode_unpin_nowait);
  577. /* the old xfs_itrace_entry tracer - to be replaced by something in the VFS */
  578. DEFINE_INODE_EVENT(xfs_inode);
  579. #define xfs_itrace_entry(ip) \
  580. trace_xfs_inode(ip, _THIS_IP_)
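/*
 * xfs_itrace_entry() is kept as a thin compatibility wrapper so existing
 * call sites need not change while the old tracing code is phased out; the
 * two lines below are equivalent:
 *
 *	xfs_itrace_entry(ip);
 *	trace_xfs_inode(ip, _THIS_IP_);		// what the macro expands to
 */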
  581. DECLARE_EVENT_CLASS(xfs_dquot_class,
  582. TP_PROTO(struct xfs_dquot *dqp),
  583. TP_ARGS(dqp),
  584. TP_STRUCT__entry(
  585. __field(dev_t, dev)
  586. __field(u32, id)
  587. __field(unsigned, flags)
  588. __field(unsigned, nrefs)
  589. __field(unsigned long long, res_bcount)
  590. __field(unsigned long long, bcount)
  591. __field(unsigned long long, icount)
  592. __field(unsigned long long, blk_hardlimit)
  593. __field(unsigned long long, blk_softlimit)
  594. __field(unsigned long long, ino_hardlimit)
  595. __field(unsigned long long, ino_softlimit)
  596. ),
  597. TP_fast_assign(
  598. __entry->dev = dqp->q_mount->m_super->s_dev;
  599. __entry->id = be32_to_cpu(dqp->q_core.d_id);
  600. __entry->flags = dqp->dq_flags;
  601. __entry->nrefs = dqp->q_nrefs;
  602. __entry->res_bcount = dqp->q_res_bcount;
  603. __entry->bcount = be64_to_cpu(dqp->q_core.d_bcount);
  604. __entry->icount = be64_to_cpu(dqp->q_core.d_icount);
  605. __entry->blk_hardlimit =
  606. be64_to_cpu(dqp->q_core.d_blk_hardlimit);
  607. __entry->blk_softlimit =
  608. be64_to_cpu(dqp->q_core.d_blk_softlimit);
  609. __entry->ino_hardlimit =
  610. be64_to_cpu(dqp->q_core.d_ino_hardlimit);
  611. __entry->ino_softlimit =
  612. be64_to_cpu(dqp->q_core.d_ino_softlimit);
  613. ),
  614. TP_printk("dev %d:%d id 0x%x flags %s nrefs %u res_bc 0x%llx "
  615. "bcnt 0x%llx bhardlimit 0x%llx bsoftlimit 0x%llx "
  616. "icnt 0x%llx ihardlimit 0x%llx isoftlimit 0x%llx]",
  617. MAJOR(__entry->dev), MINOR(__entry->dev),
  618. __entry->id,
  619. __print_flags(__entry->flags, "|", XFS_DQ_FLAGS),
  620. __entry->nrefs,
  621. __entry->res_bcount,
  622. __entry->bcount,
  623. __entry->blk_hardlimit,
  624. __entry->blk_softlimit,
  625. __entry->icount,
  626. __entry->ino_hardlimit,
  627. __entry->ino_softlimit)
  628. )
  629. #define DEFINE_DQUOT_EVENT(name) \
  630. DEFINE_EVENT(xfs_dquot_class, name, \
  631. TP_PROTO(struct xfs_dquot *dqp), \
  632. TP_ARGS(dqp))
  633. DEFINE_DQUOT_EVENT(xfs_dqadjust);
  634. DEFINE_DQUOT_EVENT(xfs_dqreclaim_want);
  635. DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty);
  636. DEFINE_DQUOT_EVENT(xfs_dqreclaim_unlink);
  637. DEFINE_DQUOT_EVENT(xfs_dqattach_found);
  638. DEFINE_DQUOT_EVENT(xfs_dqattach_get);
  639. DEFINE_DQUOT_EVENT(xfs_dqinit);
  640. DEFINE_DQUOT_EVENT(xfs_dqreuse);
  641. DEFINE_DQUOT_EVENT(xfs_dqalloc);
  642. DEFINE_DQUOT_EVENT(xfs_dqtobp_read);
  643. DEFINE_DQUOT_EVENT(xfs_dqread);
  644. DEFINE_DQUOT_EVENT(xfs_dqread_fail);
  645. DEFINE_DQUOT_EVENT(xfs_dqlookup_found);
  646. DEFINE_DQUOT_EVENT(xfs_dqlookup_want);
  647. DEFINE_DQUOT_EVENT(xfs_dqlookup_freelist);
  648. DEFINE_DQUOT_EVENT(xfs_dqlookup_done);
  649. DEFINE_DQUOT_EVENT(xfs_dqget_hit);
  650. DEFINE_DQUOT_EVENT(xfs_dqget_miss);
  651. DEFINE_DQUOT_EVENT(xfs_dqput);
  652. DEFINE_DQUOT_EVENT(xfs_dqput_wait);
  653. DEFINE_DQUOT_EVENT(xfs_dqput_free);
  654. DEFINE_DQUOT_EVENT(xfs_dqrele);
  655. DEFINE_DQUOT_EVENT(xfs_dqflush);
  656. DEFINE_DQUOT_EVENT(xfs_dqflush_force);
  657. DEFINE_DQUOT_EVENT(xfs_dqflush_done);
  658. /* not really iget events, but we re-use the format */
  659. DEFINE_IGET_EVENT(xfs_dquot_dqalloc);
  660. DEFINE_IGET_EVENT(xfs_dquot_dqdetach);
  661. DECLARE_EVENT_CLASS(xfs_loggrant_class,
  662. TP_PROTO(struct log *log, struct xlog_ticket *tic),
  663. TP_ARGS(log, tic),
  664. TP_STRUCT__entry(
  665. __field(dev_t, dev)
  666. __field(unsigned, trans_type)
  667. __field(char, ocnt)
  668. __field(char, cnt)
  669. __field(int, curr_res)
  670. __field(int, unit_res)
  671. __field(unsigned int, flags)
  672. __field(void *, reserve_headq)
  673. __field(void *, write_headq)
  674. __field(int, grant_reserve_cycle)
  675. __field(int, grant_reserve_bytes)
  676. __field(int, grant_write_cycle)
  677. __field(int, grant_write_bytes)
  678. __field(int, curr_cycle)
  679. __field(int, curr_block)
  680. __field(xfs_lsn_t, tail_lsn)
  681. ),
  682. TP_fast_assign(
  683. __entry->dev = log->l_mp->m_super->s_dev;
  684. __entry->trans_type = tic->t_trans_type;
  685. __entry->ocnt = tic->t_ocnt;
  686. __entry->cnt = tic->t_cnt;
  687. __entry->curr_res = tic->t_curr_res;
  688. __entry->unit_res = tic->t_unit_res;
  689. __entry->flags = tic->t_flags;
  690. __entry->reserve_headq = log->l_reserve_headq;
  691. __entry->write_headq = log->l_write_headq;
  692. __entry->grant_reserve_cycle = log->l_grant_reserve_cycle;
  693. __entry->grant_reserve_bytes = log->l_grant_reserve_bytes;
  694. __entry->grant_write_cycle = log->l_grant_write_cycle;
  695. __entry->grant_write_bytes = log->l_grant_write_bytes;
  696. __entry->curr_cycle = log->l_curr_cycle;
  697. __entry->curr_block = log->l_curr_block;
  698. __entry->tail_lsn = log->l_tail_lsn;
  699. ),
  700. TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u "
  701. "t_unit_res %u t_flags %s reserve_headq 0x%p "
  702. "write_headq 0x%p grant_reserve_cycle %d "
  703. "grant_reserve_bytes %d grant_write_cycle %d "
  704. "grant_write_bytes %d curr_cycle %d curr_block %d "
  705. "tail_cycle %d tail_block %d",
  706. MAJOR(__entry->dev), MINOR(__entry->dev),
  707. __print_symbolic(__entry->trans_type, XFS_TRANS_TYPES),
  708. __entry->ocnt,
  709. __entry->cnt,
  710. __entry->curr_res,
  711. __entry->unit_res,
  712. __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS),
  713. __entry->reserve_headq,
  714. __entry->write_headq,
  715. __entry->grant_reserve_cycle,
  716. __entry->grant_reserve_bytes,
  717. __entry->grant_write_cycle,
  718. __entry->grant_write_bytes,
  719. __entry->curr_cycle,
  720. __entry->curr_block,
  721. CYCLE_LSN(__entry->tail_lsn),
  722. BLOCK_LSN(__entry->tail_lsn)
  723. )
  724. )
  725. #define DEFINE_LOGGRANT_EVENT(name) \
  726. DEFINE_EVENT(xfs_loggrant_class, name, \
  727. TP_PROTO(struct log *log, struct xlog_ticket *tic), \
  728. TP_ARGS(log, tic))
  729. DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm);
  730. DEFINE_LOGGRANT_EVENT(xfs_log_done_perm);
  731. DEFINE_LOGGRANT_EVENT(xfs_log_reserve);
  732. DEFINE_LOGGRANT_EVENT(xfs_log_umount_write);
  733. DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter);
  734. DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit);
  735. DEFINE_LOGGRANT_EVENT(xfs_log_grant_error);
  736. DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1);
  737. DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1);
  738. DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2);
  739. DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2);
  740. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
  741. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
  742. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
  743. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1);
  744. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1);
  745. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2);
  746. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2);
  747. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
  748. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
  749. DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_sub);
  750. DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_enter);
  751. DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_exit);
  752. DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_sub);
  753. DECLARE_EVENT_CLASS(xfs_file_class,
  754. TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags),
  755. TP_ARGS(ip, count, offset, flags),
  756. TP_STRUCT__entry(
  757. __field(dev_t, dev)
  758. __field(xfs_ino_t, ino)
  759. __field(xfs_fsize_t, size)
  760. __field(xfs_fsize_t, new_size)
  761. __field(loff_t, offset)
  762. __field(size_t, count)
  763. __field(int, flags)
  764. ),
  765. TP_fast_assign(
  766. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  767. __entry->ino = ip->i_ino;
  768. __entry->size = ip->i_d.di_size;
  769. __entry->new_size = ip->i_new_size;
  770. __entry->offset = offset;
  771. __entry->count = count;
  772. __entry->flags = flags;
  773. ),
  774. TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx "
  775. "offset 0x%llx count 0x%zx ioflags %s",
  776. MAJOR(__entry->dev), MINOR(__entry->dev),
  777. __entry->ino,
  778. __entry->size,
  779. __entry->new_size,
  780. __entry->offset,
  781. __entry->count,
  782. __print_flags(__entry->flags, "|", XFS_IO_FLAGS))
  783. )
  784. #define DEFINE_RW_EVENT(name) \
  785. DEFINE_EVENT(xfs_file_class, name, \
  786. TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags), \
  787. TP_ARGS(ip, count, offset, flags))
  788. DEFINE_RW_EVENT(xfs_file_read);
  789. DEFINE_RW_EVENT(xfs_file_buffered_write);
  790. DEFINE_RW_EVENT(xfs_file_direct_write);
  791. DEFINE_RW_EVENT(xfs_file_splice_read);
  792. DEFINE_RW_EVENT(xfs_file_splice_write);
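/*
 * The file I/O events record both the on-disk size and the in-flight
 * i_new_size next to the request offset and length, which makes appending
 * writes easy to spot in a trace.  Illustrative call from a read or write
 * path:
 *
 *	trace_xfs_file_read(ip, count, pos, ioflags);
 */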
  793. DECLARE_EVENT_CLASS(xfs_page_class,
  794. TP_PROTO(struct inode *inode, struct page *page, unsigned long off),
  795. TP_ARGS(inode, page, off),
  796. TP_STRUCT__entry(
  797. __field(dev_t, dev)
  798. __field(xfs_ino_t, ino)
  799. __field(pgoff_t, pgoff)
  800. __field(loff_t, size)
  801. __field(unsigned long, offset)
  802. __field(int, delalloc)
  803. __field(int, unmapped)
  804. __field(int, unwritten)
  805. ),
  806. TP_fast_assign(
  807. int delalloc = -1, unmapped = -1, unwritten = -1;
  808. if (page_has_buffers(page))
  809. xfs_count_page_state(page, &delalloc,
  810. &unmapped, &unwritten);
  811. __entry->dev = inode->i_sb->s_dev;
  812. __entry->ino = XFS_I(inode)->i_ino;
  813. __entry->pgoff = page_offset(page);
  814. __entry->size = i_size_read(inode);
  815. __entry->offset = off;
  816. __entry->delalloc = delalloc;
  817. __entry->unmapped = unmapped;
  818. __entry->unwritten = unwritten;
  819. ),
  820. TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx "
  821. "delalloc %d unmapped %d unwritten %d",
  822. MAJOR(__entry->dev), MINOR(__entry->dev),
  823. __entry->ino,
  824. __entry->pgoff,
  825. __entry->size,
  826. __entry->offset,
  827. __entry->delalloc,
  828. __entry->unmapped,
  829. __entry->unwritten)
  830. )
  831. #define DEFINE_PAGE_EVENT(name) \
  832. DEFINE_EVENT(xfs_page_class, name, \
  833. TP_PROTO(struct inode *inode, struct page *page, unsigned long off), \
  834. TP_ARGS(inode, page, off))
  835. DEFINE_PAGE_EVENT(xfs_writepage);
  836. DEFINE_PAGE_EVENT(xfs_releasepage);
  837. DEFINE_PAGE_EVENT(xfs_invalidatepage);
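/*
 * The page events report -1 for delalloc/unmapped/unwritten when the page
 * has no buffer heads attached, since xfs_count_page_state() is only run
 * under page_has_buffers().  Illustrative call from writeback:
 *
 *	trace_xfs_writepage(inode, page, 0);
 */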
  838. DECLARE_EVENT_CLASS(xfs_iomap_class,
  839. TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
  840. int flags, struct xfs_bmbt_irec *irec),
  841. TP_ARGS(ip, offset, count, flags, irec),
  842. TP_STRUCT__entry(
  843. __field(dev_t, dev)
  844. __field(xfs_ino_t, ino)
  845. __field(loff_t, size)
  846. __field(loff_t, new_size)
  847. __field(loff_t, offset)
  848. __field(size_t, count)
  849. __field(int, flags)
  850. __field(xfs_fileoff_t, startoff)
  851. __field(xfs_fsblock_t, startblock)
  852. __field(xfs_filblks_t, blockcount)
  853. ),
  854. TP_fast_assign(
  855. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  856. __entry->ino = ip->i_ino;
  857. __entry->size = ip->i_d.di_size;
  858. __entry->new_size = ip->i_new_size;
  859. __entry->offset = offset;
  860. __entry->count = count;
  861. __entry->flags = flags;
  862. __entry->startoff = irec ? irec->br_startoff : 0;
  863. __entry->startblock = irec ? irec->br_startblock : 0;
  864. __entry->blockcount = irec ? irec->br_blockcount : 0;
  865. ),
  866. TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx "
  867. "offset 0x%llx count %zd flags %s "
  868. "startoff 0x%llx startblock %lld blockcount 0x%llx",
  869. MAJOR(__entry->dev), MINOR(__entry->dev),
  870. __entry->ino,
  871. __entry->size,
  872. __entry->new_size,
  873. __entry->offset,
  874. __entry->count,
  875. __print_flags(__entry->flags, "|", BMAPI_FLAGS),
  876. __entry->startoff,
  877. (__int64_t)__entry->startblock,
  878. __entry->blockcount)
  879. )
  880. #define DEFINE_IOMAP_EVENT(name) \
  881. DEFINE_EVENT(xfs_iomap_class, name, \
  882. TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \
  883. int flags, struct xfs_bmbt_irec *irec), \
  884. TP_ARGS(ip, offset, count, flags, irec))
  885. DEFINE_IOMAP_EVENT(xfs_iomap_enter);
  886. DEFINE_IOMAP_EVENT(xfs_iomap_found);
  887. DEFINE_IOMAP_EVENT(xfs_iomap_alloc);
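/*
 * The iomap events tolerate a NULL irec (the "enter" case, before any
 * mapping has been looked up); startoff/startblock/blockcount are then
 * simply reported as zero.  Illustrative calls:
 *
 *	trace_xfs_iomap_enter(ip, offset, count, flags, NULL);
 *	trace_xfs_iomap_found(ip, offset, count, flags, &imap);
 */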
  888. DECLARE_EVENT_CLASS(xfs_simple_io_class,
  889. TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
  890. TP_ARGS(ip, offset, count),
  891. TP_STRUCT__entry(
  892. __field(dev_t, dev)
  893. __field(xfs_ino_t, ino)
  894. __field(loff_t, size)
  895. __field(loff_t, new_size)
  896. __field(loff_t, offset)
  897. __field(size_t, count)
  898. ),
  899. TP_fast_assign(
  900. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  901. __entry->ino = ip->i_ino;
  902. __entry->size = ip->i_d.di_size;
  903. __entry->new_size = ip->i_new_size;
  904. __entry->offset = offset;
  905. __entry->count = count;
  906. ),
  907. TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx "
  908. "offset 0x%llx count %zd",
  909. MAJOR(__entry->dev), MINOR(__entry->dev),
  910. __entry->ino,
  911. __entry->size,
  912. __entry->new_size,
  913. __entry->offset,
  914. __entry->count)
  915. );
  916. #define DEFINE_SIMPLE_IO_EVENT(name) \
  917. DEFINE_EVENT(xfs_simple_io_class, name, \
  918. TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), \
  919. TP_ARGS(ip, offset, count))
  920. DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
  921. DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
  922. TRACE_EVENT(xfs_itruncate_start,
  923. TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size, int flag,
  924. xfs_off_t toss_start, xfs_off_t toss_finish),
  925. TP_ARGS(ip, new_size, flag, toss_start, toss_finish),
  926. TP_STRUCT__entry(
  927. __field(dev_t, dev)
  928. __field(xfs_ino_t, ino)
  929. __field(xfs_fsize_t, size)
  930. __field(xfs_fsize_t, new_size)
  931. __field(xfs_off_t, toss_start)
  932. __field(xfs_off_t, toss_finish)
  933. __field(int, flag)
  934. ),
  935. TP_fast_assign(
  936. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  937. __entry->ino = ip->i_ino;
  938. __entry->size = ip->i_d.di_size;
  939. __entry->new_size = new_size;
  940. __entry->toss_start = toss_start;
  941. __entry->toss_finish = toss_finish;
  942. __entry->flag = flag;
  943. ),
  944. TP_printk("dev %d:%d ino 0x%llx %s size 0x%llx new_size 0x%llx "
  945. "toss start 0x%llx toss finish 0x%llx",
  946. MAJOR(__entry->dev), MINOR(__entry->dev),
  947. __entry->ino,
  948. __print_flags(__entry->flag, "|", XFS_ITRUNC_FLAGS),
  949. __entry->size,
  950. __entry->new_size,
  951. __entry->toss_start,
  952. __entry->toss_finish)
  953. );
  954. DECLARE_EVENT_CLASS(xfs_itrunc_class,
  955. TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size),
  956. TP_ARGS(ip, new_size),
  957. TP_STRUCT__entry(
  958. __field(dev_t, dev)
  959. __field(xfs_ino_t, ino)
  960. __field(xfs_fsize_t, size)
  961. __field(xfs_fsize_t, new_size)
  962. ),
  963. TP_fast_assign(
  964. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  965. __entry->ino = ip->i_ino;
  966. __entry->size = ip->i_d.di_size;
  967. __entry->new_size = new_size;
  968. ),
  969. TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx",
  970. MAJOR(__entry->dev), MINOR(__entry->dev),
  971. __entry->ino,
  972. __entry->size,
  973. __entry->new_size)
  974. )
  975. #define DEFINE_ITRUNC_EVENT(name) \
  976. DEFINE_EVENT(xfs_itrunc_class, name, \
  977. TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \
  978. TP_ARGS(ip, new_size))
  979. DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_start);
  980. DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_end);
  981. TRACE_EVENT(xfs_pagecache_inval,
  982. TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish),
  983. TP_ARGS(ip, start, finish),
  984. TP_STRUCT__entry(
  985. __field(dev_t, dev)
  986. __field(xfs_ino_t, ino)
  987. __field(xfs_fsize_t, size)
  988. __field(xfs_off_t, start)
  989. __field(xfs_off_t, finish)
  990. ),
  991. TP_fast_assign(
  992. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  993. __entry->ino = ip->i_ino;
  994. __entry->size = ip->i_d.di_size;
  995. __entry->start = start;
  996. __entry->finish = finish;
  997. ),
  998. TP_printk("dev %d:%d ino 0x%llx size 0x%llx start 0x%llx finish 0x%llx",
  999. MAJOR(__entry->dev), MINOR(__entry->dev),
  1000. __entry->ino,
  1001. __entry->size,
  1002. __entry->start,
  1003. __entry->finish)
  1004. );
  1005. TRACE_EVENT(xfs_bunmap,
  1006. TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t bno, xfs_filblks_t len,
  1007. int flags, unsigned long caller_ip),
  1008. TP_ARGS(ip, bno, len, flags, caller_ip),
  1009. TP_STRUCT__entry(
  1010. __field(dev_t, dev)
  1011. __field(xfs_ino_t, ino)
  1012. __field(xfs_fsize_t, size)
  1013. __field(xfs_fileoff_t, bno)
  1014. __field(xfs_filblks_t, len)
  1015. __field(unsigned long, caller_ip)
  1016. __field(int, flags)
  1017. ),
  1018. TP_fast_assign(
  1019. __entry->dev = VFS_I(ip)->i_sb->s_dev;
  1020. __entry->ino = ip->i_ino;
  1021. __entry->size = ip->i_d.di_size;
  1022. __entry->bno = bno;
  1023. __entry->len = len;
  1024. __entry->caller_ip = caller_ip;
  1025. __entry->flags = flags;
  1026. ),
  1027. TP_printk("dev %d:%d ino 0x%llx size 0x%llx bno 0x%llx len 0x%llx "
  1028. "flags %s caller %pf",
  1029. MAJOR(__entry->dev), MINOR(__entry->dev),
  1030. __entry->ino,
  1031. __entry->size,
  1032. __entry->bno,
  1033. __entry->len,
  1034. __print_flags(__entry->flags, "|", XFS_BMAPI_FLAGS),
  1035. (void *)__entry->caller_ip)
  1036. );
  1037. #define XFS_BUSY_SYNC \
  1038. { 0, "async" }, \
  1039. { 1, "sync" }
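/*
 * XFS_BUSY_SYNC is a { value, "string" } table for __print_symbolic(), the
 * same pattern used by XFS_BUSY_STATES further down and by the various
 * __print_flags() tables referenced throughout this file; in the formatted
 * output the raw 0/1 'sync' argument shows up as "async" or "sync":
 *
 *	__print_symbolic(__entry->sync, XFS_BUSY_SYNC)
 */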
  1040. TRACE_EVENT(xfs_alloc_busy,
  1041. TP_PROTO(struct xfs_trans *trans, xfs_agnumber_t agno,
  1042. xfs_agblock_t agbno, xfs_extlen_t len, int sync),
  1043. TP_ARGS(trans, agno, agbno, len, sync),
  1044. TP_STRUCT__entry(
  1045. __field(dev_t, dev)
  1046. __field(struct xfs_trans *, tp)
  1047. __field(int, tid)
  1048. __field(xfs_agnumber_t, agno)
  1049. __field(xfs_agblock_t, agbno)
  1050. __field(xfs_extlen_t, len)
  1051. __field(int, sync)
  1052. ),
  1053. TP_fast_assign(
  1054. __entry->dev = trans->t_mountp->m_super->s_dev;
  1055. __entry->tp = trans;
  1056. __entry->tid = trans->t_ticket->t_tid;
  1057. __entry->agno = agno;
  1058. __entry->agbno = agbno;
  1059. __entry->len = len;
  1060. __entry->sync = sync;
  1061. ),
  1062. TP_printk("dev %d:%d trans 0x%p tid 0x%x agno %u agbno %u len %u %s",
  1063. MAJOR(__entry->dev), MINOR(__entry->dev),
  1064. __entry->tp,
  1065. __entry->tid,
  1066. __entry->agno,
  1067. __entry->agbno,
  1068. __entry->len,
  1069. __print_symbolic(__entry->sync, XFS_BUSY_SYNC))
  1070. );
  1071. TRACE_EVENT(xfs_alloc_unbusy,
  1072. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
  1073. xfs_agblock_t agbno, xfs_extlen_t len),
  1074. TP_ARGS(mp, agno, agbno, len),
  1075. TP_STRUCT__entry(
  1076. __field(dev_t, dev)
  1077. __field(xfs_agnumber_t, agno)
  1078. __field(xfs_agblock_t, agbno)
  1079. __field(xfs_extlen_t, len)
  1080. ),
  1081. TP_fast_assign(
  1082. __entry->dev = mp->m_super->s_dev;
  1083. __entry->agno = agno;
  1084. __entry->agbno = agbno;
  1085. __entry->len = len;
  1086. ),
  1087. TP_printk("dev %d:%d agno %u agbno %u len %u",
  1088. MAJOR(__entry->dev), MINOR(__entry->dev),
  1089. __entry->agno,
  1090. __entry->agbno,
  1091. __entry->len)
  1092. );
  1093. #define XFS_BUSY_STATES \
  1094. { 0, "missing" }, \
  1095. { 1, "found" }
  1096. TRACE_EVENT(xfs_alloc_busysearch,
  1097. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
  1098. xfs_agblock_t agbno, xfs_extlen_t len, int found),
  1099. TP_ARGS(mp, agno, agbno, len, found),
  1100. TP_STRUCT__entry(
  1101. __field(dev_t, dev)
  1102. __field(xfs_agnumber_t, agno)
  1103. __field(xfs_agblock_t, agbno)
  1104. __field(xfs_extlen_t, len)
  1105. __field(int, found)
  1106. ),
  1107. TP_fast_assign(
  1108. __entry->dev = mp->m_super->s_dev;
  1109. __entry->agno = agno;
  1110. __entry->agbno = agbno;
  1111. __entry->len = len;
  1112. __entry->found = found;
  1113. ),
  1114. TP_printk("dev %d:%d agno %u agbno %u len %u %s",
  1115. MAJOR(__entry->dev), MINOR(__entry->dev),
  1116. __entry->agno,
  1117. __entry->agbno,
  1118. __entry->len,
  1119. __print_symbolic(__entry->found, XFS_BUSY_STATES))
  1120. );
  1121. TRACE_EVENT(xfs_trans_commit_lsn,
  1122. TP_PROTO(struct xfs_trans *trans),
  1123. TP_ARGS(trans),
  1124. TP_STRUCT__entry(
  1125. __field(dev_t, dev)
  1126. __field(struct xfs_trans *, tp)
  1127. __field(xfs_lsn_t, lsn)
  1128. ),
  1129. TP_fast_assign(
  1130. __entry->dev = trans->t_mountp->m_super->s_dev;
  1131. __entry->tp = trans;
  1132. __entry->lsn = trans->t_commit_lsn;
  1133. ),
  1134. TP_printk("dev %d:%d trans 0x%p commit_lsn 0x%llx",
  1135. MAJOR(__entry->dev), MINOR(__entry->dev),
  1136. __entry->tp,
  1137. __entry->lsn)
  1138. );
  1139. TRACE_EVENT(xfs_agf,
  1140. TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags,
  1141. unsigned long caller_ip),
  1142. TP_ARGS(mp, agf, flags, caller_ip),
  1143. TP_STRUCT__entry(
  1144. __field(dev_t, dev)
  1145. __field(xfs_agnumber_t, agno)
  1146. __field(int, flags)
  1147. __field(__u32, length)
  1148. __field(__u32, bno_root)
  1149. __field(__u32, cnt_root)
  1150. __field(__u32, bno_level)
  1151. __field(__u32, cnt_level)
  1152. __field(__u32, flfirst)
  1153. __field(__u32, fllast)
  1154. __field(__u32, flcount)
  1155. __field(__u32, freeblks)
  1156. __field(__u32, longest)
  1157. __field(unsigned long, caller_ip)
  1158. ),
  1159. TP_fast_assign(
  1160. __entry->dev = mp->m_super->s_dev;
  1161. __entry->agno = be32_to_cpu(agf->agf_seqno),
  1162. __entry->flags = flags;
  1163. __entry->length = be32_to_cpu(agf->agf_length),
  1164. __entry->bno_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
  1165. __entry->cnt_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
  1166. __entry->bno_level =
  1167. be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]),
  1168. __entry->cnt_level =
  1169. be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]),
  1170. __entry->flfirst = be32_to_cpu(agf->agf_flfirst),
  1171. __entry->fllast = be32_to_cpu(agf->agf_fllast),
  1172. __entry->flcount = be32_to_cpu(agf->agf_flcount),
  1173. __entry->freeblks = be32_to_cpu(agf->agf_freeblks),
  1174. __entry->longest = be32_to_cpu(agf->agf_longest);
  1175. __entry->caller_ip = caller_ip;
  1176. ),
  1177. TP_printk("dev %d:%d agno %u flags %s length %u roots b %u c %u "
  1178. "levels b %u c %u flfirst %u fllast %u flcount %u "
  1179. "freeblks %u longest %u caller %pf",
  1180. MAJOR(__entry->dev), MINOR(__entry->dev),
  1181. __entry->agno,
  1182. __print_flags(__entry->flags, "|", XFS_AGF_FLAGS),
  1183. __entry->length,
  1184. __entry->bno_root,
  1185. __entry->cnt_root,
  1186. __entry->bno_level,
  1187. __entry->cnt_level,
  1188. __entry->flfirst,
  1189. __entry->fllast,
  1190. __entry->flcount,
  1191. __entry->freeblks,
  1192. __entry->longest,
  1193. (void *)__entry->caller_ip)
  1194. );
  1195. TRACE_EVENT(xfs_free_extent,
  1196. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
  1197. xfs_extlen_t len, bool isfl, int haveleft, int haveright),
  1198. TP_ARGS(mp, agno, agbno, len, isfl, haveleft, haveright),
  1199. TP_STRUCT__entry(
  1200. __field(dev_t, dev)
  1201. __field(xfs_agnumber_t, agno)
  1202. __field(xfs_agblock_t, agbno)
  1203. __field(xfs_extlen_t, len)
  1204. __field(int, isfl)
  1205. __field(int, haveleft)
  1206. __field(int, haveright)
  1207. ),
  1208. TP_fast_assign(
  1209. __entry->dev = mp->m_super->s_dev;
  1210. __entry->agno = agno;
  1211. __entry->agbno = agbno;
  1212. __entry->len = len;
  1213. __entry->isfl = isfl;
  1214. __entry->haveleft = haveleft;
  1215. __entry->haveright = haveright;
  1216. ),
  1217. TP_printk("dev %d:%d agno %u agbno %u len %u isfl %d %s",
  1218. MAJOR(__entry->dev), MINOR(__entry->dev),
  1219. __entry->agno,
  1220. __entry->agbno,
  1221. __entry->len,
  1222. __entry->isfl,
  1223. __entry->haveleft ?
  1224. (__entry->haveright ? "both" : "left") :
  1225. (__entry->haveright ? "right" : "none"))
  1226. );
  1227. DECLARE_EVENT_CLASS(xfs_alloc_class,
  1228. TP_PROTO(struct xfs_alloc_arg *args),
  1229. TP_ARGS(args),
  1230. TP_STRUCT__entry(
  1231. __field(dev_t, dev)
  1232. __field(xfs_agnumber_t, agno)
  1233. __field(xfs_agblock_t, agbno)
  1234. __field(xfs_extlen_t, minlen)
  1235. __field(xfs_extlen_t, maxlen)
  1236. __field(xfs_extlen_t, mod)
  1237. __field(xfs_extlen_t, prod)
  1238. __field(xfs_extlen_t, minleft)
  1239. __field(xfs_extlen_t, total)
  1240. __field(xfs_extlen_t, alignment)
  1241. __field(xfs_extlen_t, minalignslop)
  1242. __field(xfs_extlen_t, len)
  1243. __field(short, type)
  1244. __field(short, otype)
  1245. __field(char, wasdel)
  1246. __field(char, wasfromfl)
  1247. __field(char, isfl)
  1248. __field(char, userdata)
  1249. __field(xfs_fsblock_t, firstblock)
  1250. ),
  1251. TP_fast_assign(
  1252. __entry->dev = args->mp->m_super->s_dev;
  1253. __entry->agno = args->agno;
  1254. __entry->agbno = args->agbno;
  1255. __entry->minlen = args->minlen;
  1256. __entry->maxlen = args->maxlen;
  1257. __entry->mod = args->mod;
  1258. __entry->prod = args->prod;
  1259. __entry->minleft = args->minleft;
  1260. __entry->total = args->total;
  1261. __entry->alignment = args->alignment;
  1262. __entry->minalignslop = args->minalignslop;
  1263. __entry->len = args->len;
  1264. __entry->type = args->type;
  1265. __entry->otype = args->otype;
  1266. __entry->wasdel = args->wasdel;
  1267. __entry->wasfromfl = args->wasfromfl;
  1268. __entry->isfl = args->isfl;
  1269. __entry->userdata = args->userdata;
  1270. __entry->firstblock = args->firstblock;
  1271. ),
  1272. TP_printk("dev %d:%d agno %u agbno %u minlen %u maxlen %u mod %u "
  1273. "prod %u minleft %u total %u alignment %u minalignslop %u "
  1274. "len %u type %s otype %s wasdel %d wasfromfl %d isfl %d "
  1275. "userdata %d firstblock 0x%llx",
  1276. MAJOR(__entry->dev), MINOR(__entry->dev),
  1277. __entry->agno,
  1278. __entry->agbno,
  1279. __entry->minlen,
  1280. __entry->maxlen,
  1281. __entry->mod,
  1282. __entry->prod,
  1283. __entry->minleft,
  1284. __entry->total,
  1285. __entry->alignment,
  1286. __entry->minalignslop,
  1287. __entry->len,
  1288. __print_symbolic(__entry->type, XFS_ALLOC_TYPES),
  1289. __print_symbolic(__entry->otype, XFS_ALLOC_TYPES),
  1290. __entry->wasdel,
  1291. __entry->wasfromfl,
  1292. __entry->isfl,
  1293. __entry->userdata,
  1294. __entry->firstblock)
  1295. )
  1296. #define DEFINE_ALLOC_EVENT(name) \
  1297. DEFINE_EVENT(xfs_alloc_class, name, \
  1298. TP_PROTO(struct xfs_alloc_arg *args), \
  1299. TP_ARGS(args))
DEFINE_ALLOC_EVENT(xfs_alloc_exact_done);
DEFINE_ALLOC_EVENT(xfs_alloc_exact_error);
DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft);
DEFINE_ALLOC_EVENT(xfs_alloc_near_first);
DEFINE_ALLOC_EVENT(xfs_alloc_near_greater);
DEFINE_ALLOC_EVENT(xfs_alloc_near_lesser);
DEFINE_ALLOC_EVENT(xfs_alloc_near_error);
DEFINE_ALLOC_EVENT(xfs_alloc_size_neither);
DEFINE_ALLOC_EVENT(xfs_alloc_size_noentry);
DEFINE_ALLOC_EVENT(xfs_alloc_size_nominleft);
DEFINE_ALLOC_EVENT(xfs_alloc_size_done);
DEFINE_ALLOC_EVENT(xfs_alloc_size_error);
DEFINE_ALLOC_EVENT(xfs_alloc_small_freelist);
DEFINE_ALLOC_EVENT(xfs_alloc_small_notenough);
DEFINE_ALLOC_EVENT(xfs_alloc_small_done);
DEFINE_ALLOC_EVENT(xfs_alloc_small_error);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_badargs);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_nofix);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_noagbp);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_allfailed);

DECLARE_EVENT_CLASS(xfs_dir2_class,
	TP_PROTO(struct xfs_da_args *args),
	TP_ARGS(args),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__dynamic_array(char, name, args->namelen)
		__field(int, namelen)
		__field(xfs_dahash_t, hashval)
		__field(xfs_ino_t, inumber)
		__field(int, op_flags)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(args->dp)->i_sb->s_dev;
		__entry->ino = args->dp->i_ino;
		if (args->namelen)
			memcpy(__get_str(name), args->name, args->namelen);
		__entry->namelen = args->namelen;
		__entry->hashval = args->hashval;
		__entry->inumber = args->inumber;
		__entry->op_flags = args->op_flags;
	),
	TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d hashval 0x%x "
		  "inumber 0x%llx op_flags %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->namelen,
		  __entry->namelen ? __get_str(name) : NULL,
		  __entry->namelen,
		  __entry->hashval,
		  __entry->inumber,
		  __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS))
)
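
/*
 * xfs_dir2_class above copies the directory entry name into a
 * __dynamic_array sized by args->namelen, and the "%.*s" format only
 * dereferences that copy when namelen is non-zero.  The directory events
 * defined from it below take a single xfs_da_args pointer, e.g.
 * (illustrative only, not part of this header):
 *
 *	trace_xfs_dir2_sf_lookup(args);
 */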
#define DEFINE_DIR2_EVENT(name) \
DEFINE_EVENT(xfs_dir2_class, name, \
	TP_PROTO(struct xfs_da_args *args), \
	TP_ARGS(args))
DEFINE_DIR2_EVENT(xfs_dir2_sf_addname);
DEFINE_DIR2_EVENT(xfs_dir2_sf_create);
DEFINE_DIR2_EVENT(xfs_dir2_sf_lookup);
DEFINE_DIR2_EVENT(xfs_dir2_sf_replace);
DEFINE_DIR2_EVENT(xfs_dir2_sf_removename);
DEFINE_DIR2_EVENT(xfs_dir2_sf_toino4);
DEFINE_DIR2_EVENT(xfs_dir2_sf_toino8);
DEFINE_DIR2_EVENT(xfs_dir2_sf_to_block);
DEFINE_DIR2_EVENT(xfs_dir2_block_addname);
DEFINE_DIR2_EVENT(xfs_dir2_block_lookup);
DEFINE_DIR2_EVENT(xfs_dir2_block_replace);
DEFINE_DIR2_EVENT(xfs_dir2_block_removename);
DEFINE_DIR2_EVENT(xfs_dir2_block_to_sf);
DEFINE_DIR2_EVENT(xfs_dir2_block_to_leaf);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_addname);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_lookup);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_replace);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_removename);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_to_block);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_to_node);
DEFINE_DIR2_EVENT(xfs_dir2_node_addname);
DEFINE_DIR2_EVENT(xfs_dir2_node_lookup);
DEFINE_DIR2_EVENT(xfs_dir2_node_replace);
DEFINE_DIR2_EVENT(xfs_dir2_node_removename);
DEFINE_DIR2_EVENT(xfs_dir2_node_to_leaf);

DECLARE_EVENT_CLASS(xfs_dir2_space_class,
	TP_PROTO(struct xfs_da_args *args, int idx),
	TP_ARGS(args, idx),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(int, op_flags)
		__field(int, idx)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(args->dp)->i_sb->s_dev;
		__entry->ino = args->dp->i_ino;
		__entry->op_flags = args->op_flags;
		__entry->idx = idx;
	),
	TP_printk("dev %d:%d ino 0x%llx op_flags %s index %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS),
		  __entry->idx)
)

#define DEFINE_DIR2_SPACE_EVENT(name) \
DEFINE_EVENT(xfs_dir2_space_class, name, \
	TP_PROTO(struct xfs_da_args *args, int idx), \
	TP_ARGS(args, idx))
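
/*
 * The space events below carry an extra index argument alongside the usual
 * xfs_da_args state.  Illustrative call, not part of this header:
 *
 *	trace_xfs_dir2_leafn_add(args, index);
 */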
DEFINE_DIR2_SPACE_EVENT(xfs_dir2_leafn_add);
DEFINE_DIR2_SPACE_EVENT(xfs_dir2_leafn_remove);
DEFINE_DIR2_SPACE_EVENT(xfs_dir2_grow_inode);
DEFINE_DIR2_SPACE_EVENT(xfs_dir2_shrink_inode);
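
/*
 * xfs_dir2_leafn_moveents does not share an argument list with any of the
 * classes above, so it is declared as a standalone TRACE_EVENT below
 * rather than through DEFINE_EVENT.  An illustrative call (argument names
 * follow the prototype, not an actual caller):
 *
 *	trace_xfs_dir2_leafn_moveents(args, src_idx, dst_idx, count);
 */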
TRACE_EVENT(xfs_dir2_leafn_moveents,
	TP_PROTO(struct xfs_da_args *args, int src_idx, int dst_idx, int count),
	TP_ARGS(args, src_idx, dst_idx, count),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(int, op_flags)
		__field(int, src_idx)
		__field(int, dst_idx)
		__field(int, count)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(args->dp)->i_sb->s_dev;
		__entry->ino = args->dp->i_ino;
		__entry->op_flags = args->op_flags;
		__entry->src_idx = src_idx;
		__entry->dst_idx = dst_idx;
		__entry->count = count;
	),
	TP_printk("dev %d:%d ino 0x%llx op_flags %s "
		  "src_idx %d dst_idx %d count %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS),
		  __entry->src_idx,
		  __entry->dst_idx,
		  __entry->count)
);

#define XFS_SWAPEXT_INODES \
	{ 0,	"target" }, \
	{ 1,	"temp" }

#define XFS_INODE_FORMAT_STR \
	{ 0,	"invalid" }, \
	{ 1,	"local" }, \
	{ 2,	"extent" }, \
	{ 3,	"btree" }
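
/*
 * Lookup tables for __print_symbolic() in the swap-extent events below:
 * the raw 'which' value prints as "target" or "temp", and the on-disk
 * inode format value prints as "invalid", "local", "extent" or "btree".
 */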
DECLARE_EVENT_CLASS(xfs_swap_extent_class,
	TP_PROTO(struct xfs_inode *ip, int which),
	TP_ARGS(ip, which),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(int, which)
		__field(xfs_ino_t, ino)
		__field(int, format)
		__field(int, nex)
		__field(int, max_nex)
		__field(int, broot_size)
		__field(int, fork_off)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->which = which;
		__entry->ino = ip->i_ino;
		__entry->format = ip->i_d.di_format;
		__entry->nex = ip->i_d.di_nextents;
		__entry->max_nex = ip->i_df.if_ext_max;
		__entry->broot_size = ip->i_df.if_broot_bytes;
		__entry->fork_off = XFS_IFORK_BOFF(ip);
	),
	TP_printk("dev %d:%d ino 0x%llx (%s), %s format, num_extents %d, "
		  "Max in-fork extents %d, broot size %d, fork offset %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_symbolic(__entry->which, XFS_SWAPEXT_INODES),
		  __print_symbolic(__entry->format, XFS_INODE_FORMAT_STR),
		  __entry->nex,
		  __entry->max_nex,
		  __entry->broot_size,
		  __entry->fork_off)
)

#define DEFINE_SWAPEXT_EVENT(name) \
DEFINE_EVENT(xfs_swap_extent_class, name, \
	TP_PROTO(struct xfs_inode *ip, int which), \
	TP_ARGS(ip, which))
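
/*
 * The before/after events below are intended to be emitted as a pair for
 * each inode taking part in an extent swap, so both inodes' fork state can
 * be compared across the operation.  A sketch of a call site (illustrative
 * only, not defined in this header), where 0 selects the "target" label:
 *
 *	trace_xfs_swap_extent_before(ip, 0);
 */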
DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before);
DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after);

DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
	TP_PROTO(struct log *log, struct xlog_recover *trans,
		struct xlog_recover_item *item, int pass),
	TP_ARGS(log, trans, item, pass),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned long, item)
		__field(xlog_tid_t, tid)
		__field(int, type)
		__field(int, pass)
		__field(int, count)
		__field(int, total)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		__entry->item = (unsigned long)item;
		__entry->tid = trans->r_log_tid;
		__entry->type = ITEM_TYPE(item);
		__entry->pass = pass;
		__entry->count = item->ri_cnt;
		__entry->total = item->ri_total;
	),
	TP_printk("dev %d:%d trans 0x%x, pass %d, item 0x%p, item type %s "
		  "item region count/total %d/%d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->tid,
		  __entry->pass,
		  (void *)__entry->item,
		  __print_symbolic(__entry->type, XFS_LI_TYPE_DESC),
		  __entry->count,
		  __entry->total)
)

#define DEFINE_LOG_RECOVER_ITEM(name) \
DEFINE_EVENT(xfs_log_recover_item_class, name, \
	TP_PROTO(struct log *log, struct xlog_recover *trans, \
		struct xlog_recover_item *item, int pass), \
	TP_ARGS(log, trans, item, pass))
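
/*
 * The item events below fire as recovery assembles, reorders and replays
 * transaction items; the pass number is printed so pass 1 (scanning) and
 * pass 2 (replay) can be told apart in a trace.  Illustrative call site,
 * not part of this header:
 *
 *	trace_xfs_log_recover_item_recover(log, trans, item, pass);
 */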
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_add);
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_add_cont);
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_head);
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_tail);
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_recover);

DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class,
	TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f),
	TP_ARGS(log, buf_f),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(__int64_t, blkno)
		__field(unsigned short, len)
		__field(unsigned short, flags)
		__field(unsigned short, size)
		__field(unsigned int, map_size)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		__entry->blkno = buf_f->blf_blkno;
		__entry->len = buf_f->blf_len;
		__entry->flags = buf_f->blf_flags;
		__entry->size = buf_f->blf_size;
		__entry->map_size = buf_f->blf_map_size;
	),
	TP_printk("dev %d:%d blkno 0x%llx, len %u, flags 0x%x, size %d, "
		  "map_size %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->blkno,
		  __entry->len,
		  __entry->flags,
		  __entry->size,
		  __entry->map_size)
)

#define DEFINE_LOG_RECOVER_BUF_ITEM(name) \
DEFINE_EVENT(xfs_log_recover_buf_item_class, name, \
	TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f), \
	TP_ARGS(log, buf_f))
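
/*
 * The buffer events below cover both cancellation-table handling
 * (cancel, cancel_add, cancel_ref_inc, not_cancel) and the replay paths
 * for inode, regular and dquot buffers, keyed off the buf log format
 * record being recovered.
 */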
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_not_cancel);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_add);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_ref_inc);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_recover);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_inode_buf);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_reg_buf);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf);

DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class,
	TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f),
	TP_ARGS(log, in_f),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(unsigned short, size)
		__field(int, fields)
		__field(unsigned short, asize)
		__field(unsigned short, dsize)
		__field(__int64_t, blkno)
		__field(int, len)
		__field(int, boffset)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		__entry->ino = in_f->ilf_ino;
		__entry->size = in_f->ilf_size;
		__entry->fields = in_f->ilf_fields;
		__entry->asize = in_f->ilf_asize;
		__entry->dsize = in_f->ilf_dsize;
		__entry->blkno = in_f->ilf_blkno;
		__entry->len = in_f->ilf_len;
		__entry->boffset = in_f->ilf_boffset;
	),
	TP_printk("dev %d:%d ino 0x%llx, size %u, fields 0x%x, asize %d, "
		  "dsize %d, blkno 0x%llx, len %d, boffset %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->size,
		  __entry->fields,
		  __entry->asize,
		  __entry->dsize,
		  __entry->blkno,
		  __entry->len,
		  __entry->boffset)
)

#define DEFINE_LOG_RECOVER_INO_ITEM(name) \
DEFINE_EVENT(xfs_log_recover_ino_item_class, name, \
	TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f), \
	TP_ARGS(log, in_f))
DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover);
DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_cancel);
DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_skip);

#endif /* _TRACE_XFS_H */
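
/*
 * This part must stay outside the _TRACE_XFS_H include guard:
 * <trace/define_trace.h> re-includes this header with different macro
 * definitions to generate the tracepoint registration and event code,
 * and TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE tell it where to find us.
 */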
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE xfs_trace
#include <trace/define_trace.h>