xfs_trace.h

/*
 * Copyright (c) 2009, Christoph Hellwig
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM xfs

#if !defined(_TRACE_XFS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_XFS_H

#include <linux/tracepoint.h>

struct xfs_agf;
struct xfs_alloc_arg;
struct xfs_attr_list_context;
struct xfs_buf_log_item;
struct xfs_da_args;
struct xfs_da_node_entry;
struct xfs_dquot;
struct xlog_ticket;
struct log;
struct xlog_recover;
struct xlog_recover_item;
struct xfs_buf_log_format;
struct xfs_inode_log_format;

DECLARE_EVENT_CLASS(xfs_attr_list_class,
	TP_PROTO(struct xfs_attr_list_context *ctx),
	TP_ARGS(ctx),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(u32, hashval)
		__field(u32, blkno)
		__field(u32, offset)
		__field(void *, alist)
		__field(int, bufsize)
		__field(int, count)
		__field(int, firstu)
		__field(int, dupcnt)
		__field(int, flags)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
		__entry->ino = ctx->dp->i_ino;
		__entry->hashval = ctx->cursor->hashval;
		__entry->blkno = ctx->cursor->blkno;
		__entry->offset = ctx->cursor->offset;
		__entry->alist = ctx->alist;
		__entry->bufsize = ctx->bufsize;
		__entry->count = ctx->count;
		__entry->firstu = ctx->firstu;
		__entry->flags = ctx->flags;
	),
	TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
		"alist 0x%p size %u count %u firstu %u flags %d %s",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__entry->ino,
		__entry->hashval,
		__entry->blkno,
		__entry->offset,
		__entry->dupcnt,
		__entry->alist,
		__entry->bufsize,
		__entry->count,
		__entry->firstu,
		__entry->flags,
		__print_flags(__entry->flags, "|", XFS_ATTR_FLAGS)
	)
)

#define DEFINE_ATTR_LIST_EVENT(name) \
DEFINE_EVENT(xfs_attr_list_class, name, \
	TP_PROTO(struct xfs_attr_list_context *ctx), \
	TP_ARGS(ctx))
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf_all);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf_end);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_full);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_add);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_wrong_blk);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_notfound);
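/*
 * DECLARE_EVENT_CLASS() above defines a shared event layout, and each
 * DEFINE_EVENT() instance generates a tracepoint function named after
 * the event, taking the class's TP_PROTO() arguments.  A minimal sketch
 * of how the attr listing code might fire one of these events
 * (illustrative only; the actual call sites live in the .c files, not
 * in this header):
 *
 *	trace_xfs_attr_list_sf(context);
 */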
DECLARE_EVENT_CLASS(xfs_perag_class,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount,
		unsigned long caller_ip),
	TP_ARGS(mp, agno, refcount, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(int, refcount)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = agno;
		__entry->refcount = refcount;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d agno %u refcount %d caller %pf",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__entry->agno,
		__entry->refcount,
		(char *)__entry->caller_ip)
);

#define DEFINE_PERAG_REF_EVENT(name) \
DEFINE_EVENT(xfs_perag_class, name, \
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount, \
		unsigned long caller_ip), \
	TP_ARGS(mp, agno, refcount, caller_ip))
DEFINE_PERAG_REF_EVENT(xfs_perag_get);
DEFINE_PERAG_REF_EVENT(xfs_perag_put);

TRACE_EVENT(xfs_attr_list_node_descend,
	TP_PROTO(struct xfs_attr_list_context *ctx,
		struct xfs_da_node_entry *btree),
	TP_ARGS(ctx, btree),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(u32, hashval)
		__field(u32, blkno)
		__field(u32, offset)
		__field(void *, alist)
		__field(int, bufsize)
		__field(int, count)
		__field(int, firstu)
		__field(int, dupcnt)
		__field(int, flags)
		__field(u32, bt_hashval)
		__field(u32, bt_before)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
		__entry->ino = ctx->dp->i_ino;
		__entry->hashval = ctx->cursor->hashval;
		__entry->blkno = ctx->cursor->blkno;
		__entry->offset = ctx->cursor->offset;
		__entry->alist = ctx->alist;
		__entry->bufsize = ctx->bufsize;
		__entry->count = ctx->count;
		__entry->firstu = ctx->firstu;
		__entry->flags = ctx->flags;
		__entry->bt_hashval = be32_to_cpu(btree->hashval);
		__entry->bt_before = be32_to_cpu(btree->before);
	),
	TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
		"alist 0x%p size %u count %u firstu %u flags %d %s "
		"node hashval %u, node before %u",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__entry->ino,
		__entry->hashval,
		__entry->blkno,
		__entry->offset,
		__entry->dupcnt,
		__entry->alist,
		__entry->bufsize,
		__entry->count,
		__entry->firstu,
		__entry->flags,
		__print_flags(__entry->flags, "|", XFS_ATTR_FLAGS),
		__entry->bt_hashval,
		__entry->bt_before)
);

TRACE_EVENT(xfs_iext_insert,
	TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx,
		struct xfs_bmbt_irec *r, int state, unsigned long caller_ip),
	TP_ARGS(ip, idx, r, state, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_extnum_t, idx)
		__field(xfs_fileoff_t, startoff)
		__field(xfs_fsblock_t, startblock)
		__field(xfs_filblks_t, blockcount)
		__field(xfs_exntst_t, state)
		__field(int, bmap_state)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->idx = idx;
		__entry->startoff = r->br_startoff;
		__entry->startblock = r->br_startblock;
		__entry->blockcount = r->br_blockcount;
		__entry->state = r->br_state;
		__entry->bmap_state = state;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d ino 0x%llx state %s idx %ld "
		"offset %lld block %lld count %lld flag %d caller %pf",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__entry->ino,
		__print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
		(long)__entry->idx,
		__entry->startoff,
		(__int64_t)__entry->startblock,
		__entry->blockcount,
		__entry->state,
		(char *)__entry->caller_ip)
);

DECLARE_EVENT_CLASS(xfs_bmap_class,
	TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state,
		unsigned long caller_ip),
	TP_ARGS(ip, idx, state, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_extnum_t, idx)
		__field(xfs_fileoff_t, startoff)
		__field(xfs_fsblock_t, startblock)
		__field(xfs_filblks_t, blockcount)
		__field(xfs_exntst_t, state)
		__field(int, bmap_state)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		struct xfs_ifork *ifp = (state & BMAP_ATTRFORK) ?
					ip->i_afp : &ip->i_df;
		struct xfs_bmbt_irec r;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &r);
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->idx = idx;
		__entry->startoff = r.br_startoff;
		__entry->startblock = r.br_startblock;
		__entry->blockcount = r.br_blockcount;
		__entry->state = r.br_state;
		__entry->bmap_state = state;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d ino 0x%llx state %s idx %ld "
		"offset %lld block %lld count %lld flag %d caller %pf",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__entry->ino,
		__print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
		(long)__entry->idx,
		__entry->startoff,
		(__int64_t)__entry->startblock,
		__entry->blockcount,
		__entry->state,
		(char *)__entry->caller_ip)
)

#define DEFINE_BMAP_EVENT(name) \
DEFINE_EVENT(xfs_bmap_class, name, \
	TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state, \
		unsigned long caller_ip), \
	TP_ARGS(ip, idx, state, caller_ip))
DEFINE_BMAP_EVENT(xfs_iext_remove);
DEFINE_BMAP_EVENT(xfs_bmap_pre_update);
DEFINE_BMAP_EVENT(xfs_bmap_post_update);
DEFINE_BMAP_EVENT(xfs_extlist);

DECLARE_EVENT_CLASS(xfs_buf_class,
	TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip),
	TP_ARGS(bp, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_daddr_t, bno)
		__field(size_t, buffer_length)
		__field(int, hold)
		__field(int, pincount)
		__field(unsigned, lockval)
		__field(unsigned, flags)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = bp->b_target->bt_dev;
		__entry->bno = bp->b_bn;
		__entry->buffer_length = bp->b_buffer_length;
		__entry->hold = atomic_read(&bp->b_hold);
		__entry->pincount = atomic_read(&bp->b_pin_count);
		__entry->lockval = xfs_buf_lock_value(bp);
		__entry->flags = bp->b_flags;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
		"lock %d flags %s caller %pf",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		(unsigned long long)__entry->bno,
		__entry->buffer_length,
		__entry->hold,
		__entry->pincount,
		__entry->lockval,
		__print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
		(void *)__entry->caller_ip)
)

#define DEFINE_BUF_EVENT(name) \
DEFINE_EVENT(xfs_buf_class, name, \
	TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \
	TP_ARGS(bp, caller_ip))
DEFINE_BUF_EVENT(xfs_buf_init);
DEFINE_BUF_EVENT(xfs_buf_free);
DEFINE_BUF_EVENT(xfs_buf_hold);
DEFINE_BUF_EVENT(xfs_buf_rele);
DEFINE_BUF_EVENT(xfs_buf_pin);
DEFINE_BUF_EVENT(xfs_buf_unpin);
DEFINE_BUF_EVENT(xfs_buf_iodone);
DEFINE_BUF_EVENT(xfs_buf_iorequest);
DEFINE_BUF_EVENT(xfs_buf_bawrite);
DEFINE_BUF_EVENT(xfs_buf_bdwrite);
DEFINE_BUF_EVENT(xfs_buf_lock);
DEFINE_BUF_EVENT(xfs_buf_lock_done);
DEFINE_BUF_EVENT(xfs_buf_cond_lock);
DEFINE_BUF_EVENT(xfs_buf_unlock);
DEFINE_BUF_EVENT(xfs_buf_ordered_retry);
DEFINE_BUF_EVENT(xfs_buf_iowait);
DEFINE_BUF_EVENT(xfs_buf_iowait_done);
DEFINE_BUF_EVENT(xfs_buf_delwri_queue);
DEFINE_BUF_EVENT(xfs_buf_delwri_dequeue);
DEFINE_BUF_EVENT(xfs_buf_delwri_split);
DEFINE_BUF_EVENT(xfs_buf_get_noaddr);
DEFINE_BUF_EVENT(xfs_bdstrat_shut);
DEFINE_BUF_EVENT(xfs_buf_item_relse);
DEFINE_BUF_EVENT(xfs_buf_item_iodone);
DEFINE_BUF_EVENT(xfs_buf_item_iodone_async);
DEFINE_BUF_EVENT(xfs_buf_error_relse);
DEFINE_BUF_EVENT(xfs_trans_read_buf_io);
DEFINE_BUF_EVENT(xfs_trans_read_buf_shut);

/* not really buffer traces, but the buf provides useful information */
DEFINE_BUF_EVENT(xfs_btree_corrupt);
DEFINE_BUF_EVENT(xfs_da_btree_corrupt);
DEFINE_BUF_EVENT(xfs_reset_dqcounts);
DEFINE_BUF_EVENT(xfs_inode_item_push);
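/*
 * The caller_ip argument threaded through many of these events is
 * printed with "%pf", which resolves the saved address to a function
 * name.  Call sites are expected to pass _RET_IP_ (or _THIS_IP_); a
 * sketch of how a buffer helper might fire the hold event (illustrative
 * only, not a call site defined in this header):
 *
 *	trace_xfs_buf_hold(bp, _RET_IP_);
 */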
/* pass flags explicitly */
DECLARE_EVENT_CLASS(xfs_buf_flags_class,
	TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip),
	TP_ARGS(bp, flags, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_daddr_t, bno)
		__field(size_t, buffer_length)
		__field(int, hold)
		__field(int, pincount)
		__field(unsigned, lockval)
		__field(unsigned, flags)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = bp->b_target->bt_dev;
		__entry->bno = bp->b_bn;
		__entry->buffer_length = bp->b_buffer_length;
		__entry->flags = flags;
		__entry->hold = atomic_read(&bp->b_hold);
		__entry->pincount = atomic_read(&bp->b_pin_count);
		__entry->lockval = xfs_buf_lock_value(bp);
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
		"lock %d flags %s caller %pf",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		(unsigned long long)__entry->bno,
		__entry->buffer_length,
		__entry->hold,
		__entry->pincount,
		__entry->lockval,
		__print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
		(void *)__entry->caller_ip)
)

#define DEFINE_BUF_FLAGS_EVENT(name) \
DEFINE_EVENT(xfs_buf_flags_class, name, \
	TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \
	TP_ARGS(bp, flags, caller_ip))
DEFINE_BUF_FLAGS_EVENT(xfs_buf_find);
DEFINE_BUF_FLAGS_EVENT(xfs_buf_get);
DEFINE_BUF_FLAGS_EVENT(xfs_buf_read);

TRACE_EVENT(xfs_buf_ioerror,
	TP_PROTO(struct xfs_buf *bp, int error, unsigned long caller_ip),
	TP_ARGS(bp, error, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_daddr_t, bno)
		__field(size_t, buffer_length)
		__field(unsigned, flags)
		__field(int, hold)
		__field(int, pincount)
		__field(unsigned, lockval)
		__field(int, error)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = bp->b_target->bt_dev;
		__entry->bno = bp->b_bn;
		__entry->buffer_length = bp->b_buffer_length;
		__entry->hold = atomic_read(&bp->b_hold);
		__entry->pincount = atomic_read(&bp->b_pin_count);
		__entry->lockval = xfs_buf_lock_value(bp);
		__entry->error = error;
		__entry->flags = bp->b_flags;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
		"lock %d error %d flags %s caller %pf",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		(unsigned long long)__entry->bno,
		__entry->buffer_length,
		__entry->hold,
		__entry->pincount,
		__entry->lockval,
		__entry->error,
		__print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
		(void *)__entry->caller_ip)
);

DECLARE_EVENT_CLASS(xfs_buf_item_class,
	TP_PROTO(struct xfs_buf_log_item *bip),
	TP_ARGS(bip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_daddr_t, buf_bno)
		__field(size_t, buf_len)
		__field(int, buf_hold)
		__field(int, buf_pincount)
		__field(int, buf_lockval)
		__field(unsigned, buf_flags)
		__field(unsigned, bli_recur)
		__field(int, bli_refcount)
		__field(unsigned, bli_flags)
		__field(void *, li_desc)
		__field(unsigned, li_flags)
	),
	TP_fast_assign(
		__entry->dev = bip->bli_buf->b_target->bt_dev;
		__entry->bli_flags = bip->bli_flags;
		__entry->bli_recur = bip->bli_recur;
		__entry->bli_refcount = atomic_read(&bip->bli_refcount);
		__entry->buf_bno = bip->bli_buf->b_bn;
		__entry->buf_len = bip->bli_buf->b_buffer_length;
		__entry->buf_flags = bip->bli_buf->b_flags;
		__entry->buf_hold = atomic_read(&bip->bli_buf->b_hold);
		__entry->buf_pincount = atomic_read(&bip->bli_buf->b_pin_count);
		__entry->buf_lockval = xfs_buf_lock_value(bip->bli_buf);
		__entry->li_desc = bip->bli_item.li_desc;
		__entry->li_flags = bip->bli_item.li_flags;
	),
	TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
		"lock %d flags %s recur %d refcount %d bliflags %s "
		"lidesc 0x%p liflags %s",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		(unsigned long long)__entry->buf_bno,
		__entry->buf_len,
		__entry->buf_hold,
		__entry->buf_pincount,
		__entry->buf_lockval,
		__print_flags(__entry->buf_flags, "|", XFS_BUF_FLAGS),
		__entry->bli_recur,
		__entry->bli_refcount,
		__print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS),
		__entry->li_desc,
		__print_flags(__entry->li_flags, "|", XFS_LI_FLAGS))
)

#define DEFINE_BUF_ITEM_EVENT(name) \
DEFINE_EVENT(xfs_buf_item_class, name, \
	TP_PROTO(struct xfs_buf_log_item *bip), \
	TP_ARGS(bip))
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_trylock);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pushbuf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf_recur);
DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb);
DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb_recur);
DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf_recur);
DEFINE_BUF_ITEM_EVENT(xfs_trans_log_buf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_brelse);
DEFINE_BUF_ITEM_EVENT(xfs_trans_bjoin);
DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold);
DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release);
DEFINE_BUF_ITEM_EVENT(xfs_trans_binval);

DECLARE_EVENT_CLASS(xfs_lock_class,
	TP_PROTO(struct xfs_inode *ip, unsigned lock_flags,
		unsigned long caller_ip),
	TP_ARGS(ip, lock_flags, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(int, lock_flags)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->lock_flags = lock_flags;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d ino 0x%llx flags %s caller %pf",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__entry->ino,
		__print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS),
		(void *)__entry->caller_ip)
)

#define DEFINE_LOCK_EVENT(name) \
DEFINE_EVENT(xfs_lock_class, name, \
	TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, \
		unsigned long caller_ip), \
	TP_ARGS(ip, lock_flags, caller_ip))
DEFINE_LOCK_EVENT(xfs_ilock);
DEFINE_LOCK_EVENT(xfs_ilock_nowait);
DEFINE_LOCK_EVENT(xfs_ilock_demote);
DEFINE_LOCK_EVENT(xfs_iunlock);

DECLARE_EVENT_CLASS(xfs_iget_class,
	TP_PROTO(struct xfs_inode *ip),
	TP_ARGS(ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
	),
	TP_printk("dev %d:%d ino 0x%llx",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__entry->ino)
)

#define DEFINE_IGET_EVENT(name) \
DEFINE_EVENT(xfs_iget_class, name, \
	TP_PROTO(struct xfs_inode *ip), \
	TP_ARGS(ip))
DEFINE_IGET_EVENT(xfs_iget_skip);
DEFINE_IGET_EVENT(xfs_iget_reclaim);
DEFINE_IGET_EVENT(xfs_iget_found);
DEFINE_IGET_EVENT(xfs_iget_alloc);

DECLARE_EVENT_CLASS(xfs_inode_class,
	TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip),
	TP_ARGS(ip, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(int, count)
		__field(int, pincount)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->count = atomic_read(&VFS_I(ip)->i_count);
		__entry->pincount = atomic_read(&ip->i_pincount);
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d ino 0x%llx count %d pincount %d caller %pf",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__entry->ino,
		__entry->count,
		__entry->pincount,
		(char *)__entry->caller_ip)
)

#define DEFINE_INODE_EVENT(name) \
DEFINE_EVENT(xfs_inode_class, name, \
	TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \
	TP_ARGS(ip, caller_ip))
DEFINE_INODE_EVENT(xfs_ihold);
DEFINE_INODE_EVENT(xfs_irele);
DEFINE_INODE_EVENT(xfs_inode_pin);
DEFINE_INODE_EVENT(xfs_inode_unpin);
DEFINE_INODE_EVENT(xfs_inode_unpin_nowait);

/* the old xfs_itrace_entry tracer - to be replaced by s.th. in the VFS */
DEFINE_INODE_EVENT(xfs_inode);
#define xfs_itrace_entry(ip) \
	trace_xfs_inode(ip, _THIS_IP_)

DECLARE_EVENT_CLASS(xfs_dquot_class,
	TP_PROTO(struct xfs_dquot *dqp),
	TP_ARGS(dqp),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u32, id)
		__field(unsigned, flags)
		__field(unsigned, nrefs)
		__field(unsigned long long, res_bcount)
		__field(unsigned long long, bcount)
		__field(unsigned long long, icount)
		__field(unsigned long long, blk_hardlimit)
		__field(unsigned long long, blk_softlimit)
		__field(unsigned long long, ino_hardlimit)
		__field(unsigned long long, ino_softlimit)
	), \
	TP_fast_assign(
		__entry->dev = dqp->q_mount->m_super->s_dev;
		__entry->id = be32_to_cpu(dqp->q_core.d_id);
		__entry->flags = dqp->dq_flags;
		__entry->nrefs = dqp->q_nrefs;
		__entry->res_bcount = dqp->q_res_bcount;
		__entry->bcount = be64_to_cpu(dqp->q_core.d_bcount);
		__entry->icount = be64_to_cpu(dqp->q_core.d_icount);
		__entry->blk_hardlimit =
			be64_to_cpu(dqp->q_core.d_blk_hardlimit);
		__entry->blk_softlimit =
			be64_to_cpu(dqp->q_core.d_blk_softlimit);
		__entry->ino_hardlimit =
			be64_to_cpu(dqp->q_core.d_ino_hardlimit);
		__entry->ino_softlimit =
			be64_to_cpu(dqp->q_core.d_ino_softlimit);
	),
	TP_printk("dev %d:%d id 0x%x flags %s nrefs %u res_bc 0x%llx "
		"bcnt 0x%llx bhardlimit 0x%llx bsoftlimit 0x%llx "
		"icnt 0x%llx ihardlimit 0x%llx isoftlimit 0x%llx]",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__entry->id,
		__print_flags(__entry->flags, "|", XFS_DQ_FLAGS),
		__entry->nrefs,
		__entry->res_bcount,
		__entry->bcount,
		__entry->blk_hardlimit,
		__entry->blk_softlimit,
		__entry->icount,
		__entry->ino_hardlimit,
		__entry->ino_softlimit)
)

#define DEFINE_DQUOT_EVENT(name) \
DEFINE_EVENT(xfs_dquot_class, name, \
	TP_PROTO(struct xfs_dquot *dqp), \
	TP_ARGS(dqp))
DEFINE_DQUOT_EVENT(xfs_dqadjust);
DEFINE_DQUOT_EVENT(xfs_dqreclaim_want);
DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty);
DEFINE_DQUOT_EVENT(xfs_dqreclaim_unlink);
DEFINE_DQUOT_EVENT(xfs_dqattach_found);
DEFINE_DQUOT_EVENT(xfs_dqattach_get);
DEFINE_DQUOT_EVENT(xfs_dqinit);
DEFINE_DQUOT_EVENT(xfs_dqreuse);
DEFINE_DQUOT_EVENT(xfs_dqalloc);
DEFINE_DQUOT_EVENT(xfs_dqtobp_read);
DEFINE_DQUOT_EVENT(xfs_dqread);
DEFINE_DQUOT_EVENT(xfs_dqread_fail);
DEFINE_DQUOT_EVENT(xfs_dqlookup_found);
DEFINE_DQUOT_EVENT(xfs_dqlookup_want);
DEFINE_DQUOT_EVENT(xfs_dqlookup_freelist);
DEFINE_DQUOT_EVENT(xfs_dqlookup_done);
DEFINE_DQUOT_EVENT(xfs_dqget_hit);
DEFINE_DQUOT_EVENT(xfs_dqget_miss);
DEFINE_DQUOT_EVENT(xfs_dqput);
DEFINE_DQUOT_EVENT(xfs_dqput_wait);
DEFINE_DQUOT_EVENT(xfs_dqput_free);
DEFINE_DQUOT_EVENT(xfs_dqrele);
DEFINE_DQUOT_EVENT(xfs_dqflush);
DEFINE_DQUOT_EVENT(xfs_dqflush_force);
DEFINE_DQUOT_EVENT(xfs_dqflush_done);
/* not really iget events, but we re-use the format */
DEFINE_IGET_EVENT(xfs_dquot_dqalloc);
DEFINE_IGET_EVENT(xfs_dquot_dqdetach);

DECLARE_EVENT_CLASS(xfs_loggrant_class,
	TP_PROTO(struct log *log, struct xlog_ticket *tic),
	TP_ARGS(log, tic),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned, trans_type)
		__field(char, ocnt)
		__field(char, cnt)
		__field(int, curr_res)
		__field(int, unit_res)
		__field(unsigned int, flags)
		__field(void *, reserve_headq)
		__field(void *, write_headq)
		__field(int, grant_reserve_cycle)
		__field(int, grant_reserve_bytes)
		__field(int, grant_write_cycle)
		__field(int, grant_write_bytes)
		__field(int, curr_cycle)
		__field(int, curr_block)
		__field(xfs_lsn_t, tail_lsn)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		__entry->trans_type = tic->t_trans_type;
		__entry->ocnt = tic->t_ocnt;
		__entry->cnt = tic->t_cnt;
		__entry->curr_res = tic->t_curr_res;
		__entry->unit_res = tic->t_unit_res;
		__entry->flags = tic->t_flags;
		__entry->reserve_headq = log->l_reserve_headq;
		__entry->write_headq = log->l_write_headq;
		__entry->grant_reserve_cycle = log->l_grant_reserve_cycle;
		__entry->grant_reserve_bytes = log->l_grant_reserve_bytes;
		__entry->grant_write_cycle = log->l_grant_write_cycle;
		__entry->grant_write_bytes = log->l_grant_write_bytes;
		__entry->curr_cycle = log->l_curr_cycle;
		__entry->curr_block = log->l_curr_block;
		__entry->tail_lsn = log->l_tail_lsn;
	),
	TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u "
		"t_unit_res %u t_flags %s reserve_headq 0x%p "
		"write_headq 0x%p grant_reserve_cycle %d "
		"grant_reserve_bytes %d grant_write_cycle %d "
		"grant_write_bytes %d curr_cycle %d curr_block %d "
		"tail_cycle %d tail_block %d",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__print_symbolic(__entry->trans_type, XFS_TRANS_TYPES),
		__entry->ocnt,
		__entry->cnt,
		__entry->curr_res,
		__entry->unit_res,
		__print_flags(__entry->flags, "|", XLOG_TIC_FLAGS),
		__entry->reserve_headq,
		__entry->write_headq,
		__entry->grant_reserve_cycle,
		__entry->grant_reserve_bytes,
		__entry->grant_write_cycle,
		__entry->grant_write_bytes,
		__entry->curr_cycle,
		__entry->curr_block,
		CYCLE_LSN(__entry->tail_lsn),
		BLOCK_LSN(__entry->tail_lsn)
	)
)

#define DEFINE_LOGGRANT_EVENT(name) \
DEFINE_EVENT(xfs_loggrant_class, name, \
	TP_PROTO(struct log *log, struct xlog_ticket *tic), \
	TP_ARGS(log, tic))
DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm);
DEFINE_LOGGRANT_EVENT(xfs_log_done_perm);
DEFINE_LOGGRANT_EVENT(xfs_log_reserve);
DEFINE_LOGGRANT_EVENT(xfs_log_umount_write);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_error);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_sub);
DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_sub);

DECLARE_EVENT_CLASS(xfs_file_class,
	TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags),
	TP_ARGS(ip, count, offset, flags),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_fsize_t, size)
		__field(xfs_fsize_t, new_size)
		__field(loff_t, offset)
		__field(size_t, count)
		__field(int, flags)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->size = ip->i_d.di_size;
		__entry->new_size = ip->i_new_size;
		__entry->offset = offset;
		__entry->count = count;
		__entry->flags = flags;
	),
	TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx "
		"offset 0x%llx count 0x%zx ioflags %s",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__entry->ino,
		__entry->size,
		__entry->new_size,
		__entry->offset,
		__entry->count,
		__print_flags(__entry->flags, "|", XFS_IO_FLAGS))
)

#define DEFINE_RW_EVENT(name) \
DEFINE_EVENT(xfs_file_class, name, \
	TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags), \
	TP_ARGS(ip, count, offset, flags))
DEFINE_RW_EVENT(xfs_file_read);
DEFINE_RW_EVENT(xfs_file_buffered_write);
DEFINE_RW_EVENT(xfs_file_direct_write);
DEFINE_RW_EVENT(xfs_file_splice_read);
DEFINE_RW_EVENT(xfs_file_splice_write);

DECLARE_EVENT_CLASS(xfs_page_class,
	TP_PROTO(struct inode *inode, struct page *page, unsigned long off),
	TP_ARGS(inode, page, off),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(pgoff_t, pgoff)
		__field(loff_t, size)
		__field(unsigned long, offset)
		__field(int, delalloc)
		__field(int, unmapped)
		__field(int, unwritten)
	),
	TP_fast_assign(
		int delalloc = -1, unmapped = -1, unwritten = -1;
		if (page_has_buffers(page))
			xfs_count_page_state(page, &delalloc,
					     &unmapped, &unwritten);
		__entry->dev = inode->i_sb->s_dev;
		__entry->ino = XFS_I(inode)->i_ino;
		__entry->pgoff = page_offset(page);
		__entry->size = i_size_read(inode);
		__entry->offset = off;
		__entry->delalloc = delalloc;
		__entry->unmapped = unmapped;
		__entry->unwritten = unwritten;
	),
	TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx "
		"delalloc %d unmapped %d unwritten %d",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__entry->ino,
		__entry->pgoff,
		__entry->size,
		__entry->offset,
		__entry->delalloc,
		__entry->unmapped,
		__entry->unwritten)
)

#define DEFINE_PAGE_EVENT(name) \
DEFINE_EVENT(xfs_page_class, name, \
	TP_PROTO(struct inode *inode, struct page *page, unsigned long off), \
	TP_ARGS(inode, page, off))
DEFINE_PAGE_EVENT(xfs_writepage);
DEFINE_PAGE_EVENT(xfs_releasepage);
DEFINE_PAGE_EVENT(xfs_invalidatepage);

DECLARE_EVENT_CLASS(xfs_iomap_class,
	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
		int flags, struct xfs_bmbt_irec *irec),
	TP_ARGS(ip, offset, count, flags, irec),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(loff_t, size)
		__field(loff_t, new_size)
		__field(loff_t, offset)
		__field(size_t, count)
		__field(int, flags)
		__field(xfs_fileoff_t, startoff)
		__field(xfs_fsblock_t, startblock)
		__field(xfs_filblks_t, blockcount)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->size = ip->i_d.di_size;
		__entry->new_size = ip->i_new_size;
		__entry->offset = offset;
		__entry->count = count;
		__entry->flags = flags;
		__entry->startoff = irec ? irec->br_startoff : 0;
		__entry->startblock = irec ? irec->br_startblock : 0;
		__entry->blockcount = irec ? irec->br_blockcount : 0;
	),
	TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx "
		"offset 0x%llx count %zd flags %s "
		"startoff 0x%llx startblock %lld blockcount 0x%llx",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__entry->ino,
		__entry->size,
		__entry->new_size,
		__entry->offset,
		__entry->count,
		__print_flags(__entry->flags, "|", BMAPI_FLAGS),
		__entry->startoff,
		(__int64_t)__entry->startblock,
		__entry->blockcount)
)

#define DEFINE_IOMAP_EVENT(name) \
DEFINE_EVENT(xfs_iomap_class, name, \
	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \
		int flags, struct xfs_bmbt_irec *irec), \
	TP_ARGS(ip, offset, count, flags, irec))
DEFINE_IOMAP_EVENT(xfs_iomap_enter);
DEFINE_IOMAP_EVENT(xfs_iomap_found);
DEFINE_IOMAP_EVENT(xfs_iomap_alloc);

DECLARE_EVENT_CLASS(xfs_simple_io_class,
	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
	TP_ARGS(ip, offset, count),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(loff_t, size)
		__field(loff_t, new_size)
		__field(loff_t, offset)
		__field(size_t, count)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->size = ip->i_d.di_size;
		__entry->new_size = ip->i_new_size;
		__entry->offset = offset;
		__entry->count = count;
	),
	TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx "
		"offset 0x%llx count %zd",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__entry->ino,
		__entry->size,
		__entry->new_size,
		__entry->offset,
		__entry->count)
);

#define DEFINE_SIMPLE_IO_EVENT(name) \
DEFINE_EVENT(xfs_simple_io_class, name, \
	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), \
	TP_ARGS(ip, offset, count))
DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);

TRACE_EVENT(xfs_itruncate_start,
	TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size, int flag,
		xfs_off_t toss_start, xfs_off_t toss_finish),
	TP_ARGS(ip, new_size, flag, toss_start, toss_finish),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_fsize_t, size)
		__field(xfs_fsize_t, new_size)
		__field(xfs_off_t, toss_start)
		__field(xfs_off_t, toss_finish)
		__field(int, flag)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->size = ip->i_d.di_size;
		__entry->new_size = new_size;
		__entry->toss_start = toss_start;
		__entry->toss_finish = toss_finish;
		__entry->flag = flag;
	),
	TP_printk("dev %d:%d ino 0x%llx %s size 0x%llx new_size 0x%llx "
		"toss start 0x%llx toss finish 0x%llx",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__entry->ino,
		__print_flags(__entry->flag, "|", XFS_ITRUNC_FLAGS),
		__entry->size,
		__entry->new_size,
		__entry->toss_start,
		__entry->toss_finish)
);

DECLARE_EVENT_CLASS(xfs_itrunc_class,
	TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size),
	TP_ARGS(ip, new_size),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_fsize_t, size)
		__field(xfs_fsize_t, new_size)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->size = ip->i_d.di_size;
		__entry->new_size = new_size;
	),
	TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__entry->ino,
		__entry->size,
		__entry->new_size)
)

#define DEFINE_ITRUNC_EVENT(name) \
DEFINE_EVENT(xfs_itrunc_class, name, \
	TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \
	TP_ARGS(ip, new_size))
DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_start);
DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_end);

TRACE_EVENT(xfs_pagecache_inval,
	TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish),
	TP_ARGS(ip, start, finish),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_fsize_t, size)
		__field(xfs_off_t, start)
		__field(xfs_off_t, finish)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->size = ip->i_d.di_size;
		__entry->start = start;
		__entry->finish = finish;
	),
	TP_printk("dev %d:%d ino 0x%llx size 0x%llx start 0x%llx finish 0x%llx",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__entry->ino,
		__entry->size,
		__entry->start,
		__entry->finish)
);

TRACE_EVENT(xfs_bunmap,
	TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t bno, xfs_filblks_t len,
		int flags, unsigned long caller_ip),
	TP_ARGS(ip, bno, len, flags, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_fsize_t, size)
		__field(xfs_fileoff_t, bno)
		__field(xfs_filblks_t, len)
		__field(unsigned long, caller_ip)
		__field(int, flags)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->size = ip->i_d.di_size;
		__entry->bno = bno;
		__entry->len = len;
		__entry->caller_ip = caller_ip;
		__entry->flags = flags;
	),
	TP_printk("dev %d:%d ino 0x%llx size 0x%llx bno 0x%llx len 0x%llx"
		"flags %s caller %pf",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__entry->ino,
		__entry->size,
		__entry->bno,
		__entry->len,
		__print_flags(__entry->flags, "|", XFS_BMAPI_FLAGS),
		(void *)__entry->caller_ip)
);

#define XFS_BUSY_SYNC \
	{ 0, "async" }, \
	{ 1, "sync" }
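/*
 * Tables like XFS_BUSY_SYNC feed __print_symbolic(), which maps the raw
 * integer recorded in the ring buffer to a human-readable string when
 * the trace output is formatted; e.g. __print_symbolic(__entry->sync,
 * XFS_BUSY_SYNC) below renders 0 as "async" and 1 as "sync".
 */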
  1037. TRACE_EVENT(xfs_alloc_busy,
  1038. TP_PROTO(struct xfs_trans *trans, xfs_agnumber_t agno,
  1039. xfs_agblock_t agbno, xfs_extlen_t len, int sync),
  1040. TP_ARGS(trans, agno, agbno, len, sync),
  1041. TP_STRUCT__entry(
  1042. __field(dev_t, dev)
  1043. __field(struct xfs_trans *, tp)
  1044. __field(int, tid)
  1045. __field(xfs_agnumber_t, agno)
  1046. __field(xfs_agblock_t, agbno)
  1047. __field(xfs_extlen_t, len)
  1048. __field(int, sync)
  1049. ),
  1050. TP_fast_assign(
  1051. __entry->dev = trans->t_mountp->m_super->s_dev;
  1052. __entry->tp = trans;
  1053. __entry->tid = trans->t_ticket->t_tid;
  1054. __entry->agno = agno;
  1055. __entry->agbno = agbno;
  1056. __entry->len = len;
  1057. __entry->sync = sync;
  1058. ),
  1059. TP_printk("dev %d:%d trans 0x%p tid 0x%x agno %u agbno %u len %u %s",
  1060. MAJOR(__entry->dev), MINOR(__entry->dev),
  1061. __entry->tp,
  1062. __entry->tid,
  1063. __entry->agno,
  1064. __entry->agbno,
  1065. __entry->len,
  1066. __print_symbolic(__entry->sync, XFS_BUSY_SYNC))
  1067. );
  1068. TRACE_EVENT(xfs_alloc_unbusy,
  1069. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
  1070. xfs_agblock_t agbno, xfs_extlen_t len),
  1071. TP_ARGS(mp, agno, agbno, len),
  1072. TP_STRUCT__entry(
  1073. __field(dev_t, dev)
  1074. __field(xfs_agnumber_t, agno)
  1075. __field(xfs_agblock_t, agbno)
  1076. __field(xfs_extlen_t, len)
  1077. ),
  1078. TP_fast_assign(
  1079. __entry->dev = mp->m_super->s_dev;
  1080. __entry->agno = agno;
  1081. __entry->agbno = agbno;
  1082. __entry->len = len;
  1083. ),
  1084. TP_printk("dev %d:%d agno %u agbno %u len %u",
  1085. MAJOR(__entry->dev), MINOR(__entry->dev),
  1086. __entry->agno,
  1087. __entry->agbno,
  1088. __entry->len)
  1089. );
  1090. #define XFS_BUSY_STATES \
  1091. { 0, "missing" }, \
  1092. { 1, "found" }
  1093. TRACE_EVENT(xfs_alloc_busysearch,
  1094. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
  1095. xfs_agblock_t agbno, xfs_extlen_t len, int found),
  1096. TP_ARGS(mp, agno, agbno, len, found),
  1097. TP_STRUCT__entry(
  1098. __field(dev_t, dev)
  1099. __field(xfs_agnumber_t, agno)
  1100. __field(xfs_agblock_t, agbno)
  1101. __field(xfs_extlen_t, len)
  1102. __field(int, found)
  1103. ),
  1104. TP_fast_assign(
  1105. __entry->dev = mp->m_super->s_dev;
  1106. __entry->agno = agno;
  1107. __entry->agbno = agbno;
  1108. __entry->len = len;
  1109. __entry->found = found;
  1110. ),
  1111. TP_printk("dev %d:%d agno %u agbno %u len %u %s",
  1112. MAJOR(__entry->dev), MINOR(__entry->dev),
  1113. __entry->agno,
  1114. __entry->agbno,
  1115. __entry->len,
  1116. __print_symbolic(__entry->found, XFS_BUSY_STATES))
  1117. );
  1118. TRACE_EVENT(xfs_trans_commit_lsn,
  1119. TP_PROTO(struct xfs_trans *trans),
  1120. TP_ARGS(trans),
  1121. TP_STRUCT__entry(
  1122. __field(dev_t, dev)
  1123. __field(struct xfs_trans *, tp)
  1124. __field(xfs_lsn_t, lsn)
  1125. ),
  1126. TP_fast_assign(
  1127. __entry->dev = trans->t_mountp->m_super->s_dev;
  1128. __entry->tp = trans;
  1129. __entry->lsn = trans->t_commit_lsn;
  1130. ),
  1131. TP_printk("dev %d:%d trans 0x%p commit_lsn 0x%llx",
  1132. MAJOR(__entry->dev), MINOR(__entry->dev),
  1133. __entry->tp,
  1134. __entry->lsn)
  1135. );
  1136. TRACE_EVENT(xfs_agf,
  1137. TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags,
  1138. unsigned long caller_ip),
  1139. TP_ARGS(mp, agf, flags, caller_ip),
  1140. TP_STRUCT__entry(
  1141. __field(dev_t, dev)
  1142. __field(xfs_agnumber_t, agno)
  1143. __field(int, flags)
  1144. __field(__u32, length)
  1145. __field(__u32, bno_root)
  1146. __field(__u32, cnt_root)
  1147. __field(__u32, bno_level)
  1148. __field(__u32, cnt_level)
  1149. __field(__u32, flfirst)
  1150. __field(__u32, fllast)
  1151. __field(__u32, flcount)
  1152. __field(__u32, freeblks)
  1153. __field(__u32, longest)
  1154. __field(unsigned long, caller_ip)
  1155. ),
  1156. TP_fast_assign(
  1157. __entry->dev = mp->m_super->s_dev;
  1158. __entry->agno = be32_to_cpu(agf->agf_seqno),
  1159. __entry->flags = flags;
  1160. __entry->length = be32_to_cpu(agf->agf_length),
  1161. __entry->bno_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
  1162. __entry->cnt_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
  1163. __entry->bno_level =
  1164. be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]),
  1165. __entry->cnt_level =
  1166. be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]),
  1167. __entry->flfirst = be32_to_cpu(agf->agf_flfirst),
  1168. __entry->fllast = be32_to_cpu(agf->agf_fllast),
  1169. __entry->flcount = be32_to_cpu(agf->agf_flcount),
  1170. __entry->freeblks = be32_to_cpu(agf->agf_freeblks),
  1171. __entry->longest = be32_to_cpu(agf->agf_longest);
  1172. __entry->caller_ip = caller_ip;
  1173. ),
  1174. TP_printk("dev %d:%d agno %u flags %s length %u roots b %u c %u "
  1175. "levels b %u c %u flfirst %u fllast %u flcount %u "
  1176. "freeblks %u longest %u caller %pf",
  1177. MAJOR(__entry->dev), MINOR(__entry->dev),
  1178. __entry->agno,
  1179. __print_flags(__entry->flags, "|", XFS_AGF_FLAGS),
  1180. __entry->length,
  1181. __entry->bno_root,
  1182. __entry->cnt_root,
  1183. __entry->bno_level,
  1184. __entry->cnt_level,
  1185. __entry->flfirst,
  1186. __entry->fllast,
  1187. __entry->flcount,
  1188. __entry->freeblks,
  1189. __entry->longest,
  1190. (void *)__entry->caller_ip)
  1191. );
  1192. TRACE_EVENT(xfs_free_extent,
  1193. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
  1194. xfs_extlen_t len, bool isfl, int haveleft, int haveright),
  1195. TP_ARGS(mp, agno, agbno, len, isfl, haveleft, haveright),
  1196. TP_STRUCT__entry(
  1197. __field(dev_t, dev)
  1198. __field(xfs_agnumber_t, agno)
  1199. __field(xfs_agblock_t, agbno)
  1200. __field(xfs_extlen_t, len)
  1201. __field(int, isfl)
  1202. __field(int, haveleft)
  1203. __field(int, haveright)
  1204. ),
  1205. TP_fast_assign(
  1206. __entry->dev = mp->m_super->s_dev;
  1207. __entry->agno = agno;
  1208. __entry->agbno = agbno;
  1209. __entry->len = len;
  1210. __entry->isfl = isfl;
  1211. __entry->haveleft = haveleft;
  1212. __entry->haveright = haveright;
  1213. ),
  1214. TP_printk("dev %d:%d agno %u agbno %u len %u isfl %d %s",
  1215. MAJOR(__entry->dev), MINOR(__entry->dev),
  1216. __entry->agno,
  1217. __entry->agbno,
  1218. __entry->len,
  1219. __entry->isfl,
  1220. __entry->haveleft ?
  1221. (__entry->haveright ? "both" : "left") :
  1222. (__entry->haveright ? "right" : "none"))
  1223. );
  1224. DECLARE_EVENT_CLASS(xfs_alloc_class,
  1225. TP_PROTO(struct xfs_alloc_arg *args),
  1226. TP_ARGS(args),
  1227. TP_STRUCT__entry(
  1228. __field(dev_t, dev)
  1229. __field(xfs_agnumber_t, agno)
  1230. __field(xfs_agblock_t, agbno)
  1231. __field(xfs_extlen_t, minlen)
  1232. __field(xfs_extlen_t, maxlen)
  1233. __field(xfs_extlen_t, mod)
  1234. __field(xfs_extlen_t, prod)
  1235. __field(xfs_extlen_t, minleft)
  1236. __field(xfs_extlen_t, total)
  1237. __field(xfs_extlen_t, alignment)
  1238. __field(xfs_extlen_t, minalignslop)
  1239. __field(xfs_extlen_t, len)
  1240. __field(short, type)
  1241. __field(short, otype)
  1242. __field(char, wasdel)
  1243. __field(char, wasfromfl)
  1244. __field(char, isfl)
  1245. __field(char, userdata)
  1246. __field(xfs_fsblock_t, firstblock)
  1247. ),
  1248. TP_fast_assign(
  1249. __entry->dev = args->mp->m_super->s_dev;
  1250. __entry->agno = args->agno;
  1251. __entry->agbno = args->agbno;
  1252. __entry->minlen = args->minlen;
  1253. __entry->maxlen = args->maxlen;
  1254. __entry->mod = args->mod;
  1255. __entry->prod = args->prod;
  1256. __entry->minleft = args->minleft;
  1257. __entry->total = args->total;
  1258. __entry->alignment = args->alignment;
  1259. __entry->minalignslop = args->minalignslop;
  1260. __entry->len = args->len;
  1261. __entry->type = args->type;
  1262. __entry->otype = args->otype;
  1263. __entry->wasdel = args->wasdel;
  1264. __entry->wasfromfl = args->wasfromfl;
  1265. __entry->isfl = args->isfl;
  1266. __entry->userdata = args->userdata;
  1267. __entry->firstblock = args->firstblock;
  1268. ),
  1269. TP_printk("dev %d:%d agno %u agbno %u minlen %u maxlen %u mod %u "
  1270. "prod %u minleft %u total %u alignment %u minalignslop %u "
  1271. "len %u type %s otype %s wasdel %d wasfromfl %d isfl %d "
  1272. "userdata %d firstblock 0x%llx",
  1273. MAJOR(__entry->dev), MINOR(__entry->dev),
  1274. __entry->agno,
  1275. __entry->agbno,
  1276. __entry->minlen,
  1277. __entry->maxlen,
  1278. __entry->mod,
  1279. __entry->prod,
  1280. __entry->minleft,
  1281. __entry->total,
  1282. __entry->alignment,
  1283. __entry->minalignslop,
  1284. __entry->len,
  1285. __print_symbolic(__entry->type, XFS_ALLOC_TYPES),
  1286. __print_symbolic(__entry->otype, XFS_ALLOC_TYPES),
  1287. __entry->wasdel,
  1288. __entry->wasfromfl,
  1289. __entry->isfl,
  1290. __entry->userdata,
  1291. __entry->firstblock)
  1292. )
  1293. #define DEFINE_ALLOC_EVENT(name) \
  1294. DEFINE_EVENT(xfs_alloc_class, name, \
  1295. TP_PROTO(struct xfs_alloc_arg *args), \
  1296. TP_ARGS(args))
  1297. DEFINE_ALLOC_EVENT(xfs_alloc_exact_done);
  1298. DEFINE_ALLOC_EVENT(xfs_alloc_exact_error);
  1299. DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft);
  1300. DEFINE_ALLOC_EVENT(xfs_alloc_near_first);
  1301. DEFINE_ALLOC_EVENT(xfs_alloc_near_greater);
  1302. DEFINE_ALLOC_EVENT(xfs_alloc_near_lesser);
  1303. DEFINE_ALLOC_EVENT(xfs_alloc_near_error);
  1304. DEFINE_ALLOC_EVENT(xfs_alloc_size_neither);
  1305. DEFINE_ALLOC_EVENT(xfs_alloc_size_noentry);
  1306. DEFINE_ALLOC_EVENT(xfs_alloc_size_nominleft);
  1307. DEFINE_ALLOC_EVENT(xfs_alloc_size_done);
  1308. DEFINE_ALLOC_EVENT(xfs_alloc_size_error);
  1309. DEFINE_ALLOC_EVENT(xfs_alloc_small_freelist);
  1310. DEFINE_ALLOC_EVENT(xfs_alloc_small_notenough);
  1311. DEFINE_ALLOC_EVENT(xfs_alloc_small_done);
  1312. DEFINE_ALLOC_EVENT(xfs_alloc_small_error);
  1313. DEFINE_ALLOC_EVENT(xfs_alloc_vextent_badargs);
  1314. DEFINE_ALLOC_EVENT(xfs_alloc_vextent_nofix);
  1315. DEFINE_ALLOC_EVENT(xfs_alloc_vextent_noagbp);
  1316. DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed);
  1317. DEFINE_ALLOC_EVENT(xfs_alloc_vextent_allfailed);
DECLARE_EVENT_CLASS(xfs_dir2_class,
	TP_PROTO(struct xfs_da_args *args),
	TP_ARGS(args),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__dynamic_array(char, name, args->namelen)
		__field(int, namelen)
		__field(xfs_dahash_t, hashval)
		__field(xfs_ino_t, inumber)
		__field(int, op_flags)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(args->dp)->i_sb->s_dev;
		__entry->ino = args->dp->i_ino;
		if (args->namelen)
			memcpy(__get_str(name), args->name, args->namelen);
		__entry->namelen = args->namelen;
		__entry->hashval = args->hashval;
		__entry->inumber = args->inumber;
		__entry->op_flags = args->op_flags;
	),
	TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d hashval 0x%x "
		  "inumber 0x%llx op_flags %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->namelen,
		  __entry->namelen ? __get_str(name) : NULL,
		  __entry->namelen,
		  __entry->hashval,
		  __entry->inumber,
		  __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS))
)

#define DEFINE_DIR2_EVENT(name) \
DEFINE_EVENT(xfs_dir2_class, name, \
	TP_PROTO(struct xfs_da_args *args), \
	TP_ARGS(args))
DEFINE_DIR2_EVENT(xfs_dir2_sf_addname);
DEFINE_DIR2_EVENT(xfs_dir2_sf_create);
DEFINE_DIR2_EVENT(xfs_dir2_sf_lookup);
DEFINE_DIR2_EVENT(xfs_dir2_sf_replace);
DEFINE_DIR2_EVENT(xfs_dir2_sf_removename);
DEFINE_DIR2_EVENT(xfs_dir2_sf_toino4);
DEFINE_DIR2_EVENT(xfs_dir2_sf_toino8);
DEFINE_DIR2_EVENT(xfs_dir2_sf_to_block);
DEFINE_DIR2_EVENT(xfs_dir2_block_addname);
DEFINE_DIR2_EVENT(xfs_dir2_block_lookup);
DEFINE_DIR2_EVENT(xfs_dir2_block_replace);
DEFINE_DIR2_EVENT(xfs_dir2_block_removename);
DEFINE_DIR2_EVENT(xfs_dir2_block_to_sf);
DEFINE_DIR2_EVENT(xfs_dir2_block_to_leaf);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_addname);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_lookup);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_replace);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_removename);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_to_block);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_to_node);
DEFINE_DIR2_EVENT(xfs_dir2_node_addname);
DEFINE_DIR2_EVENT(xfs_dir2_node_lookup);
DEFINE_DIR2_EVENT(xfs_dir2_node_replace);
DEFINE_DIR2_EVENT(xfs_dir2_node_removename);
DEFINE_DIR2_EVENT(xfs_dir2_node_to_leaf);
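
/*
 * Directory space management events: record the inode, the directory
 * operation flags and a caller-supplied index (a leaf entry or
 * directory block index, depending on the call site).
 */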
DECLARE_EVENT_CLASS(xfs_dir2_space_class,
	TP_PROTO(struct xfs_da_args *args, int idx),
	TP_ARGS(args, idx),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(int, op_flags)
		__field(int, idx)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(args->dp)->i_sb->s_dev;
		__entry->ino = args->dp->i_ino;
		__entry->op_flags = args->op_flags;
		__entry->idx = idx;
	),
	TP_printk("dev %d:%d ino 0x%llx op_flags %s index %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS),
		  __entry->idx)
)

#define DEFINE_DIR2_SPACE_EVENT(name) \
DEFINE_EVENT(xfs_dir2_space_class, name, \
	TP_PROTO(struct xfs_da_args *args, int idx), \
	TP_ARGS(args, idx))
DEFINE_DIR2_SPACE_EVENT(xfs_dir2_leafn_add);
DEFINE_DIR2_SPACE_EVENT(xfs_dir2_leafn_remove);
DEFINE_DIR2_SPACE_EVENT(xfs_dir2_grow_inode);
DEFINE_DIR2_SPACE_EVENT(xfs_dir2_shrink_inode);
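
/*
 * Moving entries between directory leaf blocks: record the source and
 * destination entry indices and the number of entries moved.
 */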
TRACE_EVENT(xfs_dir2_leafn_moveents,
	TP_PROTO(struct xfs_da_args *args, int src_idx, int dst_idx, int count),
	TP_ARGS(args, src_idx, dst_idx, count),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(int, op_flags)
		__field(int, src_idx)
		__field(int, dst_idx)
		__field(int, count)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(args->dp)->i_sb->s_dev;
		__entry->ino = args->dp->i_ino;
		__entry->op_flags = args->op_flags;
		__entry->src_idx = src_idx;
		__entry->dst_idx = dst_idx;
		__entry->count = count;
	),
	TP_printk("dev %d:%d ino 0x%llx op_flags %s "
		  "src_idx %d dst_idx %d count %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS),
		  __entry->src_idx,
		  __entry->dst_idx,
		  __entry->count)
);
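
/*
 * Extent swap events: symbolic names for the two inodes involved and
 * for the inode fork formats, plus a class that dumps each inode's
 * data fork geometry before and after the swap.
 */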
#define XFS_SWAPEXT_INODES \
	{ 0, "target" }, \
	{ 1, "temp" }

#define XFS_INODE_FORMAT_STR \
	{ 0, "invalid" }, \
	{ 1, "local" }, \
	{ 2, "extent" }, \
	{ 3, "btree" }

DECLARE_EVENT_CLASS(xfs_swap_extent_class,
	TP_PROTO(struct xfs_inode *ip, int which),
	TP_ARGS(ip, which),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(int, which)
		__field(xfs_ino_t, ino)
		__field(int, format)
		__field(int, nex)
		__field(int, max_nex)
		__field(int, broot_size)
		__field(int, fork_off)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->which = which;
		__entry->ino = ip->i_ino;
		__entry->format = ip->i_d.di_format;
		__entry->nex = ip->i_d.di_nextents;
		__entry->max_nex = ip->i_df.if_ext_max;
		__entry->broot_size = ip->i_df.if_broot_bytes;
		__entry->fork_off = XFS_IFORK_BOFF(ip);
	),
	TP_printk("dev %d:%d ino 0x%llx (%s), %s format, num_extents %d, "
		  "Max in-fork extents %d, broot size %d, fork offset %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_symbolic(__entry->which, XFS_SWAPEXT_INODES),
		  __print_symbolic(__entry->format, XFS_INODE_FORMAT_STR),
		  __entry->nex,
		  __entry->max_nex,
		  __entry->broot_size,
		  __entry->fork_off)
)

#define DEFINE_SWAPEXT_EVENT(name) \
DEFINE_EVENT(xfs_swap_extent_class, name, \
	TP_PROTO(struct xfs_inode *ip, int which), \
	TP_ARGS(ip, which))
DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before);
DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after);
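
/*
 * Log recovery item events: emitted as log items are collected and
 * replayed, keyed by transaction id, recovery pass and item type.
 */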
DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
	TP_PROTO(struct log *log, struct xlog_recover *trans,
		 struct xlog_recover_item *item, int pass),
	TP_ARGS(log, trans, item, pass),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned long, item)
		__field(xlog_tid_t, tid)
		__field(int, type)
		__field(int, pass)
		__field(int, count)
		__field(int, total)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		__entry->item = (unsigned long)item;
		__entry->tid = trans->r_log_tid;
		__entry->type = ITEM_TYPE(item);
		__entry->pass = pass;
		__entry->count = item->ri_cnt;
		__entry->total = item->ri_total;
	),
	TP_printk("dev %d:%d trans 0x%x, pass %d, item 0x%p, item type %s "
		  "item region count/total %d/%d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->tid,
		  __entry->pass,
		  (void *)__entry->item,
		  __print_symbolic(__entry->type, XFS_LI_TYPE_DESC),
		  __entry->count,
		  __entry->total)
)

#define DEFINE_LOG_RECOVER_ITEM(name) \
DEFINE_EVENT(xfs_log_recover_item_class, name, \
	TP_PROTO(struct log *log, struct xlog_recover *trans, \
		 struct xlog_recover_item *item, int pass), \
	TP_ARGS(log, trans, item, pass))
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_add);
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_add_cont);
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_head);
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_tail);
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_recover);
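
/*
 * Recovery of buffer log items: record the buf log format contents
 * (block number, length, flags, size and map size) for each decision
 * made about a logged buffer during replay.
 */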
DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class,
	TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f),
	TP_ARGS(log, buf_f),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(__int64_t, blkno)
		__field(unsigned short, len)
		__field(unsigned short, flags)
		__field(unsigned short, size)
		__field(unsigned int, map_size)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		__entry->blkno = buf_f->blf_blkno;
		__entry->len = buf_f->blf_len;
		__entry->flags = buf_f->blf_flags;
		__entry->size = buf_f->blf_size;
		__entry->map_size = buf_f->blf_map_size;
	),
	TP_printk("dev %d:%d blkno 0x%llx, len %u, flags 0x%x, size %d, "
		  "map_size %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->blkno,
		  __entry->len,
		  __entry->flags,
		  __entry->size,
		  __entry->map_size)
)

#define DEFINE_LOG_RECOVER_BUF_ITEM(name) \
DEFINE_EVENT(xfs_log_recover_buf_item_class, name, \
	TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f), \
	TP_ARGS(log, buf_f))
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_not_cancel);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_add);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_ref_inc);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_recover);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_inode_buf);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_reg_buf);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf);
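
/*
 * Recovery of inode log items: record the inode log format contents
 * (inode number, region sizes and on-disk location) as each inode item
 * is recovered, cancelled or skipped.
 */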
DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class,
	TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f),
	TP_ARGS(log, in_f),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(unsigned short, size)
		__field(int, fields)
		__field(unsigned short, asize)
		__field(unsigned short, dsize)
		__field(__int64_t, blkno)
		__field(int, len)
		__field(int, boffset)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		__entry->ino = in_f->ilf_ino;
		__entry->size = in_f->ilf_size;
		__entry->fields = in_f->ilf_fields;
		__entry->asize = in_f->ilf_asize;
		__entry->dsize = in_f->ilf_dsize;
		__entry->blkno = in_f->ilf_blkno;
		__entry->len = in_f->ilf_len;
		__entry->boffset = in_f->ilf_boffset;
	),
	TP_printk("dev %d:%d ino 0x%llx, size %u, fields 0x%x, asize %d, "
		  "dsize %d, blkno 0x%llx, len %d, boffset %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->size,
		  __entry->fields,
		  __entry->asize,
		  __entry->dsize,
		  __entry->blkno,
		  __entry->len,
		  __entry->boffset)
)

#define DEFINE_LOG_RECOVER_INO_ITEM(name) \
DEFINE_EVENT(xfs_log_recover_ino_item_class, name, \
	TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f), \
	TP_ARGS(log, in_f))
DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover);
DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_cancel);
DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_skip);

#endif /* _TRACE_XFS_H */
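
/*
 * This part must stay outside the multiple-inclusion guard:
 * define_trace.h re-includes this header (named by TRACE_INCLUDE_FILE
 * and found via TRACE_INCLUDE_PATH) to expand the tracepoint code.
 */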
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE xfs_trace
#include <trace/define_trace.h>