/* xen.h - Xen tracepoint definitions (TRACE_SYSTEM xen) */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM xen

#if !defined(_TRACE_XEN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_XEN_H

#include <linux/tracepoint.h>
#include <asm/paravirt_types.h>
#include <asm/xen/trace_types.h>
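
/*
 * Background on the macros used below (see include/linux/tracepoint.h):
 * each TRACE_EVENT(foo, ...) or DEFINE_EVENT(class, foo, ...) expands into
 * a trace_foo() helper that the Xen paravirt code calls at the
 * instrumentation point, plus the ftrace plumbing that records the
 * TP_STRUCT__entry fields and formats them with TP_printk.
 * DECLARE_EVENT_CLASS() factors out a record layout and print format that
 * several DEFINE_EVENT()s can then share.
 */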

/* Multicalls */
DECLARE_EVENT_CLASS(xen_mc__batch,
	TP_PROTO(enum paravirt_lazy_mode mode),
	TP_ARGS(mode),
	TP_STRUCT__entry(
		__field(enum paravirt_lazy_mode, mode)
	),
	TP_fast_assign(__entry->mode = mode),
	TP_printk("start batch LAZY_%s",
		  (__entry->mode == PARAVIRT_LAZY_MMU) ? "MMU" :
		  (__entry->mode == PARAVIRT_LAZY_CPU) ? "CPU" : "NONE")
);

#define DEFINE_XEN_MC_BATCH(name)				\
	DEFINE_EVENT(xen_mc__batch, name,			\
		TP_PROTO(enum paravirt_lazy_mode mode),		\
		TP_ARGS(mode))

DEFINE_XEN_MC_BATCH(xen_mc_batch);
DEFINE_XEN_MC_BATCH(xen_mc_issue);
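
/*
 * Usage sketch (illustrative only, not part of this header): a batch of
 * multicalls is bracketed by these two events, roughly
 *
 *	trace_xen_mc_batch(paravirt_get_lazy_mode());
 *	... queue multicall entries ...
 *	trace_xen_mc_issue(mode);
 *
 * The actual call sites live in the Xen multicall code under arch/x86/xen/;
 * the snippet only shows how the generated trace_*() helpers are invoked.
 */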

TRACE_EVENT(xen_mc_entry,
	TP_PROTO(struct multicall_entry *mc, unsigned nargs),
	TP_ARGS(mc, nargs),
	TP_STRUCT__entry(
		__field(unsigned int, op)
		__field(unsigned int, nargs)
		__array(unsigned long, args, 6)
	),
	TP_fast_assign(__entry->op = mc->op;
		       __entry->nargs = nargs;
		       memcpy(__entry->args, mc->args, sizeof(unsigned long) * nargs);
		       memset(__entry->args + nargs, 0, sizeof(unsigned long) * (6 - nargs));
	),
	TP_printk("op %u%s args [%lx, %lx, %lx, %lx, %lx, %lx]",
		  __entry->op, xen_hypercall_name(__entry->op),
		  __entry->args[0], __entry->args[1], __entry->args[2],
		  __entry->args[3], __entry->args[4], __entry->args[5])
);
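
/*
 * The remaining xen_mc_* events cover the bookkeeping around the per-cpu
 * multicall buffer: argument-space allocation and extension
 * (xen_mc_entry_alloc, xen_mc_extend_args), completion callbacks attached
 * to a batch (xen_mc_callback), and why and how much was flushed to the
 * hypervisor (xen_mc_flush_reason, xen_mc_flush).
 */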

TRACE_EVENT(xen_mc_entry_alloc,
	TP_PROTO(size_t args),
	TP_ARGS(args),
	TP_STRUCT__entry(
		__field(size_t, args)
	),
	TP_fast_assign(__entry->args = args),
	TP_printk("alloc entry %zu arg bytes", __entry->args)
);

TRACE_EVENT(xen_mc_callback,
	TP_PROTO(xen_mc_callback_fn_t fn, void *data),
	TP_ARGS(fn, data),
	TP_STRUCT__entry(
		__field(xen_mc_callback_fn_t, fn)
		__field(void *, data)
	),
	TP_fast_assign(
		__entry->fn = fn;
		__entry->data = data;
	),
	TP_printk("callback %pf, data %p",
		  __entry->fn, __entry->data)
);

TRACE_EVENT(xen_mc_flush_reason,
	TP_PROTO(enum xen_mc_flush_reason reason),
	TP_ARGS(reason),
	TP_STRUCT__entry(
		__field(enum xen_mc_flush_reason, reason)
	),
	TP_fast_assign(__entry->reason = reason),
	TP_printk("flush reason %s",
		  (__entry->reason == XEN_MC_FL_NONE) ? "NONE" :
		  (__entry->reason == XEN_MC_FL_BATCH) ? "BATCH" :
		  (__entry->reason == XEN_MC_FL_ARGS) ? "ARGS" :
		  (__entry->reason == XEN_MC_FL_CALLBACK) ? "CALLBACK" : "??")
);

TRACE_EVENT(xen_mc_flush,
	TP_PROTO(unsigned mcidx, unsigned argidx, unsigned cbidx),
	TP_ARGS(mcidx, argidx, cbidx),
	TP_STRUCT__entry(
		__field(unsigned, mcidx)
		__field(unsigned, argidx)
		__field(unsigned, cbidx)
	),
	TP_fast_assign(__entry->mcidx = mcidx;
		       __entry->argidx = argidx;
		       __entry->cbidx = cbidx),
	TP_printk("flushing %u hypercalls, %u arg bytes, %u callbacks",
		  __entry->mcidx, __entry->argidx, __entry->cbidx)
);

TRACE_EVENT(xen_mc_extend_args,
	TP_PROTO(unsigned long op, size_t args, enum xen_mc_extend_args res),
	TP_ARGS(op, args, res),
	TP_STRUCT__entry(
		__field(unsigned int, op)
		__field(size_t, args)
		__field(enum xen_mc_extend_args, res)
	),
	TP_fast_assign(__entry->op = op;
		       __entry->args = args;
		       __entry->res = res),
	TP_printk("extending op %u%s by %zu bytes res %s",
		  __entry->op, xen_hypercall_name(__entry->op),
		  __entry->args,
		  __entry->res == XEN_MC_XE_OK ? "OK" :
		  __entry->res == XEN_MC_XE_BAD_OP ? "BAD_OP" :
		  __entry->res == XEN_MC_XE_NO_SPACE ? "NO_SPACE" : "???")
);

/* mmu */
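/*
 * The xen_mmu_* events trace the paravirtualized page-table operations
 * (set_pte and friends).  Where a page-table entry value is recorded, the
 * TP_printk prints it twice: once through the usual pte_val()/pmd_val()/...
 * accessors and once as the raw value that was stored, which under Xen PV
 * may differ (machine vs. pseudo-physical frame numbers).
 */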
DECLARE_EVENT_CLASS(xen_mmu__set_pte,
	TP_PROTO(pte_t *ptep, pte_t pteval),
	TP_ARGS(ptep, pteval),
	TP_STRUCT__entry(
		__field(pte_t *, ptep)
		__field(pteval_t, pteval)
	),
	TP_fast_assign(__entry->ptep = ptep;
		       __entry->pteval = pteval.pte),
	TP_printk("ptep %p pteval %0*llx (raw %0*llx)",
		  __entry->ptep,
		  (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
		  (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
);

#define DEFINE_XEN_MMU_SET_PTE(name)				\
	DEFINE_EVENT(xen_mmu__set_pte, name,			\
		TP_PROTO(pte_t *ptep, pte_t pteval),		\
		TP_ARGS(ptep, pteval))

DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte);
DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte_atomic);

TRACE_EVENT(xen_mmu_set_domain_pte,
	TP_PROTO(pte_t *ptep, pte_t pteval, unsigned domid),
	TP_ARGS(ptep, pteval, domid),
	TP_STRUCT__entry(
		__field(pte_t *, ptep)
		__field(pteval_t, pteval)
		__field(unsigned, domid)
	),
	TP_fast_assign(__entry->ptep = ptep;
		       __entry->pteval = pteval.pte;
		       __entry->domid = domid),
	TP_printk("ptep %p pteval %0*llx (raw %0*llx) domid %u",
		  __entry->ptep,
		  (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
		  (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval,
		  __entry->domid)
);

TRACE_EVENT(xen_mmu_set_pte_at,
	TP_PROTO(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, pte_t pteval),
	TP_ARGS(mm, addr, ptep, pteval),
	TP_STRUCT__entry(
		__field(struct mm_struct *, mm)
		__field(unsigned long, addr)
		__field(pte_t *, ptep)
		__field(pteval_t, pteval)
	),
	TP_fast_assign(__entry->mm = mm;
		       __entry->addr = addr;
		       __entry->ptep = ptep;
		       __entry->pteval = pteval.pte),
	TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)",
		  __entry->mm, __entry->addr, __entry->ptep,
		  (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
		  (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
);

TRACE_EVENT(xen_mmu_pte_clear,
	TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep),
	TP_ARGS(mm, addr, ptep),
	TP_STRUCT__entry(
		__field(struct mm_struct *, mm)
		__field(unsigned long, addr)
		__field(pte_t *, ptep)
	),
	TP_fast_assign(__entry->mm = mm;
		       __entry->addr = addr;
		       __entry->ptep = ptep),
	TP_printk("mm %p addr %lx ptep %p",
		  __entry->mm, __entry->addr, __entry->ptep)
);

TRACE_EVENT(xen_mmu_set_pmd,
	TP_PROTO(pmd_t *pmdp, pmd_t pmdval),
	TP_ARGS(pmdp, pmdval),
	TP_STRUCT__entry(
		__field(pmd_t *, pmdp)
		__field(pmdval_t, pmdval)
	),
	TP_fast_assign(__entry->pmdp = pmdp;
		       __entry->pmdval = pmdval.pmd),
	TP_printk("pmdp %p pmdval %0*llx (raw %0*llx)",
		  __entry->pmdp,
		  (int)sizeof(pmdval_t) * 2, (unsigned long long)pmd_val(native_make_pmd(__entry->pmdval)),
		  (int)sizeof(pmdval_t) * 2, (unsigned long long)__entry->pmdval)
);

TRACE_EVENT(xen_mmu_pmd_clear,
	TP_PROTO(pmd_t *pmdp),
	TP_ARGS(pmdp),
	TP_STRUCT__entry(
		__field(pmd_t *, pmdp)
	),
	TP_fast_assign(__entry->pmdp = pmdp),
	TP_printk("pmdp %p", __entry->pmdp)
);
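
/*
 * With 4-level page tables the pud and pgd are separate levels, so each
 * gets its own events.  On 3-level (PAE) builds the pud is folded into the
 * pgd, which is presumably why the fallback xen_mmu_set_pud below prints
 * the value through the pgd accessors instead of pud_val().
 */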
#if PAGETABLE_LEVELS >= 4

TRACE_EVENT(xen_mmu_set_pud,
	TP_PROTO(pud_t *pudp, pud_t pudval),
	TP_ARGS(pudp, pudval),
	TP_STRUCT__entry(
		__field(pud_t *, pudp)
		__field(pudval_t, pudval)
	),
	TP_fast_assign(__entry->pudp = pudp;
		       __entry->pudval = native_pud_val(pudval)),
	TP_printk("pudp %p pudval %0*llx (raw %0*llx)",
		  __entry->pudp,
		  (int)sizeof(pudval_t) * 2, (unsigned long long)pud_val(native_make_pud(__entry->pudval)),
		  (int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval)
);

TRACE_EVENT(xen_mmu_set_pgd,
	TP_PROTO(pgd_t *pgdp, pgd_t *user_pgdp, pgd_t pgdval),
	TP_ARGS(pgdp, user_pgdp, pgdval),
	TP_STRUCT__entry(
		__field(pgd_t *, pgdp)
		__field(pgd_t *, user_pgdp)
		__field(pgdval_t, pgdval)
	),
	TP_fast_assign(__entry->pgdp = pgdp;
		       __entry->user_pgdp = user_pgdp;
		       __entry->pgdval = pgdval.pgd),
	TP_printk("pgdp %p user_pgdp %p pgdval %0*llx (raw %0*llx)",
		  __entry->pgdp, __entry->user_pgdp,
		  (int)sizeof(pgdval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->pgdval)),
		  (int)sizeof(pgdval_t) * 2, (unsigned long long)__entry->pgdval)
);

TRACE_EVENT(xen_mmu_pud_clear,
	TP_PROTO(pud_t *pudp),
	TP_ARGS(pudp),
	TP_STRUCT__entry(
		__field(pud_t *, pudp)
	),
	TP_fast_assign(__entry->pudp = pudp),
	TP_printk("pudp %p", __entry->pudp)
);

#else

TRACE_EVENT(xen_mmu_set_pud,
	TP_PROTO(pud_t *pudp, pud_t pudval),
	TP_ARGS(pudp, pudval),
	TP_STRUCT__entry(
		__field(pud_t *, pudp)
		__field(pudval_t, pudval)
	),
	TP_fast_assign(__entry->pudp = pudp;
		       __entry->pudval = native_pud_val(pudval)),
	TP_printk("pudp %p pudval %0*llx (raw %0*llx)",
		  __entry->pudp,
		  (int)sizeof(pudval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->pudval)),
		  (int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval)
);

#endif

TRACE_EVENT(xen_mmu_pgd_clear,
	TP_PROTO(pgd_t *pgdp),
	TP_ARGS(pgdp),
	TP_STRUCT__entry(
		__field(pgd_t *, pgdp)
	),
	TP_fast_assign(__entry->pgdp = pgdp),
	TP_printk("pgdp %p", __entry->pgdp)
);

DECLARE_EVENT_CLASS(xen_mmu_ptep_modify_prot,
	TP_PROTO(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, pte_t pteval),
	TP_ARGS(mm, addr, ptep, pteval),
	TP_STRUCT__entry(
		__field(struct mm_struct *, mm)
		__field(unsigned long, addr)
		__field(pte_t *, ptep)
		__field(pteval_t, pteval)
	),
	TP_fast_assign(__entry->mm = mm;
		       __entry->addr = addr;
		       __entry->ptep = ptep;
		       __entry->pteval = pteval.pte),
	TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)",
		  __entry->mm, __entry->addr, __entry->ptep,
		  (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
		  (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
);

#define DEFINE_XEN_MMU_PTEP_MODIFY_PROT(name)			\
	DEFINE_EVENT(xen_mmu_ptep_modify_prot, name,		\
		TP_PROTO(struct mm_struct *mm, unsigned long addr,	\
			 pte_t *ptep, pte_t pteval),		\
		TP_ARGS(mm, addr, ptep, pteval))

DEFINE_XEN_MMU_PTEP_MODIFY_PROT(xen_mmu_ptep_modify_prot_start);
DEFINE_XEN_MMU_PTEP_MODIFY_PROT(xen_mmu_ptep_modify_prot_commit);

TRACE_EVENT(xen_mmu_alloc_ptpage,
	TP_PROTO(struct mm_struct *mm, unsigned long pfn, unsigned level, bool pinned),
	TP_ARGS(mm, pfn, level, pinned),
	TP_STRUCT__entry(
		__field(struct mm_struct *, mm)
		__field(unsigned long, pfn)
		__field(unsigned, level)
		__field(bool, pinned)
	),
	TP_fast_assign(__entry->mm = mm;
		       __entry->pfn = pfn;
		       __entry->level = level;
		       __entry->pinned = pinned),
	TP_printk("mm %p pfn %lx level %d %spinned",
		  __entry->mm, __entry->pfn, __entry->level,
		  __entry->pinned ? "" : "un")
);

TRACE_EVENT(xen_mmu_release_ptpage,
	TP_PROTO(unsigned long pfn, unsigned level, bool pinned),
	TP_ARGS(pfn, level, pinned),
	TP_STRUCT__entry(
		__field(unsigned long, pfn)
		__field(unsigned, level)
		__field(bool, pinned)
	),
	TP_fast_assign(__entry->pfn = pfn;
		       __entry->level = level;
		       __entry->pinned = pinned),
	TP_printk("pfn %lx level %d %spinned",
		  __entry->pfn, __entry->level,
		  __entry->pinned ? "" : "un")
);

DECLARE_EVENT_CLASS(xen_mmu_pgd,
	TP_PROTO(struct mm_struct *mm, pgd_t *pgd),
	TP_ARGS(mm, pgd),
	TP_STRUCT__entry(
		__field(struct mm_struct *, mm)
		__field(pgd_t *, pgd)
	),
	TP_fast_assign(__entry->mm = mm;
		       __entry->pgd = pgd),
	TP_printk("mm %p pgd %p", __entry->mm, __entry->pgd)
);

#define DEFINE_XEN_MMU_PGD_EVENT(name)				\
	DEFINE_EVENT(xen_mmu_pgd, name,				\
		TP_PROTO(struct mm_struct *mm, pgd_t *pgd),	\
		TP_ARGS(mm, pgd))

DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
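
/*
 * xen_mmu_flush_tlb carries no payload: the dummy int argument and the
 * zero-length __array() only exist to satisfy the TRACE_EVENT() macros,
 * and TP_fast_assign() simply discards the argument.
 */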
TRACE_EVENT(xen_mmu_flush_tlb,
	TP_PROTO(int x),
	TP_ARGS(x),
	TP_STRUCT__entry(__array(char, x, 0)),
	TP_fast_assign((void)x),
	TP_printk("%s", "")
);

TRACE_EVENT(xen_mmu_flush_tlb_single,
	TP_PROTO(unsigned long addr),
	TP_ARGS(addr),
	TP_STRUCT__entry(
		__field(unsigned long, addr)
	),
	TP_fast_assign(__entry->addr = addr),
	TP_printk("addr %lx", __entry->addr)
);

TRACE_EVENT(xen_mmu_flush_tlb_others,
	TP_PROTO(const struct cpumask *cpus, struct mm_struct *mm,
		 unsigned long addr),
	TP_ARGS(cpus, mm, addr),
	TP_STRUCT__entry(
		__field(unsigned, ncpus)
		__field(struct mm_struct *, mm)
		__field(unsigned long, addr)
	),
	TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
		       __entry->mm = mm;
		       __entry->addr = addr),
	TP_printk("ncpus %d mm %p addr %lx",
		  __entry->ncpus, __entry->mm, __entry->addr)
);

TRACE_EVENT(xen_mmu_write_cr3,
	TP_PROTO(bool kernel, unsigned long cr3),
	TP_ARGS(kernel, cr3),
	TP_STRUCT__entry(
		__field(bool, kernel)
		__field(unsigned long, cr3)
	),
	TP_fast_assign(__entry->kernel = kernel;
		       __entry->cr3 = cr3),
	TP_printk("%s cr3 %lx",
		  __entry->kernel ? "kernel" : "user", __entry->cr3)
);

/* CPU */
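/*
 * The xen_cpu_* events trace descriptor-table updates (LDT/GDT/IDT writes
 * and loads) that Xen intercepts via the paravirt cpu ops.
 */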
TRACE_EVENT(xen_cpu_write_ldt_entry,
	TP_PROTO(struct desc_struct *dt, int entrynum, u64 desc),
	TP_ARGS(dt, entrynum, desc),
	TP_STRUCT__entry(
		__field(struct desc_struct *, dt)
		__field(int, entrynum)
		__field(u64, desc)
	),
	TP_fast_assign(__entry->dt = dt;
		       __entry->entrynum = entrynum;
		       __entry->desc = desc;
	),
	TP_printk("dt %p entrynum %d entry %016llx",
		  __entry->dt, __entry->entrynum,
		  (unsigned long long)__entry->desc)
);

TRACE_EVENT(xen_cpu_write_idt_entry,
	TP_PROTO(gate_desc *dt, int entrynum, const gate_desc *ent),
	TP_ARGS(dt, entrynum, ent),
	TP_STRUCT__entry(
		__field(gate_desc *, dt)
		__field(int, entrynum)
	),
	TP_fast_assign(__entry->dt = dt;
		       __entry->entrynum = entrynum;
	),
	TP_printk("dt %p entrynum %d",
		  __entry->dt, __entry->entrynum)
);

TRACE_EVENT(xen_cpu_load_idt,
	TP_PROTO(const struct desc_ptr *desc),
	TP_ARGS(desc),
	TP_STRUCT__entry(
		__field(unsigned long, addr)
	),
	TP_fast_assign(__entry->addr = desc->address),
	TP_printk("addr %lx", __entry->addr)
);

TRACE_EVENT(xen_cpu_write_gdt_entry,
	TP_PROTO(struct desc_struct *dt, int entrynum, const void *desc, int type),
	TP_ARGS(dt, entrynum, desc, type),
	TP_STRUCT__entry(
		__field(u64, desc)
		__field(struct desc_struct *, dt)
		__field(int, entrynum)
		__field(int, type)
	),
	TP_fast_assign(__entry->dt = dt;
		       __entry->entrynum = entrynum;
		       __entry->desc = *(u64 *)desc;
		       __entry->type = type;
	),
	TP_printk("dt %p entrynum %d type %d desc %016llx",
		  __entry->dt, __entry->entrynum, __entry->type,
		  (unsigned long long)__entry->desc)
);

TRACE_EVENT(xen_cpu_set_ldt,
	TP_PROTO(const void *addr, unsigned entries),
	TP_ARGS(addr, entries),
	TP_STRUCT__entry(
		__field(const void *, addr)
		__field(unsigned, entries)
	),
	TP_fast_assign(__entry->addr = addr;
		       __entry->entries = entries),
	TP_printk("addr %p entries %u",
		  __entry->addr, __entry->entries)
);

#endif /* _TRACE_XEN_H */

/* This part must be outside protection */
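/*
 * define_trace.h re-includes this header with TRACE_HEADER_MULTI_READ
 * defined in order to expand the definitions above into the actual
 * tracepoint code; that is why the guard at the top permits a second pass
 * and why this include must stay outside it.
 */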
#include <trace/define_trace.h>