ktrace.c

/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <xfs.h>
static kmem_zone_t	*ktrace_hdr_zone;
static kmem_zone_t	*ktrace_ent_zone;
static int		ktrace_zentries;

void __init
ktrace_init(int zentries)
{
	ktrace_zentries = roundup_pow_of_two(zentries);

	ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t),
					"ktrace_hdr");
	ASSERT(ktrace_hdr_zone);

	ktrace_ent_zone = kmem_zone_init(ktrace_zentries
					* sizeof(ktrace_entry_t),
					"ktrace_ent");
	ASSERT(ktrace_ent_zone);
}

void __exit
ktrace_uninit(void)
{
	kmem_zone_destroy(ktrace_hdr_zone);
	kmem_zone_destroy(ktrace_ent_zone);
}
/*
 * ktrace_alloc()
 *
 * Allocate a ktrace header and enough buffering for the given
 * number of entries.  Round the number of entries up to a
 * power of 2 so we can do fast masking to get the index from
 * the atomic index counter.
 */
ktrace_t *
ktrace_alloc(int nentries, unsigned int __nocast sleep)
{
	ktrace_t	*ktp;
	ktrace_entry_t	*ktep;
	int		entries;

	ktp = (ktrace_t *)kmem_zone_alloc(ktrace_hdr_zone, sleep);

	if (ktp == (ktrace_t *)NULL) {
		/*
		 * KM_SLEEP callers don't expect failure.
		 */
		if (sleep & KM_SLEEP)
			panic("ktrace_alloc: NULL memory on KM_SLEEP request!");

		return NULL;
	}

	/*
	 * Default-sized buffers (ktrace_zentries entries) come from their
	 * own zone; any other size is allocated directly.
	 */
	entries = roundup_pow_of_two(nentries);
	if (entries == ktrace_zentries) {
		ktep = (ktrace_entry_t *)kmem_zone_zalloc(ktrace_ent_zone,
							  sleep);
	} else {
		ktep = (ktrace_entry_t *)kmem_zalloc((entries * sizeof(*ktep)),
						     sleep | KM_LARGE);
	}

	if (ktep == NULL) {
		/*
		 * KM_SLEEP callers don't expect failure.
		 */
		if (sleep & KM_SLEEP)
			panic("ktrace_alloc: NULL memory on KM_SLEEP request!");

		kmem_free(ktp);

		return NULL;
	}

	ktp->kt_entries = ktep;
	ktp->kt_nentries = entries;
	ASSERT(is_power_of_2(entries));
	ktp->kt_index_mask = entries - 1;
	atomic_set(&ktp->kt_index, 0);
	ktp->kt_rollover = 0;
	return ktp;
}
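
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * caller allocates a trace buffer once at setup time and keeps the
 * returned pointer around.  The 64-entry size and the variable name
 * "example_trace" are invented for this example; only ktrace_alloc()
 * and KM_SLEEP come from this code.
 *
 *	ktrace_t *example_trace = ktrace_alloc(64, KM_SLEEP);
 *
 * Asking for a count that is not a power of two, e.g. 100, gets rounded
 * up to 128, giving kt_nentries = 128 and kt_index_mask = 127.
 */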
/*
 * ktrace_free()
 *
 * Free up the ktrace header and buffer.  It is up to the caller
 * to ensure that no-one is referencing it.
 */
void
ktrace_free(ktrace_t *ktp)
{
	if (ktp == (ktrace_t *)NULL)
		return;

	/*
	 * Default-sized buffers were allocated from ktrace_ent_zone,
	 * so free them back to it; other sizes were allocated directly.
	 */
	if (ktp->kt_nentries == ktrace_zentries)
		kmem_zone_free(ktrace_ent_zone, ktp->kt_entries);
	else
		kmem_free(ktp->kt_entries);

	kmem_zone_free(ktrace_hdr_zone, ktp);
}
/*
 * Enter the given values into the "next" entry in the trace buffer.
 * kt_index is always the index of the next entry to be filled.
 */
void
ktrace_enter(
	ktrace_t	*ktp,
	void		*val0,
	void		*val1,
	void		*val2,
	void		*val3,
	void		*val4,
	void		*val5,
	void		*val6,
	void		*val7,
	void		*val8,
	void		*val9,
	void		*val10,
	void		*val11,
	void		*val12,
	void		*val13,
	void		*val14,
	void		*val15)
{
	int		index;
	ktrace_entry_t	*ktep;

	ASSERT(ktp != NULL);

	/*
	 * Grab an entry by pushing the index up to the next one.
	 */
	index = atomic_add_return(1, &ktp->kt_index);
	index = (index - 1) & ktp->kt_index_mask;
	if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
		ktp->kt_rollover = 1;

	ASSERT((index >= 0) && (index < ktp->kt_nentries));

	ktep = &(ktp->kt_entries[index]);

	ktep->val[0] = val0;
	ktep->val[1] = val1;
	ktep->val[2] = val2;
	ktep->val[3] = val3;
	ktep->val[4] = val4;
	ktep->val[5] = val5;
	ktep->val[6] = val6;
	ktep->val[7] = val7;
	ktep->val[8] = val8;
	ktep->val[9] = val9;
	ktep->val[10] = val10;
	ktep->val[11] = val11;
	ktep->val[12] = val12;
	ktep->val[13] = val13;
	ktep->val[14] = val14;
	ktep->val[15] = val15;
}
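
/*
 * Illustrative sketch (not in the original file): callers normally wrap
 * ktrace_enter() in a small helper that packs whatever they want to record
 * into the sixteen pointer slots, casting scalars through unsigned long.
 * The helper name, parameters, and values below are invented for the
 * example; only ktrace_enter() itself comes from this code.
 *
 *	static void
 *	example_trace_event(ktrace_t *tp, int event, void *obj)
 *	{
 *		ktrace_enter(tp,
 *			     (void *)(unsigned long)event, obj,
 *			     NULL, NULL, NULL, NULL, NULL, NULL,
 *			     NULL, NULL, NULL, NULL, NULL, NULL,
 *			     NULL, NULL);
 *	}
 */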
/*
 * Return the number of entries in the trace buffer.
 */
int
ktrace_nentries(
	ktrace_t	*ktp)
{
	int		index;

	if (ktp == NULL)
		return 0;

	index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
	return (ktp->kt_rollover ? ktp->kt_nentries : index);
}
/*
 * ktrace_first()
 *
 * This is used to find the start of the trace buffer.
 * In conjunction with ktrace_next() it can be used to
 * iterate through the entire trace buffer.  This code does
 * not do any locking because it is assumed that it is called
 * from the debugger.
 *
 * The caller must pass in a pointer to a ktrace_snap
 * structure in which we will keep some state used to
 * iterate through the buffer.  This state must not be touched
 * by any code outside of this module.
 */
ktrace_entry_t *
ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
{
	ktrace_entry_t	*ktep;
	int		index;
	int		nentries;

	if (ktp->kt_rollover)
		index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
	else
		index = 0;

	ktsp->ks_start = index;
	ktep = &(ktp->kt_entries[index]);

	nentries = ktrace_nentries(ktp);
	index++;
	if (index < nentries) {
		ktsp->ks_index = index;
	} else {
		ktsp->ks_index = 0;
		if (index > nentries)
			ktep = NULL;
	}
	return ktep;
}
/*
 * ktrace_next()
 *
 * This is used to iterate through the entries of the given
 * trace buffer.  The caller must pass in the ktrace_snap_t
 * structure initialized by ktrace_first().  The return value
 * will be either a pointer to the next ktrace_entry or NULL
 * if all of the entries have been traversed.
 */
ktrace_entry_t *
ktrace_next(
	ktrace_t	*ktp,
	ktrace_snap_t	*ktsp)
{
	int		index;
	ktrace_entry_t	*ktep;

	index = ktsp->ks_index;
	if (index == ktsp->ks_start) {
		ktep = NULL;
	} else {
		ktep = &ktp->kt_entries[index];
	}

	index++;
	if (index == ktrace_nentries(ktp)) {
		ktsp->ks_index = 0;
	} else {
		ktsp->ks_index = index;
	}

	return ktep;
}
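
/*
 * Illustrative sketch (not in the original file): dumping a whole buffer
 * with ktrace_first()/ktrace_next().  The variable names and the printk()
 * body are invented for the example; the iteration pattern is what the two
 * functions above provide.
 *
 *	ktrace_snap_t	snap;
 *	ktrace_entry_t	*ktep;
 *
 *	for (ktep = ktrace_first(tp, &snap);
 *	     ktep != NULL;
 *	     ktep = ktrace_next(tp, &snap))
 *		printk("%p %p\n", ktep->val[0], ktep->val[1]);
 */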
/*
 * ktrace_skip()
 *
 * Skip the next "count" entries and return the entry after that.
 * Return NULL if this causes us to iterate past the beginning again.
 */
ktrace_entry_t *
ktrace_skip(
	ktrace_t	*ktp,
	int		count,
	ktrace_snap_t	*ktsp)
{
	int		index;
	int		new_index;
	ktrace_entry_t	*ktep;
	int		nentries = ktrace_nentries(ktp);

	index = ktsp->ks_index;
	new_index = index + count;
	while (new_index >= nentries) {
		new_index -= nentries;
	}
	if (index == ktsp->ks_start) {
		/*
		 * We've iterated around to the start, so we're done.
		 */
		ktep = NULL;
	} else if ((new_index < index) && (index < ktsp->ks_index)) {
		/*
		 * We've skipped past the start again, so we're done.
		 */
		ktep = NULL;
		ktsp->ks_index = ktsp->ks_start;
	} else {
		ktep = &(ktp->kt_entries[new_index]);
		new_index++;
		if (new_index == nentries) {
			ktsp->ks_index = 0;
		} else {
			ktsp->ks_index = new_index;
		}
	}
	return ktep;
}
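
/*
 * Illustrative note (not in the original file): ktrace_skip() continues a
 * traversal started by ktrace_first().  For example, after
 *
 *	ktep = ktrace_first(tp, &snap);
 *
 * a caller interested in only every tenth record could advance with
 *
 *	ktep = ktrace_skip(tp, 9, &snap);
 *
 * which returns NULL once the skip wraps past the snapshot's start.
 */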