marker.c

/*
 * Copyright (C) 2007 Mathieu Desnoyers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/marker.h>
#include <linux/err.h>
#include <linux/slab.h>

extern struct marker __start___markers[];
extern struct marker __stop___markers[];

/* Set to 1 to enable marker debug output */
static const int marker_debug;
/*
 * markers_mutex nests inside module_mutex. The markers mutex protects the
 * builtin and module markers and the hash table.
 */
static DEFINE_MUTEX(markers_mutex);

/*
 * Marker hash table, containing the active markers.
 * Protected by module_mutex.
 */
#define MARKER_HASH_BITS 6
#define MARKER_TABLE_SIZE (1 << MARKER_HASH_BITS)

/*
 * Note about RCU:
 * It is used to make sure every handler has finished using its private data
 * between two consecutive operations (add or remove) on a given marker. It is
 * also used to delay the freeing of multiple-probes arrays until a quiescent
 * state is reached.
 * Marker entry modifications are protected by the markers_mutex.
 */
struct marker_entry {
	struct hlist_node hlist;
	char *format;
	/* Probe wrapper */
	void (*call)(const struct marker *mdata, void *call_private, ...);
	struct marker_probe_closure single;
	struct marker_probe_closure *multi;
	int refcount;	/* Number of times armed. 0 if disarmed. */
	unsigned char ptype:1;
	char name[0];	/* Contains name'\0'format'\0' */
};

static struct hlist_head marker_table[MARKER_TABLE_SIZE];
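/*
 * Layout sketch for the flexible name[] member (illustrative only; the
 * "subsys_event" name and "value %d" format are hypothetical). add_marker()
 * below copies both strings back to back and points format into the buffer:
 *
 *	name[]  : "subsys_event\0value %d\0"
 *	           ^name         ^format == &name[strlen(name) + 1]
 */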
/**
 * __mark_empty_function - Empty probe callback
 * @probe_private: probe private data
 * @call_private: call site private data
 * @fmt: format string
 * @...: variable argument list
 *
 * Empty callback provided as a probe to the markers. By providing this to a
 * disabled marker, we make sure the execution flow is always valid even
 * though the function pointer change and the marker enabling are two distinct
 * operations that modify the execution flow of preemptible code.
 */
void __mark_empty_function(void *probe_private, void *call_private,
	const char *fmt, va_list *args)
{
}
EXPORT_SYMBOL_GPL(__mark_empty_function);
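/*
 * Example probe callback (illustrative sketch only; my_vprintk_probe is
 * hypothetical and not part of this file). A probe receives the private data
 * passed at registration time, the call site private data, the marker format
 * string and a pointer to the va_list prepared by marker_probe_cb():
 *
 *	static void my_vprintk_probe(void *probe_private, void *call_private,
 *				     const char *fmt, va_list *args)
 *	{
 *		vprintk(fmt, *args);
 *	}
 */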
/*
 * marker_probe_cb - Callback that prepares the variable argument list for probes.
 * @mdata: pointer of type struct marker
 * @call_private: caller site private data
 * @...: Variable argument list.
 *
 * Since we do not use "typical" pointer based RCU in the 1 argument case, we
 * need to put a full smp_rmb() in this branch. This is why we do not use
 * rcu_dereference() for the pointer read.
 */
void marker_probe_cb(const struct marker *mdata, void *call_private, ...)
{
	va_list args;
	char ptype;

	/*
	 * rcu_read_lock_sched does two things: it disables preemption to make
	 * sure the teardown of the callbacks can be done correctly when they
	 * are in modules, and it ensures RCU read coherency.
	 */
	rcu_read_lock_sched();
	ptype = mdata->ptype;
	if (likely(!ptype)) {
		marker_probe_func *func;
		/* Must read the ptype before ptr. They are not data dependent,
		 * so we put an explicit smp_rmb() here. */
		smp_rmb();
		func = mdata->single.func;
		/* Must read the ptr before private data. They are not data
		 * dependent, so we put an explicit smp_rmb() here. */
		smp_rmb();
		va_start(args, call_private);
		func(mdata->single.probe_private, call_private, mdata->format,
			&args);
		va_end(args);
	} else {
		struct marker_probe_closure *multi;
		int i;
		/*
		 * Read mdata->ptype before mdata->multi.
		 */
		smp_rmb();
		multi = mdata->multi;
		/*
		 * multi points to an array, therefore accessing the array
		 * depends on reading multi. However, even in this case,
		 * we must ensure that the pointer is read _before_ the array
		 * data. Same as rcu_dereference, but we need a full smp_rmb()
		 * in the fast path, so put the explicit barrier here.
		 */
		smp_read_barrier_depends();
		for (i = 0; multi[i].func; i++) {
			va_start(args, call_private);
			multi[i].func(multi[i].probe_private, call_private,
				mdata->format, &args);
			va_end(args);
		}
	}
	rcu_read_unlock_sched();
}
EXPORT_SYMBOL_GPL(marker_probe_cb);
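/*
 * Call site sketch (illustrative only; the marker name, format string and
 * argument are hypothetical). An armed marker placed with:
 *
 *	trace_mark(subsys_event, "value %d", value);
 *
 * ends up invoking the wrapper installed in mdata->call, normally
 * marker_probe_cb() above (or marker_probe_cb_noarg() below for MARK_NOARGS
 * markers), which then dispatches to the registered probe(s).
 */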
/*
 * marker_probe_cb_noarg - Callback that does not prepare the variable argument list.
 * @mdata: pointer of type struct marker
 * @call_private: caller site private data
 * @...: Variable argument list.
 *
 * Should be connected to markers "MARK_NOARGS".
 */
void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
{
	va_list args;	/* not initialized */
	char ptype;

	rcu_read_lock_sched();
	ptype = mdata->ptype;
	if (likely(!ptype)) {
		marker_probe_func *func;
		/* Must read the ptype before ptr. They are not data dependent,
		 * so we put an explicit smp_rmb() here. */
		smp_rmb();
		func = mdata->single.func;
		/* Must read the ptr before private data. They are not data
		 * dependent, so we put an explicit smp_rmb() here. */
		smp_rmb();
		func(mdata->single.probe_private, call_private, mdata->format,
			&args);
	} else {
		struct marker_probe_closure *multi;
		int i;
		/*
		 * Read mdata->ptype before mdata->multi.
		 */
		smp_rmb();
		multi = mdata->multi;
		/*
		 * multi points to an array, therefore accessing the array
		 * depends on reading multi. However, even in this case,
		 * we must ensure that the pointer is read _before_ the array
		 * data. Same as rcu_dereference, but we need a full smp_rmb()
		 * in the fast path, so put the explicit barrier here.
		 */
		smp_read_barrier_depends();
		for (i = 0; multi[i].func; i++)
			multi[i].func(multi[i].probe_private, call_private,
				mdata->format, &args);
	}
	rcu_read_unlock_sched();
}
EXPORT_SYMBOL_GPL(marker_probe_cb_noarg);
static void debug_print_probes(struct marker_entry *entry)
{
	int i;

	if (!marker_debug)
		return;

	if (!entry->ptype) {
		printk(KERN_DEBUG "Single probe : %p %p\n",
			entry->single.func,
			entry->single.probe_private);
	} else {
		for (i = 0; entry->multi[i].func; i++)
			printk(KERN_DEBUG "Multi probe %d : %p %p\n", i,
				entry->multi[i].func,
				entry->multi[i].probe_private);
	}
}
static struct marker_probe_closure *
marker_entry_add_probe(struct marker_entry *entry,
		       marker_probe_func *probe, void *probe_private)
{
	int nr_probes = 0;
	struct marker_probe_closure *old, *new;

	WARN_ON(!probe);

	debug_print_probes(entry);
	old = entry->multi;
	if (!entry->ptype) {
		if (entry->single.func == probe &&
				entry->single.probe_private == probe_private)
			return ERR_PTR(-EBUSY);
		if (entry->single.func == __mark_empty_function) {
			/* 0 -> 1 probes */
			entry->single.func = probe;
			entry->single.probe_private = probe_private;
			entry->refcount = 1;
			entry->ptype = 0;
			debug_print_probes(entry);
			return NULL;
		} else {
			/* 1 -> 2 probes */
			nr_probes = 1;
			old = NULL;
		}
	} else {
		/* (N -> N+1), (N != 0, 1) probes */
		for (nr_probes = 0; old[nr_probes].func; nr_probes++)
			if (old[nr_probes].func == probe
					&& old[nr_probes].probe_private
					== probe_private)
				return ERR_PTR(-EBUSY);
	}
	/* + 2 : one for new probe, one for NULL func */
	new = kzalloc((nr_probes + 2) * sizeof(struct marker_probe_closure),
			GFP_KERNEL);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (!old)
		new[0] = entry->single;
	else
		memcpy(new, old,
			nr_probes * sizeof(struct marker_probe_closure));
	new[nr_probes].func = probe;
	new[nr_probes].probe_private = probe_private;
	entry->refcount = nr_probes + 1;
	entry->multi = new;
	entry->ptype = 1;
	debug_print_probes(entry);
	return old;
}
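/*
 * Caller pattern (see marker_probe_register() and marker_probe_unregister()
 * below): the closure array returned by marker_entry_add_probe() or
 * marker_entry_remove_probe() is the *old* array, which may still be in use
 * by running probes. Callers free it only after an RCU-sched grace period:
 *
 *	old = marker_entry_add_probe(entry, probe, probe_private);
 *	...
 *	synchronize_sched();
 *	kfree(old);
 */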
static struct marker_probe_closure *
marker_entry_remove_probe(struct marker_entry *entry,
			  marker_probe_func *probe, void *probe_private)
{
	int nr_probes = 0, nr_del = 0, i;
	struct marker_probe_closure *old, *new;

	old = entry->multi;

	debug_print_probes(entry);
	if (!entry->ptype) {
		/* 0 -> N is an error */
		WARN_ON(entry->single.func == __mark_empty_function);
		/* 1 -> 0 probes */
		WARN_ON(probe && entry->single.func != probe);
		WARN_ON(entry->single.probe_private != probe_private);
		entry->single.func = __mark_empty_function;
		entry->refcount = 0;
		entry->ptype = 0;
		debug_print_probes(entry);
		return NULL;
	} else {
		/* (N -> M), (N > 1, M >= 0) probes */
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			if ((!probe || old[nr_probes].func == probe)
					&& old[nr_probes].probe_private
					== probe_private)
				nr_del++;
		}
	}

	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		entry->single.func = __mark_empty_function;
		entry->refcount = 0;
		entry->ptype = 0;
	} else if (nr_probes - nr_del == 1) {
		/* N -> 1, (N > 1) */
		for (i = 0; old[i].func; i++)
			if ((probe && old[i].func != probe) ||
					old[i].probe_private != probe_private)
				entry->single = old[i];
		entry->refcount = 1;
		entry->ptype = 0;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 1) */
		/* + 1 for NULL */
		new = kzalloc((nr_probes - nr_del + 1)
			* sizeof(struct marker_probe_closure), GFP_KERNEL);
		if (new == NULL)
			return ERR_PTR(-ENOMEM);
		for (i = 0; old[i].func; i++)
			if ((probe && old[i].func != probe) ||
					old[i].probe_private != probe_private)
				new[j++] = old[i];
		entry->refcount = nr_probes - nr_del;
		entry->ptype = 1;
		entry->multi = new;
	}
	debug_print_probes(entry);
	return old;
}
/*
 * Get marker if the marker is present in the marker hash table.
 * Must be called with markers_mutex held.
 * Returns NULL if not present.
 */
static struct marker_entry *get_marker(const char *name)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct marker_entry *e;
	u32 hash = jhash(name, strlen(name), 0);

	head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
	hlist_for_each_entry(e, node, head, hlist) {
		if (!strcmp(name, e->name))
			return e;
	}
	return NULL;
}
/*
 * Add the marker to the marker hash table. Must be called with markers_mutex
 * held.
 */
static struct marker_entry *add_marker(const char *name, const char *format)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct marker_entry *e;
	size_t name_len = strlen(name) + 1;
	size_t format_len = 0;
	u32 hash = jhash(name, name_len-1, 0);

	if (format)
		format_len = strlen(format) + 1;
	head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
	hlist_for_each_entry(e, node, head, hlist) {
		if (!strcmp(name, e->name)) {
			printk(KERN_NOTICE
				"Marker %s busy\n", name);
			return ERR_PTR(-EBUSY);	/* Already there */
		}
	}
	/*
	 * Using kmalloc here to allocate a variable length element. Could
	 * cause some memory fragmentation if overused.
	 */
	e = kmalloc(sizeof(struct marker_entry) + name_len + format_len,
			GFP_KERNEL);
	if (!e)
		return ERR_PTR(-ENOMEM);
	memcpy(&e->name[0], name, name_len);
	if (format) {
		e->format = &e->name[name_len];
		memcpy(e->format, format, format_len);
		if (strcmp(e->format, MARK_NOARGS) == 0)
			e->call = marker_probe_cb_noarg;
		else
			e->call = marker_probe_cb;
		trace_mark(core_marker_format, "name %s format %s",
				e->name, e->format);
	} else {
		e->format = NULL;
		e->call = marker_probe_cb;
	}
	e->single.func = __mark_empty_function;
	e->single.probe_private = NULL;
	e->multi = NULL;
	e->ptype = 0;
	e->refcount = 0;
	hlist_add_head(&e->hlist, head);
	return e;
}
/*
 * Remove the marker from the marker hash table. Must be called with
 * markers_mutex held.
 */
static int remove_marker(const char *name)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct marker_entry *e;
	int found = 0;
	size_t len = strlen(name) + 1;
	u32 hash = jhash(name, len-1, 0);

	head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
	hlist_for_each_entry(e, node, head, hlist) {
		if (!strcmp(name, e->name)) {
			found = 1;
			break;
		}
	}
	if (!found)
		return -ENOENT;
	if (e->single.func != __mark_empty_function)
		return -EBUSY;
	hlist_del(&e->hlist);
	kfree(e);
	return 0;
}
/*
 * Set the marker_entry format to the format found in the element.
 */
static int marker_set_format(struct marker_entry **entry, const char *format)
{
	struct marker_entry *e;
	size_t name_len = strlen((*entry)->name) + 1;
	size_t format_len = strlen(format) + 1;

	e = kmalloc(sizeof(struct marker_entry) + name_len + format_len,
			GFP_KERNEL);
	if (!e)
		return -ENOMEM;
	memcpy(&e->name[0], (*entry)->name, name_len);
	e->format = &e->name[name_len];
	memcpy(e->format, format, format_len);
	if (strcmp(e->format, MARK_NOARGS) == 0)
		e->call = marker_probe_cb_noarg;
	else
		e->call = marker_probe_cb;
	e->single = (*entry)->single;
	e->multi = (*entry)->multi;
	e->ptype = (*entry)->ptype;
	e->refcount = (*entry)->refcount;
	hlist_add_before(&e->hlist, &(*entry)->hlist);
	hlist_del(&(*entry)->hlist);
	kfree(*entry);
	*entry = e;
	trace_mark(core_marker_format, "name %s format %s",
			e->name, e->format);
	return 0;
}
/*
 * Sets the probe callback corresponding to one marker.
 */
static int set_marker(struct marker_entry **entry, struct marker *elem,
		int active)
{
	int ret;

	WARN_ON(strcmp((*entry)->name, elem->name) != 0);

	if ((*entry)->format) {
		if (strcmp((*entry)->format, elem->format) != 0) {
			printk(KERN_NOTICE
				"Format mismatch for probe %s "
				"(%s), marker (%s)\n",
				(*entry)->name,
				(*entry)->format,
				elem->format);
			return -EPERM;
		}
	} else {
		ret = marker_set_format(entry, elem->format);
		if (ret)
			return ret;
	}

	/*
	 * probe_cb setup (statically known) is done here. It is
	 * asynchronous with the rest of execution, therefore we only
	 * pass from a "safe" callback (with argument) to an "unsafe"
	 * callback (does not set arguments).
	 */
	elem->call = (*entry)->call;
	/*
	 * Sanity check:
	 * We only update the single probe private data when the ptr is
	 * set to a _non_ single probe! (0 -> 1 and N -> 1, N != 1)
	 */
	WARN_ON(elem->single.func != __mark_empty_function
		&& elem->single.probe_private
		!= (*entry)->single.probe_private &&
		!elem->ptype);
	elem->single.probe_private = (*entry)->single.probe_private;
	/*
	 * Make sure the private data is valid when we update the
	 * single probe ptr.
	 */
	smp_wmb();
	elem->single.func = (*entry)->single.func;
	/*
	 * We also make sure that the new probe callbacks array is consistent
	 * before setting a pointer to it.
	 */
	rcu_assign_pointer(elem->multi, (*entry)->multi);
	/*
	 * Update the function or multi probe array pointer before setting the
	 * ptype.
	 */
	smp_wmb();
	elem->ptype = (*entry)->ptype;
	elem->state = active;

	return 0;
}
/*
 * Disable a marker and its probe callback.
 * Note: only waiting for an RCU period after setting elem->call to the empty
 * function ensures that the original callback is not used anymore. This is
 * ensured by rcu_read_lock_sched around the call site.
 */
static void disable_marker(struct marker *elem)
{
	/* leave "call" as is. It is known statically. */
	elem->state = 0;
	elem->single.func = __mark_empty_function;
	/* Update the function before setting the ptype */
	smp_wmb();
	elem->ptype = 0;	/* single probe */
	/*
	 * Leave the private data and id there, because removal is racy and
	 * should be done only after an RCU period. These are never used until
	 * the next initialization anyway.
	 */
}
/**
 * marker_update_probe_range - Update a probe range
 * @begin: beginning of the range
 * @end: end of the range
 *
 * Updates the probe callback corresponding to a range of markers.
 */
void marker_update_probe_range(struct marker *begin,
	struct marker *end)
{
	struct marker *iter;
	struct marker_entry *mark_entry;

	mutex_lock(&markers_mutex);
	for (iter = begin; iter < end; iter++) {
		mark_entry = get_marker(iter->name);
		if (mark_entry) {
			set_marker(&mark_entry, iter,
					!!mark_entry->refcount);
			/*
			 * ignore error, continue
			 */
		} else {
			disable_marker(iter);
		}
	}
	mutex_unlock(&markers_mutex);
}
/*
 * Update probes, removing the faulty probes.
 *
 * Internal callback only changed before the first probe is connected to it.
 * Single probe private data can only be changed on 0 -> 1 and 2 -> 1
 * transitions. All other transitions will leave the old private data valid.
 * This makes the non-atomicity of the callback/private data updates valid.
 *
 * "special case" updates:
 * 0 -> 1 callback
 * 1 -> 0 callback
 * 1 -> 2 callbacks
 * 2 -> 1 callbacks
 * Other updates all behave the same, just like the 2 -> 3 or 3 -> 2 updates.
 * Side effect: marker_set_format may delete the marker entry (creating a
 * replacement).
 */
static void marker_update_probes(void)
{
	/* Core kernel markers */
	marker_update_probe_range(__start___markers, __stop___markers);
	/* Markers in modules. */
	module_update_markers();
}
/**
 * marker_probe_register - Connect a probe to a marker
 * @name: marker name
 * @format: format string
 * @probe: probe handler
 * @probe_private: probe private data
 *
 * Private data must be a valid allocated memory address, or NULL.
 * Returns 0 if ok, error value on error.
 * The probe address must at least be aligned on the architecture pointer size.
 */
int marker_probe_register(const char *name, const char *format,
			marker_probe_func *probe, void *probe_private)
{
	struct marker_entry *entry;
	int ret = 0;
	struct marker_probe_closure *old;

	mutex_lock(&markers_mutex);
	entry = get_marker(name);
	if (!entry) {
		entry = add_marker(name, format);
		if (IS_ERR(entry)) {
			ret = PTR_ERR(entry);
			goto end;
		}
	}
	old = marker_entry_add_probe(entry, probe, probe_private);
	if (IS_ERR(old)) {
		ret = PTR_ERR(old);
		goto end;
	}
	mutex_unlock(&markers_mutex);
	marker_update_probes();		/* may update entry */
	synchronize_sched();
	kfree(old);
	mutex_lock(&markers_mutex);
	entry = get_marker(name);
	WARN_ON(!entry);
end:
	mutex_unlock(&markers_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(marker_probe_register);
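/*
 * Usage sketch (illustrative only; my_probe, the marker name and the format
 * string are hypothetical, not part of this file):
 *
 *	static void my_probe(void *probe_private, void *call_private,
 *			     const char *fmt, va_list *args)
 *	{
 *		...
 *	}
 *
 *	ret = marker_probe_register("subsys_event", "value %d",
 *				    my_probe, NULL);
 *	if (ret)
 *		printk(KERN_INFO "marker_probe_register failed: %d\n", ret);
 */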
/**
 * marker_probe_unregister - Disconnect a probe from a marker
 * @name: marker name
 * @probe: probe function pointer
 * @probe_private: probe private data
 *
 * Returns 0 on success, or -ENOENT if the marker is not present.
 * We do not need to call a synchronize_sched to make sure the probes have
 * finished running before doing a module unload, because the module unload
 * itself uses stop_machine(), which ensures that every preempt-disabled
 * section has finished.
 */
int marker_probe_unregister(const char *name,
	marker_probe_func *probe, void *probe_private)
{
	struct marker_entry *entry;
	struct marker_probe_closure *old;
	int ret = -ENOENT;

	mutex_lock(&markers_mutex);
	entry = get_marker(name);
	if (!entry)
		goto end;
	old = marker_entry_remove_probe(entry, probe, probe_private);
	mutex_unlock(&markers_mutex);
	marker_update_probes();		/* may update entry */
	synchronize_sched();
	kfree(old);
	mutex_lock(&markers_mutex);
	entry = get_marker(name);
	if (!entry)
		goto end;
	remove_marker(name);	/* Ignore busy error message */
	ret = 0;
end:
	mutex_unlock(&markers_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(marker_probe_unregister);
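/*
 * Usage sketch, matching the registration example above (my_probe and the
 * marker name remain hypothetical): the same probe and private data pair
 * must be passed back to detach it:
 *
 *	ret = marker_probe_unregister("subsys_event", my_probe, NULL);
 */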
static struct marker_entry *
get_marker_from_private_data(marker_probe_func *probe, void *probe_private)
{
	struct marker_entry *entry;
	unsigned int i;
	struct hlist_head *head;
	struct hlist_node *node;

	for (i = 0; i < MARKER_TABLE_SIZE; i++) {
		head = &marker_table[i];
		hlist_for_each_entry(entry, node, head, hlist) {
			if (!entry->ptype) {
				if (entry->single.func == probe
						&& entry->single.probe_private
						== probe_private)
					return entry;
			} else {
				struct marker_probe_closure *closure;
				int j;

				/* Do not reuse the bucket index i here. */
				closure = entry->multi;
				for (j = 0; closure[j].func; j++) {
					if (closure[j].func == probe &&
							closure[j].probe_private
							== probe_private)
						return entry;
				}
			}
		}
	}
	return NULL;
}
/**
 * marker_probe_unregister_private_data - Disconnect a probe from a marker
 * @probe: probe function
 * @probe_private: probe private data
 *
 * Unregister a probe by providing the registered private data.
 * Only removes the first marker found in hash table.
 * Returns 0 on success or error value.
 * We do not need to call a synchronize_sched to make sure the probes have
 * finished running before doing a module unload, because the module unload
 * itself uses stop_machine(), which ensures that every preempt-disabled
 * section has finished.
 */
int marker_probe_unregister_private_data(marker_probe_func *probe,
		void *probe_private)
{
	struct marker_entry *entry;
	int ret = 0;
	struct marker_probe_closure *old;

	mutex_lock(&markers_mutex);
	entry = get_marker_from_private_data(probe, probe_private);
	if (!entry) {
		ret = -ENOENT;
		goto end;
	}
	old = marker_entry_remove_probe(entry, NULL, probe_private);
	mutex_unlock(&markers_mutex);
	marker_update_probes();		/* may update entry */
	synchronize_sched();
	kfree(old);
	mutex_lock(&markers_mutex);
	entry = get_marker_from_private_data(probe, probe_private);
	WARN_ON(!entry);
	remove_marker(entry->name);	/* Ignore busy error message */
end:
	mutex_unlock(&markers_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(marker_probe_unregister_private_data);
/**
 * marker_get_private_data - Get a marker's probe private data
 * @name: marker name
 * @probe: probe to match
 * @num: get the nth matching probe's private data
 *
 * Returns the nth private data pointer (starting from 0) matching, or an
 * ERR_PTR.
 * The private data pointer should _only_ be dereferenced if the caller is the
 * owner of the data, or its content could vanish. This is mostly used to
 * confirm that a caller is the owner of a registered probe.
 */
void *marker_get_private_data(const char *name, marker_probe_func *probe,
		int num)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct marker_entry *e;
	size_t name_len = strlen(name) + 1;
	u32 hash = jhash(name, name_len-1, 0);
	int i;

	head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
	hlist_for_each_entry(e, node, head, hlist) {
		if (!strcmp(name, e->name)) {
			if (!e->ptype) {
				if (num == 0 && e->single.func == probe)
					return e->single.probe_private;
				else
					break;
			} else {
				struct marker_probe_closure *closure;
				int match = 0;

				closure = e->multi;
				for (i = 0; closure[i].func; i++) {
					if (closure[i].func != probe)
						continue;
					if (match++ == num)
						return closure[i].probe_private;
				}
			}
		}
	}
	return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(marker_get_private_data);
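/*
 * Usage sketch (illustrative only; my_probe, my_data and the marker name are
 * hypothetical). A caller can confirm it owns the first matching probe before
 * touching the private data:
 *
 *	void *data = marker_get_private_data("subsys_event", my_probe, 0);
 *
 *	if (!IS_ERR(data) && data == my_data)
 *		... the caller registered this probe and may use my_data ...
 */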