/**
 * @file buffer_sync.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf
 * @author Robert Richter <robert.richter@amd.com>
 *
 * This is the core of the buffer management. Each
 * CPU buffer is processed and entered into the
 * global event buffer. Such processing is necessary
 * in several circumstances, mentioned below.
 *
 * The processing does the job of converting the
 * transitory EIP value into a persistent dentry/offset
 * value that the profiler can record at its leisure.
 *
 * See fs/dcookies.c for a description of the dentry/offset
 * objects.
 */

#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/dcookies.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/oprofile.h>
#include <linux/sched.h>

#include "oprofile_stats.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"

static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
static cpumask_var_t marked_cpus;
static DEFINE_SPINLOCK(task_mortuary);
static void process_task_mortuary(void);

/* Take ownership of the task struct and place it on the
 * list for processing. Only after two full buffer syncs
 * does the task eventually get freed, because by then
 * we are sure we will not reference it again.
 * Can be invoked from softirq via RCU callback due to
 * call_rcu() of the task struct, hence the _irqsave.
 */
static int
task_free_notify(struct notifier_block *self, unsigned long val, void *data)
{
        unsigned long flags;
        struct task_struct *task = data;

        spin_lock_irqsave(&task_mortuary, flags);
        list_add(&task->tasks, &dying_tasks);
        spin_unlock_irqrestore(&task_mortuary, flags);
        return NOTIFY_OK;
}

/* The task is on its way out. A sync of the buffer means we can catch
 * any remaining samples for this task.
 */
static int
task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
{
        /* To avoid latency problems, we only process the current CPU,
         * hoping that most samples for the task are on this CPU
         */
        sync_buffer(raw_smp_processor_id());
        return 0;
}

/* The task is about to try a do_munmap(). We peek at what it's going to
 * do, and if it's an executable region, process the samples first, so
 * we don't lose any. This does not have to be exact, it's a QoI issue
 * only.
 */
static int
munmap_notify(struct notifier_block *self, unsigned long val, void *data)
{
        unsigned long addr = (unsigned long)data;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *mpnt;

        down_read(&mm->mmap_sem);

        mpnt = find_vma(mm, addr);
        if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
                up_read(&mm->mmap_sem);
                /* To avoid latency problems, we only process the current CPU,
                 * hoping that most samples for the task are on this CPU
                 */
                sync_buffer(raw_smp_processor_id());
                return 0;
        }

        up_read(&mm->mmap_sem);
        return 0;
}

/* We need to be told about new modules so we don't attribute to a previously
 * loaded module, or drop the samples on the floor.
 */
static int
module_load_notify(struct notifier_block *self, unsigned long val, void *data)
{
#ifdef CONFIG_MODULES
        if (val != MODULE_STATE_COMING)
                return 0;

        /* FIXME: should we process all CPU buffers ? */
        mutex_lock(&buffer_mutex);
        add_event_entry(ESCAPE_CODE);
        add_event_entry(MODULE_LOADED_CODE);
        mutex_unlock(&buffer_mutex);
#endif
        return 0;
}

static struct notifier_block task_free_nb = {
        .notifier_call = task_free_notify,
};

static struct notifier_block task_exit_nb = {
        .notifier_call = task_exit_notify,
};

static struct notifier_block munmap_nb = {
        .notifier_call = munmap_notify,
};

static struct notifier_block module_load_nb = {
        .notifier_call = module_load_notify,
};

static void end_sync(void)
{
        end_cpu_work();
        /* make sure we don't leak task structs */
        process_task_mortuary();
        process_task_mortuary();
}

int sync_start(void)
{
        int err;

        if (!alloc_cpumask_var(&marked_cpus, GFP_KERNEL))
                return -ENOMEM;
        cpumask_clear(marked_cpus);

        start_cpu_work();

        err = task_handoff_register(&task_free_nb);
        if (err)
                goto out1;
        err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
        if (err)
                goto out2;
        err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
        if (err)
                goto out3;
        err = register_module_notifier(&module_load_nb);
        if (err)
                goto out4;

out:
        return err;
out4:
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
out3:
        profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
        task_handoff_unregister(&task_free_nb);
out1:
        end_sync();
        free_cpumask_var(marked_cpus);
        goto out;
}

void sync_stop(void)
{
        unregister_module_notifier(&module_load_nb);
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
        profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
        task_handoff_unregister(&task_free_nb);
        end_sync();
        free_cpumask_var(marked_cpus);
}

/* Optimisation. We can manage without taking the dcookie sem
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer). */
static inline unsigned long fast_get_dcookie(struct path *path)
{
        unsigned long cookie;

        if (path->dentry->d_flags & DCACHE_COOKIE)
                return (unsigned long)path->dentry;
        get_dcookie(path, &cookie);
        return cookie;
}

/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
 * which corresponds loosely to "application name". This is
 * not strictly necessary but allows oprofile to associate
 * shared-library samples with particular applications
 */
static unsigned long get_exec_dcookie(struct mm_struct *mm)
{
        unsigned long cookie = NO_COOKIE;
        struct vm_area_struct *vma;

        if (!mm)
                goto out;

        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (!vma->vm_file)
                        continue;
                if (!(vma->vm_flags & VM_EXECUTABLE))
                        continue;
                cookie = fast_get_dcookie(&vma->vm_file->f_path);
                break;
        }

out:
        return cookie;
}

/* Convert the EIP value of a sample into a persistent dentry/offset
 * pair that can then be added to the global event buffer. We make
 * sure to do this lookup before a mm->mmap modification happens so
 * we don't lose track.
 */
static unsigned long
lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
{
        unsigned long cookie = NO_COOKIE;
        struct vm_area_struct *vma;

        for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {

                if (addr < vma->vm_start || addr >= vma->vm_end)
                        continue;

                if (vma->vm_file) {
                        cookie = fast_get_dcookie(&vma->vm_file->f_path);
                        *offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
                                vma->vm_start;
                } else {
                        /* must be an anonymous map */
                        *offset = addr;
                }

                break;
        }

        if (!vma)
                cookie = INVALID_COOKIE;

        return cookie;
}

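/*
 * Cache of the most recently emitted cookie: consecutive samples hitting
 * the same file then need only one COOKIE_SWITCH record in the event buffer.
 */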
static unsigned long last_cookie = INVALID_COOKIE;

static void add_cpu_switch(int i)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(CPU_SWITCH_CODE);
        add_event_entry(i);
        last_cookie = INVALID_COOKIE;
}

static void add_kernel_ctx_switch(unsigned int in_kernel)
{
        add_event_entry(ESCAPE_CODE);
        if (in_kernel)
                add_event_entry(KERNEL_ENTER_SWITCH_CODE);
        else
                add_event_entry(KERNEL_EXIT_SWITCH_CODE);
}

static void
add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(CTX_SWITCH_CODE);
        add_event_entry(task->pid);
        add_event_entry(cookie);
        /* Another code for daemon back-compat */
        add_event_entry(ESCAPE_CODE);
        add_event_entry(CTX_TGID_CODE);
        add_event_entry(task->tgid);
}

static void add_cookie_switch(unsigned long cookie)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(COOKIE_SWITCH_CODE);
        add_event_entry(cookie);
}

static void add_trace_begin(void)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(TRACE_BEGIN_CODE);
}

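/*
 * Forward an entry carrying extra payload data from the per-CPU buffer to
 * the event buffer: resolve the sample PC to a cookie/offset pair when an
 * mm is available, then copy the remaining data words through verbatim.
 */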
static void add_data(struct op_entry *entry, struct mm_struct *mm)
{
        unsigned long code, pc, val;
        unsigned long cookie;
        off_t offset;

        if (!op_cpu_buffer_get_data(entry, &code))
                return;
        if (!op_cpu_buffer_get_data(entry, &pc))
                return;
        if (!op_cpu_buffer_get_size(entry))
                return;

        if (mm) {
                cookie = lookup_dcookie(mm, pc, &offset);

                if (cookie == NO_COOKIE)
                        offset = pc;
                if (cookie == INVALID_COOKIE) {
                        atomic_inc(&oprofile_stats.sample_lost_no_mapping);
                        offset = pc;
                }
                if (cookie != last_cookie) {
                        add_cookie_switch(cookie);
                        last_cookie = cookie;
                }
        } else
                offset = pc;

        add_event_entry(ESCAPE_CODE);
        add_event_entry(code);
        add_event_entry(offset); /* Offset from Dcookie */

        while (op_cpu_buffer_get_data(entry, &val))
                add_event_entry(val);
}

static inline void add_sample_entry(unsigned long offset, unsigned long event)
{
        add_event_entry(offset);
        add_event_entry(event);
}

/*
 * Add a sample to the global event buffer. If possible the
 * sample is converted into a persistent dentry/offset pair
 * for later lookup from userspace. Return 0 on failure.
 */
static int
add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
{
        unsigned long cookie;
        off_t offset;

        if (in_kernel) {
                add_sample_entry(s->eip, s->event);
                return 1;
        }

        /* add userspace sample */

        if (!mm) {
                atomic_inc(&oprofile_stats.sample_lost_no_mm);
                return 0;
        }

        cookie = lookup_dcookie(mm, s->eip, &offset);

        if (cookie == INVALID_COOKIE) {
                atomic_inc(&oprofile_stats.sample_lost_no_mapping);
                return 0;
        }

        if (cookie != last_cookie) {
                add_cookie_switch(cookie);
                last_cookie = cookie;
        }

        add_sample_entry(offset, s->event);

        return 1;
}

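/*
 * take_tasks_mm()/release_mm() bracket the dcookie lookups for one task:
 * the mm is pinned with get_task_mm() and mmap_sem is held for reading
 * until the matching release_mm().
 */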
static void release_mm(struct mm_struct *mm)
{
        if (!mm)
                return;
        up_read(&mm->mmap_sem);
        mmput(mm);
}

static struct mm_struct *take_tasks_mm(struct task_struct *task)
{
        struct mm_struct *mm = get_task_mm(task);
        if (mm)
                down_read(&mm->mmap_sem);
        return mm;
}

static inline int is_code(unsigned long val)
{
        return val == ESCAPE_CODE;
}

/* Move tasks along towards death. Any tasks on dead_tasks
 * will definitely have no remaining references in any
 * CPU buffers at this point, because we use two lists,
 * and to have reached the list, it must have gone through
 * one full sync already.
 */
static void process_task_mortuary(void)
{
        unsigned long flags;
        LIST_HEAD(local_dead_tasks);
        struct task_struct *task;
        struct task_struct *ttask;

        spin_lock_irqsave(&task_mortuary, flags);

        list_splice_init(&dead_tasks, &local_dead_tasks);
        list_splice_init(&dying_tasks, &dead_tasks);

        spin_unlock_irqrestore(&task_mortuary, flags);

        list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
                list_del(&task->tasks);
                free_task(task);
        }
}

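/*
 * Record that this CPU's buffer has been synced; once every online CPU has
 * been marked, one pass over the task mortuary is safe and the marks are
 * cleared for the next round.
 */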
static void mark_done(int cpu)
{
        int i;

        cpumask_set_cpu(cpu, marked_cpus);

        for_each_online_cpu(i) {
                if (!cpumask_test_cpu(i, marked_cpus))
                        return;
        }

        /* All CPUs have been processed at least once,
         * we can process the mortuary once
         */
        process_task_mortuary();

        cpumask_clear(marked_cpus);
}

/* FIXME: this is not sufficient if we implement syscall barrier backtrace
 * traversal: the code switches to sb_sample_start at the first kernel
 * enter/exit switch, so we would need a fifth state and some special
 * handling in sync_buffer().
 */
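/*
 * Samples are dropped while the state is below sb_bt_start: either no
 * context switch has been seen yet (sb_buffer_start), or the leading
 * sample of a backtrace could not be added (sb_bt_ignore).
 */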
typedef enum {
        sb_bt_ignore = -2,
        sb_buffer_start,
        sb_bt_start,
        sb_sample_start,
} sync_buffer_state;

/* Sync one of the CPU's buffers into the global event buffer.
 * Here we need to go through each batch of samples punctuated
 * by context switch notes, taking the task's mmap_sem and doing
 * lookup in task->mm->mmap to convert EIP into dcookie/offset
 * value.
 */
void sync_buffer(int cpu)
{
        struct mm_struct *mm = NULL;
        struct mm_struct *oldmm;
        unsigned long val;
        struct task_struct *new;
        unsigned long cookie = 0;
        int in_kernel = 1;
        sync_buffer_state state = sb_buffer_start;
        unsigned int i;
        unsigned long available;
        unsigned long flags;
        struct op_entry entry;
        struct op_sample *sample;

        mutex_lock(&buffer_mutex);

        add_cpu_switch(cpu);

        op_cpu_buffer_reset(cpu);
        available = op_cpu_buffer_entries(cpu);

        for (i = 0; i < available; ++i) {
                sample = op_cpu_buffer_read_entry(&entry, cpu);
                if (!sample)
                        break;

                if (is_code(sample->eip)) {
                        flags = sample->event;
                        if (flags & TRACE_BEGIN) {
                                state = sb_bt_start;
                                add_trace_begin();
                        }
                        if (flags & KERNEL_CTX_SWITCH) {
                                /* kernel/userspace switch */
                                in_kernel = flags & IS_KERNEL;
                                if (state == sb_buffer_start)
                                        state = sb_sample_start;
                                add_kernel_ctx_switch(flags & IS_KERNEL);
                        }
                        if (flags & USER_CTX_SWITCH
                            && op_cpu_buffer_get_data(&entry, &val)) {
                                /* userspace context switch */
                                new = (struct task_struct *)val;
                                oldmm = mm;
                                release_mm(oldmm);
                                mm = take_tasks_mm(new);
                                if (mm != oldmm)
                                        cookie = get_exec_dcookie(mm);
                                add_user_ctx_switch(new, cookie);
                        }
                        if (op_cpu_buffer_get_size(&entry))
                                add_data(&entry, mm);
                        continue;
                }

                if (state < sb_bt_start)
                        /* ignore sample */
                        continue;

                if (add_sample(mm, sample, in_kernel))
                        continue;

                /* ignore backtraces if failed to add a sample */
                if (state == sb_bt_start) {
                        state = sb_bt_ignore;
                        atomic_inc(&oprofile_stats.bt_lost_no_mapping);
                }
        }
        release_mm(mm);

        mark_done(cpu);

        mutex_unlock(&buffer_mutex);
}

/* This function can be used to add a buffer worth of data directly to
 * the global event buffer. The buffer is assumed to be circular: take the
 * entries from index start and end at index stop, wrapping at max.
 */
void oprofile_put_buff(unsigned long *buf, unsigned int start,
                       unsigned int stop, unsigned int max)
{
        int i;

        i = start;

        mutex_lock(&buffer_mutex);
        while (i != stop) {
                add_event_entry(buf[i++]);

                if (i >= max)
                        i = 0;
        }

        mutex_unlock(&buffer_mutex);
}