buffer_sync.c

/**
 * @file buffer_sync.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf
 *
 * This is the core of the buffer management. Each
 * CPU buffer is processed and entered into the
 * global event buffer. Such processing is necessary
 * in several circumstances, mentioned below.
 *
 * The processing does the job of converting the
 * transitory EIP value into a persistent dentry/offset
 * value that the profiler can record at its leisure.
 *
 * See fs/dcookies.c for a description of the dentry/offset
 * objects.
 */

#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/dcookies.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/oprofile.h>
#include <linux/sched.h>

#include "oprofile_stats.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"

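/*
 * Tasks handed over by the task-free notifier sit on dying_tasks; after
 * one full sync they move to dead_tasks, and after a second they are
 * freed (see process_task_mortuary() below). marked_cpus tracks which
 * CPUs have been synced since the mortuary was last advanced.
 */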
static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
static cpumask_t marked_cpus = CPU_MASK_NONE;
static DEFINE_SPINLOCK(task_mortuary);
static void process_task_mortuary(void);


/* Take ownership of the task struct and place it on the
 * list for processing. Only after two full buffer syncs
 * does the task eventually get freed, because by then
 * we are sure we will not reference it again.
 * Can be invoked from softirq via RCU callback due to
 * call_rcu() of the task struct, hence the _irqsave.
 */
static int
task_free_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long flags;
	struct task_struct *task = data;

	spin_lock_irqsave(&task_mortuary, flags);
	list_add(&task->tasks, &dying_tasks);
	spin_unlock_irqrestore(&task_mortuary, flags);

	return NOTIFY_OK;
}


/* The task is on its way out. A sync of the buffer means we can catch
 * any remaining samples for this task.
 */
static int
task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
{
	/* To avoid latency problems, we only process the current CPU,
	 * hoping that most samples for the task are on this CPU
	 */
	sync_buffer(raw_smp_processor_id());
	return 0;
}


/* The task is about to try a do_munmap(). We peek at what it's going to
 * do, and if it's an executable region, process the samples first, so
 * we don't lose any. This does not have to be exact, it's a QoI issue
 * only.
 */
static int
munmap_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long addr = (unsigned long)data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *mpnt;

	down_read(&mm->mmap_sem);

	mpnt = find_vma(mm, addr);
	if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
		up_read(&mm->mmap_sem);
		/* To avoid latency problems, we only process the current CPU,
		 * hoping that most samples for the task are on this CPU
		 */
		sync_buffer(raw_smp_processor_id());
		return 0;
	}

	up_read(&mm->mmap_sem);
	return 0;
}


/* We need to be told about new modules so we don't attribute to a previously
 * loaded module, or drop the samples on the floor.
 */
static int
module_load_notify(struct notifier_block *self, unsigned long val, void *data)
{
#ifdef CONFIG_MODULES
	if (val != MODULE_STATE_COMING)
		return 0;

	/* FIXME: should we process all CPU buffers ? */
	mutex_lock(&buffer_mutex);
	add_event_entry(ESCAPE_CODE);
	add_event_entry(MODULE_LOADED_CODE);
	mutex_unlock(&buffer_mutex);
#endif
	return 0;
}

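/* Notifier blocks hooking the callbacks above into the task-handoff,
 * task-exit, munmap and module-load notification chains.
 */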
static struct notifier_block task_free_nb = {
	.notifier_call = task_free_notify,
};

static struct notifier_block task_exit_nb = {
	.notifier_call = task_exit_notify,
};

static struct notifier_block munmap_nb = {
	.notifier_call = munmap_notify,
};

static struct notifier_block module_load_nb = {
	.notifier_call = module_load_notify,
};

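/* Flush the per-CPU buffer work and advance the task mortuary twice so
 * that every task currently on either list is freed and nothing leaks.
 */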
static void end_sync(void)
{
	end_cpu_work();
	/* make sure we don't leak task structs */
	process_task_mortuary();
	process_task_mortuary();
}

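/* Start the per-CPU buffer work and register all notifiers. On failure,
 * the registrations made so far are unwound in reverse order and the
 * error is returned.
 */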
int sync_start(void)
{
	int err;

	start_cpu_work();

	err = task_handoff_register(&task_free_nb);
	if (err)
		goto out1;
	err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
	if (err)
		goto out2;
	err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
	if (err)
		goto out3;
	err = register_module_notifier(&module_load_nb);
	if (err)
		goto out4;

out:
	return err;
out4:
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
out3:
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
	task_handoff_unregister(&task_free_nb);
out1:
	end_sync();
	goto out;
}

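/* Tear down in the reverse order of sync_start(): unregister the
 * notifiers, then flush the CPU buffers and drain the task mortuary.
 */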
void sync_stop(void)
{
	unregister_module_notifier(&module_load_nb);
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
	task_handoff_unregister(&task_free_nb);
	end_sync();
}


/* Optimisation. We can manage without taking the dcookie sem
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer). */
static inline unsigned long fast_get_dcookie(struct path *path)
{
	unsigned long cookie;

	if (path->dentry->d_cookie)
		return (unsigned long)path->dentry;
	get_dcookie(path, &cookie);
	return cookie;
}


/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
 * which corresponds loosely to "application name". This is
 * not strictly necessary but allows oprofile to associate
 * shared-library samples with particular applications
 */
static unsigned long get_exec_dcookie(struct mm_struct *mm)
{
	unsigned long cookie = NO_COOKIE;
	struct vm_area_struct *vma;

	if (!mm)
		goto out;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!vma->vm_file)
			continue;
		if (!(vma->vm_flags & VM_EXECUTABLE))
			continue;
		cookie = fast_get_dcookie(&vma->vm_file->f_path);
		break;
	}

out:
	return cookie;
}


/* Convert the EIP value of a sample into a persistent dentry/offset
 * pair that can then be added to the global event buffer. We make
 * sure to do this lookup before a mm->mmap modification happens so
 * we don't lose track.
 */
static unsigned long
lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
{
	unsigned long cookie = NO_COOKIE;
	struct vm_area_struct *vma;

	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {

		if (addr < vma->vm_start || addr >= vma->vm_end)
			continue;

		if (vma->vm_file) {
			cookie = fast_get_dcookie(&vma->vm_file->f_path);
			*offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
				vma->vm_start;
		} else {
			/* must be an anonymous map */
			*offset = addr;
		}

		break;
	}

	if (!vma)
		cookie = INVALID_COOKIE;

	return cookie;
}

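/* Advance the CPU buffer's tail pointer by one slot, wrapping around at
 * the end of the ring. Only the sync code ever modifies tail_pos (see
 * the note in sync_buffer()), so no locking is needed here.
 */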
static void increment_tail(struct oprofile_cpu_buffer *b)
{
	unsigned long new_tail = b->tail_pos + 1;

	rmb();	/* be sure fifo pointers are synchronized */

	if (new_tail < b->buffer_size)
		b->tail_pos = new_tail;
	else
		b->tail_pos = 0;
}

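/* The event buffer is a flat stream of words. Plain samples are emitted
 * as (offset, event) pairs; everything else is prefixed with ESCAPE_CODE
 * followed by a *_CODE identifier and its payload, as the helpers below
 * show. last_cookie caches the most recently emitted cookie so a
 * COOKIE_SWITCH record is only written when it changes.
 */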
static unsigned long last_cookie = INVALID_COOKIE;

static void add_cpu_switch(int i)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CPU_SWITCH_CODE);
	add_event_entry(i);
	last_cookie = INVALID_COOKIE;
}

static void add_kernel_ctx_switch(unsigned int in_kernel)
{
	add_event_entry(ESCAPE_CODE);
	if (in_kernel)
		add_event_entry(KERNEL_ENTER_SWITCH_CODE);
	else
		add_event_entry(KERNEL_EXIT_SWITCH_CODE);
}

static void
add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_SWITCH_CODE);
	add_event_entry(task->pid);
	add_event_entry(cookie);
	/* Another code for daemon back-compat */
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_TGID_CODE);
	add_event_entry(task->tgid);
}


static void add_cookie_switch(unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(COOKIE_SWITCH_CODE);
	add_event_entry(cookie);
}


static void add_trace_begin(void)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(TRACE_BEGIN_CODE);
}

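/* AMD Instruction-Based Sampling (IBS) support. One IBS sample occupies
 * several consecutive CPU-buffer slots: a RIP entry followed by
 * IBS_FETCH_CODE_SIZE or IBS_OP_CODE_SIZE data entries, which
 * add_ibs_begin() walks with increment_tail() and copies into the event
 * buffer.
 */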
#ifdef CONFIG_OPROFILE_IBS

#define IBS_FETCH_CODE_SIZE	2
#define IBS_OP_CODE_SIZE	5
#define IBS_EIP(offset)				\
	(((struct op_sample *)&cpu_buf->buffer[(offset)])->eip)
#define IBS_EVENT(offset)			\
	(((struct op_sample *)&cpu_buf->buffer[(offset)])->event)

/*
 * Add IBS fetch and op entries to event buffer
 */
static void add_ibs_begin(struct oprofile_cpu_buffer *cpu_buf, int code,
			  int in_kernel, struct mm_struct *mm)
{
	unsigned long rip;
	int i, count;
	unsigned long ibs_cookie = 0;
	off_t offset;

	increment_tail(cpu_buf);	/* move to RIP entry */

	rip = IBS_EIP(cpu_buf->tail_pos);

#ifdef __LP64__
	rip += IBS_EVENT(cpu_buf->tail_pos) << 32;
#endif

	if (mm) {
		ibs_cookie = lookup_dcookie(mm, rip, &offset);

		if (ibs_cookie == NO_COOKIE)
			offset = rip;
		if (ibs_cookie == INVALID_COOKIE) {
			atomic_inc(&oprofile_stats.sample_lost_no_mapping);
			offset = rip;
		}
		if (ibs_cookie != last_cookie) {
			add_cookie_switch(ibs_cookie);
			last_cookie = ibs_cookie;
		}
	} else
		offset = rip;

	add_event_entry(ESCAPE_CODE);
	add_event_entry(code);
	add_event_entry(offset);	/* offset from dcookie */

	/* we send the dcookie offset, but also send the raw linear address */
	add_event_entry(IBS_EIP(cpu_buf->tail_pos));
	add_event_entry(IBS_EVENT(cpu_buf->tail_pos));

	if (code == IBS_FETCH_CODE)
		count = IBS_FETCH_CODE_SIZE;	/* IBS fetch is 2 int64s */
	else
		count = IBS_OP_CODE_SIZE;	/* IBS op is 5 int64s */

	for (i = 0; i < count; i++) {
		increment_tail(cpu_buf);
		add_event_entry(IBS_EIP(cpu_buf->tail_pos));
		add_event_entry(IBS_EVENT(cpu_buf->tail_pos));
	}
}

#endif

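/* Emit a plain sample as an (offset, event) word pair. */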
static void add_sample_entry(unsigned long offset, unsigned long event)
{
	add_event_entry(offset);
	add_event_entry(event);
}

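/* Convert a user-space sample's EIP into a cookie/offset pair and emit
 * it, writing a cookie-switch record first if the cookie has changed.
 * Returns 0 if no mapping could be found for the address.
 */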
static int add_us_sample(struct mm_struct *mm, struct op_sample *s)
{
	unsigned long cookie;
	off_t offset;

	cookie = lookup_dcookie(mm, s->eip, &offset);

	if (cookie == INVALID_COOKIE) {
		atomic_inc(&oprofile_stats.sample_lost_no_mapping);
		return 0;
	}

	if (cookie != last_cookie) {
		add_cookie_switch(cookie);
		last_cookie = cookie;
	}

	add_sample_entry(offset, s->event);

	return 1;
}


/* Add a sample to the global event buffer. If possible the
 * sample is converted into a persistent dentry/offset pair
 * for later lookup from userspace.
 */
static int
add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
{
	if (in_kernel) {
		add_sample_entry(s->eip, s->event);
		return 1;
	} else if (mm) {
		return add_us_sample(mm, s);
	} else {
		atomic_inc(&oprofile_stats.sample_lost_no_mm);
	}
	return 0;
}

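/* take_tasks_mm() pins a task's mm and takes mmap_sem for reading so the
 * VMA list stays stable while its samples are converted; release_mm()
 * undoes both. Both tolerate a NULL mm (e.g. a kernel thread).
 */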
static void release_mm(struct mm_struct *mm)
{
	if (!mm)
		return;
	up_read(&mm->mmap_sem);
	mmput(mm);
}


static struct mm_struct *take_tasks_mm(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);
	if (mm)
		down_read(&mm->mmap_sem);
	return mm;
}

static inline int is_code(unsigned long val)
{
	return val == ESCAPE_CODE;
}


/* "acquire" as many cpu buffer slots as we can */
static unsigned long get_slots(struct oprofile_cpu_buffer *b)
{
	unsigned long head = b->head_pos;
	unsigned long tail = b->tail_pos;

	/*
	 * Subtle. This resets the persistent last_task
	 * and in_kernel values used for switching notes.
	 * BUT, there is a small window between reading
	 * head_pos, and this call, that means samples
	 * can appear at the new head position, but not
	 * be prefixed with the notes for switching
	 * kernel mode or a task switch. This small hole
	 * can lead to mis-attribution or samples where
	 * we don't know if it's in the kernel or not,
	 * at the start of an event buffer.
	 */
	cpu_buffer_reset(b);

	if (head >= tail)
		return head - tail;

	return head + (b->buffer_size - tail);
}

/* Move tasks along towards death. Any tasks on dead_tasks
 * will definitely have no remaining references in any
 * CPU buffers at this point, because we use two lists,
 * and to have reached the list, it must have gone through
 * one full sync already.
 */
static void process_task_mortuary(void)
{
	unsigned long flags;
	LIST_HEAD(local_dead_tasks);
	struct task_struct *task;
	struct task_struct *ttask;

	spin_lock_irqsave(&task_mortuary, flags);

	list_splice_init(&dead_tasks, &local_dead_tasks);
	list_splice_init(&dying_tasks, &dead_tasks);

	spin_unlock_irqrestore(&task_mortuary, flags);

	list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
		list_del(&task->tasks);
		free_task(task);
	}
}

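/* Record that this CPU's buffer has been synced. Once every online CPU
 * has been synced at least once, no CPU buffer can still reference a
 * task that was already dying, so the mortuary is advanced and the marks
 * are cleared for the next round.
 */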
static void mark_done(int cpu)
{
	int i;

	cpu_set(cpu, marked_cpus);

	for_each_online_cpu(i) {
		if (!cpu_isset(i, marked_cpus))
			return;
	}

	/* All CPUs have been processed at least once,
	 * we can process the mortuary once
	 */
	process_task_mortuary();

	cpus_clear(marked_cpus);
}


/* FIXME: this is not sufficient if we implement syscall barrier backtrace
 * traversal, the code switch to sb_sample_start at first kernel enter/exit
 * switch so we need a fifth state and some special handling in sync_buffer()
 */
typedef enum {
	sb_bt_ignore = -2,
	sb_buffer_start,
	sb_bt_start,
	sb_sample_start,
} sync_buffer_state;


/* Sync one of the CPU's buffers into the global event buffer.
 * Here we need to go through each batch of samples punctuated
 * by context switch notes, taking the task's mmap_sem and doing
 * lookup in task->mm->mmap to convert EIP into dcookie/offset
 * value.
 */
void sync_buffer(int cpu)
{
	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
	struct mm_struct *mm = NULL;
	struct task_struct *new;
	unsigned long cookie = 0;
	int in_kernel = 1;
	unsigned int i;
	sync_buffer_state state = sb_buffer_start;
	unsigned long available;

	mutex_lock(&buffer_mutex);

	add_cpu_switch(cpu);

	/* Remember, only we can modify tail_pos */

	available = get_slots(cpu_buf);

	for (i = 0; i < available; ++i) {
		struct op_sample *s = &cpu_buf->buffer[cpu_buf->tail_pos];

		if (is_code(s->eip)) {
			if (s->event <= CPU_IS_KERNEL) {
				/* kernel/userspace switch */
				in_kernel = s->event;
				if (state == sb_buffer_start)
					state = sb_sample_start;
				add_kernel_ctx_switch(s->event);
			} else if (s->event == CPU_TRACE_BEGIN) {
				state = sb_bt_start;
				add_trace_begin();
#ifdef CONFIG_OPROFILE_IBS
			} else if (s->event == IBS_FETCH_BEGIN) {
				state = sb_bt_start;
				add_ibs_begin(cpu_buf,
					      IBS_FETCH_CODE, in_kernel, mm);
			} else if (s->event == IBS_OP_BEGIN) {
				state = sb_bt_start;
				add_ibs_begin(cpu_buf,
					      IBS_OP_CODE, in_kernel, mm);
#endif
			} else {
				struct mm_struct *oldmm = mm;

				/* userspace context switch */
				new = (struct task_struct *)s->event;

				release_mm(oldmm);
				mm = take_tasks_mm(new);
				if (mm != oldmm)
					cookie = get_exec_dcookie(mm);
				add_user_ctx_switch(new, cookie);
			}
		} else if (state >= sb_bt_start &&
			   !add_sample(mm, s, in_kernel)) {
			if (state == sb_bt_start) {
				state = sb_bt_ignore;
				atomic_inc(&oprofile_stats.bt_lost_no_mapping);
			}
		}

		increment_tail(cpu_buf);
	}
	release_mm(mm);

	mark_done(cpu);

	mutex_unlock(&buffer_mutex);
}