- /**
- * @file buffer_sync.c
- *
- * @remark Copyright 2002-2009 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- * @author Barry Kasindorf
- * @author Robert Richter <robert.richter@amd.com>
- *
- * This is the core of the buffer management. Each
- * CPU buffer is processed and entered into the
- * global event buffer. Such processing is necessary
- * in several circumstances, mentioned below.
- *
- * The processing does the job of converting the
- * transitory EIP value into a persistent dentry/offset
- * value that the profiler can record at its leisure.
- *
- * See fs/dcookies.c for a description of the dentry/offset
- * objects.
- */
- #include <linux/mm.h>
- #include <linux/workqueue.h>
- #include <linux/notifier.h>
- #include <linux/dcookies.h>
- #include <linux/profile.h>
- #include <linux/module.h>
- #include <linux/fs.h>
- #include <linux/oprofile.h>
- #include <linux/sched.h>
- #include <linux/gfp.h>
- #include "oprofile_stats.h"
- #include "event_buffer.h"
- #include "cpu_buffer.h"
- #include "buffer_sync.h"
- static LIST_HEAD(dying_tasks);
- static LIST_HEAD(dead_tasks);
- static cpumask_var_t marked_cpus;
- static DEFINE_SPINLOCK(task_mortuary);
- static void process_task_mortuary(void);
- /* Take ownership of the task struct and place it on the
- * list for processing. Only after two full buffer syncs
- * does the task eventually get freed, because by then
- * we are sure we will not reference it again.
- * Can be invoked from softirq via RCU callback due to
- * call_rcu() of the task struct, hence the _irqsave.
- */
- static int
- task_free_notify(struct notifier_block *self, unsigned long val, void *data)
- {
- unsigned long flags;
- struct task_struct *task = data;
- spin_lock_irqsave(&task_mortuary, flags);
- list_add(&task->tasks, &dying_tasks);
- spin_unlock_irqrestore(&task_mortuary, flags);
- return NOTIFY_OK;
- }
- /* The task is on its way out. A sync of the buffer means we can catch
- * any remaining samples for this task.
- */
- static int
- task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
- {
- /* To avoid latency problems, we only process the current CPU,
- * hoping that most samples for the task are on this CPU
- */
- sync_buffer(raw_smp_processor_id());
- return 0;
- }
- /* The task is about to try a do_munmap(). We peek at what it's going to
- * do, and if it's an executable region, process the samples first, so
- * we don't lose any. This does not have to be exact; it is only a
- * quality-of-implementation (QoI) issue.
- */
- static int
- munmap_notify(struct notifier_block *self, unsigned long val, void *data)
- {
- unsigned long addr = (unsigned long)data;
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *mpnt;
- down_read(&mm->mmap_sem);
- mpnt = find_vma(mm, addr);
- if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
- up_read(&mm->mmap_sem);
- /* To avoid latency problems, we only process the current CPU,
- * hoping that most samples for the task are on this CPU
- */
- sync_buffer(raw_smp_processor_id());
- return 0;
- }
- up_read(&mm->mmap_sem);
- return 0;
- }
- /* We need to be told about new modules so we don't attribute to a previously
- * loaded module, or drop the samples on the floor.
- */
- static int
- module_load_notify(struct notifier_block *self, unsigned long val, void *data)
- {
- #ifdef CONFIG_MODULES
- if (val != MODULE_STATE_COMING)
- return 0;
- /* FIXME: should we process all CPU buffers ? */
- mutex_lock(&buffer_mutex);
- add_event_entry(ESCAPE_CODE);
- add_event_entry(MODULE_LOADED_CODE);
- mutex_unlock(&buffer_mutex);
- #endif
- return 0;
- }
- static struct notifier_block task_free_nb = {
- .notifier_call = task_free_notify,
- };
- static struct notifier_block task_exit_nb = {
- .notifier_call = task_exit_notify,
- };
- static struct notifier_block munmap_nb = {
- .notifier_call = munmap_notify,
- };
- static struct notifier_block module_load_nb = {
- .notifier_call = module_load_notify,
- };
- static void free_all_tasks(void)
- {
- /* make sure we don't leak task structs */
- process_task_mortuary();
- process_task_mortuary();
- }
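- /* Register all the notifiers and start the per-CPU work needed to keep
- * the global event buffer in sync with the CPU buffers. On failure, the
- * handlers registered so far are unregistered again in reverse order.
- */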
- int sync_start(void)
- {
- int err;
- if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
- return -ENOMEM;
- err = task_handoff_register(&task_free_nb);
- if (err)
- goto out1;
- err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
- if (err)
- goto out2;
- err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
- if (err)
- goto out3;
- err = register_module_notifier(&module_load_nb);
- if (err)
- goto out4;
- start_cpu_work();
- out:
- return err;
- out4:
- profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
- out3:
- profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
- out2:
- task_handoff_unregister(&task_free_nb);
- free_all_tasks();
- out1:
- free_cpumask_var(marked_cpus);
- goto out;
- }
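- /* Tear down what sync_start() set up: unregister the notifiers, flush
- * any remaining per-CPU work, empty the task mortuary and release the
- * cpumask.
- */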
- void sync_stop(void)
- {
- end_cpu_work();
- unregister_module_notifier(&module_load_nb);
- profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
- profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
- task_handoff_unregister(&task_free_nb);
- barrier(); /* do all of the above first */
- flush_cpu_work();
- free_all_tasks();
- free_cpumask_var(marked_cpus);
- }
- /* Optimisation. We can manage without taking the dcookie sem
- * because we cannot reach this code without at least one
- * dcookie user still being registered (namely, the reader
- * of the event buffer). */
- static inline unsigned long fast_get_dcookie(struct path *path)
- {
- unsigned long cookie;
- if (path->dentry->d_flags & DCACHE_COOKIE)
- return (unsigned long)path->dentry;
- get_dcookie(path, &cookie);
- return cookie;
- }
- /* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
- * which corresponds loosely to "application name". This is
- * not strictly necessary but allows oprofile to associate
- * shared-library samples with particular applications.
- */
- static unsigned long get_exec_dcookie(struct mm_struct *mm)
- {
- unsigned long cookie = NO_COOKIE;
- struct vm_area_struct *vma;
- if (!mm)
- goto out;
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- if (!vma->vm_file)
- continue;
- if (!(vma->vm_flags & VM_EXECUTABLE))
- continue;
- cookie = fast_get_dcookie(&vma->vm_file->f_path);
- break;
- }
- out:
- return cookie;
- }
- /* Convert the EIP value of a sample into a persistent dentry/offset
- * pair that can then be added to the global event buffer. We make
- * sure to do this lookup before a mm->mmap modification happens so
- * we don't lose track.
- */
- static unsigned long
- lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
- {
- unsigned long cookie = NO_COOKIE;
- struct vm_area_struct *vma;
- for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
- if (addr < vma->vm_start || addr >= vma->vm_end)
- continue;
- if (vma->vm_file) {
- cookie = fast_get_dcookie(&vma->vm_file->f_path);
- *offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
- vma->vm_start;
- } else {
- /* must be an anonymous map */
- *offset = addr;
- }
- break;
- }
- if (!vma)
- cookie = INVALID_COOKIE;
- return cookie;
- }
- static unsigned long last_cookie = INVALID_COOKIE;
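- /* Emit a CPU switch record and invalidate the cached cookie, since the
- * cookie stream restarts for each CPU's buffer.
- */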
- static void add_cpu_switch(int i)
- {
- add_event_entry(ESCAPE_CODE);
- add_event_entry(CPU_SWITCH_CODE);
- add_event_entry(i);
- last_cookie = INVALID_COOKIE;
- }
- static void add_kernel_ctx_switch(unsigned int in_kernel)
- {
- add_event_entry(ESCAPE_CODE);
- if (in_kernel)
- add_event_entry(KERNEL_ENTER_SWITCH_CODE);
- else
- add_event_entry(KERNEL_EXIT_SWITCH_CODE);
- }
- static void
- add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
- {
- add_event_entry(ESCAPE_CODE);
- add_event_entry(CTX_SWITCH_CODE);
- add_event_entry(task->pid);
- add_event_entry(cookie);
- /* Another code for daemon back-compat */
- add_event_entry(ESCAPE_CODE);
- add_event_entry(CTX_TGID_CODE);
- add_event_entry(task->tgid);
- }
- static void add_cookie_switch(unsigned long cookie)
- {
- add_event_entry(ESCAPE_CODE);
- add_event_entry(COOKIE_SWITCH_CODE);
- add_event_entry(cookie);
- }
- static void add_trace_begin(void)
- {
- add_event_entry(ESCAPE_CODE);
- add_event_entry(TRACE_BEGIN_CODE);
- }
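- /* Copy an extended data record from the CPU buffer into the event
- * buffer. The record carries its own code and PC; the PC is converted
- * into a dcookie/offset pair when an mm is available, otherwise it is
- * passed through unchanged.
- */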
- static void add_data(struct op_entry *entry, struct mm_struct *mm)
- {
- unsigned long code, pc, val;
- unsigned long cookie;
- off_t offset;
- if (!op_cpu_buffer_get_data(entry, &code))
- return;
- if (!op_cpu_buffer_get_data(entry, &pc))
- return;
- if (!op_cpu_buffer_get_size(entry))
- return;
- if (mm) {
- cookie = lookup_dcookie(mm, pc, &offset);
- if (cookie == NO_COOKIE)
- offset = pc;
- if (cookie == INVALID_COOKIE) {
- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
- offset = pc;
- }
- if (cookie != last_cookie) {
- add_cookie_switch(cookie);
- last_cookie = cookie;
- }
- } else
- offset = pc;
- add_event_entry(ESCAPE_CODE);
- add_event_entry(code);
- add_event_entry(offset); /* Offset from Dcookie */
- while (op_cpu_buffer_get_data(entry, &val))
- add_event_entry(val);
- }
- static inline void add_sample_entry(unsigned long offset, unsigned long event)
- {
- add_event_entry(offset);
- add_event_entry(event);
- }
- /*
- * Add a sample to the global event buffer. If possible the
- * sample is converted into a persistent dentry/offset pair
- * for later lookup from userspace. Return 0 on failure.
- */
- static int
- add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
- {
- unsigned long cookie;
- off_t offset;
- if (in_kernel) {
- add_sample_entry(s->eip, s->event);
- return 1;
- }
- /* add userspace sample */
- if (!mm) {
- atomic_inc(&oprofile_stats.sample_lost_no_mm);
- return 0;
- }
- cookie = lookup_dcookie(mm, s->eip, &offset);
- if (cookie == INVALID_COOKIE) {
- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
- return 0;
- }
- if (cookie != last_cookie) {
- add_cookie_switch(cookie);
- last_cookie = cookie;
- }
- add_sample_entry(offset, s->event);
- return 1;
- }
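- /* Drop the mmap_sem and the mm reference taken in take_tasks_mm(). */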
- static void release_mm(struct mm_struct *mm)
- {
- if (!mm)
- return;
- up_read(&mm->mmap_sem);
- mmput(mm);
- }
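- /* Grab a reference to the task's mm and take its mmap_sem for reading. */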
- static struct mm_struct *take_tasks_mm(struct task_struct *task)
- {
- struct mm_struct *mm = get_task_mm(task);
- if (mm)
- down_read(&mm->mmap_sem);
- return mm;
- }
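- /* An escape code in the eip field marks a non-sample entry. */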
- static inline int is_code(unsigned long val)
- {
- return val == ESCAPE_CODE;
- }
- /* Move tasks along towards death. Any tasks on dead_tasks
- * will definitely have no remaining references in any
- * CPU buffers at this point, because we use two lists,
- * and a task must already have survived one full sync
- * to reach the dead_tasks list.
- */
- static void process_task_mortuary(void)
- {
- unsigned long flags;
- LIST_HEAD(local_dead_tasks);
- struct task_struct *task;
- struct task_struct *ttask;
- spin_lock_irqsave(&task_mortuary, flags);
- list_splice_init(&dead_tasks, &local_dead_tasks);
- list_splice_init(&dying_tasks, &dead_tasks);
- spin_unlock_irqrestore(&task_mortuary, flags);
- list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
- list_del(&task->tasks);
- free_task(task);
- }
- }
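- /* Mark this CPU's buffer as having been synced in the current round. */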
- static void mark_done(int cpu)
- {
- int i;
- cpumask_set_cpu(cpu, marked_cpus);
- for_each_online_cpu(i) {
- if (!cpumask_test_cpu(i, marked_cpus))
- return;
- }
- /* All CPUs have been processed at least once,
- * so we can process the mortuary once
- */
- process_task_mortuary();
- cpumask_clear(marked_cpus);
- }
- /* FIXME: this is not sufficient if we implement syscall barrier backtrace
- * traversal; the state would switch to sb_sample_start at the first kernel
- * enter/exit switch, so we would need a fifth state and some special
- * handling in sync_buffer().
- */
- typedef enum {
- sb_bt_ignore = -2,
- sb_buffer_start,
- sb_bt_start,
- sb_sample_start,
- } sync_buffer_state;
- /* Sync one of the CPU's buffers into the global event buffer.
- * Here we need to go through each batch of samples punctuated
- * by context switch notes, taking the task's mmap_sem and doing
- * a lookup in task->mm->mmap to convert each EIP into a
- * dcookie/offset pair.
- */
- void sync_buffer(int cpu)
- {
- struct mm_struct *mm = NULL;
- struct mm_struct *oldmm;
- unsigned long val;
- struct task_struct *new;
- unsigned long cookie = 0;
- int in_kernel = 1;
- sync_buffer_state state = sb_buffer_start;
- unsigned int i;
- unsigned long available;
- unsigned long flags;
- struct op_entry entry;
- struct op_sample *sample;
- mutex_lock(&buffer_mutex);
- add_cpu_switch(cpu);
- op_cpu_buffer_reset(cpu);
- available = op_cpu_buffer_entries(cpu);
- for (i = 0; i < available; ++i) {
- sample = op_cpu_buffer_read_entry(&entry, cpu);
- if (!sample)
- break;
- if (is_code(sample->eip)) {
- flags = sample->event;
- if (flags & TRACE_BEGIN) {
- state = sb_bt_start;
- add_trace_begin();
- }
- if (flags & KERNEL_CTX_SWITCH) {
- /* kernel/userspace switch */
- in_kernel = flags & IS_KERNEL;
- if (state == sb_buffer_start)
- state = sb_sample_start;
- add_kernel_ctx_switch(flags & IS_KERNEL);
- }
- if (flags & USER_CTX_SWITCH
- && op_cpu_buffer_get_data(&entry, &val)) {
- /* userspace context switch */
- new = (struct task_struct *)val;
- oldmm = mm;
- release_mm(oldmm);
- mm = take_tasks_mm(new);
- if (mm != oldmm)
- cookie = get_exec_dcookie(mm);
- add_user_ctx_switch(new, cookie);
- }
- if (op_cpu_buffer_get_size(&entry))
- add_data(&entry, mm);
- continue;
- }
- if (state < sb_bt_start)
- /* ignore sample */
- continue;
- if (add_sample(mm, sample, in_kernel))
- continue;
- /* ignore backtraces if failed to add a sample */
- if (state == sb_bt_start) {
- state = sb_bt_ignore;
- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
- }
- }
- release_mm(mm);
- mark_done(cpu);
- mutex_unlock(&buffer_mutex);
- }
- /* This function can be used to add a buffer worth of data directly to
- * the event buffer. The buffer is assumed to be circular: entries are
- * taken from index start up to (but not including) index stop, wrapping
- * at index max.
- */
- void oprofile_put_buff(unsigned long *buf, unsigned int start,
- unsigned int stop, unsigned int max)
- {
- int i;
- i = start;
- mutex_lock(&buffer_mutex);
- while (i != stop) {
- add_event_entry(buf[i++]);
- if (i >= max)
- i = 0;
- }
- mutex_unlock(&buffer_mutex);
- }