- #include <linux/percpu.h>
- #include <linux/jump_label.h>
- #include <asm/trace.h>
- #include <asm/asm-prototypes.h>
-
- #ifdef HAVE_JUMP_LABEL
- struct static_key opal_tracepoint_key = STATIC_KEY_INIT;
-
- void opal_tracepoint_regfunc(void)
- {
- 	static_key_slow_inc(&opal_tracepoint_key);
- }
-
- void opal_tracepoint_unregfunc(void)
- {
- 	static_key_slow_dec(&opal_tracepoint_key);
- }
- #else
- /*
-  * We optimise OPAL calls by placing opal_tracepoint_refcount
-  * directly in the TOC so we can check if the opal tracepoints are
-  * enabled via a single load.
-  */
-
- /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
- extern long opal_tracepoint_refcount;
-
- void opal_tracepoint_regfunc(void)
- {
- 	opal_tracepoint_refcount++;
- }
-
- void opal_tracepoint_unregfunc(void)
- {
- 	opal_tracepoint_refcount--;
- }
- #endif
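
Both halves of the #ifdef above provide the same thing: a cheap per-call check that lets the OPAL call path skip the tracing code entirely while no tracepoint is registered, either via a jump label or via a single load of the TOC-resident refcount. As a rough illustration only (not part of this patch; the real check lives in the assembly OPAL call wrappers, and opal_call_raw()/opal_call_with_trace() are hypothetical helpers), the gate would look something like:

/*
 * Sketch only, not from the patch: opal_call_raw() and opal_call_with_trace()
 * are hypothetical stand-ins for the real (assembly) call wrappers.
 */
static long opal_call_gated(unsigned long opcode, unsigned long *args)
{
#ifdef HAVE_JUMP_LABEL
	/* Compiles to a patched no-op branch while the key is disabled. */
	if (static_key_false(&opal_tracepoint_key))
		return opal_call_with_trace(opcode, args);
#else
	/* One load from the TOC; non-zero only while tracepoints are active. */
	if (opal_tracepoint_refcount)
		return opal_call_with_trace(opcode, args);
#endif
	return opal_call_raw(opcode, args);
}
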
- /*
-  * Since the tracing code might execute OPAL calls we need to guard against
-  * recursion.
-  */
- static DEFINE_PER_CPU(unsigned int, opal_trace_depth);
-
- void __trace_opal_entry(unsigned long opcode, unsigned long *args)
- {
- 	unsigned long flags;
- 	unsigned int *depth;
-
- 	local_irq_save(flags);
-
- 	depth = this_cpu_ptr(&opal_trace_depth);
-
- 	if (*depth)
- 		goto out;
-
- 	(*depth)++;
- 	preempt_disable();
- 	trace_opal_entry(opcode, args);
- 	(*depth)--;
-
- out:
- 	local_irq_restore(flags);
- }
-
- void __trace_opal_exit(long opcode, unsigned long retval)
- {
- 	unsigned long flags;
- 	unsigned int *depth;
-
- 	local_irq_save(flags);
-
- 	depth = this_cpu_ptr(&opal_trace_depth);
-
- 	if (*depth)
- 		goto out;
-
- 	(*depth)++;
- 	trace_opal_exit(opcode, retval);
- 	preempt_enable();
- 	(*depth)--;
-
- out:
- 	local_irq_restore(flags);
- }
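
The entry and exit hooks are meant to bracket a single OPAL call. The per-CPU depth counter stops the trace event code from recursing if it itself ends up making an OPAL call, and preemption is disabled in __trace_opal_entry() and only re-enabled in __trace_opal_exit(), presumably so that the call and both trace events stay on the same CPU and the per-CPU depth stays coherent. A hedged sketch of that pairing follows; opal_call_real() is a hypothetical placeholder, and in the kernel the equivalent sequence is emitted by the assembly wrappers rather than written in C:

/*
 * Sketch only, not from the patch: shows the intended entry/exit pairing
 * around an OPAL call. opal_call_real() is a hypothetical placeholder.
 */
static long opal_call_traced(unsigned long opcode, unsigned long *args)
{
	long rc;

	__trace_opal_entry(opcode, args);	/* takes preempt_disable() unless recursing */
	rc = opal_call_real(opcode, args);
	__trace_opal_exit(opcode, rc);		/* drops it again */

	return rc;
}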