#include <linux/percpu.h>
#include <linux/jump_label.h>
#include <asm/trace.h>

#ifdef HAVE_JUMP_LABEL
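/*
 * With jump labels available, the enabled check in the OPAL call
 * path becomes a patched branch on this key rather than a load of
 * opal_tracepoint_refcount (see the fallback below).
 */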
struct static_key opal_tracepoint_key = STATIC_KEY_INIT;

void opal_tracepoint_regfunc(void)
{
	static_key_slow_inc(&opal_tracepoint_key);
}

void opal_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&opal_tracepoint_key);
}
#else
/*
 * We optimise OPAL calls by placing opal_tracepoint_refcount
 * directly in the TOC so we can check if the opal tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded by the tracepoints_mutex */
extern long opal_tracepoint_refcount;
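/* The refcount itself is defined in the OPAL wrapper assembly (opal-wrappers.S). */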

void opal_tracepoint_regfunc(void)
{
	opal_tracepoint_refcount++;
}

void opal_tracepoint_unregfunc(void)
{
	opal_tracepoint_refcount--;
}
#endif

/*
 * Since the tracing code might execute OPAL calls we need to guard against
 * recursion.
 */
static DEFINE_PER_CPU(unsigned int, opal_trace_depth);

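/*
 * Interrupts are disabled while the per-cpu depth is checked and
 * updated so an interrupt cannot recurse into the tracing path.
 * Preemption is deliberately left disabled across the OPAL call
 * itself; the matching preempt_enable() is in __trace_opal_exit(),
 * which keeps the entry/exit pair on the same CPU.
 */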
void __trace_opal_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	local_irq_save(flags);

	depth = &__get_cpu_var(opal_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	preempt_disable();
	trace_opal_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

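/*
 * Mirror of __trace_opal_entry(): records the return value and
 * re-enables the preemption that was taken out on entry.
 */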
void __trace_opal_exit(long opcode, unsigned long retval)
{
	unsigned long flags;
	unsigned int *depth;

	local_irq_save(flags);

	depth = &__get_cpu_var(opal_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_opal_exit(opcode, retval);
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}