#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>
#include <trace/syscall.h>

#include <asm/ftrace.h>

#ifdef CONFIG_DYNAMIC_FTRACE
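/* 0x01000000 is "sethi %hi(0), %g0", the canonical sparc nop. */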
static const u32 ftrace_nop = 0x01000000;

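/*
 * Build the sparc "call" that a patch site at ip needs to reach addr:
 * op bits 31:30 are 01 and bits 29:0 carry the signed PC-relative
 * word displacement (disp30), i.e. the byte offset shifted down by
 * two.
 */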
static u32 ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	u32 call;
	s32 off;

	off = ((s32)addr - (s32)ip);
	call = 0x40000000 | ((u32)off >> 2);

	return call;
}

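/*
 * Atomically patch the instruction word at ip, expecting to find old
 * there: cas writes new only if the site still holds old and always
 * hands back the previous contents, and flush makes the I-cache pick
 * up the new text.  A fault on the access is caught through the
 * __ex_table fixup and reported as 1; an unexpected third value at
 * the site as 2; success as 0.
 */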
static int ftrace_modify_code(unsigned long ip, u32 old, u32 new)
{
	u32 replaced;
	int faulted;

	__asm__ __volatile__(
	"1:	cas	[%[ip]], %[old], %[new]\n"
	"	flush	%[ip]\n"
	"	mov	0, %[faulted]\n"
	"2:\n"
	"	.section .fixup,#alloc,#execinstr\n"
	"	.align	4\n"
	"3:	sethi	%%hi(2b), %[faulted]\n"
	"	jmpl	%[faulted] + %%lo(2b), %%g0\n"
	"	 mov	1, %[faulted]\n"
	"	.previous\n"
	"	.section __ex_table,\"a\"\n"
	"	.align	4\n"
	"	.word	1b, 3b\n"
	"	.previous\n"
	: "=r" (replaced), [faulted] "=r" (faulted)
	: [new] "0" (new), [old] "r" (old), [ip] "r" (ip)
	: "memory");

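	/*
	 * cas returns the previous contents of the site in replaced:
	 * matching old means the swap succeeded, matching new means
	 * the site already held the new instruction; anything else is
	 * unexpected text at the patch point.
	 */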
	if (replaced != old && replaced != new)
		faulted = 2;

	return faulted;
}

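/* Replace the call to the tracer at rec->ip with a nop. */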
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	u32 old, new;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop;
	return ftrace_modify_code(ip, old, new);
}

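/* Replace the nop at rec->ip with a call to the tracer at addr. */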
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	u32 old, new;

	old = ftrace_nop;
	new = ftrace_call_replace(ip, addr);
	return ftrace_modify_code(ip, old, new);
}

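/*
 * Redirect the patchable call at the ftrace_call site in the
 * trampoline so that it invokes the new tracer function.
 */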
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	u32 old, new;

	old = *(u32 *) &ftrace_call;
	new = ftrace_call_replace(ip, (unsigned long)func);
	return ftrace_modify_code(ip, old, new);
}

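/* Nothing arch-specific to set up at boot. */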
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

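/*
 * ftrace_graph_call labels a patch site inside the ftrace trampoline
 * that is toggled between a call to ftrace_graph_caller and a call to
 * ftrace_stub to switch the graph hook on and off.
 */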
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	u32 old, new;

	old = *(u32 *) &ftrace_graph_call;
	new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller);
	return ftrace_modify_code(ip, old, new);
}

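/* Point the patch site back at ftrace_stub, disabling the graph hook. */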
int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	u32 old, new;

	old = *(u32 *) &ftrace_graph_call;
	new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub);
	return ftrace_modify_code(ip, old, new);
}

#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it onto the return address
 * stack of the current task.
 */
unsigned long prepare_ftrace_return(unsigned long parent,
				    unsigned long self_addr,
				    unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;
	struct ftrace_graph_ent trace;

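	/*
	 * On sparc the value saved in %o7 is the address of the call
	 * instruction itself, so the real return address is parent + 8
	 * (the call plus its delay slot); bail out to there whenever
	 * the hook is declined.
	 */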
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return parent + 8UL;

	if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
				     frame_pointer) == -EBUSY)
		return parent + 8UL;

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
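		/*
		 * The entry hook declined this function; pop the
		 * entry we just pushed onto the return stack.
		 */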
		current->curr_ret_stack--;
		return parent + 8UL;
	}

	return return_hooker;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */