1/*
2 * ftrace graph code
3 *
4 * Copyright (C) 2009-2010 Analog Devices Inc.
5 * Licensed under the GPL-2 or later.
6 */
7
8#include <linux/ftrace.h>
9#include <linux/kernel.h>
10#include <linux/sched.h>
11#include <linux/uaccess.h>
12#include <linux/atomic.h>
13#include <asm/cacheflush.h>
14
15#ifdef CONFIG_DYNAMIC_FTRACE
16
/*
 * Two 32-bit MNOP instructions.  Writing both over a patched-in mcount
 * call site (see ftrace_make_call: 8 bytes total) turns it into dead,
 * side-effect-free code.  The byte values are the Blackfin MNOP opcode
 * encoding — do not change them.
 */
static const unsigned char mnop[] = {
	0x03, 0xc0, 0x00, 0x18, /* MNOP; */
	0x03, 0xc0, 0x00, 0x18, /* MNOP; */
};
21
/*
 * Emit a 4-byte Blackfin "CALL pcrel24" instruction into @insn.
 *
 * @insn: destination buffer, at least 4 bytes
 * @src:  address the instruction will live at
 * @dst:  call target address
 *
 * The 24-bit offset is the signed distance from @src to @dst in
 * half-words (hence the >> 1).  Byte layout follows the Blackfin
 * little-endian instruction encoding: the 0xe3 opcode byte sits in
 * insn[1], with the offset split across insn[0] (high), insn[3]
 * (middle) and insn[2] (low).
 */
static void bfin_make_pcrel24(unsigned char *insn, unsigned long src,
                              unsigned long dst)
{
	uint32_t off = (uint32_t)((dst - src) >> 1);

	insn[1] = 0xe3;                       /* CALL opcode byte */
	insn[0] = (unsigned char)(off >> 16); /* offset bits 23..16 */
	insn[3] = (unsigned char)(off >> 8);  /* offset bits 15..8 */
	insn[2] = (unsigned char)off;         /* offset bits 7..0 */
}
/* Let callers pass function pointers as @dst without casting themselves. */
#define bfin_make_pcrel24(insn, src, dst) bfin_make_pcrel24(insn, src, (unsigned long)(dst))
32
/*
 * Copy @len bytes of @code over the kernel text at @ip, then flush the
 * instruction cache for that range so the CPU sees the new code.
 *
 * Returns the result of probe_kernel_write() (0 on success, -EFAULT on
 * a faulting access).  The icache is flushed unconditionally, which is
 * harmless when the write failed.
 */
static int ftrace_modify_code(unsigned long ip, const unsigned char *code,
                              unsigned long len)
{
	int err;

	err = probe_kernel_write((void *)ip, (void *)code, len);
	flush_icache_range(ip, ip + len);

	return err;
}
40
/*
 * Disable tracing at one call site: overwrite the 8-byte mcount call
 * sequence at rec->ip with two 32-bit MNOPs.  @mod and @addr are part
 * of the arch-independent ftrace interface and are unused here.
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                    unsigned long addr)
{
	/* Turn the mcount call site into two MNOPs as those are 32bit insns */
	return ftrace_modify_code(rec->ip, mnop, sizeof(mnop));
}
47
48int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
49{
50	/* Restore the mcount call site */
51	unsigned char call[8];
52	call[0] = 0x67; /* [--SP] = RETS; */
53	call[1] = 0x01;
54	bfin_make_pcrel24(&call[2], rec->ip + 2, addr);
55	call[6] = 0x27; /* RETS = [SP++]; */
56	call[7] = 0x01;
57	return ftrace_modify_code(rec->ip, call, sizeof(call));
58}
59
60int ftrace_update_ftrace_func(ftrace_func_t func)
61{
62	unsigned char call[4];
63	unsigned long ip = (unsigned long)&ftrace_call;
64	bfin_make_pcrel24(call, ip, func);
65	return ftrace_modify_code(ip, call, sizeof(call));
66}
67
/* No arch-specific setup is needed for dynamic ftrace on this port. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
72
73#endif
74
75#ifdef CONFIG_FUNCTION_GRAPH_TRACER
76
77# ifdef CONFIG_DYNAMIC_FTRACE
78
79extern void ftrace_graph_call(void);
80
81int ftrace_enable_ftrace_graph_caller(void)
82{
83	unsigned long ip = (unsigned long)&ftrace_graph_call;
84	uint16_t jump_pcrel12 = ((unsigned long)&ftrace_graph_caller - ip) >> 1;
85	jump_pcrel12 |= 0x2000;
86	return ftrace_modify_code(ip, (void *)&jump_pcrel12, sizeof(jump_pcrel12));
87}
88
89int ftrace_disable_ftrace_graph_caller(void)
90{
91	return ftrace_modify_code((unsigned long)&ftrace_graph_call, empty_zero_page, 2);
92}
93
94# endif
95
96/*
97 * Hook the return address and push it in the stack of return addrs
98 * in current thread info.
99 */
100void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
101                           unsigned long frame_pointer)
102{
103	struct ftrace_graph_ent trace;
104	unsigned long return_hooker = (unsigned long)&return_to_handler;
105
106	if (unlikely(atomic_read(&current->tracing_graph_pause)))
107		return;
108
109	if (ftrace_push_return_trace(*parent, self_addr, &trace.depth,
110	                             frame_pointer) == -EBUSY)
111		return;
112
113	trace.func = self_addr;
114
115	/* Only trace if the calling function expects to */
116	if (!ftrace_graph_entry(&trace)) {
117		current->curr_ret_stack--;
118		return;
119	}
120
121	/* all is well in the world !  hijack RETS ... */
122	*parent = return_hooker;
123}
124
125#endif
126