#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>
#include <linux/compiler.h>

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS	     */
#include <asm/syscall.h>	/* some archs define it here */
#endif

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,

	__TRACE_LAST_TYPE,
};


#undef __field
#define __field(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef TP_ARGS
#define TP_ARGS(args...)	args

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn) \
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#include "trace_entries.h"

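/*
 * A sketch of how the FTRACE_ENTRY() definitions above expand (the
 * field list shown is the one trace_entries.h uses for the function
 * entry; treat it as illustrative):
 *
 *	FTRACE_ENTRY(function, ftrace_entry, TRACE_FN,
 *		F_STRUCT(
 *			__field(unsigned long, ip)
 *			__field(unsigned long, parent_ip)
 *		),
 *		...)
 *
 * becomes:
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 */
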
/*
 * Syscalls are special and need special handling; this is why
 * they are not included in trace_entries.h.
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 *  PREEMPT_RESCHED	- a preemption-point reschedule is requested
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
};

#define TRACE_BUF_SIZE		1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace).
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];
};

struct tracer;

struct trace_buffer {
	struct trace_array		*tr;
	struct ring_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	cycle_t				time_start;
	int				cpu;
};

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well.
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct trace_buffer	trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * they continue examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * trace_buffer. When a snapshot is taken, the buffer of the
	 * max_buffer is swapped with the buffer of the trace_buffer
	 * and the buffers are reset for the trace_buffer so the
	 * tracing can continue.
	 */
	struct trace_buffer	max_buffer;
	bool			allocated_snapshot;
	unsigned long		max_latency;
#endif
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot. The buffers themselves are
	 * protected by per_cpu spinlocks. But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as an arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in other places outside of update_max_tr(),
	 * so it needs to be defined outside of
	 * CONFIG_TRACER_MAX_TRACE.
	 */
	arch_spinlock_t		max_lock;
	int			buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct ftrace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct ftrace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int			stop_count;
	int			clock_id;
	struct tracer		*current_trace;
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct list_head	systems;
	struct list_head	events;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	int			ref;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	/* function tracing enabled */
	int			function_enabled;
#endif
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern void trace_array_put(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 *  Where "type" is the trace type that includes the trace_entry
 *  as the "ent" item. And "id" is the trace identifier that is
 *  used in the trace_type enum.
 *
 *  If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);		\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);		\
		__ftrace_bad_type();					\
	} while (0)

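/*
 * Typical use in an output callback (a sketch; trace_seq_printf() is
 * the normal way to emit text from print_line handlers, and the ip /
 * parent_ip fields assume the function-entry layout of ftrace_entry):
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "%ps <-%ps\n",
 *			 (void *)field->ip, (void *)field->parent_ip);
 */
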
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index used for its value in the flags
 * field of struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b

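/*
 * Example of wiring the two structs together (a sketch; "foo" and
 * MY_OPT_FOO are made-up names, not an existing tracer option):
 *
 *	#define MY_OPT_FOO	0x1
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(foo, MY_OPT_FOO) },
 *		{ }		<- terminator, keep last
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,
 *		.opts = my_opts,
 *	};
 *
 * A tracer then points its ->flags at &my_flags and handles bit
 * changes in its ->set_flag() callback.
 */
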
/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	int			(*update_thresh)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	int			enabled;
	bool			print_max;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
};


/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are taken:
 *   If the arch does not support an ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback,
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than FTRACE_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_CONTROL_BIT,

/*
 * Abuse of the trace_recursion: we need a way to maintain state
 * when tracing the function graph in irq context, because we may
 * want to trace a particular function that was called from irq
 * context even though irq tracing is off. Since this state can
 * only be modified by current, we can reuse trace_recursion for it.
 */
	TRACE_IRQ_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX

static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;
		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	return bit;
}

static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}

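/*
 * How a function callback is expected to use the helpers above
 * (a sketch of the usual guard pattern):
 *
 *	int bit;
 *
 *	bit = trace_test_and_set_recursion(TRACE_FTRACE_START,
 *					   TRACE_FTRACE_MAX);
 *	if (bit < 0)
 *		return;		// recursing; an outer call owns this context
 *
 *	// ... do the actual tracing work ...
 *
 *	trace_clear_recursion(bit);
 */
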
static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	if (iter->buffer_iter && iter->buffer_iter[cpu])
		return iter->buffer_iter[cpu];
	return NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset(struct trace_buffer *buf, int cpu);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry_tr(struct trace_array *tr);
struct dentry *tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
						struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void __buffer_unlock_commit(struct ring_buffer *buffer,
			    struct ring_buffer_event *event);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

void tracing_sched_switch_trace(struct trace_array *tr,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

loff_t tracing_lseek(struct file *file, loff_t offset, int whence);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)

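/*
 * Example (sketch): walk only the CPUs selected for tracing,
 * e.g. to reset each per-CPU buffer:
 *
 *	int cpu;
 *
 *	for_each_tracing_cpu(cpu)
 *		tracing_reset(&tr->trace_buffer, cpu);
 */
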
extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef CONFIG_STACKTRACE
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc);

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs);

void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_stack(struct ring_buffer *buffer,
				      unsigned long flags, int skip, int pc)
{
}

static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
					   unsigned long flags, int skip,
					   int pc, struct pt_regs *regs)
{
}

static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */

extern cycle_t ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);
extern int trace_find_tgid(int pid);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;
DECLARE_PER_CPU(int, ftrace_cpu_disabled);

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, the tracers need to reference __init functions.
 */
#define __tracer_data		__refdata
#else
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data		__read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern unsigned long trace_flags;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN       0x1
#define TRACE_GRAPH_PRINT_CPU           0x2
#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
#define TRACE_GRAPH_PRINT_PROC          0x8
#define TRACE_GRAPH_PRINT_DURATION      0x10
#define TRACE_GRAPH_PRINT_ABS_TIME      0x20
#define TRACE_GRAPH_PRINT_IRQS          0x40
#define TRACE_GRAPH_PRINT_TAIL          0x80
#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);


#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
extern int ftrace_graph_notrace_count;
extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_count)
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i]) {
			/*
			 * If no irqs are to be traced, but a set_graph_function
			 * is set, and called by an interrupt handler, we still
			 * want to trace it.
			 */
			if (in_irq())
				trace_recursion_set(TRACE_IRQ_BIT);
			else
				trace_recursion_clear(TRACE_IRQ_BIT);
			return 1;
		}
	}

	return 0;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_notrace_count)
		return 0;

	for (i = 0; i < ftrace_graph_notrace_count; i++) {
		if (addr == ftrace_graph_notrace_funcs[i])
			return 1;
	}

	return 0;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct task_struct *task)
{
	if (list_empty(&ftrace_pids))
		return 1;

	return test_tsk_trace_trace(task);
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
int using_ftrace_ops_list_func(void);
#else
static inline int ftrace_trace_task(struct task_struct *task)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
/* ftrace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);
#else
/*
 * The ops parameter passed in is usually undefined.
 * This must be a macro.
 */
#define ftrace_create_filter_files(ops, parent) do { } while (0)
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */

int ftrace_event_is_function(struct ftrace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos);

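/*
 * Typical parser lifecycle (a sketch; the ftrace filter write handlers
 * follow this shape, and do_something_with() is a placeholder):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser))
 *		do_something_with(parser.buffer);
 *
 *	trace_parser_put(&parser);
 */
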
/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_PRINTK		= 0x200,
	TRACE_ITER_PREEMPTONLY		= 0x400,
	TRACE_ITER_BRANCH		= 0x800,
	TRACE_ITER_ANNOTATE		= 0x1000,
	TRACE_ITER_USERSTACKTRACE	= 0x2000,
	TRACE_ITER_SYM_USEROBJ		= 0x4000,
	TRACE_ITER_PRINTK_MSGONLY	= 0x8000,
	TRACE_ITER_CONTEXT_INFO		= 0x10000, /* Print pid/cpu/time */
	TRACE_ITER_LATENCY_FMT		= 0x20000,
	TRACE_ITER_SLEEP_TIME		= 0x40000,
	TRACE_ITER_GRAPH_TIME		= 0x80000,
	TRACE_ITER_RECORD_CMD		= 0x100000,
	TRACE_ITER_OVERWRITE		= 0x200000,
	TRACE_ITER_STOP_ON_FREE		= 0x400000,
	TRACE_ITER_IRQ_INFO		= 0x800000,
	TRACE_ITER_MARKERS		= 0x1000000,
	TRACE_ITER_FUNCTION		= 0x2000000,
	TRACE_ITER_TGID			= 0x4000000,
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

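/*
 * Example (sketch): output code snapshots the symbol-related bits and
 * picks a print style from them:
 *
 *	unsigned long sym_flags = trace_flags & TRACE_ITER_SYM_MASK;
 *
 *	if (sym_flags & TRACE_ITER_SYM_OFFSET)
 *		(print "func+0x1c/0x40"-style symbols)
 */
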
extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct event_filter {
	int			n_preds;	/* Number assigned */
	int			a_preds;	/* allocated */
	struct filter_pred	*preds;
	struct filter_pred	*root;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

struct ftrace_subsystem_dir {
	struct list_head		list;
	struct event_subsystem		*subsystem;
	struct trace_array		*tr;
	struct dentry			*entry;
	int				ref_count;
	int				nr_events;
};

#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384

struct filter_pred;
struct regex;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};

struct filter_pred {
	filter_pred_fn_t	fn;
	u64			val;
	struct regex		regex;
	unsigned short		*ops;
	struct ftrace_event_field *field;
	int			offset;
	int			not;
	int			op;
	unsigned short		index;
	unsigned short		parent;
	unsigned short		left;
	unsigned short		right;
};

extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct ftrace_event_file *file,
			       struct trace_seq *s);
extern int apply_event_filter(struct ftrace_event_file *file,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);
extern int create_event_filter(struct ftrace_event_call *call,
			       char *filter_str, bool set_str,
			       struct event_filter **filterp);
extern void free_event_filter(struct event_filter *filter);

struct ftrace_event_field *
trace_find_event_field(struct ftrace_event_call *call, char *name);

extern void trace_event_enable_cmd_record(bool enable);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);

extern struct ftrace_event_file *find_event_file(struct trace_array *tr,
						 const char *system,
						 const char *event);

static inline void *event_file_data(struct file *filp)
{
	return ACCESS_ONCE(file_inode(filp)->i_private);
}

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const struct file_operations event_trigger_fops;

extern int register_trigger_cmds(void);
extern void clear_event_triggers(struct trace_array *tr);

struct event_trigger_data {
	unsigned long			count;
	int				ref;
	struct event_trigger_ops	*ops;
	struct event_command		*cmd_ops;
	struct event_filter __rcu	*filter;
	char				*filter_str;
	void				*private_data;
	struct list_head		list;
};

/**
 * struct event_trigger_ops - callbacks for trace event triggers
 *
 * The methods in this structure provide per-event trigger hooks for
 * various trigger operations.
 *
 * All the methods below, except for @init() and @free(), must be
 * implemented.
 *
 * @func: The trigger 'probe' function called when the triggering
 *	event occurs.  The data passed into this callback is the data
 *	that was supplied to the event_command @reg() function that
 *	registered the trigger (see struct event_command).
 *
 * @init: An optional initialization function called for the trigger
 *	when the trigger is registered (via the event_command reg()
 *	function).  This can be used to perform per-trigger
 *	initialization such as incrementing a per-trigger reference
 *	count, for instance.  This is usually implemented by the
 *	generic utility function @event_trigger_init() (see
 *	trace_event_triggers.c).
 *
 * @free: An optional de-initialization function called for the
 *	trigger when the trigger is unregistered (via the
 *	event_command @unreg() function).  This can be used to perform
 *	per-trigger de-initialization such as decrementing a
 *	per-trigger reference count and freeing corresponding trigger
 *	data, for instance.  This is usually implemented by the
 *	generic utility function @event_trigger_free() (see
 *	trace_event_triggers.c).
 *
 * @print: The callback function invoked to have the trigger print
 *	itself.  This is usually implemented by a wrapper function
 *	that calls the generic utility function @event_trigger_print()
 *	(see trace_event_triggers.c).
 */
struct event_trigger_ops {
	void			(*func)(struct event_trigger_data *data);
	int			(*init)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	void			(*free)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	int			(*print)(struct seq_file *m,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data);
};

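/*
 * Shape of a concrete instance (a sketch with made-up names; the
 * traceon/traceoff triggers in trace_events_trigger.c look like this,
 * and event_trigger_init()/event_trigger_free() are the generic
 * helpers mentioned above):
 *
 *	static void my_trigger(struct event_trigger_data *data)
 *	{
 *		if (!data->count)
 *			return;
 *		if (data->count != -1)
 *			(data->count)--;
 *		// ... perform the trigger's action ...
 *	}
 *
 *	static struct event_trigger_ops my_trigger_ops = {
 *		.func	= my_trigger,
 *		.print	= my_trigger_print,
 *		.init	= event_trigger_init,
 *		.free	= event_trigger_free,
 *	};
 */
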
/**
 * struct event_command - callbacks and data members for event commands
 *
 * Event commands are invoked by users by writing the command name
 * into the 'trigger' file associated with a trace event.  The
 * parameters associated with a specific invocation of an event
 * command are used to create an event trigger instance, which is
 * added to the list of trigger instances associated with that trace
 * event.  When the event is hit, the set of triggers associated with
 * that event is invoked.
 *
 * The data members in this structure provide per-event command data
 * for various event commands.
 *
 * All the data members below, except for @post_trigger, must be set
 * for each event command.
 *
 * @name: The unique name that identifies the event command.  This is
 *	the name used when setting triggers via trigger files.
 *
 * @trigger_type: A unique id that identifies the event command
 *	'type'.  This value has two purposes, the first to ensure that
 *	only one trigger of the same type can be set at a given time
 *	for a particular event e.g. it doesn't make sense to have both
 *	a traceon and traceoff trigger attached to a single event at
 *	the same time, so traceon and traceoff have the same type
 *	though they have different names.  The @trigger_type value is
 *	also used as a bit value for deferring the actual trigger
 *	action until after the current event is finished.  Some
 *	commands need to do this if they themselves log to the trace
 *	buffer (see the @post_trigger() member below).  @trigger_type
 *	values are defined by adding new values to the trigger_type
 *	enum in include/linux/ftrace_event.h.
 *
 * @post_trigger: A flag that says whether or not this command needs
 *	to have its action delayed until after the current event has
 *	been closed.  Some triggers need to avoid being invoked while
 *	an event is currently in the process of being logged, since
 *	the trigger may itself log data into the trace buffer.  Thus
 *	we make sure the current event is committed before invoking
 *	those triggers.  To do that, the trigger invocation is split
 *	in two - the first part checks the filter using the current
 *	trace record; if a command has the @post_trigger flag set, it
 *	sets a bit for itself in the return value, otherwise it
 *	directly invokes the trigger.  Once all commands have been
 *	either invoked or set their return flag, the current record is
 *	either committed or discarded.  At that point, if any commands
 *	have deferred their triggers, those commands are finally
 *	invoked following the close of the current event.  In other
 *	words, if the event_trigger_ops @func() probe implementation
 *	itself logs to the trace buffer, this flag should be set,
 *	otherwise it can be left unspecified.
 *
 * All the methods below, except for @set_filter(), must be
 * implemented.
 *
 * @func: The callback function responsible for parsing and
 *	registering the trigger written to the 'trigger' file by the
 *	user.  It allocates the trigger instance and registers it with
 *	the appropriate trace event.  It makes use of the other
 *	event_command callback functions to orchestrate this, and is
 *	usually implemented by the generic utility function
 *	@event_trigger_callback() (see trace_event_triggers.c).
 *
 * @reg: Adds the trigger to the list of triggers associated with the
 *	event, and enables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @init() function).
 *	This is also where commands can use the @trigger_type value to
 *	make the decision as to whether or not multiple instances of
 *	the trigger should be allowed.  This is usually implemented by
 *	the generic utility function @register_trigger() (see
 *	trace_event_triggers.c).
 *
 * @unreg: Removes the trigger from the list of triggers associated
 *	with the event, and disables the event trigger itself, after
 *	de-initializing it (via the event_trigger_ops @free() function).
 *	This is usually implemented by the generic utility function
 *	@unregister_trigger() (see trace_event_triggers.c).
 *
 * @set_filter: An optional function called to parse and set a filter
 *	for the trigger.  If no @set_filter() method is set for the
 *	event command, filters set by the user for the command will be
 *	ignored.  This is usually implemented by the generic utility
 *	function @set_trigger_filter() (see trace_event_triggers.c).
 *
 * @get_trigger_ops: The callback function invoked to retrieve the
 *	event_trigger_ops implementation associated with the command.
 */
struct event_command {
	struct list_head	list;
	char			*name;
	enum event_trigger_type	trigger_type;
	bool			post_trigger;
	int			(*func)(struct event_command *cmd_ops,
					struct ftrace_event_file *file,
					char *glob, char *cmd, char *params);
	int			(*reg)(char *glob,
				       struct event_trigger_ops *ops,
				       struct event_trigger_data *data,
				       struct ftrace_event_file *file);
	void			(*unreg)(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct ftrace_event_file *file);
	int			(*set_filter)(char *filter_str,
					      struct event_trigger_data *data,
					      struct ftrace_event_file *file);
	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};

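/*
 * Shape of a concrete command (a sketch with made-up names; the
 * generic helpers named in the comment above - event_trigger_callback(),
 * register_trigger(), unregister_trigger(), set_trigger_filter() - are
 * what most commands plug in here):
 *
 *	static struct event_command my_cmd = {
 *		.name		= "mycmd",
 *		.trigger_type	= ETT_MY_CMD,	// a new enum event_trigger_type bit
 *		.func		= event_trigger_callback,
 *		.reg		= register_trigger,
 *		.unreg		= unregister_trigger,
 *		.set_filter	= set_trigger_filter,
 *		.get_trigger_ops = my_get_trigger_ops,
 *	};
 *
 * Commands are registered once at boot, e.g. from register_trigger_cmds().
 */
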
extern int trace_event_enable_disable(struct ftrace_event_file *file,
				      int enable, int soft_disable);
extern int tracing_alloc_snapshot(void);

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];

void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);

/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as save the print formats
 * into sections to display. But the trace infrastructure wants
 * to use these without the added overhead at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). The internal_trace_puts() is for such
 * a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))

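/*
 * Example: internal_trace_puts("tracing: something unexpected\n")
 * writes the literal string straight into the trace buffer without
 * trace_printk()'s format-section bookkeeping.
 */
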
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
	extern struct ftrace_event_call					\
	__aligned(4) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
#include "trace_entries.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct ftrace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#endif /* _LINUX_KERNEL_TRACE_H */