/*
 * Cell Broadband Engine OProfile Support
 *
 * (C) Copyright IBM Corporation 2006
 *
 * Authors: Maynard Johnson <maynardj@us.ibm.com>
 *	    Carl Love <carll@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/hrtimer.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cell-pmu.h>
#include <asm/time.h>
#include "pr_util.h"

#define SCALE_SHIFT 14

static u32 *samples;

/* spu_prof_running is a flag used to indicate if spu profiling is enabled
 * or not.  It is set by the routines start_spu_profiling_cycles() and
 * start_spu_profiling_events().  The flag is cleared by the routines
 * stop_spu_profiling_cycles() and stop_spu_profiling_events().  These
 * routines are called via global_start() and global_stop(), which are
 * called in op_powerpc_start() and op_powerpc_stop().  Those routines are
 * called once per system as a result of the user starting/stopping
 * OProfile.  Hence, only one CPU per user at a time will be changing the
 * value of spu_prof_running.  In general, OProfile does not protect
 * against multiple users trying to run OProfile at a time.
 */
int spu_prof_running;
static unsigned int profiling_interval;

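/* Trace-buffer geometry: each 128-bit entry holds one 16-bit PC sample
 * for every SPU on the node, four samples per 64-bit word.
 */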
#define NUM_SPU_BITS_TRBUF 16
#define SPUS_PER_TB_ENTRY   4

#define SPU_PC_MASK	     0xFFFF

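/* Serializes access to the shared samples array; see the comment in
 * profile_spus() for why a single lock suffices.
 */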
DEFINE_SPINLOCK(oprof_spu_smpl_arry_lck);
unsigned long oprof_spu_smpl_arry_lck_flags;

void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset)
{
	unsigned long ns_per_cyc;

	if (!freq_khz)
		freq_khz = ppc_proc_freq/1000;

	/* To calculate a timeout in nanoseconds, the basic
	 * formula is ns = cycles_reset * (NSEC_PER_SEC / cpu frequency).
	 * To avoid floating point math, we use the scale math
	 * technique as described in linux/jiffies.h.  We use
	 * a scale factor of SCALE_SHIFT, which provides 4 decimal places
	 * of precision.  This is close enough for the purpose at hand.
	 *
	 * The value of the timeout should be small enough that the hw
	 * trace buffer will not get more than about 1/3 full for the
	 * maximum user-specified (the LFSR value) hw sampling frequency.
	 * This is to ensure the trace buffer will never fill even if the
	 * kernel thread scheduling varies under a heavy system load.
	 */
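
	/* Worked example (illustrative numbers): on a 3.2 GHz Cell,
	 * freq_khz = 3200000, so ns_per_cyc = (1000000 << 14) / 3200000
	 * = 5120, i.e. 5120 / 2^14 = 0.3125 ns per cycle.  A cycles_reset
	 * of 100000 then gives profiling_interval =
	 * (5120 * 100000) >> 14 = 31250 ns.
	 */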

	ns_per_cyc = (USEC_PER_SEC << SCALE_SHIFT)/freq_khz;
	profiling_interval = (ns_per_cyc * cycles_reset) >> SCALE_SHIFT;
}

/*
 * Extract SPU PC from trace buffer entry
 */
static void spu_pc_extract(int cpu, int entry)
{
	/* the trace buffer is 128 bits */
	u64 trace_buffer[2];
	u64 spu_mask;
	int spu;

	spu_mask = SPU_PC_MASK;

	/* Each SPU PC is 16 bits; hence, four SPU PCs fit in each of
	 * the two 64-bit words that make up the 128-bit trace-buffer
	 * entry.  Process the two 64-bit values simultaneously.
	 * trace[0] SPU PC contents are: 0 1 2 3
	 * trace[1] SPU PC contents are: 4 5 6 7
	 */
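
	/* Illustrative example: if trace_buffer[0] == 0x0001000200030004,
	 * the first loop pass (spu == 3) stores (0x0004 & SPU_PC_MASK) << 2
	 * for SPU 3, the next pass stores 0x0003 << 2 for SPU 2, and so
	 * on; the << 2 rebuilds the word-aligned 18-bit PC from the
	 * stored upper 16 bits.
	 */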

	cbe_read_trace_buffer(cpu, trace_buffer);

	for (spu = SPUS_PER_TB_ENTRY-1; spu >= 0; spu--) {
		/* The SPU PC trace entry is the upper 16 bits of the
		 * 18-bit SPU program counter, so shift left by 2 to
		 * restore the word-aligned address.
		 */
		samples[spu * TRACE_ARRAY_SIZE + entry]
			= (spu_mask & trace_buffer[0]) << 2;
		samples[(spu + SPUS_PER_TB_ENTRY) * TRACE_ARRAY_SIZE + entry]
			= (spu_mask & trace_buffer[1]) << 2;

		trace_buffer[0] = trace_buffer[0] >> NUM_SPU_BITS_TRBUF;
		trace_buffer[1] = trace_buffer[1] >> NUM_SPU_BITS_TRBUF;
	}
}

static int cell_spu_pc_collection(int cpu)
{
	u32 trace_addr;
	int entry;

	/* process the collected SPU PC for the node */

	entry = 0;

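	/* Drain the hardware trace buffer: the PMU sets
	 * CBE_PM_TRACE_BUF_EMPTY in the trace_address register once no
	 * entries remain, so stop on that flag or when the samples
	 * array is full.
	 */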
	trace_addr = cbe_read_pm(cpu, trace_address);
	while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) {
		/* there is data in the trace buffer to process */
		spu_pc_extract(cpu, entry);

		entry++;

		if (entry >= TRACE_ARRAY_SIZE)
			/* the samples array is full */
			break;

		trace_addr = cbe_read_pm(cpu, trace_address);
	}

	return entry;
}

static enum hrtimer_restart profile_spus(struct hrtimer *timer)
{
	ktime_t kt;
	int cpu, node, k, num_samples, spu_num;

	if (!spu_prof_running)
		goto stop;

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		node = cbe_cpu_to_node(cpu);

		/* There should only be one kernel thread at a time
		 * processing the samples.  In the very unlikely case that
		 * processing takes long enough for multiple kernel threads
		 * to be running, this lock makes sure only one of them
		 * works on the samples array at a time.  The sample array
		 * must be loaded and then processed for a given cpu; it is
		 * not per-cpu.
		 */
		spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
				  oprof_spu_smpl_arry_lck_flags);
		num_samples = cell_spu_pc_collection(cpu);

		if (num_samples == 0) {
			spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
					       oprof_spu_smpl_arry_lck_flags);
			continue;
		}

		for (k = 0; k < SPUS_PER_NODE; k++) {
			spu_num = k + (node * SPUS_PER_NODE);
			spu_sync_buffer(spu_num,
					samples + (k * TRACE_ARRAY_SIZE),
					num_samples);
		}

		spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
				       oprof_spu_smpl_arry_lck_flags);
	}
	smp_wmb();	/* ensure spu event buffer updates are written */
			/* don't want events intermingled... */

	kt = ktime_set(0, profiling_interval);
	if (!spu_prof_running)
		goto stop;
	hrtimer_forward(timer, timer->base->get_time(), kt);
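	/* hrtimer_forward() advances the expiry in whole
	 * profiling_interval steps past the current time, so latency in
	 * running this handler does not accumulate as timer drift.
	 */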
	return HRTIMER_RESTART;

 stop:
	printk(KERN_INFO "SPU_PROF: spu-prof timer ending\n");
	return HRTIMER_NORESTART;
}

static struct hrtimer timer;
/*
 * Entry point for SPU cycle profiling.
 * NOTE:  SPU profiling is done system-wide, not per-CPU.
 *
 * cycles_reset is the count value specified by the user when
 * setting up OProfile to count SPU_CYCLES.
 */
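/* A typical calling sequence (illustrative sketch; the exact caller
 * lives in the Cell OProfile arch code) would be:
 *
 *	set_spu_profiling_frequency(khzfreq, spu_cycle_reset);
 *	rc = start_spu_profiling_cycles(spu_cycle_reset);
 *	...
 *	stop_spu_profiling_cycles();
 */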
int start_spu_profiling_cycles(unsigned int cycles_reset)
{
	ktime_t kt;

	pr_debug("timer resolution: %lu\n", TICK_NSEC);
	kt = ktime_set(0, profiling_interval);
	hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_set_expires(&timer, kt);
	timer.function = profile_spus;

	/* Allocate arrays for collecting SPU PC samples */
	samples = kzalloc(SPUS_PER_NODE *
			  TRACE_ARRAY_SIZE * sizeof(u32), GFP_KERNEL);
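	/* One row of TRACE_ARRAY_SIZE u32 samples per SPU on the node,
	 * indexed by SPU number within the node (see spu_pc_extract()).
	 */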

	if (!samples)
		return -ENOMEM;

	spu_prof_running = 1;
	hrtimer_start(&timer, kt, HRTIMER_MODE_REL);
	schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);

	return 0;
}

/*
 * Entry point for SPU event profiling.
 * NOTE:  SPU profiling is done system-wide, not per-CPU.
 */
void start_spu_profiling_events(void)
{
	spu_prof_running = 1;
	schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
}

void stop_spu_profiling_cycles(void)
{
	spu_prof_running = 0;
	hrtimer_cancel(&timer);
	kfree(samples);
	pr_debug("SPU_PROF: stop_spu_profiling_cycles issued\n");
}

void stop_spu_profiling_events(void)
{
	spu_prof_running = 0;
}