/*
 * This file provides ACPI-based P-state support for IA-64. The
 * module works with the generic cpufreq infrastructure. Most of
 * the code is based on the i386 version
 * (arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c).
 *
 * Copyright (C) 2005 Intel Corp
 *      Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pal.h>

#include <linux/acpi.h>
#include <acpi/processor.h>

MODULE_AUTHOR("Venkatesh Pallipadi");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

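/*
 * Per-CPU driver state: the ACPI processor performance data plus the
 * cpufreq frequency table derived from it.
 */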
struct cpufreq_acpi_io {
	struct acpi_processor_performance	acpi_data;
	struct cpufreq_frequency_table		*freq_table;
	unsigned int				resume;
};

static struct cpufreq_acpi_io	*acpi_io_data[NR_CPUS];

static struct cpufreq_driver acpi_cpufreq_driver;

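/*
 * Ask PAL to switch to the P-state identified by the ACPI 'control'
 * value.  Returns 0 on success, -ENODEV if the PAL call fails.
 */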
static int
processor_set_pstate (
	u32	value)
{
	s64 retval;

	pr_debug("processor_set_pstate\n");

	retval = ia64_pal_set_pstate((u64)value);

	if (retval) {
		pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n",
		        value, retval);
		return -ENODEV;
	}
	return (int)retval;
}

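/*
 * Read the instantaneous P-state index from PAL into *value.
 * Returns the PAL status (0 on success).
 */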
static int
processor_get_pstate (
	u32	*value)
{
	u64	pstate_index = 0;
	s64	retval;

	pr_debug("processor_get_pstate\n");

	retval = ia64_pal_get_pstate(&pstate_index,
	                             PAL_GET_PSTATE_TYPE_INSTANT);
	*value = (u32) pstate_index;

	if (retval)
		pr_debug("Failed to get current freq with "
			"error 0x%lx, idx 0x%x\n", retval, *value);

	return (int)retval;
}

/*
 * Map a P-state 'status' value back to its core frequency (in MHz).
 * To be used only after data->acpi_data is initialized.  If no table
 * entry matches, the last table entry's frequency is returned.
 */
static unsigned
extract_clock (
	struct cpufreq_acpi_io *data,
	unsigned value,
	unsigned int cpu)
{
	unsigned long i;

	pr_debug("extract_clock\n");

	for (i = 0; i < data->acpi_data.state_count; i++) {
		if (value == data->acpi_data.states[i].status)
			return data->acpi_data.states[i].core_frequency;
	}
	return data->acpi_data.states[i-1].core_frequency;
}

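/*
 * Report the current frequency of the given CPU in kHz: temporarily
 * migrate to that CPU, read the instantaneous P-state and translate
 * its status value back to a core frequency.
 */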
static unsigned int
processor_get_freq (
	struct cpufreq_acpi_io	*data,
	unsigned int		cpu)
{
	int			ret = 0;
	u32			value = 0;
	cpumask_t		saved_mask;
	unsigned long		clock_freq;

	pr_debug("processor_get_freq\n");

	saved_mask = current->cpus_allowed;
	set_cpus_allowed_ptr(current, cpumask_of(cpu));
	if (smp_processor_id() != cpu)
		goto migrate_end;

	/* processor_get_pstate gets the instantaneous frequency */
	ret = processor_get_pstate(&value);

	if (ret) {
		set_cpus_allowed_ptr(current, &saved_mask);
		printk(KERN_WARNING "get performance failed with error %d\n",
		       ret);
		ret = 0;
		goto migrate_end;
	}
	clock_freq = extract_clock(data, value, cpu);
	ret = (clock_freq*1000);	/* core_frequency is MHz, cpufreq wants kHz */

migrate_end:
	set_cpus_allowed_ptr(current, &saved_mask);
	return ret;
}

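/*
 * Switch policy->cpu to P-state 'state': migrate to that CPU and write
 * the state's ACPI 'control' value via PAL.  If the CPU is already at
 * the requested state the write is skipped, unless data->resume is set
 * (first transition after init or resume), which forces it.  Returns
 * -EAGAIN if the caller could not be migrated to the target CPU.
 */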
static int
processor_set_freq (
	struct cpufreq_acpi_io	*data,
	struct cpufreq_policy	*policy,
	int			state)
{
	int			ret = 0;
	u32			value = 0;
	cpumask_t		saved_mask;
	int			retval;

	pr_debug("processor_set_freq\n");

	saved_mask = current->cpus_allowed;
	set_cpus_allowed_ptr(current, cpumask_of(policy->cpu));
	if (smp_processor_id() != policy->cpu) {
		retval = -EAGAIN;
		goto migrate_end;
	}

	if (state == data->acpi_data.state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n", state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n", state);
			retval = 0;
			goto migrate_end;
		}
	}

	pr_debug("Transitioning from P%d to P%d\n",
		data->acpi_data.state, state);

	/*
	 * First we write the target state's 'control' value to the
	 * control_register.
	 */

	value = (u32) data->acpi_data.states[state].control;

	pr_debug("Transitioning to state: 0x%08x\n", value);

	ret = processor_set_pstate(value);
	if (ret) {
		printk(KERN_WARNING "Transition failed with error %d\n", ret);
		retval = -ENODEV;
		goto migrate_end;
	}

	data->acpi_data.state = state;

	retval = 0;

migrate_end:
	set_cpus_allowed_ptr(current, &saved_mask);
	return (retval);
}

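/* cpufreq ->get callback: return the current frequency of 'cpu' in kHz. */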
static unsigned int
acpi_cpufreq_get (
	unsigned int		cpu)
{
	struct cpufreq_acpi_io *data = acpi_io_data[cpu];

	pr_debug("acpi_cpufreq_get\n");

	return processor_get_freq(data, cpu);
}

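/* cpufreq ->target_index callback: switch to frequency table entry 'index'. */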
static int
acpi_cpufreq_target (
	struct cpufreq_policy	*policy,
	unsigned int index)
{
	return processor_set_freq(acpi_io_data[policy->cpu], policy, index);
}

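/*
 * Per-CPU initialization: register the ACPI performance data for this
 * CPU, check that the control and status registers are FIXED_HARDWARE
 * (accessed through PAL), build the cpufreq frequency table and derive
 * the worst-case transition latency.
 */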
static int
acpi_cpufreq_cpu_init (
	struct cpufreq_policy	*policy)
{
	unsigned int		i;
	unsigned int		cpu = policy->cpu;
	struct cpufreq_acpi_io	*data;
	unsigned int		result = 0;

	pr_debug("acpi_cpufreq_cpu_init\n");

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return (-ENOMEM);

	acpi_io_data[cpu] = data;

	result = acpi_processor_register_performance(&data->acpi_data, cpu);

	if (result)
		goto err_free;

	/* capability check */
	if (data->acpi_data.state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if ((data->acpi_data.control_register.space_id !=
					ACPI_ADR_SPACE_FIXED_HARDWARE) ||
	    (data->acpi_data.status_register.space_id !=
					ACPI_ADR_SPACE_FIXED_HARDWARE)) {
		pr_debug("Unsupported address space [%d, %d]\n",
			(u32) (data->acpi_data.control_register.space_id),
			(u32) (data->acpi_data.status_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	/* alloc freq_table, with one extra entry for the CPUFREQ_TABLE_END marker */
	data->freq_table = kzalloc(sizeof(*data->freq_table) *
	                           (data->acpi_data.state_count + 1),
	                           GFP_KERNEL);
	if (!data->freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency (ACPI reports usecs, cpufreq wants nsecs) */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < data->acpi_data.state_count; i++) {
		if ((data->acpi_data.states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency) {
			policy->cpuinfo.transition_latency =
			    data->acpi_data.states[i].transition_latency * 1000;
		}
	}

	/* table init */
	for (i = 0; i <= data->acpi_data.state_count; i++) {
		if (i < data->acpi_data.state_count) {
			data->freq_table[i].frequency =
			      data->acpi_data.states[i].core_frequency * 1000;
		} else {
			data->freq_table[i].frequency = CPUFREQ_TABLE_END;
		}
	}

	result = cpufreq_table_validate_and_show(policy, data->freq_table);
	if (result)
		goto err_freqfree;

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management "
	       "activated.\n", cpu);

	for (i = 0; i < data->acpi_data.state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n",
			(i == data->acpi_data.state ? '*' : ' '), i,
			(u32) data->acpi_data.states[i].core_frequency,
			(u32) data->acpi_data.states[i].power,
			(u32) data->acpi_data.states[i].transition_latency,
			(u32) data->acpi_data.states[i].bus_master_latency,
			(u32) data->acpi_data.states[i].status,
			(u32) data->acpi_data.states[i].control);

	/* the first call to ->target() should result in us actually
	 * writing something to the appropriate registers. */
	data->resume = 1;

	return (result);

 err_freqfree:
	kfree(data->freq_table);
 err_unreg:
	acpi_processor_unregister_performance(&data->acpi_data, cpu);
 err_free:
	kfree(data);
	acpi_io_data[cpu] = NULL;

	return (result);
}

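/*
 * Per-CPU teardown: unregister the ACPI performance data and free the
 * driver state allocated in acpi_cpufreq_cpu_init().
 */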
static int
acpi_cpufreq_cpu_exit (
	struct cpufreq_policy	*policy)
{
	struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];

	pr_debug("acpi_cpufreq_cpu_exit\n");

	if (data) {
		acpi_io_data[policy->cpu] = NULL;
		acpi_processor_unregister_performance(&data->acpi_data,
		                                      policy->cpu);
		kfree(data->freq_table);	/* allocated in acpi_cpufreq_cpu_init() */
		kfree(data);
	}

	return (0);
}

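/*
 * Driver callbacks.  Frequency verification and the sysfs attributes
 * come from the generic frequency-table helpers.
 */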
static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= acpi_cpufreq_target,
	.get		= acpi_cpufreq_get,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.name		= "acpi-cpufreq",
	.attr		= cpufreq_generic_attr,
};


static int __init
acpi_cpufreq_init (void)
{
	pr_debug("acpi_cpufreq_init\n");

	return cpufreq_register_driver(&acpi_cpufreq_driver);
}


static void __exit
acpi_cpufreq_exit (void)
{
	pr_debug("acpi_cpufreq_exit\n");

	cpufreq_unregister_driver(&acpi_cpufreq_driver);
}


late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);