processor_core.c revision 27663c5855b10af9ec67bc7dfba001426ba21222
/*
 * acpi_processor.c - ACPI Processor Driver ($Revision: 71 $)
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *  			- Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *  TBD:
 *	1. Make # power states dynamic.
 *	2. Support duty_cycle values that span bit 4.
 *	3. Optimize by having the scheduler determine busyness instead of
 *	   having us try to calculate it here.
 *	4. Need C1 timing -- must modify kernel (IRQ handler) to get this.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>

#include <asm/io.h>
#include <asm/system.h>
#include <asm/cpu.h>
#include <asm/delay.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/acpi.h>

#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT	0x01000000
#define ACPI_PROCESSOR_CLASS		"processor"
#define ACPI_PROCESSOR_DEVICE_NAME	"Processor"
#define ACPI_PROCESSOR_FILE_INFO	"info"
#define ACPI_PROCESSOR_FILE_THROTTLING	"throttling"
#define ACPI_PROCESSOR_FILE_LIMIT	"limit"
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
#define ACPI_PROCESSOR_NOTIFY_POWER	0x81
#define ACPI_PROCESSOR_NOTIFY_THROTTLING	0x82

#define ACPI_PROCESSOR_LIMIT_USER	0
#define ACPI_PROCESSOR_LIMIT_THERMAL	1

#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_core");

MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Processor Driver");
MODULE_LICENSE("GPL");

static int acpi_processor_add(struct acpi_device *device);
static int acpi_processor_start(struct acpi_device *device);
static int acpi_processor_remove(struct acpi_device *device, int type);
static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
static void acpi_processor_notify(acpi_handle handle, u32 event, void *data);
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
static int acpi_processor_handle_eject(struct acpi_processor *pr);


static const struct acpi_device_id processor_device_ids[] = {
	{ACPI_PROCESSOR_HID, 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);

static struct acpi_driver acpi_processor_driver = {
	.name = "processor",
	.class = ACPI_PROCESSOR_CLASS,
	.ids = processor_device_ids,
	.ops = {
		.add = acpi_processor_add,
		.remove = acpi_processor_remove,
		.start = acpi_processor_start,
		.suspend = acpi_processor_suspend,
		.resume = acpi_processor_resume,
		},
};

#define INSTALL_NOTIFY_HANDLER		1
#define UNINSTALL_NOTIFY_HANDLER	2

static const struct file_operations acpi_processor_info_fops = {
	.owner = THIS_MODULE,
	.open = acpi_processor_info_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

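/*
 * Per-CPU pointers to the ACPI processor objects, indexed by logical CPU id,
 * plus the shared platform errata flags filled in by acpi_processor_errata().
 */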
DEFINE_PER_CPU(struct acpi_processor *, processors);
struct acpi_processor_errata errata __read_mostly;

static int set_no_mwait(const struct dmi_system_id *id)
{
	printk(KERN_NOTICE PREFIX "%s detected - "
		"disabling mwait for CPU C-states\n", id->ident);
	idle_nomwait = 1;
	return 0;
}

static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
	{
	set_no_mwait, "IFL91 board", {
	DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
	DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
	DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
	DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
	{
	set_no_mwait, "Extensa 5220", {
	DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
	DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
	DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
	DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
	{},
};

/* --------------------------------------------------------------------------
                                Errata Handling
   -------------------------------------------------------------------------- */

static int acpi_processor_errata_piix4(struct pci_dev *dev)
{
	u8 value1 = 0;
	u8 value2 = 0;


	if (!dev)
		return -EINVAL;

	/*
	 * Note that 'dev' references the PIIX4 ACPI Controller.
	 */

	switch (dev->revision) {
	case 0:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
		break;
	case 1:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
		break;
	case 2:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
		break;
	case 3:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
		break;
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
		break;
	}

	switch (dev->revision) {

	case 0:		/* PIIX4 A-step */
	case 1:		/* PIIX4 B-step */
		/*
		 * See specification changes #13 ("Manual Throttle Duty Cycle")
		 * and #14 ("Enabling and Disabling Manual Throttle"), plus
		 * erratum #5 ("STPCLK# Deassertion Time") from the January
		 * 2002 PIIX4 specification update.  Applies to only older
		 * PIIX4 models.
		 */
		errata.piix4.throttle = 1;
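		/*
		 * Fall through: the BM-IDE and Type-F DMA errata handled
		 * below apply to the A/B-step parts as well.
		 */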

	case 2:		/* PIIX4E */
	case 3:		/* PIIX4M */
		/*
		 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
		 * Livelock") from the January 2002 PIIX4 specification update.
		 * Applies to all PIIX4 models.
		 */

		/*
		 * BM-IDE
		 * ------
		 * Find the PIIX4 IDE Controller and get the Bus Master IDE
		 * Status register address.  We'll use this later to read
		 * each IDE controller's DMA status to make sure we catch all
		 * DMA activity.
		 */
		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB,
				     PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (dev) {
			errata.piix4.bmisx = pci_resource_start(dev, 4);
			pci_dev_put(dev);
		}

		/*
		 * Type-F DMA
		 * ----------
		 * Find the PIIX4 ISA Controller and read the Motherboard
		 * DMA controller's status to see if Type-F (Fast) DMA mode
		 * is enabled (bit 7) on either channel.  Note that we'll
		 * disable C3 support if this is enabled, as some legacy
		 * devices won't operate well if fast DMA is disabled.
		 */
		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB_0,
				     PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (dev) {
			pci_read_config_byte(dev, 0x76, &value1);
			pci_read_config_byte(dev, 0x77, &value2);
			if ((value1 & 0x80) || (value2 & 0x80))
				errata.piix4.fdma = 1;
			pci_dev_put(dev);
		}

		break;
	}

	if (errata.piix4.bmisx)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Bus master activity detection (BM-IDE) erratum enabled\n"));
	if (errata.piix4.fdma)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Type-F DMA livelock erratum (C3 disabled)\n"));

	return 0;
}

static int acpi_processor_errata(struct acpi_processor *pr)
{
	int result = 0;
	struct pci_dev *dev = NULL;


	if (!pr)
		return -EINVAL;

	/*
	 * PIIX4
	 */
	dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
			     PCI_ANY_ID, NULL);
	if (dev) {
		result = acpi_processor_errata_piix4(dev);
		pci_dev_put(dev);
	}

	return result;
}

/* --------------------------------------------------------------------------
                              Common ACPI processor functions
   -------------------------------------------------------------------------- */

/*
 * _PDC is required for a BIOS-OS handshake for most of the newer
 * ACPI processor features.
 */
static int acpi_processor_set_pdc(struct acpi_processor *pr)
{
	struct acpi_object_list *pdc_in = pr->pdc;
	acpi_status status = AE_OK;


	if (!pdc_in)
		return status;
	if (idle_nomwait) {
		/*
		 * If MWAIT is disabled for CPU C-states, clear the C2C3_FFH
		 * access-mode bit in the _PDC capability argument; the
		 * C1_FFH access mode is cleared as well.
		 */
		union acpi_object *obj;
		u32 *buffer = NULL;

		obj = pdc_in->pointer;
		buffer = (u32 *)(obj->buffer.pointer);
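		/*
		 * The _PDC argument is a buffer of [revision, count,
		 * capabilities]; buffer[2] is the capability DWORD that
		 * carries the FFH (native MWAIT) C-state bits.
		 */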
		buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);

	}
	status = acpi_evaluate_object(pr->handle, "_PDC", pdc_in, NULL);

	if (ACPI_FAILURE(status))
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "Could not evaluate _PDC, using legacy perf. control...\n"));

	return status;
}

/* --------------------------------------------------------------------------
                              FS Interface (/proc)
   -------------------------------------------------------------------------- */

static struct proc_dir_entry *acpi_processor_dir = NULL;

static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;


	if (!pr)
		goto end;

	seq_printf(seq, "processor id:            %d\n"
		   "acpi id:                 %d\n"
		   "bus mastering control:   %s\n"
		   "power management:        %s\n"
		   "throttling control:      %s\n"
		   "limit interface:         %s\n",
		   pr->id,
		   pr->acpi_id,
		   pr->flags.bm_control ? "yes" : "no",
		   pr->flags.power ? "yes" : "no",
		   pr->flags.throttling ? "yes" : "no",
		   pr->flags.limit ? "yes" : "no");

      end:
	return 0;
}

static int acpi_processor_info_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_info_seq_show,
			   PDE(inode)->data);
}

static int acpi_processor_add_fs(struct acpi_device *device)
{
	struct proc_dir_entry *entry = NULL;


	if (!acpi_device_dir(device)) {
		acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
						     acpi_processor_dir);
		if (!acpi_device_dir(device))
			return -ENODEV;
	}
	acpi_device_dir(device)->owner = THIS_MODULE;

	/* 'info' [R] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_INFO,
				 S_IRUGO, acpi_device_dir(device),
				 &acpi_processor_info_fops,
				 acpi_driver_data(device));
	if (!entry)
		return -EIO;

	/* 'throttling' [R/W] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING,
				 S_IFREG | S_IRUGO | S_IWUSR,
				 acpi_device_dir(device),
				 &acpi_processor_throttling_fops,
				 acpi_driver_data(device));
	if (!entry)
		return -EIO;

	/* 'limit' [R/W] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_LIMIT,
				 S_IFREG | S_IRUGO | S_IWUSR,
				 acpi_device_dir(device),
				 &acpi_processor_limit_fops,
				 acpi_driver_data(device));
	if (!entry)
		return -EIO;
	return 0;
}

static int acpi_processor_remove_fs(struct acpi_device *device)
{

	if (acpi_device_dir(device)) {
		remove_proc_entry(ACPI_PROCESSOR_FILE_INFO,
				  acpi_device_dir(device));
		remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
				  acpi_device_dir(device));
		remove_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
				  acpi_device_dir(device));
		remove_proc_entry(acpi_device_bid(device), acpi_processor_dir);
		acpi_device_dir(device) = NULL;
	}

	return 0;
}

/* Use the ACPI id in the MADT to map processor objects to CPUs on SMP systems */

#ifndef CONFIG_SMP
static int get_cpu_id(acpi_handle handle, u32 acpi_id) {return -1;}
#else

static struct acpi_table_madt *madt;

static int map_lapic_id(struct acpi_subtable_header *entry,
		 u32 acpi_id, int *apic_id)
{
	struct acpi_madt_local_apic *lapic =
		(struct acpi_madt_local_apic *)entry;
	if ((lapic->lapic_flags & ACPI_MADT_ENABLED) &&
	    lapic->processor_id == acpi_id) {
		*apic_id = lapic->id;
		return 1;
	}
	return 0;
}

static int map_lsapic_id(struct acpi_subtable_header *entry,
		  u32 acpi_id, int *apic_id)
{
	struct acpi_madt_local_sapic *lsapic =
		(struct acpi_madt_local_sapic *)entry;
	/* Only check enabled APICs */
	if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
		/* First check against id */
		if (lsapic->processor_id == acpi_id) {
			*apic_id = (lsapic->id << 8) | lsapic->eid;
			return 1;
		/* Check against optional uid */
		} else if (entry->length >= 16 &&
			lsapic->uid == acpi_id) {
			*apic_id = lsapic->uid;
			return 1;
		}
	}
	return 0;
}

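/*
 * Walk every subtable in the static MADT and return the LAPIC/LSAPIC id that
 * matches the given ACPI processor id, or -1 if no enabled entry matches.
 */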
static int map_madt_entry(u32 acpi_id)
{
	unsigned long madt_end, entry;
	int apic_id = -1;

	if (!madt)
		return apic_id;

	entry = (unsigned long)madt;
	madt_end = entry + madt->header.length;

	/* Parse all entries looking for a match. */

	entry += sizeof(struct acpi_table_madt);
	while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
		struct acpi_subtable_header *header =
			(struct acpi_subtable_header *)entry;
		if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
			if (map_lapic_id(header, acpi_id, &apic_id))
				break;
		} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
			if (map_lsapic_id(header, acpi_id, &apic_id))
				break;
		}
		entry += header->length;
	}
	return apic_id;
}

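/*
 * _MAT, if present, returns a buffer holding a single MADT-style subtable for
 * this processor object; reuse the LAPIC/LSAPIC matching helpers on it.
 */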
static int map_mat_entry(acpi_handle handle, u32 acpi_id)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	struct acpi_subtable_header *header;
	int apic_id = -1;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
		goto exit;

	if (!buffer.length || !buffer.pointer)
		goto exit;

	obj = buffer.pointer;
	if (obj->type != ACPI_TYPE_BUFFER ||
	    obj->buffer.length < sizeof(struct acpi_subtable_header)) {
		goto exit;
	}

	header = (struct acpi_subtable_header *)obj->buffer.pointer;
	if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
		map_lapic_id(header, acpi_id, &apic_id);
	} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
		map_lsapic_id(header, acpi_id, &apic_id);
	}

exit:
	if (buffer.pointer)
		kfree(buffer.pointer);
	return apic_id;
}

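/*
 * Map an ACPI processor id to a logical CPU number: try the per-device _MAT
 * method first, fall back to the static MADT, then match the resulting
 * physical id against each possible CPU.
 */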
static int get_cpu_id(acpi_handle handle, u32 acpi_id)
{
	int i;
	int apic_id = -1;

	apic_id = map_mat_entry(handle, acpi_id);
	if (apic_id == -1)
		apic_id = map_madt_entry(acpi_id);
	if (apic_id == -1)
		return apic_id;

	for_each_possible_cpu(i) {
		if (cpu_physical_id(i) == apic_id)
			return i;
	}
	return -1;
}
#endif

/* --------------------------------------------------------------------------
                                 Driver Interface
   -------------------------------------------------------------------------- */

static int acpi_processor_get_info(struct acpi_processor *pr, unsigned has_uid)
{
	acpi_status status = 0;
	union acpi_object object = { 0 };
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
	int cpu_index;
	static int cpu0_initialized;


	if (!pr)
		return -EINVAL;

	if (num_online_cpus() > 1)
		errata.smp = TRUE;

	acpi_processor_errata(pr);

	/*
	 * Check to see if we have bus mastering arbitration control.  This
	 * is required for proper C3 usage (to maintain cache coherency).
	 */
	if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
		pr->flags.bm_control = 1;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Bus mastering arbitration control present\n"));
	} else
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "No bus mastering arbitration control\n"));

	/* Check if it is a Device with HID and UID */
	if (has_uid) {
		unsigned long long value;
		status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
						NULL, &value);
		if (ACPI_FAILURE(status)) {
			printk(KERN_ERR PREFIX "Error evaluating processor _UID\n");
			return -ENODEV;
		}
		pr->acpi_id = value;
	} else {
		/*
		 * Evaluate the processor object.  Note that it is common on SMP
		 * to have the first (boot) processor with a valid PBLK address
		 * while all others have a NULL address.
		 */
		status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
		if (ACPI_FAILURE(status)) {
			printk(KERN_ERR PREFIX "Error evaluating processor object\n");
			return -ENODEV;
		}

		/*
		 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
		 *      >>> 'acpi_get_processor_id(acpi_id, &id)' in arch/xxx/acpi.c
		 */
		pr->acpi_id = object.processor.proc_id;
	}
	cpu_index = get_cpu_id(pr->handle, pr->acpi_id);

	/* Handle UP system running SMP kernel, with no LAPIC in MADT */
	if (!cpu0_initialized && (cpu_index == -1) &&
	    (num_online_cpus() == 1)) {
		cpu_index = 0;
	}

	cpu0_initialized = 1;

	pr->id = cpu_index;

	/*
	 *  Extra Processor objects may be enumerated on MP systems with
	 *  less than the max # of CPUs. They should be ignored iff
	 *  they are physically not present.
	 */
	if (pr->id == -1) {
		if (ACPI_FAILURE
		    (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
			return -ENODEV;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
			  pr->acpi_id));

	if (!object.processor.pblk_address)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
	else if (object.processor.pblk_length != 6)
		printk(KERN_ERR PREFIX "Invalid PBLK length [%d]\n",
			    object.processor.pblk_length);
	else {
		pr->throttling.address = object.processor.pblk_address;
		pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;

		pr->pblk = object.processor.pblk_address;

		/*
		 * We don't care about error returns - we just try to mark
		 * these reserved so that nobody else is confused into thinking
		 * that this region might be unused.
		 *
		 * (In particular, allocating the IO range for Cardbus)
		 */
		request_region(pr->throttling.address, 6, "ACPI CPU throttle");
	}

	/*
	 * If ACPI describes a slot number for this CPU, we can use it to
	 * ensure we get the right value in the "physical id" field
	 * of /proc/cpuinfo.
	 */
	status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer);
	if (ACPI_SUCCESS(status))
		arch_fix_phys_package_id(pr->id, object.integer.value);

	return 0;
}

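/*
 * Per-CPU back-pointers to the ACPI device, used in acpi_processor_start()
 * to catch BIOSes that report the same ACPI id for more than one processor.
 */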
static DEFINE_PER_CPU(void *, processor_device_array);

static int __cpuinit acpi_processor_start(struct acpi_device *device)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_processor *pr;
	struct sys_device *sysdev;

	pr = acpi_driver_data(device);

	result = acpi_processor_get_info(pr, device->flags.unique_id);
	if (result) {
		/* Processor is physically not present */
		return 0;
	}

	BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));

	/*
	 * Buggy BIOS check
	 * ACPI id of processors can be reported wrongly by the BIOS.
	 * Don't trust it blindly
	 */
	if (per_cpu(processor_device_array, pr->id) != NULL &&
	    per_cpu(processor_device_array, pr->id) != device) {
		printk(KERN_WARNING "BIOS reported wrong ACPI id "
			"for the processor\n");
		return -ENODEV;
	}
	per_cpu(processor_device_array, pr->id) = device;

	per_cpu(processors, pr->id) = pr;

	result = acpi_processor_add_fs(device);
	if (result)
		goto end;

	sysdev = get_cpu_sysdev(pr->id);
	if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev"))
		return -EFAULT;

	status = acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
					     acpi_processor_notify, pr);

	/* _PDC call should be done before doing anything else (if reqd.). */
	arch_acpi_processor_init_pdc(pr);
	acpi_processor_set_pdc(pr);
#ifdef CONFIG_CPU_FREQ
	acpi_processor_ppc_has_changed(pr);
#endif
	acpi_processor_get_throttling_info(pr);
	acpi_processor_get_limit_info(pr);


	acpi_processor_power_init(pr, device);

	pr->cdev = thermal_cooling_device_register("Processor", device,
						&processor_cooling_ops);
	if (IS_ERR(pr->cdev)) {
		result = PTR_ERR(pr->cdev);
		goto end;
	}

	dev_info(&device->dev, "registered as cooling_device%d\n",
		 pr->cdev->id);

	result = sysfs_create_link(&device->dev.kobj,
				   &pr->cdev->device.kobj,
				   "thermal_cooling");
	if (result)
		printk(KERN_ERR PREFIX "Create sysfs link\n");
	result = sysfs_create_link(&pr->cdev->device.kobj,
				   &device->dev.kobj,
				   "device");
	if (result)
		printk(KERN_ERR PREFIX "Create sysfs link\n");

	if (pr->flags.throttling) {
		printk(KERN_INFO PREFIX "%s [%s] (supports",
		       acpi_device_name(device), acpi_device_bid(device));
		printk(" %d throttling states", pr->throttling.state_count);
		printk(")\n");
	}

      end:

	return result;
}

static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_processor *pr = data;
	struct acpi_device *device = NULL;
	int saved;

	if (!pr)
		return;

	if (acpi_bus_get_device(pr->handle, &device))
		return;

	switch (event) {
	case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
		saved = pr->performance_platform_limit;
		acpi_processor_ppc_has_changed(pr);
		if (saved == pr->performance_platform_limit)
			break;
		acpi_bus_generate_proc_event(device, event,
					pr->performance_platform_limit);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						  device->dev.bus_id, event,
						  pr->performance_platform_limit);
		break;
	case ACPI_PROCESSOR_NOTIFY_POWER:
		acpi_processor_cst_has_changed(pr);
		acpi_bus_generate_proc_event(device, event, 0);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						  device->dev.bus_id, event, 0);
		break;
	case ACPI_PROCESSOR_NOTIFY_THROTTLING:
		acpi_processor_tstate_has_changed(pr);
		acpi_bus_generate_proc_event(device, event, 0);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						  device->dev.bus_id, event, 0);
		break;
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Unsupported event [0x%x]\n", event));
		break;
	}

	return;
}

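/*
 * CPU hotplug (soft online) callback: when a CPU comes back online,
 * re-evaluate its _PPC, _CST and throttling state, since the platform may
 * have changed them while the CPU was offline.
 */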
static int acpi_cpu_soft_notify(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct acpi_processor *pr = per_cpu(processors, cpu);

	if (action == CPU_ONLINE && pr) {
		acpi_processor_ppc_has_changed(pr);
		acpi_processor_cst_has_changed(pr);
		acpi_processor_tstate_has_changed(pr);
	}
	return NOTIFY_OK;
}

static struct notifier_block acpi_cpu_notifier = {
	.notifier_call = acpi_cpu_soft_notify,
};

static int acpi_processor_add(struct acpi_device *device)
{
	struct acpi_processor *pr = NULL;


	if (!device)
		return -EINVAL;

	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
	if (!pr)
		return -ENOMEM;

	pr->handle = device->handle;
	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
	acpi_driver_data(device) = pr;

	return 0;
}

static int acpi_processor_remove(struct acpi_device *device, int type)
{
	acpi_status status = AE_OK;
	struct acpi_processor *pr = NULL;


	if (!device || !acpi_driver_data(device))
		return -EINVAL;

	pr = acpi_driver_data(device);

	if (pr->id >= nr_cpu_ids) {
		kfree(pr);
		return 0;
	}

	if (type == ACPI_BUS_REMOVAL_EJECT) {
		if (acpi_processor_handle_eject(pr))
			return -EINVAL;
	}

	acpi_processor_power_exit(pr, device);

	status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
					    acpi_processor_notify);

	sysfs_remove_link(&device->dev.kobj, "sysdev");

	acpi_processor_remove_fs(device);

	if (pr->cdev) {
		sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
		sysfs_remove_link(&pr->cdev->device.kobj, "device");
		thermal_cooling_device_unregister(pr->cdev);
		pr->cdev = NULL;
	}

	per_cpu(processors, pr->id) = NULL;
	per_cpu(processor_device_array, pr->id) = NULL;
	kfree(pr);

	return 0;
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU
/****************************************************************************
 *	ACPI processor hotplug support					    *
 ****************************************************************************/

static int is_processor_present(acpi_handle handle)
{
	acpi_status status;
	unsigned long long sta = 0;


	status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);

	if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT))
		return 1;

	/*
	 * _STA is mandatory for a processor that supports hot plug
	 */
	if (status == AE_NOT_FOUND)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				"Processor does not support hot plug\n"));
	else
		ACPI_EXCEPTION((AE_INFO, status,
				"Processor Device is not present"));
	return 0;
}

static
int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
{
	acpi_handle phandle;
	struct acpi_device *pdev;
	struct acpi_processor *pr;


	if (acpi_get_parent(handle, &phandle)) {
		return -ENODEV;
	}

	if (acpi_bus_get_device(phandle, &pdev)) {
		return -ENODEV;
	}

	if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_PROCESSOR)) {
		return -ENODEV;
	}

	acpi_bus_start(*device);

	pr = acpi_driver_data(*device);
	if (!pr)
		return -ENODEV;

	if ((pr->id >= 0) && (pr->id < nr_cpu_ids)) {
		kobject_uevent(&(*device)->dev.kobj, KOBJ_ONLINE);
	}
	return 0;
}

static void __ref acpi_processor_hotplug_notify(acpi_handle handle,
						u32 event, void *data)
{
	struct acpi_processor *pr;
	struct acpi_device *device = NULL;
	int result;


	switch (event) {
	case ACPI_NOTIFY_BUS_CHECK:
	case ACPI_NOTIFY_DEVICE_CHECK:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"Processor driver received %s event\n",
		       (event == ACPI_NOTIFY_BUS_CHECK) ?
		       "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"));

		if (!is_processor_present(handle))
			break;

		if (acpi_bus_get_device(handle, &device)) {
			result = acpi_processor_device_add(handle, &device);
			if (result)
				printk(KERN_ERR PREFIX
					    "Unable to add the device\n");
			break;
		}

		pr = acpi_driver_data(device);
		if (!pr) {
			printk(KERN_ERR PREFIX "Driver data is NULL\n");
			break;
		}

		if (pr->id >= 0 && (pr->id < nr_cpu_ids)) {
			kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
			break;
		}

		result = acpi_processor_start(device);
		if ((!result) && ((pr->id >= 0) && (pr->id < nr_cpu_ids))) {
			kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
		} else {
			printk(KERN_ERR PREFIX "Device [%s] failed to start\n",
				    acpi_device_bid(device));
		}
		break;
	case ACPI_NOTIFY_EJECT_REQUEST:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "received ACPI_NOTIFY_EJECT_REQUEST\n"));

		if (acpi_bus_get_device(handle, &device)) {
			printk(KERN_ERR PREFIX
				    "Device does not exist, dropping EJECT\n");
			break;
		}
		pr = acpi_driver_data(device);
		if (!pr) {
			printk(KERN_ERR PREFIX
				    "Driver data is NULL, dropping EJECT\n");
			return;
		}

		if ((pr->id < nr_cpu_ids) && (cpu_present(pr->id)))
			kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
		break;
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Unsupported event [0x%x]\n", event));
		break;
	}

	return;
}

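/*
 * Namespace walk callback: install or remove the hotplug notify handler on
 * every Processor object, depending on the action passed in via 'context'.
 */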
static acpi_status
processor_walk_namespace_cb(acpi_handle handle,
			    u32 lvl, void *context, void **rv)
{
	acpi_status status;
	int *action = context;
	acpi_object_type type = 0;

	status = acpi_get_type(handle, &type);
	if (ACPI_FAILURE(status))
		return (AE_OK);

	if (type != ACPI_TYPE_PROCESSOR)
		return (AE_OK);

	switch (*action) {
	case INSTALL_NOTIFY_HANDLER:
		acpi_install_notify_handler(handle,
					    ACPI_SYSTEM_NOTIFY,
					    acpi_processor_hotplug_notify,
					    NULL);
		break;
	case UNINSTALL_NOTIFY_HANDLER:
		acpi_remove_notify_handler(handle,
					   ACPI_SYSTEM_NOTIFY,
					   acpi_processor_hotplug_notify);
		break;
	default:
		break;
	}

	return (AE_OK);
}

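/*
 * Hot-add path: map the new processor's LAPIC/LSAPIC to a logical CPU id and
 * register it with the architecture code, undoing the mapping on failure.
 */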
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
{

	if (!is_processor_present(handle)) {
		return AE_ERROR;
	}

	if (acpi_map_lsapic(handle, p_cpu))
		return AE_ERROR;

	if (arch_register_cpu(*p_cpu)) {
		acpi_unmap_lsapic(*p_cpu);
		return AE_ERROR;
	}

	return AE_OK;
}

static int acpi_processor_handle_eject(struct acpi_processor *pr)
{
	if (cpu_online(pr->id))
		cpu_down(pr->id);

	arch_unregister_cpu(pr->id);
	acpi_unmap_lsapic(pr->id);
	return (0);
}
#else
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
{
	return AE_ERROR;
}
static int acpi_processor_handle_eject(struct acpi_processor *pr)
{
	return (-EINVAL);
}
#endif

static
void acpi_processor_install_hotplug_notify(void)
{
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	int action = INSTALL_NOTIFY_HANDLER;
	acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
			    ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX,
			    processor_walk_namespace_cb, &action, NULL);
#endif
	register_hotcpu_notifier(&acpi_cpu_notifier);
}

static
void acpi_processor_uninstall_hotplug_notify(void)
{
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	int action = UNINSTALL_NOTIFY_HANDLER;
	acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
			    ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX,
			    processor_walk_namespace_cb, &action, NULL);
#endif
	unregister_hotcpu_notifier(&acpi_cpu_notifier);
}

/*
 * Keep the driver loaded even when ACPI is not running: the powernow-k8
 * cpufreq driver works without ACPI but still needs symbols from this driver.
 */

static int __init acpi_processor_init(void)
{
	int result = 0;

	memset(&errata, 0, sizeof(errata));

#ifdef CONFIG_SMP
	if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
				(struct acpi_table_header **)&madt)))
		madt = NULL;
#endif

	acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
	if (!acpi_processor_dir)
		return -ENOMEM;
	acpi_processor_dir->owner = THIS_MODULE;

	/*
	 * Check whether the system is listed in the DMI table; if so, OSPM
	 * should not use MWAIT for CPU C-states.
	 */
	dmi_check_system(processor_idle_dmi_table);
	result = cpuidle_register_driver(&acpi_idle_driver);
	if (result < 0)
		goto out_proc;

	result = acpi_bus_register_driver(&acpi_processor_driver);
	if (result < 0)
		goto out_cpuidle;

	acpi_processor_install_hotplug_notify();

	acpi_thermal_cpufreq_init();

	acpi_processor_ppc_init();

	acpi_processor_throttling_init();

	return 0;

out_cpuidle:
	cpuidle_unregister_driver(&acpi_idle_driver);

out_proc:
	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);

	return result;
}

static void __exit acpi_processor_exit(void)
{
	acpi_processor_ppc_exit();

	acpi_thermal_cpufreq_exit();

	acpi_processor_uninstall_hotplug_notify();

	acpi_bus_unregister_driver(&acpi_processor_driver);

	cpuidle_unregister_driver(&acpi_idle_driver);

	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);

	return;
}

module_init(acpi_processor_init);
module_exit(acpi_processor_exit);

EXPORT_SYMBOL(acpi_processor_set_thermal_limit);

MODULE_ALIAS("processor");
