/*
 * drivers/base/cpu.c - basic CPU class support
 */

#include <linux/sysdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/device.h>
#include <linux/node.h>

#include "base.h"

struct sysdev_class cpu_sysdev_class = {
	.name = "cpu",
};
EXPORT_SYMBOL(cpu_sysdev_class);

static DEFINE_PER_CPU(struct sys_device *, cpu_sys_devices);

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t show_online(struct sys_device *dev, struct sysdev_attribute *attr,
			   char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, sysdev);

	return sprintf(buf, "%u\n", !!cpu_online(cpu->sysdev.id));
}

static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, sysdev);
	ssize_t ret;

	cpu_hotplug_driver_lock();
	switch (buf[0]) {
	case '0':
		ret = cpu_down(cpu->sysdev.id);
		if (!ret)
			kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
		break;
	case '1':
		ret = cpu_up(cpu->sysdev.id);
		if (!ret)
			kobject_uevent(&dev->kobj, KOBJ_ONLINE);
		break;
	default:
		ret = -EINVAL;
	}
	cpu_hotplug_driver_unlock();

	if (ret >= 0)
		ret = count;
	return ret;
}
static SYSDEV_ATTR(online, 0644, show_online, store_online);
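
/*
 * Example (illustrative): the "online" attribute above is what userspace
 * writes to hot-plug or hot-unplug a CPU, e.g.
 *
 *	echo 0 > /sys/devices/system/cpu/cpu1/online
 *	echo 1 > /sys/devices/system/cpu/cpu1/online
 *
 * A '0' routes to cpu_down(), a '1' to cpu_up(), and a matching
 * KOBJ_OFFLINE/KOBJ_ONLINE uevent is emitted on success.
 */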

static void __cpuinit register_cpu_control(struct cpu *cpu)
{
	sysdev_create_file(&cpu->sysdev, &attr_online);
}
void unregister_cpu(struct cpu *cpu)
{
	int logical_cpu = cpu->sysdev.id;

	unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));

	sysdev_remove_file(&cpu->sysdev, &attr_online);

	sysdev_unregister(&cpu->sysdev);
	per_cpu(cpu_sys_devices, logical_cpu) = NULL;
	return;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
static ssize_t cpu_probe_store(struct class *class, const char *buf,
			       size_t count)
{
	return arch_cpu_probe(buf, count);
}

static ssize_t cpu_release_store(struct class *class, const char *buf,
				 size_t count)
{
	return arch_cpu_release(buf, count);
}

static CLASS_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static CLASS_ATTR(release, S_IWUSR, NULL, cpu_release_store);

int __init cpu_probe_release_init(void)
{
	int rc;

	rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
			       &class_attr_probe.attr);
	if (!rc)
		rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
				       &class_attr_release.attr);

	return rc;
}
device_initcall(cpu_probe_release_init);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

#else /* ... !CONFIG_HOTPLUG_CPU */
static inline void register_cpu_control(struct cpu *cpu)
{
}
#endif /* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_KEXEC
#include <linux/kexec.h>

static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute *attr,
				char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, sysdev);
	ssize_t rc;
	unsigned long long addr;
	int cpunum;

	cpunum = cpu->sysdev.id;

	/*
	 * We might be reading another CPU's data, depending on which CPU the
	 * reading thread happens to be scheduled on.  But per-cpu data
	 * (memory) is allocated once during boot and does not change
	 * thereafter, so this operation is safe and needs no locking.
	 */
	addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
	rc = sprintf(buf, "%Lx\n", addr);
	return rc;
}
static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
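
/*
 * Informational note: crash_notes exposes the physical address of this
 * CPU's crash notes buffer.  kdump userspace (kexec-tools) reads
 * /sys/devices/system/cpu/cpuN/crash_notes to assemble the per-cpu ELF
 * note headers handed to the crash kernel.
 */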
#endif

/*
 * Print cpu online, possible, present, and system maps
 */
static ssize_t print_cpus_map(char *buf, const struct cpumask *map)
{
	int n = cpulist_scnprintf(buf, PAGE_SIZE-2, map);

	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}

#define	print_cpus_func(type) \
static ssize_t print_cpus_##type(struct sysdev_class *class,		\
			struct sysdev_class_attribute *attr, char *buf)	\
{									\
	return print_cpus_map(buf, cpu_##type##_mask);			\
}									\
static struct sysdev_class_attribute attr_##type##_map =		\
	_SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL)

print_cpus_func(online);
print_cpus_func(possible);
print_cpus_func(present);

/*
 * Print values for NR_CPUS and offlined cpus
 */
static ssize_t print_cpus_kernel_max(struct sysdev_class *class,
				     struct sysdev_class_attribute *attr, char *buf)
{
	int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
	return n;
}
static SYSDEV_CLASS_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);

/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;

static ssize_t print_cpus_offline(struct sysdev_class *class,
				  struct sysdev_class_attribute *attr, char *buf)
{
	int n = 0, len = PAGE_SIZE-2;
	cpumask_var_t offline;

	/* display offline cpus < nr_cpu_ids */
	if (!alloc_cpumask_var(&offline, GFP_KERNEL))
		return -ENOMEM;
	cpumask_complement(offline, cpu_online_mask);
	n = cpulist_scnprintf(buf, len, offline);
	free_cpumask_var(offline);

	/* display offline cpus >= nr_cpu_ids */
	if (total_cpus && nr_cpu_ids < total_cpus) {
		if (n && n < len)
			buf[n++] = ',';

		if (nr_cpu_ids == total_cpus-1)
			n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
		else
			n += snprintf(&buf[n], len - n, "%d-%d",
						      nr_cpu_ids, total_cpus-1);
	}

	n += snprintf(&buf[n], len - n, "\n");
	return n;
}
static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL);
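
/*
 * Example of the resulting sysfs layout (values illustrative, for a
 * 4-CPU-online, 8-CPU-possible box):
 *
 *	$ cat /sys/devices/system/cpu/online
 *	0-3
 *	$ cat /sys/devices/system/cpu/offline
 *	4-7
 *	$ cat /sys/devices/system/cpu/kernel_max
 *	63
 */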

static struct sysdev_class_attribute *cpu_state_attr[] = {
	&attr_online_map,
	&attr_possible_map,
	&attr_present_map,
	&attr_kernel_max,
	&attr_offline,
};

static int cpu_states_init(void)
{
	int i;
	int err = 0;

	for (i = 0; i < ARRAY_SIZE(cpu_state_attr); i++) {
		int ret;
		ret = sysdev_class_create_file(&cpu_sysdev_class,
						cpu_state_attr[i]);
		if (!err)
			err = ret;
	}
	return err;
}

/*
 * register_cpu - Setup a sysfs device for a CPU.
 * @cpu - the CPU to register; if cpu->hotpluggable is set to 1, an "online"
 *	  control file is created in sysfs for this CPU.
 * @num - CPU number to use when creating the device.
 *
 * Initialize and register the CPU device.
 */
int __cpuinit register_cpu(struct cpu *cpu, int num)
{
	int error;
	cpu->node_id = cpu_to_node(num);
	cpu->sysdev.id = num;
	cpu->sysdev.cls = &cpu_sysdev_class;

	error = sysdev_register(&cpu->sysdev);

	if (!error && cpu->hotpluggable)
		register_cpu_control(cpu);
	if (!error)
		per_cpu(cpu_sys_devices, num) = &cpu->sysdev;
	if (!error)
		register_cpu_under_node(num, cpu_to_node(num));

#ifdef CONFIG_KEXEC
	if (!error)
		error = sysdev_create_file(&cpu->sysdev, &attr_crash_notes);
#endif
	return error;
}
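
/*
 * Illustrative sketch of a caller (hypothetical; real arch code varies):
 * an architecture's topology-init code typically owns a per-cpu struct cpu
 * and registers each present CPU through this interface, roughly:
 *
 *	static DEFINE_PER_CPU(struct cpu, cpu_devices);
 *
 *	static int __init example_topology_init(void)
 *	{
 *		int i;
 *
 *		for_each_present_cpu(i) {
 *			per_cpu(cpu_devices, i).hotpluggable = 1;
 *			register_cpu(&per_cpu(cpu_devices, i), i);
 *		}
 *		return 0;
 *	}
 *	subsys_initcall(example_topology_init);
 */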

struct sys_device *get_cpu_sysdev(unsigned cpu)
{
	if (cpu < nr_cpu_ids && cpu_possible(cpu))
		return per_cpu(cpu_sys_devices, cpu);
	else
		return NULL;
}
EXPORT_SYMBOL_GPL(get_cpu_sysdev);
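
/*
 * Example use of get_cpu_sysdev() (attribute name hypothetical): a driver
 * that wants to hang an extra per-cpu attribute off the CPU sysdev can do:
 *
 *	struct sys_device *sysdev = get_cpu_sysdev(cpu);
 *
 *	if (sysdev)
 *		err = sysdev_create_file(sysdev, &attr_my_driver_stat);
 *
 * where attr_my_driver_stat would be declared by the driver with
 * SYSDEV_ATTR().
 */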

int __init cpu_dev_init(void)
{
	int err;

	err = sysdev_class_register(&cpu_sysdev_class);
	if (!err)
		err = cpu_states_init();

#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
	if (!err)
		err = sched_create_sysfs_power_savings_entries(&cpu_sysdev_class);
#endif

	return err;
}