/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>

volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

/* representing cpus which are coherent with the rest of the system */
cpumask_t cpu_coherent_mask;

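/*
 * Record the sibling relationships for @cpu: every already set up CPU
 * in the same package and core becomes a sibling. With only one TC per
 * core, the CPU is simply its own sibling.
 */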
static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (cpu_data[cpu].package == cpu_data[i].package &&
				    cpu_data[cpu].core == cpu_data[i].core) {
				cpu_set(i, cpu_sibling_map[cpu]);
				cpu_set(cpu, cpu_sibling_map[i]);
			}
		}
	} else
		cpu_set(cpu, cpu_sibling_map[cpu]);
}

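/*
 * Record which of the already set up CPUs share a physical package
 * with @cpu, updating both sides of the relation.
 */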
static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpu_set(cpu, cpu_core_setup_map);

	for_each_cpu_mask(i, cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpu_set(i, cpu_core_map[cpu]);
			cpu_set(cpu, cpu_core_map[i]);
		}
	}
}

struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

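/*
 * Platform code registers its SMP operations here; registering a
 * second time overrides the previous ops with a warning.
 */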
void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	cpu_probe();
	cpu_report();
	per_cpu_trap_init(false);
	mips_clockevent_init();
	mp_ops->init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	cpu_set(cpu, cpu_coherent_mask);
	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	cpu_set(cpu, cpu_callin_map);

	synchronise_count_slave(cpu);

	/*
	 * Interrupts will be enabled in ->smp_finish(); enabling them
	 * any earlier is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	mp_ops->smp_finish();

	cpu_startup_entry(CPUHP_ONLINE);
}

/*
 * The multi-target and single-target function call IPIs are shared, so
 * the generic handler below processes both call queues.
 */
void __irq_entry smp_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	for (;;) {
		if (cpu_wait)
			(*cpu_wait)();		/* Wait if available. */
	}
}

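/*
 * Typically used on shutdown/reboot paths: ask all other CPUs to mark
 * themselves offline and spin. The third argument of 0 means we do not
 * wait for the IPI handlers to complete.
 */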
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
	cpu_set(0, cpu_callin_map);
}

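/*
 * Boot secondary @cpu and busy-wait until it signals arrival by
 * setting its bit in cpu_callin_map from start_secondary().
 */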
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	mp_ops->boot_secondary(cpu, tidle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	synchronise_count_master(cpu);
	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

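/* Flush the entire TLB of every online CPU, including the caller. */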
static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special variant of smp_call_function() for use by the TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
	smp_call_function(func, info, 1);
}

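/*
 * Like smp_on_other_tlbs() but also runs @func locally; preemption is
 * disabled so the calling CPU cannot migrate between the remote and
 * local calls.
 */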
static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following TLB flush calls are invoked when old translations are
 * being torn down or PTE attributes are changing. For single-threaded
 * address spaces, a new context is obtained on the current CPU, and the
 * TLB context on other CPUs is invalidated to force a new context
 * allocation at switch_mm time, should the mm ever be used on other
 * CPUs. For multithreaded address spaces, inter-CPU interrupts have to
 * be sent. Inter-CPU interrupts are also required when the target mm
 * might be active on another CPU (e.g. debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process, etc.).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

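/* Argument block passed to the TLB flush IPI handlers below. */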
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

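/*
 * Kernel mappings are shared by all CPUs, so there is no per-mm context
 * to invalidate: flush the range on every online CPU unconditionally.
 */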
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
		}
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

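/* Flush the TLB entry for a single virtual address on every CPU. */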
void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#if defined(CONFIG_KEXEC)
void (*dump_ipi_function_ptr)(void *) = NULL;
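/*
 * Used by the kexec/crash path: publish @dump_ipi_callback (the barrier
 * makes it visible before the IPIs land) and send SMP_DUMP to every
 * other online CPU.
 */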
void dump_send_ipi(void (*dump_ipi_callback)(void *))
{
	int i;
	int cpu = smp_processor_id();

	dump_ipi_function_ptr = dump_ipi_callback;
	smp_mb();
	for_each_online_cpu(i)
		if (i != cpu)
			mp_ops->send_ipi_single(i, SMP_DUMP);
}
EXPORT_SYMBOL(dump_send_ipi);
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);

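/*
 * Relay a clock event broadcast to each CPU in @mask. The per-CPU count
 * lets broadcasts to a CPU that has not yet handled the previous one
 * skip sending another IPI; tick_broadcast_callee() resets the count.
 */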
void tick_broadcast(const struct cpumask *mask)
{
	atomic_t *count;
	struct call_single_data *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		count = &per_cpu(tick_broadcast_count, cpu);
		csd = &per_cpu(tick_broadcast_csd, cpu);

		if (atomic_inc_return(count) == 1)
			smp_call_function_single_async(cpu, csd);
	}
}

static void tick_broadcast_callee(void *info)
{
	int cpu = smp_processor_id();
	tick_receive_broadcast();
	atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
}

static int __init tick_broadcast_init(void)
{
	struct call_single_data *csd;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		csd = &per_cpu(tick_broadcast_csd, cpu);
		csd->func = tick_broadcast_callee;
	}

	return 0;
}
early_initcall(tick_broadcast_init);

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */