#ifndef _ASM_X86_SMP_H
#define _ASM_X86_SMP_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
#include <linux/init.h>
#include <asm/percpu.h>

/*
 * We need the APIC definitions automatically as part of 'smp.h'
 */
#ifdef CONFIG_X86_LOCAL_APIC
# include <asm/mpspec.h>
# include <asm/apic.h>
# ifdef CONFIG_X86_IO_APIC
#  include <asm/io_apic.h>
# endif
#endif
#include <asm/thread_info.h>
#include <asm/cpumask.h>
#include <asm/cpufeature.h>

extern int smp_num_siblings;
extern unsigned int num_processors;

static inline bool cpu_has_ht_siblings(void)
{
	bool has_siblings = false;
#ifdef CONFIG_SMP
	has_siblings = cpu_has_ht && smp_num_siblings > 1;
#endif
	return has_siblings;
}

DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
/* cpus sharing the last level cache: */
DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
DECLARE_PER_CPU(u16, cpu_llc_id);
DECLARE_PER_CPU(int, cpu_number);

static inline struct cpumask *cpu_sibling_mask(int cpu)
{
	return per_cpu(cpu_sibling_map, cpu);
}

static inline struct cpumask *cpu_core_mask(int cpu)
{
	return per_cpu(cpu_core_map, cpu);
}

static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
	return per_cpu(cpu_llc_shared_map, cpu);
}

DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
DECLARE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid);
#endif

/* Static state in head.S used to set up a CPU */
extern unsigned long stack_start; /* Initial stack pointer address */

struct smp_ops {
	void (*smp_prepare_boot_cpu)(void);
	void (*smp_prepare_cpus)(unsigned max_cpus);
	void (*smp_cpus_done)(unsigned max_cpus);

	void (*stop_other_cpus)(int wait);
	void (*smp_send_reschedule)(int cpu);

	int (*cpu_up)(unsigned cpu);
	int (*cpu_disable)(void);
	void (*cpu_die)(unsigned int cpu);
	void (*play_dead)(void);

	void (*send_call_func_ipi)(const struct cpumask *mask);
	void (*send_call_func_single_ipi)(int cpu);
};

/* Globals due to paravirt */
extern void set_cpu_sibling_map(int cpu);

#ifdef CONFIG_SMP
#ifndef CONFIG_PARAVIRT
#define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0)
#endif
extern struct smp_ops smp_ops;
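
/*
 * Illustrative sketch (not part of this header): a paravirt backend can
 * repoint individual smp_ops hooks at its own implementations, and the
 * generic wrappers below then dispatch through them. The hook names on
 * the right-hand side here are hypothetical:
 *
 *	smp_ops.smp_send_reschedule = my_hv_send_reschedule;
 *	smp_ops.send_call_func_ipi  = my_hv_send_call_func_ipi;
 *
 * On native hardware the structure keeps the native_* implementations
 * declared further down in this header.
 */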

static inline void smp_send_stop(void)
{
	smp_ops.stop_other_cpus(0);
}

static inline void stop_other_cpus(void)
{
	smp_ops.stop_other_cpus(1);
}

static inline void smp_prepare_boot_cpu(void)
{
	smp_ops.smp_prepare_boot_cpu();
}

static inline void smp_prepare_cpus(unsigned int max_cpus)
{
	smp_ops.smp_prepare_cpus(max_cpus);
}

static inline void smp_cpus_done(unsigned int max_cpus)
{
	smp_ops.smp_cpus_done(max_cpus);
}

static inline int __cpu_up(unsigned int cpu)
{
	return smp_ops.cpu_up(cpu);
}

static inline int __cpu_disable(void)
{
	return smp_ops.cpu_disable();
}

static inline void __cpu_die(unsigned int cpu)
{
	smp_ops.cpu_die(cpu);
}

static inline void play_dead(void)
{
	smp_ops.play_dead();
}

static inline void smp_send_reschedule(int cpu)
{
	smp_ops.smp_send_reschedule(cpu);
}

static inline void arch_send_call_function_single_ipi(int cpu)
{
	smp_ops.send_call_func_single_ipi(cpu);
}

static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_ops.send_call_func_ipi(mask);
}

void cpu_disable_common(void);
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void native_smp_cpus_done(unsigned int max_cpus);
int native_cpu_up(unsigned int cpunum);
int native_cpu_disable(void);
void native_cpu_die(unsigned int cpu);
void native_play_dead(void);
void play_dead_common(void);
void wbinvd_on_cpu(int cpu);
int wbinvd_on_all_cpus(void);

void native_send_call_func_ipi(const struct cpumask *mask);
void native_send_call_func_single_ipi(int cpu);

void smp_store_cpu_info(int id);
#define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)

/* We don't mark CPUs online until __cpu_up(), so we need another measure */
static inline int num_booting_cpus(void)
{
	return cpumask_weight(cpu_callout_mask);
}
#else /* !CONFIG_SMP */
#define wbinvd_on_cpu(cpu)	wbinvd()
static inline int wbinvd_on_all_cpus(void)
{
	wbinvd();
	return 0;
}
#endif /* CONFIG_SMP */

extern unsigned disabled_cpus __cpuinitdata;

#ifdef CONFIG_X86_32_SMP
/*
 * This function is needed by all SMP systems. It must _always_ be valid
 * from the initial startup. We map APIC_BASE very early in page_setup(),
 * so this is correct in the x86 case.
 */
#define raw_smp_processor_id() (percpu_read(cpu_number))
extern int safe_smp_processor_id(void);

#elif defined(CONFIG_X86_64_SMP)
#define raw_smp_processor_id() (percpu_read(cpu_number))

#define stack_smp_processor_id()					\
({									\
	struct thread_info *ti;						\
	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
	ti->cpu;							\
})
#define safe_smp_processor_id()	smp_processor_id()

#endif

#ifdef CONFIG_X86_LOCAL_APIC

#ifndef CONFIG_X86_64
static inline int logical_smp_processor_id(void)
{
	/* we don't want to mark this access volatile - bad code generation */
	return GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
}

#endif

extern int hard_smp_processor_id(void);

#else /* CONFIG_X86_LOCAL_APIC */

# ifndef CONFIG_SMP
#  define hard_smp_processor_id()	0
# endif

#endif /* CONFIG_X86_LOCAL_APIC */

#ifdef CONFIG_DEBUG_NMI_SELFTEST
extern void nmi_selftest(void);
#else
#define nmi_selftest() do { } while (0)
#endif

#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_SMP_H */
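
/*
 * Usage sketch (illustrative only, not part of the API surface defined
 * above): walking the hyperthread siblings of a CPU via the accessor
 * declared in this header. for_each_cpu() comes from <linux/cpumask.h>,
 * already included here; pr_info() is assumed from <linux/printk.h>:
 *
 *	int sibling;
 *
 *	for_each_cpu(sibling, cpu_sibling_mask(cpu))
 *		pr_info("CPU%d shares a core with CPU%d\n", cpu, sibling);
 */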