// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support, based on arch/arm/kernel/smp.c.
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/acpi.h>
#include <linux/arm_sdei.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>
#include <linux/kernel_stat.h>
#include <linux/kexec.h>
#include <linux/kgdb.h>
#include <linux/kvm_host.h>
#include <linux/nmi.h>
#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/daifflags.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>
#include <trace/events/ipi.h>
/*
 * Data handed to a secondary CPU at boot: the idle task it should adopt
 * and a status word it uses to report early boot progress back to the
 * boot CPU.
 */
struct secondary_data secondary_data;
/* Number of CPUs which aren't online, but looping in kernel text. */
static int cpus_stuck_in_kernel;
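/*
 * The IPI messages used below. NR_IPI bounds the trace-visible IPIs;
 * anything in [NR_IPI, MAX_IPI) is special and not traceable with
 * trace_ipi_*. Restored here so that NR_IPI/MAX_IPI resolve; this
 * assumes the enum is not already provided by <asm/smp.h>.
 */
enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_CPU_STOP_NMI,
	IPI_TIMER,
	IPI_IRQ_WORK,
	NR_IPI,
	IPI_CPU_BACKTRACE = NR_IPI,
	IPI_KGDB_ROUNDUP,
	MAX_IPI
};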
static int ipi_irq_base __ro_after_init;
static int nr_ipi __ro_after_init = NR_IPI;
struct ipi_descs {
struct irq_desc *descs[MAX_IPI];
};
static DEFINE_PER_CPU_READ_MOSTLY(struct ipi_descs, pcpu_ipi_desc);
#define get_ipi_desc(__cpu, __ipi) (per_cpu_ptr(&pcpu_ipi_desc, __cpu)->descs[__ipi])
/* True when each IPI has its own per-CPU IRQ descriptor (LPI-backed IPIs). */
static bool percpu_ipi_descs __ro_after_init;
/* Set by crash_smp_send_stop() so the stop IPI handler takes the crash path. */
static bool crash_stop;
static void ipi_setup(int cpu);
#ifdef CONFIG_HOTPLUG_CPU
static void ipi_teardown(int cpu);
static int op_cpu_kill(unsigned int cpu);
#else
static inline int op_cpu_kill(unsigned int cpu)
{
return -ENOSYS;
}
#endif
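/*
 * Boot a secondary CPU through its enable method (e.g. PSCI or
 * spin-table), as discovered at probe time.
 */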
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
const struct cpu_operations *ops = get_cpu_ops(cpu);
if (ops->cpu_boot)
return ops->cpu_boot(cpu);
return -EOPNOTSUPP;
}
static DECLARE_COMPLETION(cpu_running);
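/*
 * Bring a secondary CPU online. Called by the generic hotplug code with
 * the idle task the new CPU will run once it has booted.
 */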
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
int ret;
long status;
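	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */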
secondary_data.task = idle;
update_cpu_boot_status(CPU_MMU_OFF);
ret = boot_secondary(cpu, idle);
if (ret) {
if (ret != -EPERM)
pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
return ret;
}
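	/* Now bring the CPU into our world. */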
wait_for_completion_timeout(&cpu_running,
msecs_to_jiffies(5000));
if (cpu_online(cpu))
return 0;
pr_crit("CPU%u: failed to come online\n", cpu);
secondary_data.task = NULL;
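	/* The CPU never came online; decode the boot status word to see why. */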
status = READ_ONCE(secondary_data.status);
if (status == CPU_MMU_OFF)
status = READ_ONCE(__early_cpu_boot_status);
switch (status & CPU_BOOT_STATUS_MASK) {
default:
pr_err("CPU%u: failed in unknown state : 0x%lx\n",
cpu, status);
cpus_stuck_in_kernel++;
break;
case CPU_KILL_ME:
if (!op_cpu_kill(cpu)) {
pr_crit("CPU%u: died during early boot\n", cpu);
break;
}
pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
fallthrough;
case CPU_STUCK_IN_KERNEL:
pr_crit("CPU%u: is stuck in kernel\n", cpu);
if (status & CPU_STUCK_REASON_52_BIT_VA)
pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
if (status & CPU_STUCK_REASON_NO_GRAN) {
pr_crit("CPU%u: does not support %luK granule\n",
cpu, PAGE_SIZE / SZ_1K);
}
cpus_stuck_in_kernel++;
break;
case CPU_PANIC_KERNEL:
panic("CPU%u detected unsupported configuration\n", cpu);
}
return -EIO;
}
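/*
 * Initialise GIC priority masking on this CPU. Expected to be called with
 * IRQs and FIQs still masked at the PSTATE level, so that interrupt
 * masking can be handed over to the PMR before PSTATE.I is cleared.
 */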
static void init_gic_priority_masking(void)
{
u32 cpuflags;
if (WARN_ON(!gic_enable_sre()))
return;
cpuflags = read_sysreg(daif);
WARN_ON(!(cpuflags & PSR_I_BIT));
WARN_ON(!(cpuflags & PSR_F_BIT));
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
}
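/*
 * This is the secondary CPU boot entry point. We're using the idle
 * thread stack, but a set of temporary page tables.
 */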
asmlinkage notrace void secondary_start_kernel(void)
{
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
struct mm_struct *mm = &init_mm;
const struct cpu_operations *ops;
unsigned int cpu = smp_processor_id();
mmgrab(mm);
current->active_mm = mm;
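	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to the zero page to avoid speculatively fetching new entries.
	 */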
cpu_uninstall_idmap();
if (system_uses_irq_prio_masking())
init_gic_priority_masking();
rcutree_report_cpu_starting(cpu);
trace_hardirqs_off();
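	/*
	 * If the system has established the capabilities, make sure this CPU
	 * ticks all of those. If it doesn't, the CPU is parked via
	 * cpu_die_early().
	 */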
check_local_cpu_capabilities();
ops = get_cpu_ops(cpu);
if (ops->cpu_postboot)
ops->cpu_postboot();
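	/* Log the CPU info before it is marked online and might get read. */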
cpuinfo_store_cpu();
store_cpu_topology(cpu);
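	/* Enable GIC and timers. */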
notify_cpu_starting(cpu);
ipi_setup(cpu);
numa_add_cpu(cpu);
pr_info("CPU%u: Booted secondary processor 0x%010lx [0x%08x]\n",
cpu, (unsigned long)mpidr,
read_cpuid_id());
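	/*
	 * Mark the boot as successful, publish the CPU as online and release
	 * the boot CPU waiting in __cpu_up().
	 */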
update_cpu_boot_status(CPU_BOOT_SUCCESS);
set_cpu_online(cpu, true);
complete(&cpu_running);
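	/* Secondary CPUs enter the kernel with all DAIF exceptions masked. */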
local_daif_restore(DAIF_PROCCTX);
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
const struct cpu_operations *ops = get_cpu_ops(cpu);
if (!ops || !ops->cpu_die)
return -EOPNOTSUPP;
if (ops->cpu_disable)
return ops->cpu_disable(cpu);
return 0;
}
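/*
 * __cpu_disable runs on the processor to be shutdown.
 */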
int __cpu_disable(void)
{
unsigned int cpu = smp_processor_id();
int ret;
ret = op_cpu_disable(cpu);
if (ret)
return ret;
remove_cpu_topology(cpu);
numa_remove_cpu(cpu);
set_cpu_online(cpu, false);
ipi_teardown(cpu);
irq_migrate_all_off_this_cpu();
return 0;
}
static int op_cpu_kill(unsigned int cpu)
{
const struct cpu_operations *ops = get_cpu_ops(cpu);
if (!ops->cpu_kill)
return 0;
return ops->cpu_kill(cpu);
}
void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
{
int err;
pr_debug("CPU%u: shutdown\n", cpu);
err = op_cpu_kill(cpu);
if (err)
pr_warn("CPU%d may not have shut down cleanly: %d\n", cpu, err);
}
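/*
 * Called from the idle thread for the CPU which has been shutdown.
 * The cpu_die method is expected not to return; if it does, something
 * has gone badly wrong, hence the BUG().
 */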
void __noreturn cpu_die(void)
{
unsigned int cpu = smp_processor_id();
const struct cpu_operations *ops = get_cpu_ops(cpu);
idle_task_exit();
local_daif_mask();
cpuhp_ap_report_dead();
ops->cpu_die(cpu);
BUG();
}
#endif
static void __cpu_try_die(int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
const struct cpu_operations *ops = get_cpu_ops(cpu);
if (ops && ops->cpu_die)
ops->cpu_die(cpu);
#endif
}
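/*
 * Kill the calling secondary CPU early in bringup, before it is turned
 * online. The failure is reported through the boot status word so that
 * __cpu_up() on the boot CPU can tell what happened.
 */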
void __noreturn cpu_die_early(void)
{
int cpu = smp_processor_id();
pr_crit("CPU%d: will not boot\n", cpu);
set_cpu_present(cpu, 0);
rcutree_report_cpu_dead();
if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
update_cpu_boot_status(CPU_KILL_ME);
__cpu_try_die(cpu);
}
update_cpu_boot_status(CPU_STUCK_IN_KERNEL);
cpu_park_loop();
}
static void __init hyp_mode_check(void)
{
if (is_hyp_mode_available())
pr_info("CPU: All CPU(s) started at EL2\n");
else if (is_hyp_mode_mismatched())
WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
"CPU: CPUs started in inconsistent modes");
else
pr_info("CPU: All CPU(s) started at EL1\n");
if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode()) {
kvm_compute_layout();
kvm_apply_hyp_relocations();
}
}
void __init smp_cpus_done(unsigned int max_cpus)
{
pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
hyp_mode_check();
setup_system_features();
setup_user_features();
mark_linear_text_alias_ro();
}
void __init smp_prepare_boot_cpu(void)
{
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
cpuinfo_store_boot_cpu();
setup_boot_cpu_features();
if (system_uses_irq_prio_masking())
init_gic_priority_masking();
kasan_init_hw_tags();
kasan_init_sw_tags();
}
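/*
 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
 * entries and check for duplicates. If any is found just ignore the
 * CPU. cpu_logical_map was initialized to INVALID_HWID to avoid
 * matching valid MPIDR values.
 */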
static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
{
unsigned int i;
for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
if (cpu_logical_map(i) == hwid)
return true;
return false;
}
static int __init smp_cpu_setup(int cpu)
{
const struct cpu_operations *ops;
if (init_cpu_ops(cpu))
return -ENODEV;
ops = get_cpu_ops(cpu);
if (ops->cpu_init(cpu))
return -ENODEV;
set_cpu_possible(cpu, true);
return 0;
}
static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;
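/*
 * Register a CPU device with the driver core. With ACPI-based CPU
 * hotplug, registration is deferred until the corresponding ACPI
 * processor handle becomes available.
 */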
int arch_register_cpu(int cpu)
{
acpi_handle acpi_handle = acpi_get_processor_handle(cpu);
struct cpu *c = &per_cpu(cpu_devices, cpu);
if (!acpi_disabled && !acpi_handle &&
IS_ENABLED(CONFIG_ACPI_HOTPLUG_CPU))
return -EPROBE_DEFER;
#ifdef CONFIG_ACPI_HOTPLUG_CPU
if (invalid_logical_cpuid(cpu) || !cpu_present(cpu)) {
pr_err_once("Changing CPU present bit is not supported\n");
return -ENODEV;
}
#endif
c->hotpluggable = arch_cpu_is_hotpluggable(cpu);
return register_cpu(c, cpu);
}
#ifdef CONFIG_ACPI_HOTPLUG_CPU
void arch_unregister_cpu(int cpu)
{
acpi_handle acpi_handle = acpi_get_processor_handle(cpu);
struct cpu *c = &per_cpu(cpu_devices, cpu);
acpi_status status;
unsigned long long sta;
if (!acpi_handle) {
pr_err_once("Removing a CPU without associated ACPI handle\n");
return;
}
status = acpi_evaluate_integer(acpi_handle, "_STA", NULL, &sta);
if (ACPI_FAILURE(status))
return;
if (cpu_present(cpu) && !(sta & ACPI_STA_DEVICE_PRESENT)) {
pr_err_once("Changing CPU present bit is not supported\n");
return;
}
unregister_cpu(c);
}
#endif
#ifdef CONFIG_ACPI
static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS];
struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
{
return &cpu_madt_gicc[cpu];
}
EXPORT_SYMBOL_GPL(acpi_cpu_get_madt_gicc);
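/*
 * acpi_map_gic_cpu_interface - parse processor MADT entry
 *
 * Carry out sanity checks on MADT processor entry and initialize
 * cpu_logical_map on success.
 */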
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
u64 hwid = processor->arm_mpidr;
if (!(processor->flags &
(ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE))) {
pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
return;
}
if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
return;
}
if (is_mpidr_duplicate(cpu_count, hwid)) {
pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
return;
}
if (cpu_logical_map(0) == hwid) {
if (bootcpu_valid) {
pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
hwid);
return;
}
bootcpu_valid = true;
cpu_madt_gicc[0] = *processor;
return;
}
if (cpu_count >= NR_CPUS)
return;
set_cpu_logical_map(cpu_count, hwid);
cpu_madt_gicc[cpu_count] = *processor;
acpi_set_mailbox_entry(cpu_count, processor);
cpu_count++;
}
static int __init
acpi_parse_gic_cpu_interface(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_madt_generic_interrupt *processor;
processor = (struct acpi_madt_generic_interrupt *)header;
if (BAD_MADT_GICC_ENTRY(processor, end))
return -EINVAL;
acpi_table_print_madt_entry(&header->common);
acpi_map_gic_cpu_interface(processor);
return 0;
}
static void __init acpi_parse_and_init_cpus(void)
{
int i;
acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
acpi_parse_gic_cpu_interface, 0);
acpi_map_cpus_to_nodes();
for (i = 0; i < nr_cpu_ids; i++)
early_map_cpu_to_node(i, acpi_numa_get_nid(i));
}
#else
#define acpi_parse_and_init_cpus(...) do { } while (0)
#endif
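/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */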
static void __init of_parse_and_init_cpus(void)
{
struct device_node *dn;
for_each_of_cpu_node(dn) {
u64 hwid = of_get_cpu_hwid(dn, 0);
if (hwid & ~MPIDR_HWID_BITMASK)
goto next;
if (is_mpidr_duplicate(cpu_count, hwid)) {
pr_err("%pOF: duplicate cpu reg properties in the DT\n",
dn);
goto next;
}
if (hwid == cpu_logical_map(0)) {
if (bootcpu_valid) {
pr_err("%pOF: duplicate boot cpu reg property in DT\n",
dn);
goto next;
}
bootcpu_valid = true;
early_map_cpu_to_node(0, of_node_to_nid(dn));
continue;
}
if (cpu_count >= NR_CPUS)
goto next;
pr_debug("cpu logical map 0x%llx\n", hwid);
set_cpu_logical_map(cpu_count, hwid);
early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
next:
cpu_count++;
}
}
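/*
 * Discover the possible CPU set (from ACPI or the device tree) and set up
 * the logical map, then initialise the CPU ops for each entry so the CPU
 * can later be brought online. If CPU set-up fails, the corresponding
 * cpu_logical_map entry is invalidated.
 */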
void __init smp_init_cpus(void)
{
int i;
if (acpi_disabled)
of_parse_and_init_cpus();
else
acpi_parse_and_init_cpus();
if (cpu_count > nr_cpu_ids)
pr_warn("Number of cores (%d) exceeds configured maximum of %u - clipping\n",
cpu_count, nr_cpu_ids);
if (!bootcpu_valid) {
pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
return;
}
for (i = 1; i < nr_cpu_ids; i++) {
if (cpu_logical_map(i) != INVALID_HWID) {
if (smp_cpu_setup(i))
set_cpu_logical_map(i, INVALID_HWID);
}
}
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
const struct cpu_operations *ops;
int err;
unsigned int cpu;
unsigned int this_cpu;
init_cpu_topology();
this_cpu = smp_processor_id();
store_cpu_topology(this_cpu);
numa_store_cpu_info(this_cpu);
numa_add_cpu(this_cpu);
if (max_cpus == 0)
return;
for_each_possible_cpu(cpu) {
if (cpu == smp_processor_id())
continue;
ops = get_cpu_ops(cpu);
if (!ops)
continue;
err = ops->cpu_prepare(cpu);
if (err)
continue;
set_cpu_present(cpu, true);
numa_store_cpu_info(cpu);
}
}
static const char *ipi_types[MAX_IPI] __tracepoint_string = {
[IPI_RESCHEDULE] = "Rescheduling interrupts",
[IPI_CALL_FUNC] = "Function call interrupts",
[IPI_CPU_STOP] = "CPU stop interrupts",
[IPI_CPU_STOP_NMI] = "CPU stop NMIs",
[IPI_TIMER] = "Timer broadcast interrupts",
[IPI_IRQ_WORK] = "IRQ work interrupts",
[IPI_CPU_BACKTRACE] = "CPU backtrace interrupts",
[IPI_KGDB_ROUNDUP] = "KGDB roundup interrupts",
};
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
unsigned long irq_err_count;
int arch_show_interrupts(struct seq_file *p, int prec)
{
unsigned int cpu, i;
for (i = 0; i < MAX_IPI; i++) {
seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
prec >= 4 ? " " : "");
for_each_online_cpu(cpu)
seq_printf(p, "%10u ", irq_desc_kstat_cpu(get_ipi_desc(cpu, i), cpu));
seq_printf(p, " %s\n", ipi_types[i]);
}
seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
return 0;
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
smp_cross_call(mask, IPI_CALL_FUNC);
}
void arch_send_call_function_single_ipi(int cpu)
{
smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif
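/*
 * Mark this CPU offline and park it in a low-power loop with all
 * interrupt sources masked. Does not return.
 */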
static void __noreturn local_cpu_stop(unsigned int cpu)
{
set_cpu_online(cpu, false);
local_daif_mask();
sdei_mask_local_cpu();
cpu_park_loop();
}
void __noreturn panic_smp_self_stop(void)
{
local_cpu_stop(smp_processor_id());
}
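/*
 * Stop this CPU on the way into a kexec crash kernel: save its registers
 * into the crash image, mark it offline and, where possible, power it off.
 */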
static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_KEXEC_CORE
local_daif_mask();
crash_save_cpu(regs, cpu);
set_cpu_online(cpu, false);
sdei_mask_local_cpu();
if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
__cpu_try_die(cpu);
cpu_park_loop();
#else
BUG();
#endif
}
static void arm64_send_ipi(const cpumask_t *mask, unsigned int nr)
{
unsigned int cpu;
if (!percpu_ipi_descs)
__ipi_send_mask(get_ipi_desc(0, nr), mask);
else
for_each_cpu(cpu, mask)
__ipi_send_single(get_ipi_desc(cpu, nr), cpu);
}
static void arm64_backtrace_ipi(cpumask_t *mask)
{
arm64_send_ipi(mask, IPI_CPU_BACKTRACE);
}
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
{
nmi_trigger_cpumask_backtrace(mask, exclude_cpu, arm64_backtrace_ipi);
}
#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(void)
{
int this_cpu = raw_smp_processor_id();
int cpu;
for_each_online_cpu(cpu) {
if (cpu == this_cpu)
continue;
__ipi_send_single(get_ipi_desc(cpu, IPI_KGDB_ROUNDUP), cpu);
}
}
#endif
static void do_handle_IPI(int ipinr)
{
unsigned int cpu = smp_processor_id();
if ((unsigned)ipinr < NR_IPI)
trace_ipi_entry(ipi_types[ipinr]);
switch (ipinr) {
case IPI_RESCHEDULE:
scheduler_ipi();
break;
case IPI_CALL_FUNC:
generic_smp_call_function_interrupt();
break;
case IPI_CPU_STOP:
case IPI_CPU_STOP_NMI:
if (IS_ENABLED(CONFIG_KEXEC_CORE) && crash_stop) {
ipi_cpu_crash_stop(cpu, get_irq_regs());
unreachable();
} else {
local_cpu_stop(cpu);
}
break;
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
case IPI_TIMER:
tick_receive_broadcast();
break;
#endif
#ifdef CONFIG_IRQ_WORK
case IPI_IRQ_WORK:
irq_work_run();
break;
#endif
case IPI_CPU_BACKTRACE:
nmi_cpu_backtrace(get_irq_regs());
break;
case IPI_KGDB_ROUNDUP:
kgdb_nmicallback(cpu, get_irq_regs());
break;
default:
pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
break;
}
if ((unsigned)ipinr < NR_IPI)
trace_ipi_exit(ipi_types[ipinr]);
}
static irqreturn_t ipi_handler(int irq, void *data)
{
unsigned int ipi = (irq - ipi_irq_base) % nr_ipi;
do_handle_IPI(ipi);
return IRQ_HANDLED;
}
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
trace_ipi_raise(target, ipi_types[ipinr]);
arm64_send_ipi(target, ipinr);
}
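/*
 * Debug and stop IPIs are delivered as pseudo-NMIs when interrupt
 * priority masking is in use, so that they can break into CPUs that are
 * spinning with interrupts masked.
 */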
static bool ipi_should_be_nmi(enum ipi_msg_type ipi)
{
if (!system_uses_irq_prio_masking())
return false;
switch (ipi) {
case IPI_CPU_STOP_NMI:
case IPI_CPU_BACKTRACE:
case IPI_KGDB_ROUNDUP:
return true;
default:
return false;
}
}
static void ipi_setup(int cpu)
{
int i;
if (WARN_ON_ONCE(!ipi_irq_base))
return;
for (i = 0; i < nr_ipi; i++) {
if (!percpu_ipi_descs) {
if (ipi_should_be_nmi(i)) {
prepare_percpu_nmi(ipi_irq_base + i);
enable_percpu_nmi(ipi_irq_base + i, 0);
} else {
enable_percpu_irq(ipi_irq_base + i, 0);
}
} else {
enable_irq(irq_desc_get_irq(get_ipi_desc(cpu, i)));
}
}
}
#ifdef CONFIG_HOTPLUG_CPU
static void ipi_teardown(int cpu)
{
int i;
if (WARN_ON_ONCE(!ipi_irq_base))
return;
for (i = 0; i < nr_ipi; i++) {
if (!percpu_ipi_descs) {
if (ipi_should_be_nmi(i)) {
disable_percpu_nmi(ipi_irq_base + i);
teardown_percpu_nmi(ipi_irq_base + i);
} else {
disable_percpu_irq(ipi_irq_base + i);
}
} else {
disable_irq(irq_desc_get_irq(get_ipi_desc(cpu, i)));
}
}
}
#endif
static void ipi_setup_sgi(int ipi)
{
int err, irq, cpu;
irq = ipi_irq_base + ipi;
if (ipi_should_be_nmi(ipi)) {
		err = request_percpu_nmi(irq, ipi_handler, "IPI", &irq_stat);
WARN(err, "Could not request IRQ %d as NMI, err=%d\n", irq, err);
} else {
err = request_percpu_irq(irq, ipi_handler, "IPI", &irq_stat);
WARN(err, "Could not request IRQ %d as IRQ, err=%d\n", irq, err);
}
for_each_possible_cpu(cpu)
get_ipi_desc(cpu, ipi) = irq_to_desc(irq);
irq_set_status_flags(irq, IRQ_HIDDEN);
}
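/*
 * With LPI-backed IPIs there is one interrupt per (cpu, ipi) pair. Pin
 * each one to its CPU and leave it disabled (IRQF_NO_AUTOEN) until
 * ipi_setup() runs on that CPU.
 */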
static void ipi_setup_lpi(int ipi, int ncpus)
{
for (int cpu = 0; cpu < ncpus; cpu++) {
int err, irq;
irq = ipi_irq_base + (cpu * nr_ipi) + ipi;
err = irq_force_affinity(irq, cpumask_of(cpu));
WARN(err, "Could not force affinity IRQ %d, err=%d\n", irq, err);
err = request_irq(irq, ipi_handler, IRQF_NO_AUTOEN, "IPI",
NULL);
WARN(err, "Could not request IRQ %d, err=%d\n", irq, err);
irq_set_status_flags(irq, (IRQ_HIDDEN | IRQ_NO_BALANCING_MASK));
get_ipi_desc(cpu, ipi) = irq_to_desc(irq);
}
}
void __init set_smp_ipi_range_percpu(int ipi_base, int n, int ncpus)
{
int i;
WARN_ON(n < MAX_IPI);
nr_ipi = min(n, MAX_IPI);
percpu_ipi_descs = !!ncpus;
ipi_irq_base = ipi_base;
for (i = 0; i < nr_ipi; i++) {
if (!percpu_ipi_descs)
ipi_setup_sgi(i);
else
ipi_setup_lpi(i, ncpus);
}
ipi_setup(smp_processor_id());
}
void arch_smp_send_reschedule(int cpu)
{
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi(unsigned int cpu)
{
smp_send_reschedule(cpu);
}
#endif
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
smp_cross_call(mask, IPI_TIMER);
}
#endif
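/*
 * The number of CPUs online, not counting this CPU (which may not be
 * fully online and so not counted in num_online_cpus()).
 */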
static inline unsigned int num_other_online_cpus(void)
{
unsigned int this_cpu_online = cpu_online(smp_processor_id());
return num_online_cpus() - this_cpu_online;
}
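/*
 * Stop all other CPUs, first with a regular stop IPI and then, if some
 * fail to stop in time, with an NMI where pseudo-NMIs are supported.
 */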
void smp_send_stop(void)
{
static unsigned long stop_in_progress;
static cpumask_t mask;
unsigned long timeout;
if (num_other_online_cpus() == 0)
goto skip_ipi;
if (test_and_set_bit(0, &stop_in_progress))
return;
cpumask_copy(&mask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &mask);
if (system_state <= SYSTEM_RUNNING)
pr_crit("SMP: stopping secondary CPUs\n");
smp_cross_call(&mask, IPI_CPU_STOP);
timeout = USEC_PER_SEC;
while (num_other_online_cpus() && timeout--)
udelay(1);
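	/*
	 * If the CPUs did not stop in time, retry with a pseudo-NMI, which
	 * can interrupt CPUs spinning with interrupts masked.
	 */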
if (num_other_online_cpus() && ipi_should_be_nmi(IPI_CPU_STOP_NMI)) {
smp_rmb();
cpumask_copy(&mask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &mask);
pr_info("SMP: retry stop with NMI for CPUs %*pbl\n",
cpumask_pr_args(&mask));
smp_cross_call(&mask, IPI_CPU_STOP_NMI);
timeout = USEC_PER_MSEC * 10;
while (num_other_online_cpus() && timeout--)
udelay(1);
}
if (num_other_online_cpus()) {
smp_rmb();
cpumask_copy(&mask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &mask);
pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
cpumask_pr_args(&mask));
}
skip_ipi:
sdei_mask_local_cpu();
}
#ifdef CONFIG_KEXEC_CORE
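/*
 * This function can be called twice in the panic path, but we only execute
 * it once. The same boolean tells the stop IPI handler whether the IPI we
 * sent was a plain stop or a crash stop.
 */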
void crash_smp_send_stop(void)
{
if (crash_stop)
return;
crash_stop = 1;
smp_send_stop();
sdei_handler_abort();
}
bool smp_crash_stop_failed(void)
{
return num_other_online_cpus() != 0;
}
#endif
static bool have_cpu_die(void)
{
#ifdef CONFIG_HOTPLUG_CPU
int any_cpu = raw_smp_processor_id();
const struct cpu_operations *ops = get_cpu_ops(any_cpu);
if (ops && ops->cpu_die)
return true;
#endif
return false;
}
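/*
 * True if some CPUs may be sitting in kernel text rather than properly
 * offline: CPUs that got stuck during boot, spin-table systems without a
 * cpu_die method, or protected KVM, where CPUs cannot be fully torn down.
 */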
bool cpus_are_stuck_in_kernel(void)
{
bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());
return !!cpus_stuck_in_kernel || smp_spin_tables ||
is_protected_kvm_enabled();
}