#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/static_key.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>

#ifdef CONFIG_MMU
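
/*
 * The ASID allocator encodes a global generation number ("version") in the
 * upper bits of mm->context.id and the hardware ASID in the low
 * SATP_ASID_BITS bits (see cntx2asid()/cntx2version()). active_context is
 * the context each CPU is currently running with; reserved_context keeps a
 * CPU's last context across a generation rollover so that the task it is
 * still running does not lose its ASID.
 */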

DEFINE_STATIC_KEY_FALSE(use_asid_allocator);

static unsigned long num_asids;

static atomic_long_t current_version;

static DEFINE_RAW_SPINLOCK(context_lock);
static cpumask_t context_tlb_flush_pending;
static unsigned long *context_asid_map;

static DEFINE_PER_CPU(atomic_long_t, active_context);
static DEFINE_PER_CPU(unsigned long, reserved_context);
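
/*
 * Rewrite every copy of the reserved context @cntx to @newcntx and report
 * whether any CPU had it reserved. We must visit all CPUs rather than exit
 * on the first hit, so that no stale copy survives to a future generation.
 */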
static bool check_update_reserved_context(unsigned long cntx,
					  unsigned long newcntx)
{
	int cpu;
	bool hit = false;

	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_context, cpu) == cntx) {
			hit = true;
			per_cpu(reserved_context, cpu) = newcntx;
		}
	}

	return hit;
}
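
/*
 * Generation rollover: start a fresh ASID map, carry over the context each
 * CPU is actively running so those tasks keep their ASID in the new
 * generation, and queue a TLB flush on every CPU before any recycled ASID
 * is used again.
 */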
static void __flush_context(void)
{
	int i;
	unsigned long cntx;

	lockdep_assert_held(&context_lock);

	bitmap_zero(context_asid_map, num_asids);

	for_each_possible_cpu(i) {
		cntx = atomic_long_xchg_relaxed(&per_cpu(active_context, i), 0);
		/*
		 * A CPU that already went through a rollover but has not
		 * run another task since then only has its context in
		 * reserved_context; preserve it.
		 */
		if (cntx == 0)
			cntx = per_cpu(reserved_context, i);

		__set_bit(cntx2asid(cntx), context_asid_map);
		per_cpu(reserved_context, i) = cntx;
	}

	/* ASID #0 is in use at boot time, so never hand it out. */
	__set_bit(0, context_asid_map);

	/* Queue a TLB invalidation for each CPU on its next context switch. */
	cpumask_setall(&context_tlb_flush_pending);
}
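
/*
 * Allocate a context for @mm: revalidate its old ASID for the current
 * generation when possible, otherwise pick a free bit from the map,
 * rolling the generation over (and flushing) when the map is exhausted.
 * Caller must hold context_lock.
 */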
static unsigned long __new_context(struct mm_struct *mm)
{
	static u32 cur_idx = 1;
	unsigned long cntx = atomic_long_read(&mm->context.id);
	unsigned long asid, ver = atomic_long_read(&current_version);

	lockdep_assert_held(&context_lock);

	if (cntx != 0) {
		unsigned long newcntx = ver | cntx2asid(cntx);

		/*
		 * If our context was active during a rollover, we can keep
		 * using it: the rollover already reserved it for us.
		 */
		if (check_update_reserved_context(cntx, newcntx))
			return newcntx;

		/* Otherwise try to re-use the ASID from a previous life. */
		if (!__test_and_set_bit(cntx2asid(cntx), context_asid_map))
			return newcntx;
	}

	asid = find_next_zero_bit(context_asid_map, num_asids, cur_idx);
	if (asid != num_asids)
		goto set_asid;

	/* We are out of ASIDs, so bump the version and flush everything. */
	ver = atomic_long_add_return_relaxed(BIT(SATP_ASID_BITS),
					     &current_version);
	__flush_context();

	/* We have more ASIDs than CPUs, so this will always succeed. */
	asid = find_next_zero_bit(context_asid_map, num_asids, 1);

set_asid:
	__set_bit(asid, context_asid_map);
	cur_idx = asid;
	return asid | ver;
}
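
/*
 * Fast path: if this CPU's active_context is non-zero and @mm's context
 * belongs to the current generation, publish it with a single relaxed
 * cmpxchg and write satp. Otherwise fall back to the slow path under
 * context_lock, allocating a new context and honouring any TLB flush
 * deferred for this CPU by a rollover.
 */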
static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;
	bool need_flush_tlb = false;
	unsigned long cntx, old_active_cntx;

	cntx = atomic_long_read(&mm->context.id);

	/*
	 * Racing with a concurrent rollover is handled as follows:
	 *
	 * - The cmpxchg sees zero: the rollover cleared our entry, so we
	 *   fall through, and taking the lock forces us to observe the
	 *   updated version.
	 *
	 * - The cmpxchg succeeds anyway: __flush_context() has marked the
	 *   ASID of our active_context as used, so we can keep running
	 *   with the old ASID until the next context switch.
	 */
	old_active_cntx = atomic_long_read(&per_cpu(active_context, cpu));
	if (old_active_cntx &&
	    (cntx2version(cntx) == atomic_long_read(&current_version)) &&
	    atomic_long_cmpxchg_relaxed(&per_cpu(active_context, cpu),
					old_active_cntx, cntx))
		goto switch_mm_fast;

	raw_spin_lock_irqsave(&context_lock, flags);

	/* Re-check under the lock and allocate if the version is stale. */
	cntx = atomic_long_read(&mm->context.id);
	if (cntx2version(cntx) != atomic_long_read(&current_version)) {
		cntx = __new_context(mm);
		atomic_long_set(&mm->context.id, cntx);
	}

	if (cpumask_test_and_clear_cpu(cpu, &context_tlb_flush_pending))
		need_flush_tlb = true;

	atomic_long_set(&per_cpu(active_context, cpu), cntx);

	raw_spin_unlock_irqrestore(&context_lock, flags);

switch_mm_fast:
	csr_write(CSR_SATP, virt_to_pfn(mm->pgd) |
		  (cntx2asid(cntx) << SATP_ASID_SHIFT) |
		  satp_mode);

	if (need_flush_tlb)
		local_flush_tlb_all();
}
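
/* No HW ASIDs: switch the page table and nuke the entire local TLB. */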
static void set_mm_noasid(struct mm_struct *mm)
{
	csr_write(CSR_SATP, virt_to_pfn(mm->pgd) | satp_mode);
	local_flush_tlb_all_asid(0);
}
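
/*
 * mm_cpumask() tracks which harts' TLBs may hold mappings of this mm. With
 * the ASID allocator a switch does not flush the TLB, so the bit must stay
 * set until the mm is torn down; without ASIDs every switch flushes the
 * local TLB, so the previous CPU's bit can be cleared here.
 */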
static inline void set_mm(struct mm_struct *prev,
			  struct mm_struct *next, unsigned int cpu)
{
	cpumask_set_cpu(cpu, mm_cpumask(next));
	if (static_branch_unlikely(&use_asid_allocator)) {
		set_mm_asid(next, cpu);
	} else {
		cpumask_clear_cpu(cpu, mm_cpumask(prev));
		set_mm_noasid(next);
	}
}
static int __init asids_init(void)
{
	unsigned long asid_bits, old;

	/* Figure out the number of ASID bits implemented in hardware. */
	old = csr_read(CSR_SATP);
	asid_bits = old | (SATP_ASID_MASK << SATP_ASID_SHIFT);
	csr_write(CSR_SATP, asid_bits);
	asid_bits = (csr_read(CSR_SATP) >> SATP_ASID_SHIFT) & SATP_ASID_MASK;
	asid_bits = fls_long(asid_bits);
	csr_write(CSR_SATP, old);

	/*
	 * Probing polluted the TLB of this hart, so flush the stale
	 * mappings before continuing.
	 */
	local_flush_tlb_all();

	/* Pre-compute the number of usable ASIDs. */
	if (asid_bits)
		num_asids = 1 << asid_bits;

	if (num_asids > (2 * num_possible_cpus())) {
		atomic_long_set(&current_version, BIT(SATP_ASID_BITS));

		context_asid_map = bitmap_zalloc(num_asids, GFP_KERNEL);
		if (!context_asid_map)
			panic("Failed to allocate bitmap for %lu ASIDs\n",
			      num_asids);

		__set_bit(0, context_asid_map);

		static_branch_enable(&use_asid_allocator);

		pr_info("ASID allocator using %lu bits (%lu entries)\n",
			asid_bits, num_asids);
	} else {
		pr_info("ASID allocator disabled (%lu bits)\n", asid_bits);
	}

	return 0;
}
early_initcall(asids_init);

#else
static inline void set_mm(struct mm_struct *prev,
			  struct mm_struct *next, unsigned int cpu)
{
}
#endif
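
/*
 * Perform a deferred icache flush for the given mm on the local CPU.
 * flush_icache_mm() avoids IPIs to harts that are not currently running
 * the mm and instead marks them in icache_stale_mask; we clear our bit
 * and flush here, before execution of the mm resumes.
 */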
static inline void flush_icache_deferred(struct mm_struct *mm, unsigned int cpu,
					 struct task_struct *task)
{
#ifdef CONFIG_SMP
	if (cpumask_test_and_clear_cpu(cpu, &mm->context.icache_stale_mask)) {
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with a barrier in flush_icache_mm().
		 */
		smp_mb();

		/* If switch_to() will flush the icache anyway, skip it here. */
		if (!(task && switch_to_should_flush_icache(task)))
			local_flush_icache_all();
	}
#endif
}
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *task)
{
	unsigned int cpu;

	if (unlikely(prev == next))
		return;

	membarrier_arch_switch_mm(prev, next, task);

	/*
	 * set_mm() updates mm_cpumask(), which the deferred icache flush
	 * code relies on to know which harts run this mm.
	 */
	cpu = smp_processor_id();

	set_mm(prev, next, cpu);

	flush_icache_deferred(next, cpu, task);
}