#ifndef __ASM__MPAM_H
#define __ASM__MPAM_H
#include <linux/arm_mpam.h>
#include <linux/bitfield.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <asm/sysreg.h>
DECLARE_STATIC_KEY_FALSE(mpam_enabled);
DECLARE_PER_CPU(u64, arm64_mpam_default);
DECLARE_PER_CPU(u64, arm64_mpam_current);
extern u64 arm64_mpam_global_default;
#ifdef CONFIG_ARM64_MPAM
/*
 * Pack the data/instruction PARTID and PMG fields into the register
 * layout used by the MPAMn_ELx system registers (MPAM0_EL1 field
 * definitions; the same layout is written to MPAM1_EL1 below).
 */
static inline u64 __mpam_regval(u16 partid_d, u16 partid_i, u8 pmg_d, u8 pmg_i)
{
	u64 val;

	val  = FIELD_PREP(MPAM0_EL1_PARTID_D, partid_d);
	val |= FIELD_PREP(MPAM0_EL1_PARTID_I, partid_i);
	val |= FIELD_PREP(MPAM0_EL1_PMG_D, pmg_d);
	val |= FIELD_PREP(MPAM0_EL1_PMG_I, pmg_i);

	return val;
}
/*
 * Record the packed PARTID/PMG value that @cpu should use as its
 * default; read back by mpam_thread_switch() for tasks still carrying
 * the global default value.
 */
static inline void mpam_set_cpu_defaults(int cpu, u16 partid_d, u16 partid_i,
					 u8 pmg_d, u8 pmg_i)
{
	WRITE_ONCE(per_cpu(arm64_mpam_default, cpu),
		   __mpam_regval(partid_d, partid_i, pmg_d, pmg_i));
}
static inline u64 mpam_get_regval(struct task_struct *tsk)
{
return READ_ONCE(task_thread_info(tsk)->mpam_partid_pmg);
}
/*
 * Store the packed PARTID/PMG value for @tsk; picked up by
 * mpam_thread_switch() the next time the task is scheduled in.
 */
static inline void mpam_set_task_partid_pmg(struct task_struct *tsk,
					    u16 partid_d, u16 partid_i,
					    u8 pmg_d, u8 pmg_i)
{
	WRITE_ONCE(task_thread_info(tsk)->mpam_partid_pmg,
		   __mpam_regval(partid_d, partid_i, pmg_d, pmg_i));
}
/*
 * Context-switch hook: install @tsk's MPAM PARTID/PMG values into the
 * MPAM system registers. Tasks still carrying the global default value
 * inherit this CPU's per-cpu default instead. Skips the register writes
 * when the value already programmed (arm64_mpam_current) is unchanged.
 */
static inline void mpam_thread_switch(struct task_struct *tsk)
{
	u64 oldregval, regval;
	int cpu;

	/*
	 * Test the static key before doing any work: the original read
	 * thread_info and smp_processor_id() first, making the disabled
	 * path (which the static key exists to keep free) pay that cost
	 * on every switch.
	 */
	if (!static_branch_likely(&mpam_enabled))
		return;

	cpu = smp_processor_id();
	regval = mpam_get_regval(tsk);

	/* Task has no explicit value: use this CPU's default */
	if (regval == READ_ONCE(arm64_mpam_global_default))
		regval = READ_ONCE(per_cpu(arm64_mpam_default, cpu));

	oldregval = READ_ONCE(per_cpu(arm64_mpam_current, cpu));
	if (oldregval == regval)
		return;

	/* Keep MPAMEN set in MPAM1_EL1 so the EL0 value takes effect */
	write_sysreg_s(regval | MPAM1_EL1_MPAMEN, SYS_MPAM1_EL1);
	if (system_supports_sme())
		write_sysreg_s(regval & (MPAMSM_EL1_PARTID_D | MPAMSM_EL1_PMG_D),
			       SYS_MPAMSM_EL1);
	isb();

	/*
	 * NOTE(review): no ISB after this write — presumably the ERET
	 * back to EL0 provides the required synchronisation; confirm.
	 */
	write_sysreg_s(regval, SYS_MPAM0_EL1);
	WRITE_ONCE(per_cpu(arm64_mpam_current, cpu), regval);
}
#else
/* Stub used when MPAM support is compiled out (CONFIG_ARM64_MPAM=n). */
static inline void mpam_thread_switch(struct task_struct *tsk) {}
#endif
#endif