#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>
#include <linux/nospec.h>
#include <asm/kvm_hyp.h>
#include "vgic.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
struct vgic_global kvm_vgic_global_state __ro_after_init = {
.gicv3_cpuif = STATIC_KEY_FALSE_INIT,
};
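
/*
 * Locking order is always:
 * kvm->lock (mutex)
 *   vcpu->mutex (mutex)
 *     kvm->arch.config_lock (mutex)
 *       its->cmd_lock (mutex)
 *         its->its_lock (mutex)
 *           vgic_cpu->ap_list_lock		must be taken with IRQs disabled
 *             vgic_irq->irq_lock		must be taken with IRQs disabled
 *
 * As the ap_list_lock might be taken from the timer interrupt handler,
 * we have to disable IRQs before taking this lock and everything lower
 * than it.
 *
 * The dist->lpi_xa.xa_lock is a plain (non-raw) spinlock and must never be
 * acquired inside the raw ap_list_lock/irq_lock sections; paths holding
 * those locks drop their LPI references with vgic_put_irq_norelease() and
 * let vgic_release_deleted_lpis() do the actual teardown once the raw
 * locks have been released.
 *
 * If you need to take multiple locks, always take the upper lock first,
 * then the lower ones, e.g. first take the its_lock, then the irq_lock.
 * If you are already holding a lock and need to take a higher one, you
 * have to drop the lower ranking lock first and re-acquire it with the
 * higher one held.
 *
 * When taking more than one ap_list_lock at the same time, always take the
 * lowest numbered VCPU's ap_list_lock first, so:
 *   vcpuX->vcpu_id < vcpuY->vcpu_id:
 *     raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
 *     raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 *
 * Since the VGIC must support injecting virtual interrupts from ISRs, we
 * have to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions
 * of the outer locks.
 */

/*
 * Look up an LPI in the VM's xarray under RCU and take a reference on it.
 * Returns NULL if the LPI doesn't exist or is already on its way out.
 */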
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct vgic_irq *irq = NULL;
rcu_read_lock();
irq = xa_load(&dist->lpi_xa, intid);
if (!vgic_try_get_irq_ref(irq))
irq = NULL;
rcu_read_unlock();
return irq;
}
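
/*
 * Translate an INTID into the corresponding struct vgic_irq:
 *
 *   0..31:    not handled here, these are the per-vCPU SGIs/PPIs
 *             (see vgic_get_vcpu_irq())
 *   32..1019: SPIs, stored in the distributor's spis array
 *   >= 8192:  LPIs, stored in the lpi_xa xarray
 *
 * Every successful lookup must be balanced with a vgic_put_irq(); for LPIs
 * this pairs with the reference taken in vgic_get_lpi().
 */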
struct vgic_irq *vgic_get_irq(struct kvm *kvm, u32 intid)
{
if (intid >= VGIC_NR_PRIVATE_IRQS &&
intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
}
if (intid >= VGIC_MIN_LPI)
return vgic_get_lpi(kvm, intid);
return NULL;
}
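
/*
 * As vgic_get_irq(), but also handles the per-vCPU SGI/PPI range by
 * indexing into @vcpu's private_irqs array.
 */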
struct vgic_irq *vgic_get_vcpu_irq(struct kvm_vcpu *vcpu, u32 intid)
{
if (WARN_ON(!vcpu))
return NULL;
if (intid < VGIC_NR_PRIVATE_IRQS) {
intid = array_index_nospec(intid, VGIC_NR_PRIVATE_IRQS);
return &vcpu->arch.vgic_cpu.private_irqs[intid];
}
return vgic_get_irq(vcpu->kvm, intid);
}
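
/* Drop an LPI from the xarray and free it after an RCU grace period. */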
static void vgic_release_lpi_locked(struct vgic_dist *dist, struct vgic_irq *irq)
{
lockdep_assert_held(&dist->lpi_xa.xa_lock);
__xa_erase(&dist->lpi_xa, irq->intid);
kfree_rcu(irq, rcu);
}
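
/*
 * Drop a reference. Only LPIs are refcounted; for anything else this is a
 * NOP. Returns true when the last reference was dropped and the LPI must
 * be released.
 */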
static __must_check bool __vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
if (irq->intid < VGIC_MIN_LPI)
return false;
return refcount_dec_and_test(&irq->refcount);
}
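
/*
 * Variant for callers in raw-spinlock context that cannot take the xarray
 * lock: flag the LPI for a deferred release through
 * vgic_release_deleted_lpis().
 */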
static __must_check bool vgic_put_irq_norelease(struct kvm *kvm, struct vgic_irq *irq)
{
if (!__vgic_put_irq(kvm, irq))
return false;
irq->pending_release = true;
return true;
}
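
/* Drop a reference and, on the final put, release the LPI right away. */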
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
struct vgic_dist *dist = &kvm->arch.vgic;
unsigned long flags;
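
	/*
	 * Briefly take the xarray lock under lockdep so that a caller in a
	 * broken locking context is flagged even when the refcount doesn't
	 * actually reach zero here.
	 */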
if (IS_ENABLED(CONFIG_LOCKDEP) && irq->intid >= VGIC_MIN_LPI) {
guard(spinlock_irqsave)(&dist->lpi_xa.xa_lock);
}
if (!__vgic_put_irq(kvm, irq))
return;
xa_lock_irqsave(&dist->lpi_xa, flags);
vgic_release_lpi_locked(dist, irq);
xa_unlock_irqrestore(&dist->lpi_xa, flags);
}
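
/*
 * Release all LPIs whose last reference was dropped with
 * vgic_put_irq_norelease().
 */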
static void vgic_release_deleted_lpis(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
unsigned long flags, intid;
struct vgic_irq *irq;
xa_lock_irqsave(&dist->lpi_xa, flags);
xa_for_each(&dist->lpi_xa, intid, irq) {
if (irq->pending_release)
vgic_release_lpi_locked(dist, irq);
}
xa_unlock_irqrestore(&dist->lpi_xa, flags);
}
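
/*
 * Unqueue every LPI sitting on this vCPU's ap_list and drop the references
 * they held, deferring the actual release until the ap_list_lock is
 * dropped. Used when the vCPU is torn down.
 */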
void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_irq *irq, *tmp;
bool deleted = false;
unsigned long flags;
raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
if (irq->intid >= VGIC_MIN_LPI) {
raw_spin_lock(&irq->irq_lock);
list_del(&irq->ap_list);
irq->vcpu = NULL;
raw_spin_unlock(&irq->irq_lock);
deleted |= vgic_put_irq_norelease(vcpu->kvm, irq);
}
}
raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
if (deleted)
vgic_release_deleted_lpis(vcpu->kvm);
}
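
/*
 * Low-level accessors for the pending/active state of the physical
 * interrupt backing a HW-mapped virtual interrupt.
 */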
void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
{
WARN_ON(irq_set_irqchip_state(irq->host_irq,
IRQCHIP_STATE_PENDING,
pending));
}
bool vgic_get_phys_line_level(struct vgic_irq *irq)
{
bool line_level;
BUG_ON(!irq->hw);
if (irq->ops && irq->ops->get_input_level)
return irq->ops->get_input_level(irq->intid);
WARN_ON(irq_get_irqchip_state(irq->host_irq,
IRQCHIP_STATE_PENDING,
&line_level));
return line_level;
}
void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
{
BUG_ON(!irq->hw);
WARN_ON(irq_set_irqchip_state(irq->host_irq,
IRQCHIP_STATE_ACTIVE,
active));
}
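
/**
 * vgic_target_oracle - compute the target vcpu for an irq
 * @irq:	The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */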
struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
lockdep_assert_held(&irq->irq_lock);
if (irq->active)
return irq->vcpu ? : irq->target_vcpu;
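
	/*
	 * If the IRQ is not active but enabled and pending, we should direct
	 * it to its configured target VCPU.
	 * If the distributor is disabled, pending interrupts shouldn't be
	 * forwarded.
	 */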
if (irq->enabled && irq_is_pending(irq)) {
if (unlikely(irq->target_vcpu &&
!irq->target_vcpu->kvm->arch.vgic.enabled))
return NULL;
return irq->target_vcpu;
}
return NULL;
}
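
/*
 * The order of items in the ap_lists defines how we'll pack things in LRs
 * as well, the first items in the list being the first things populated in
 * the LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on
 * deactivation of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and
 * positive to sort "b" before "a".
 */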
struct vgic_sort_info {
struct kvm_vcpu *vcpu;
struct vgic_vmcr vmcr;
};
static int vgic_irq_cmp(void *priv, const struct list_head *a,
const struct list_head *b)
{
struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
struct vgic_sort_info *info = priv;
struct kvm_vcpu *vcpu = info->vcpu;
bool penda, pendb;
int ret;
if (unlikely(irqa == irqb))
return 0;
raw_spin_lock(&irqa->irq_lock);
raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
	ret = (int)(vgic_target_oracle(irqb) == vcpu) -
	      (int)(vgic_target_oracle(irqa) == vcpu);
if (ret)
goto out;
ret = (int)(irqb->group ? info->vmcr.grpen1 : info->vmcr.grpen0);
ret -= (int)(irqa->group ? info->vmcr.grpen1 : info->vmcr.grpen0);
if (ret)
goto out;
penda = irqa->enabled && irq_is_pending(irqa) && !irqa->active;
pendb = irqb->enabled && irq_is_pending(irqb) && !irqb->active;
ret = (int)pendb - (int)penda;
if (ret)
goto out;
ret = (int)irqa->priority - (int)irqb->priority;
if (ret)
goto out;
ret = (int)irqb->hw - (int)irqa->hw;
out:
raw_spin_unlock(&irqb->irq_lock);
raw_spin_unlock(&irqa->irq_lock);
return ret;
}
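
/* Must be called with the ap_list_lock held. */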
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_sort_info info = { .vcpu = vcpu, };
lockdep_assert_held(&vgic_cpu->ap_list_lock);
vgic_get_vmcr(vcpu, &info.vmcr);
list_sort(&info, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}
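
/*
 * Only valid injection if changing level for level-triggered IRQs or for a
 * rising edge, and in-kernel connected IRQ lines can only be controlled by
 * their owner.
 */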
static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
{
if (irq->owner != owner)
return false;
switch (irq->config) {
case VGIC_CONFIG_LEVEL:
return irq->line_level != level;
case VGIC_CONFIG_EDGE:
return level;
}
return false;
}
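
/*
 * GICv3 hosts that trap deactivation (ICH_HCR_EL2.TDIR) want a broadcast
 * kick when the first SPI gets queued, so that every vCPU can re-evaluate
 * its trap configuration (see vgic_queue_irq_unlock()).
 */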
static bool vgic_model_needs_bcst_kick(struct kvm *kvm)
{
return (cpus_have_final_cap(ARM64_HAS_ICH_HCR_EL2_TDIR) &&
kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3);
}
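
/**
 * vgic_queue_irq_unlock - Queue an IRQ to a VCPU, to be injected to a guest.
 * @kvm:	The VM the IRQ belongs to.
 * @irq:	A pointer to the vgic_irq of the virtual IRQ, with the
 *		irq_lock held.
 * @flags:	The flags used when having acquired the irq_lock.
 *
 * It's illegal to call this function while holding the ap_list_lock.
 *
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */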
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
unsigned long flags) __releases(&irq->irq_lock)
{
struct kvm_vcpu *vcpu;
bool bcast;
lockdep_assert_held(&irq->irq_lock);
retry:
vcpu = vgic_target_oracle(irq);
if (irq->vcpu || !vcpu) {
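		/*
		 * If this IRQ is already on a VCPU's ap_list, then it
		 * cannot be moved or modified and there is no more work for
		 * us to do.
		 *
		 * Otherwise, if the irq is not pending and enabled, it does
		 * not need to be inserted into an ap_list and there is also
		 * no more work for us to do.
		 */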
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
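
		/*
		 * We have to kick the VCPU here, because we could be
		 * queueing an edge-triggered interrupt for which we
		 * get no EOI maintenance interrupt. In that case,
		 * while the IRQ is already on the VCPU's AP list, the
		 * VCPU could have EOI'ed the original interrupt and
		 * won't see this one until it exits for some other
		 * reason.
		 */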
if (vcpu) {
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
kvm_vcpu_kick(vcpu);
}
return false;
}
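
	/*
	 * We must unlock the irq lock to take the ap_list_lock where
	 * we are going to insert this new pending interrupt.
	 */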
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
raw_spin_lock(&irq->irq_lock);
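
	/*
	 * Did something change behind our backs?
	 *
	 * There are two cases:
	 * 1) The irq lost its pending state or was disabled behind our
	 *    backs and/or it was queued to another VCPU's ap_list.
	 * 2) Someone changed the affinity on this irq behind our backs and
	 *    we are now holding the wrong ap_list_lock.
	 *
	 * In both cases, drop the locks and retry.
	 */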
if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
raw_spin_unlock(&irq->irq_lock);
raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
flags);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
goto retry;
}
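
	/*
	 * Grab a reference to the irq to reflect the fact that it is now in
	 * the ap_list. LPIs stay alive as long as that reference is held;
	 * everything else is refcount-exempt.
	 */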
vgic_get_irq_ref(irq);
list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
irq->vcpu = vcpu;
bcast = (vgic_model_needs_bcst_kick(vcpu->kvm) &&
vgic_valid_spi(vcpu->kvm, irq->intid) &&
atomic_fetch_inc(&vcpu->kvm->arch.vgic.active_spis) == 0);
raw_spin_unlock(&irq->irq_lock);
raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
if (!bcast) {
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
kvm_vcpu_kick(vcpu);
} else {
kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_IRQ_PENDING);
}
return true;
}
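
/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:	The VM structure pointer
 * @vcpu:	The CPU for private (SGI/PPI) interrupts, NULL for shared ones
 * @intid:	The INTID to inject a new state to.
 * @level:	Edge-triggered:  true:  to trigger the interrupt
 *			         false: to ignore the call
 *		Level-sensitive: true:  raise the input signal
 *			         false: lower the input signal
 * @owner:	The opaque pointer to the owner of the IRQ being raised to
 *		verify that the caller is allowed to inject this IRQ.
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH
 * for level-sensitive interrupts. You can think of the level parameter as
 * 1 being HIGH and 0 being LOW and all devices being active-HIGH.
 */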
int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
unsigned int intid, bool level, void *owner)
{
struct vgic_irq *irq;
unsigned long flags;
int ret;
ret = vgic_lazy_init(kvm);
if (ret)
return ret;
if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
return -EINVAL;
trace_vgic_update_irq_pending(vcpu ? vcpu->vcpu_idx : 0, intid, level);
if (intid < VGIC_NR_PRIVATE_IRQS)
irq = vgic_get_vcpu_irq(vcpu, intid);
else
irq = vgic_get_irq(kvm, intid);
if (!irq)
return -EINVAL;
raw_spin_lock_irqsave(&irq->irq_lock, flags);
if (!vgic_validate_injection(irq, level, owner)) {
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(kvm, irq);
return 0;
}
if (irq->config == VGIC_CONFIG_LEVEL)
irq->line_level = level;
else
irq->pending_latch = true;
vgic_queue_irq_unlock(kvm, irq, flags);
vgic_put_irq(kvm, irq);
return 0;
}
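
/*
 * Map @irq to the physical interrupt @host_irq: walk to the root of the
 * irq_data hierarchy to find the physical INTID. The caller must hold
 * @irq->irq_lock.
 */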
static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
unsigned int host_irq,
struct irq_ops *ops)
{
struct irq_desc *desc;
struct irq_data *data;
desc = irq_to_desc(host_irq);
if (!desc) {
kvm_err("%s: no interrupt descriptor\n", __func__);
return -EINVAL;
}
data = irq_desc_get_irq_data(desc);
while (data->parent_data)
data = data->parent_data;
irq->hw = true;
irq->host_irq = host_irq;
irq->hwintid = data->hwirq;
irq->ops = ops;
return 0;
}
static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
{
irq->hw = false;
irq->hwintid = 0;
irq->ops = NULL;
}
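
/**
 * kvm_vgic_map_phys_irq - Map a virtual interrupt to a physical interrupt
 * @vcpu:	The VCPU pointer
 * @host_irq:	The Linux IRQ number of the physical interrupt
 * @vintid:	The virtual INTID to map it to
 * @ops:	Optional callbacks (e.g. to sample the line level)
 */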
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
u32 vintid, struct irq_ops *ops)
{
struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, vintid);
unsigned long flags;
int ret;
BUG_ON(!irq);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
ret = kvm_vgic_map_irq(vcpu, irq, host_irq, ops);
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
return ret;
}
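
/**
 * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
 * @vcpu: The VCPU pointer
 * @vintid: The INTID of the interrupt
 *
 * Reset the active and pending states of a mapped interrupt.
 * Kernel subsystems injecting mapped interrupts should reset their
 * interrupt lines when we are doing a reset of the VM.
 */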
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
{
struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, vintid);
unsigned long flags;
if (!irq->hw)
goto out;
raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->active = false;
irq->pending_latch = false;
irq->line_level = false;
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
out:
vgic_put_irq(vcpu->kvm, irq);
}
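
/**
 * kvm_vgic_unmap_phys_irq - Remove the mapping of a virtual IRQ
 * @vcpu: The VCPU pointer
 * @vintid: The INTID of the interrupt
 *
 * Remove an existing mapping between virtual and physical interrupt.
 *
 * Returns 0 on success, -EAGAIN if the vGIC is not yet initialized.
 */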
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
{
struct vgic_irq *irq;
unsigned long flags;
if (!vgic_initialized(vcpu->kvm))
return -EAGAIN;
irq = vgic_get_vcpu_irq(vcpu, vintid);
BUG_ON(!irq);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
kvm_vgic_unmap_irq(irq);
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
return 0;
}
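
/*
 * Returns the physical INTID mapped to @vintid, or -1 if the interrupt is
 * not HW-mapped.
 */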
int kvm_vgic_get_map(struct kvm_vcpu *vcpu, unsigned int vintid)
{
struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, vintid);
unsigned long flags;
int ret = -1;
raw_spin_lock_irqsave(&irq->irq_lock, flags);
if (irq->hw)
ret = irq->hwintid;
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
return ret;
}
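
/**
 * kvm_vgic_set_owner - Set the owner of an interrupt for a VM
 *
 * @vcpu:	Pointer to the VCPU (used for PPIs)
 * @intid:	The virtual INTID identifying the interrupt (PPI or SPI)
 * @owner:	Opaque pointer to the owner
 *
 * Returns 0 if intid is not already used by another in-kernel device and
 * the owner is set, otherwise returns an error code.
 */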
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
{
struct vgic_irq *irq;
unsigned long flags;
int ret = 0;
if (!vgic_initialized(vcpu->kvm))
return -EAGAIN;
if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
return -EINVAL;
irq = vgic_get_vcpu_irq(vcpu, intid);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
if (irq->owner && irq->owner != owner)
ret = -EEXIST;
else
irq->owner = owner;
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
return ret;
}
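
/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */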
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_irq *irq, *tmp;
bool deleted_lpis = false;
DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
retry:
raw_spin_lock(&vgic_cpu->ap_list_lock);
list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
bool target_vcpu_needs_kick = false;
raw_spin_lock(&irq->irq_lock);
BUG_ON(vcpu != irq->vcpu);
target_vcpu = vgic_target_oracle(irq);
if (!target_vcpu) {
list_del(&irq->ap_list);
irq->vcpu = NULL;
raw_spin_unlock(&irq->irq_lock);
deleted_lpis |= vgic_put_irq_norelease(vcpu->kvm, irq);
continue;
}
if (target_vcpu == vcpu) {
raw_spin_unlock(&irq->irq_lock);
continue;
}
raw_spin_unlock(&irq->irq_lock);
raw_spin_unlock(&vgic_cpu->ap_list_lock);
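
		/*
		 * This interrupt looks like it has to be migrated.
		 *
		 * We have to drop the locks here and re-take them in the
		 * order dictated by the vcpu IDs (avoiding the ABBA
		 * deadlock), and then recheck that nothing changed while
		 * they were dropped.
		 */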
if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
vcpuA = vcpu;
vcpuB = target_vcpu;
} else {
vcpuA = target_vcpu;
vcpuB = vcpu;
}
raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
SINGLE_DEPTH_NESTING);
raw_spin_lock(&irq->irq_lock);
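
		/*
		 * If the affinity has been preserved, move the
		 * interrupt around. Otherwise, it means things have
		 * changed while the interrupt was unlocked, and we
		 * need to replay this.
		 *
		 * In all cases, we cannot trust the list not to have
		 * changed, so we restart from the beginning.
		 */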
if (target_vcpu == vgic_target_oracle(irq)) {
struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;
list_del(&irq->ap_list);
irq->vcpu = target_vcpu;
list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
target_vcpu_needs_kick = true;
}
raw_spin_unlock(&irq->irq_lock);
raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
if (target_vcpu_needs_kick) {
kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
kvm_vcpu_kick(target_vcpu);
}
goto retry;
}
raw_spin_unlock(&vgic_cpu->ap_list_lock);
if (unlikely(deleted_lpis))
vgic_release_deleted_lpis(vcpu->kvm);
}
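
/*
 * Fold the guest-visible LR state back into the software model after
 * running the guest, unless nothing was placed in the LRs on entry.
 */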
static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
if (!*host_data_ptr(last_lr_irq))
return;
if (kvm_vgic_global_state.type == VGIC_V2)
vgic_v2_fold_lr_state(vcpu);
else
vgic_v3_fold_lr_state(vcpu);
}
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
struct vgic_irq *irq, int lr)
{
lockdep_assert_held(&irq->irq_lock);
if (kvm_vgic_global_state.type == VGIC_V2)
vgic_v2_populate_lr(vcpu, irq, lr);
else
vgic_v3_populate_lr(vcpu, irq, lr);
}
static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
if (kvm_vgic_global_state.type == VGIC_V2)
vgic_v2_clear_lr(vcpu, lr);
else
vgic_v3_clear_lr(vcpu, lr);
}
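
/*
 * Take a census of this vCPU's ap_list: count the pending and active
 * interrupts that actually target us, plus any SGIs, so the flush path can
 * size its LR usage and trap configuration accordingly.
 */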
static void summarize_ap_list(struct kvm_vcpu *vcpu,
struct ap_list_summary *als)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_irq *irq;
lockdep_assert_held(&vgic_cpu->ap_list_lock);
*als = (typeof(*als)){};
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
guard(raw_spinlock)(&irq->irq_lock);
if (unlikely(vgic_target_oracle(irq) != vcpu))
continue;
if (!irq->active)
als->nr_pend++;
else
als->nr_act++;
if (irq->intid < VGIC_NR_SGIS)
als->nr_sgi++;
}
}
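
/*
 * Populate the LRs from the ap_list. Requires the ap_list_lock to be held.
 *
 * The list is only sorted when the summary suggests that not everything
 * will fit in the LRs (see irqs_outside_lrs()). The last interrupt written
 * into an LR is recorded so that vgic_fold_lr_state() can return early
 * when the LRs were left empty.
 */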
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct ap_list_summary als;
struct vgic_irq *irq;
int count = 0;
lockdep_assert_held(&vgic_cpu->ap_list_lock);
summarize_ap_list(vcpu, &als);
if (irqs_outside_lrs(&als))
vgic_sort_ap_list(vcpu);
*host_data_ptr(last_lr_irq) = NULL;
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
scoped_guard(raw_spinlock, &irq->irq_lock) {
if (likely(vgic_target_oracle(irq) == vcpu)) {
vgic_populate_lr(vcpu, irq, count++);
*host_data_ptr(last_lr_irq) = irq;
}
}
if (count == kvm_vgic_global_state.nr_lr)
break;
}
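
	/* Nuke remaining LRs */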
	for (int i = count; i < kvm_vgic_global_state.nr_lr; i++)
vgic_clear_lr(vcpu, i);
if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
vcpu->arch.vgic_cpu.vgic_v2.used_lrs = count;
vgic_v2_configure_hcr(vcpu, &als);
} else {
vcpu->arch.vgic_cpu.vgic_v3.used_lrs = count;
vgic_v3_configure_hcr(vcpu, &als);
}
}
static inline bool can_access_vgic_from_kernel(void)
{
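	/*
	 * GICv2 can always be accessed from the kernel because it is
	 * memory-mapped, and VHE systems can access GICv3 EL2 system
	 * registers.
	 */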
return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe();
}
static inline void vgic_save_state(struct kvm_vcpu *vcpu)
{
if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
vgic_v2_save_state(vcpu);
else
__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
}
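
/* Sync back the hardware VGIC state into our emulation after a guest's run. */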
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
if (vgic_state_is_nested(vcpu)) {
vgic_v3_sync_nested(vcpu);
return;
}
if (vcpu_has_nv(vcpu))
vgic_v3_nested_update_mi(vcpu);
if (can_access_vgic_from_kernel())
vgic_save_state(vcpu);
vgic_fold_lr_state(vcpu);
vgic_prune_ap_list(vcpu);
}
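
/*
 * Prune the ap_list outside of the guest entry/exit path, e.g. when an
 * asynchronous update has been requested. The list walk requires
 * interrupts to be disabled.
 */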
void kvm_vgic_process_async_update(struct kvm_vcpu *vcpu)
{
unsigned long flags;
local_irq_save(flags);
vgic_prune_ap_list(vcpu);
local_irq_restore(flags);
}
static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
{
if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
vgic_v2_restore_state(vcpu);
else
__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
}
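
/* Flush our emulation state into the GIC hardware before entering the guest. */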
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
if (vgic_state_is_nested(vcpu)) {
if (kvm_vgic_vcpu_pending_irq(vcpu))
kvm_make_request(KVM_REQ_GUEST_HYP_IRQ_PENDING, vcpu);
vgic_v3_flush_nested(vcpu);
return;
}
if (vcpu_has_nv(vcpu))
vgic_v3_nested_update_mi(vcpu);
DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
scoped_guard(raw_spinlock, &vcpu->arch.vgic_cpu.ap_list_lock)
vgic_flush_lr_state(vcpu);
if (can_access_vgic_from_kernel())
vgic_restore_state(vcpu);
if (vgic_supports_direct_irqs(vcpu->kvm))
vgic_v4_commit(vcpu);
}
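
/*
 * With no in-kernel irqchip (or before the vGIC is initialized) there is
 * no vGIC state to context-switch, but a VHE host still needs its GICv3
 * trap configuration updated on load/put.
 */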
void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
if (unlikely(!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))) {
if (has_vhe() && static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
return;
}
if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
vgic_v2_load(vcpu);
else
vgic_v3_load(vcpu);
}
void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
if (unlikely(!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))) {
if (has_vhe() && static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
return;
}
if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
vgic_v2_put(vcpu);
else
vgic_v3_put(vcpu);
}
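
/*
 * Check whether this vCPU has a deliverable interrupt queued: pending,
 * enabled, not active, and not masked by the priority mask (a priority
 * equal to the PMR counts as masked).
 */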
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_irq *irq;
bool pending = false;
unsigned long flags;
struct vgic_vmcr vmcr;
if (!vcpu->kvm->arch.vgic.enabled)
return false;
if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
return true;
vgic_get_vmcr(vcpu, &vmcr);
raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
raw_spin_lock(&irq->irq_lock);
pending = irq_is_pending(irq) && irq->enabled &&
!irq->active &&
irq->priority < vmcr.pmr;
raw_spin_unlock(&irq->irq_lock);
if (pending)
break;
}
raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
return pending;
}
void vgic_kick_vcpus(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
unsigned long c;
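
	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */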
kvm_for_each_vcpu(c, vcpu, kvm) {
if (kvm_vgic_vcpu_pending_irq(vcpu)) {
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
kvm_vcpu_kick(vcpu);
}
}
}
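
/**
 * kvm_vgic_map_is_active - Check if a given producing IRQ is active
 * @vcpu: The vcpu pointer
 * @vintid: The virtual INTID of the interrupt
 *
 * Returns true if the IRQ is HW-mapped and its active state is asserted in
 * the software model.
 */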
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
{
struct vgic_irq *irq;
bool map_is_active;
unsigned long flags;
if (!vgic_initialized(vcpu->kvm))
return false;
irq = vgic_get_vcpu_irq(vcpu, vintid);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
map_is_active = irq->hw && irq->active;
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
return map_is_active;
}
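
/*
 * Level-triggered mapped IRQs are special because we only observe rising
 * edges as input to the VGIC.
 *
 * If the guest never acked the interrupt we have to sample the physical
 * line and set the line level, because the device state could have changed
 * or we simply need to process the still pending interrupt later.
 *
 * We could also have entered the guest with the interrupt active+pending.
 * On the next exit, we need to re-evaluate the pending state, as it could
 * otherwise result in a spurious interrupt by injecting a now potentially
 * stale pending state.
 *
 * If this causes us to lower the level, we have to also clear the physical
 * line state, since we would otherwise never be told when the interrupt
 * becomes asserted again. This also requires us to deactivate the physical
 * interrupt, so that the device can re-assert it.
 */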
void vgic_irq_handle_resampling(struct vgic_irq *irq,
bool lr_deactivated, bool lr_pending)
{
if (vgic_irq_is_mapped_level(irq)) {
bool resample = false;
if (unlikely(vgic_irq_needs_resampling(irq))) {
resample = !(irq->active || irq->pending_latch);
} else if (lr_pending || (lr_deactivated && irq->line_level)) {
irq->line_level = vgic_get_phys_line_level(irq);
resample = !irq->line_level;
}
if (resample)
vgic_irq_set_phys_active(irq, false);
}
}