vgic_cpu
struct vgic_cpu vgic_cpu;
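
Read together, the accesses in this listing imply roughly the following layout for struct vgic_cpu. This is a reconstruction from the usage sites below, not a verbatim copy of any one kernel version; field order and comments are approximate:

struct vgic_cpu {
	/* CPU interface state for world switch; only one model is live */
	union {
		struct vgic_v2_cpu_if	vgic_v2;
		struct vgic_v3_cpu_if	vgic_v3;	/* includes its_vpe for GICv4 */
	};

	struct vgic_irq *private_irqs;		/* per-vCPU SGIs and PPIs */

	raw_spinlock_t ap_list_lock;		/* protects ap_list_head */
	struct list_head ap_list_head;		/* IRQs pending or active on this vCPU */

	struct vgic_io_device rd_iodev;		/* redistributor MMIO frame */
	struct vgic_redist_region *rdreg;	/* region backing this redistributor */
	u32 rdreg_index;			/* slot within that region */
	atomic_t syncr_busy;			/* GICR_SYNCR busy count */

	u64 pendbaser;				/* shadow GICR_PENDBASER */
	atomic_t ctlr;				/* shadow GICR_CTLR (EnableLPIs) */

	u32 num_id_bits;			/* cached guest ID bits */
	u32 num_pri_bits;			/* cached guest priority bits */
};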
(atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
&vcpu->arch.vgic_cpu.vgic_v3);
kvm_call_hyp(__vgic_v3_save_aprs, &vcpu->arch.vgic_cpu.vgic_v3);
hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3 = host_vcpu->arch.vgic_cpu.vgic_v3;
struct vgic_v3_cpu_if *hyp_cpu_if = &hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3;
struct vgic_v3_cpu_if *host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3;
__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
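
The four hyp helpers above always run as two bracketing pairs around a guest entry. A minimal sketch of the ordering, with hypothetical wrapper names (the real callers live in the hyp switch code):

static void vgic_enter_guest(struct kvm_vcpu *vcpu)
{
	/* Program ICH_HCR_EL2 trapping, then load LRs/APRs/VMCR */
	__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
	__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
}

static void vgic_exit_guest(struct kvm_vcpu *vcpu)
{
	/* Write LRs/VMCR back into vgic_v3, then drop the traps */
	__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
	__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
}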
if (vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr & ICH_HCR_EL2_TDIR) {
unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
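
The vgic_sre read above has the shape of the ICC_SRE_EL1 trap handler in sys_regs.c: SRE is fixed for the guest, so writes are ignored and reads return the shadow value. A sketch, assuming the usual sys_reg_desc plumbing:

static bool access_gic_sre(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}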
struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
kfree(vgic_cpu->private_irqs);
vgic_cpu->private_irqs = NULL;
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
if (vgic_cpu->private_irqs)
vgic_cpu->private_irqs = kzalloc_objs(struct vgic_irq,
if (!vgic_cpu->private_irqs)
struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
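
The private_irqs lines above belong to the per-vCPU allocation of the 32 SGI/PPI descriptors. A sketch of that allocator, using plain kcalloc (the tree grep'd here appears to use a type-sized allocation helper instead) and only the common init fields:

static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int i;

	if (vgic_cpu->private_irqs)
		return 0;

	vgic_cpu->private_irqs = kcalloc(VGIC_NR_PRIVATE_IRQS,
					 sizeof(struct vgic_irq),
					 GFP_KERNEL_ACCOUNT);
	if (!vgic_cpu->private_irqs)
		return -ENOMEM;

	/* SGIs 0..15 and PPIs 16..31 always target their own vCPU */
	for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
		struct vgic_irq *irq = &vgic_cpu->private_irqs[i];

		INIT_LIST_HEAD(&irq->ap_list);
		raw_spin_lock_init(&irq->irq_lock);
		irq->intid = i;
		irq->vcpu = NULL;
		irq->target_vcpu = vcpu;
		kref_init(&irq->refcount);
		if (vgic_irq_is_sgi(i))
			irq->config = VGIC_CONFIG_EDGE;	/* SGIs are edge-triggered */
	}

	return 0;
}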
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
raw_spin_lock_init(&vgic_cpu->ap_list_lock);
atomic_set(&vgic_cpu->vgic_v3.its_vpe.vlpi_count, 0);
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
kfree(vgic_cpu->private_irqs);
vgic_cpu->private_irqs = NULL;
vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
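
The init and teardown lines above pair up per vCPU: set up the ap_list, clear the GICv4 vLPI count, and leave the redistributor frame unmapped until userspace places it; teardown frees the private IRQ array and re-undefines the frame. Condensed sketch (function names approximate):

static void vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
	raw_spin_lock_init(&vgic_cpu->ap_list_lock);
	atomic_set(&vgic_cpu->vgic_v3.its_vpe.vlpi_count, 0);

	/* Placed later via KVM_VGIC_V3_ADDR_TYPE_REDIST* */
	vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
}

static void vgic_vcpu_teardown(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	kfree(vgic_cpu->private_irqs);
	vgic_cpu->private_irqs = NULL;
	vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
}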
if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);
if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
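
The its_vpe lines above are the GICv4 direct-injection hooks: each vCPU owns an its_vpe, PTZ lets the initial pending-table parse be skipped, and forwarding a host IRQ as a vLPI means handing the ITS driver an its_vlpi_map. An abridged sketch of building such a map, in the shape of kvm_vgic_v4_set_forwarding (here irq is the guest LPI descriptor and virq the host Linux IRQ; both names are context assumptions):

	struct its_vlpi_map map = {
		.vm		= &kvm->arch.vgic.its_vm,
		.vpe		= &vcpu->arch.vgic_cpu.vgic_v3.its_vpe,
		.vintid		= irq->intid,
		.properties	= irq->priority |
				  (irq->enabled ? LPI_PROP_ENABLED : 0),
		.db_enabled	= true,
	};

	ret = its_map_vlpi(virq, &map);
	if (!ret)
		atomic_inc(&map.vpe->vlpi_count);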
return vcpu->arch.vgic_cpu.vgic_v2.vgic_apr;
struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
vcpu->arch.vgic_cpu.vgic_v2.vgic_apr = val;
struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
return atomic_read(&vgic_cpu->ctlr) == GICR_CTLR_ENABLE_LPIS;
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
val = atomic_read(&vgic_cpu->ctlr);
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
ctlr = atomic_cmpxchg_acquire(&vgic_cpu->ctlr,
atomic_set_release(&vgic_cpu->ctlr, 0);
ctlr = atomic_cmpxchg_acquire(&vgic_cpu->ctlr, 0,
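
The acquire/release pairs above implement the GICR_CTLR.EnableLPIs handshake: exactly one writer wins the cmpxchg, RWP reads as busy while a disable is tearing down pending LPIs, and the release store publishes the final state. Sketched here, close to the upstream write handler:

static void vgic_mmio_write_ctlr(struct kvm_vcpu *vcpu, gpa_t addr,
				 unsigned int len, unsigned long val)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u32 ctlr;

	if (!vgic_has_its(vcpu->kvm))
		return;

	if (!(val & GICR_CTLR_ENABLE_LPIS)) {
		/* Only one disable can be in flight: park RWP while it runs */
		ctlr = atomic_cmpxchg_acquire(&vgic_cpu->ctlr,
					      GICR_CTLR_ENABLE_LPIS,
					      GICR_CTLR_RWP);
		if (ctlr != GICR_CTLR_ENABLE_LPIS)
			return;

		vgic_flush_pending_lpis(vcpu);
		vgic_its_invalidate_cache(vcpu->kvm);
		atomic_set_release(&vgic_cpu->ctlr, 0);
	} else {
		ctlr = atomic_cmpxchg_acquire(&vgic_cpu->ctlr, 0,
					      GICR_CTLR_ENABLE_LPIS);
		if (ctlr != 0)
			return;

		vgic_enable_lpis(vcpu);
	}
}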
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_redist_region *iter, *rdreg = vgic_cpu->rdreg;
if (vgic_cpu->rdreg_index < rdreg->free_index - 1) {
} else if (rdreg->count && vgic_cpu->rdreg_index == (rdreg->count - 1)) {
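
The rdreg/rdreg_index comparisons above compute GICR_TYPER.Last: a redistributor is not last if it sits below its region's free_index tail, and if it ends its region, no other region may start contiguously after it. Close to the upstream helper:

static bool vgic_mmio_vcpu_rdist_is_last(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_redist_region *iter, *rdreg = vgic_cpu->rdreg;

	if (!rdreg)
		return false;

	if (vgic_cpu->rdreg_index < rdreg->free_index - 1) {
		return false;
	} else if (rdreg->count && vgic_cpu->rdreg_index == (rdreg->count - 1)) {
		struct list_head *rd_regions = &vgic->rd_regions;
		gpa_t end = rdreg->base + rdreg->count * KVM_VGIC_V3_REDIST_SIZE;

		/* Check whether another region starts right where this one ends */
		list_for_each_entry(iter, rd_regions, list) {
			if (iter->base == end && iter->free_index > 0)
				return false;
		}
	}
	return true;
}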
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
u64 value = vgic_cpu->pendbaser;
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
old_pendbaser = READ_ONCE(vgic_cpu->pendbaser);
} while (cmpxchg64(&vgic_cpu->pendbaser, old_pendbaser,
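
The READ_ONCE()/cmpxchg64() pair above is a lockless read-modify-write of the shadow GICR_PENDBASER; it retries until no concurrent writer has moved the value. Sketch, close to the upstream handler (update_64bit_reg() and vgic_sanitise_pendbaser() are the existing helpers):

static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u64 old_pendbaser, pendbaser;

	/* Storing a value with LPIs already enabled is UNPREDICTABLE */
	if (vgic_lpis_enabled(vcpu))
		return;

	do {
		old_pendbaser = READ_ONCE(vgic_cpu->pendbaser);
		pendbaser = update_64bit_reg(old_pendbaser, addr & 4, len, val);
		pendbaser = vgic_sanitise_pendbaser(pendbaser);
	} while (cmpxchg64(&vgic_cpu->pendbaser, old_pendbaser,
			   pendbaser) != old_pendbaser);
}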
return !!atomic_read(&vcpu->arch.vgic_cpu.syncr_busy);
atomic_inc(&vcpu->arch.vgic_cpu.syncr_busy);
atomic_dec(&vcpu->arch.vgic_cpu.syncr_busy);
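
syncr_busy backs the GICR_SYNCR emulation: callers bump the count around a vPE sync, and the register reads back non-zero (busy) until every sync has drained. A sketch of the pair matching the atomic lines above; the barriers order the sync work against the counter updates:

static void vgic_set_rdist_busy(struct kvm_vcpu *vcpu, bool busy)
{
	if (busy) {
		atomic_inc(&vcpu->arch.vgic_cpu.syncr_busy);
		smp_mb__after_atomic();
	} else {
		smp_mb__before_atomic();
		atomic_dec(&vcpu->arch.vgic_cpu.syncr_busy);
	}
}

static unsigned long vgic_mmio_read_sync(struct kvm_vcpu *vcpu,
					 gpa_t addr, unsigned int len)
{
	return !!atomic_read(&vcpu->arch.vgic_cpu.syncr_busy);
}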
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr))
vgic_cpu->rdreg = rdreg;
vgic_cpu->rdreg_index = rdreg->free_index;
struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
if (vcpu->arch.vgic_cpu.rdreg == rdreg)
vcpu->arch.vgic_cpu.rdreg = NULL;
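
Registration claims the next free slot in a redistributor region and pins the MMIO frame there; unregistration (the rdreg comparison above) drops the back-pointer. The core of the registration path, abridged (rdreg is assumed to be the region chosen by the caller):

	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_io_device *rd_dev = &vgic_cpu->rd_iodev;
	gpa_t rd_base;

	if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr))
		return -EINVAL;			/* frame already placed */

	vgic_cpu->rdreg = rdreg;
	vgic_cpu->rdreg_index = rdreg->free_index;
	rd_base = rdreg->base + rdreg->free_index * KVM_VGIC_V3_REDIST_SIZE;

	rd_dev->base_addr = rd_base;
	/* ... kvm_io_bus_register_dev() for the frame, then rdreg->free_index++ */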
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
for (int lr = 0; lr < vgic_cpu->vgic_v2.used_lrs; lr++)
list_for_each_entry_continue(irq, &vgic_cpu->ap_list_head, ap_list) {
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;
struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0;
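
The vgic_lr[] stores above are the v2 LR populate/clear pair: populate encodes one vgic_irq into a list register image, clear zeroes it. Populate is abridged here to the pending/active bits; the hw/EOI, SGI source and priority encoding are elided:

void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 val = irq->intid;

	if (irq_is_pending(irq))
		val |= GICH_LR_PENDING_BIT;
	if (irq->active)
		val |= GICH_LR_ACTIVE_BIT;
	/* ... hw/EOI, SGI source and priority fields elided ... */

	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;
}

void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0;
}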
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
u64 used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
struct vgic_v3_cpu_if *host_if = &vcpu->arch.vgic_cpu.vgic_v3;
vcpu->arch.vgic_cpu.vgic_v3.used_lrs = cpu_if->used_lrs;
vcpu->arch.vgic_cpu.vgic_v3.used_lrs = 0;
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
list_for_each_entry_continue(irq, &vgic_cpu->ap_list_head, ap_list) {
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
vcpu->arch.vgic_cpu.num_id_bits = FIELD_GET(ICH_VTR_EL2_IDbits,
vcpu->arch.vgic_cpu.num_pri_bits = FIELD_GET(ICH_VTR_EL2_PRIbits,
struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
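
Once LPIs are enabled, pendbase is where the guest's pending table lives; each LPI's pending state is one bit, indexed by INTID. A sketch of the lookup, in the shape of vgic_v3_lpi_sync_pending_status (irq is the LPI being synced):

	gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
	gpa_t ptr = pendbase + irq->intid / BITS_PER_BYTE;
	int bit_nr = irq->intid % BITS_PER_BYTE;
	u8 pendmask;
	int ret;

	/* One guest-memory byte holds the pending bits for eight LPIs */
	ret = kvm_read_guest_lock(vcpu->kvm, ptr, &pendmask, 1);
	if (ret)
		return ret;

	irq->pending_latch = pendmask & (1U << bit_nr);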
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
struct its_vpe *vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
.vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe,
atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
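
The vpe_lock and pending_last lines above belong to GICv4 residency: on vcpu_load the vPE is made resident on the current CPU so vLPIs are delivered directly, and on vcpu_put it is parked, with pending_last recording whether anything fired in between. An abridged sketch of the load side, close to upstream vgic_v4_load():

int vgic_v4_load(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	int err;

	if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->its_vm ||
	    vpe->resident)
		return 0;

	/* Route the doorbell to this CPU before making the vPE resident */
	err = irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id()));
	if (err)
		return err;

	err = its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled);
	if (err)
		return err;

	/* Resident now: discard any stale doorbell interrupt */
	return irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
}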
__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
scoped_guard(raw_spinlock, &vcpu->arch.vgic_cpu.ap_list_lock)
__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
return &vcpu->arch.vgic_cpu.private_irqs[intid];
__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
lockdep_assert_held(&vgic_cpu->ap_list_lock);
list_sort(&info, &vgic_cpu->ap_list_head, vgic_irq_cmp);
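
list_sort() reorders the ap_list so the highest-priority pending interrupts come first before the LRs are filled; the priv argument is ignored by the comparator (upstream simply passes NULL there). The comparator, close to upstream vgic_irq_cmp():

static int vgic_irq_cmp(void *priv, const struct list_head *a,
			const struct list_head *b)
{
	struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
	struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
	bool penda, pendb;
	int ret;

	/* list_sort() may compare an element against itself */
	if (unlikely(irqa == irqb))
		return 0;

	raw_spin_lock(&irqa->irq_lock);
	raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

	penda = irqa->enabled && irq_is_pending(irqa);
	pendb = irqb->enabled && irq_is_pending(irqb);

	if (!penda || !pendb) {
		ret = (int)pendb - (int)penda;
		goto out;
	}

	/* Both pending and enabled: lower priority value wins */
	ret = irqa->priority - irqb->priority;
out:
	raw_spin_unlock(&irqb->irq_lock);
	raw_spin_unlock(&irqa->irq_lock);
	return ret;
}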
raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
raw_spin_lock(&vgic_cpu->ap_list_lock);
list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
raw_spin_unlock(&vgic_cpu->ap_list_lock);
raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;
raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
raw_spin_unlock(&vgic_cpu->ap_list_lock);
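
The vcpuA/vcpuB dance above is the AB-BA avoidance when an interrupt has to migrate from one vCPU's ap_list to another's: both locks are needed, so they are always taken in vcpu_idx order. An abridged sketch of that migration step (vcpu, target_vcpu and irq come from the surrounding prune loop):

	struct kvm_vcpu *vcpuA, *vcpuB;

	/* Always lock the lower-indexed vCPU first */
	if (vcpu->vcpu_idx < target_vcpu->vcpu_idx) {
		vcpuA = vcpu;
		vcpuB = target_vcpu;
	} else {
		vcpuA = target_vcpu;
		vcpuB = vcpu;
	}

	raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
	raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
			     SINGLE_DEPTH_NESTING);
	raw_spin_lock(&irq->irq_lock);

	/* Re-check: the target may have changed while the locks were dropped */
	if (target_vcpu == vgic_target_oracle(irq)) {
		struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

		list_del(&irq->ap_list);
		irq->vcpu = target_vcpu;
		list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
	}

	raw_spin_unlock(&irq->irq_lock);
	raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
	raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);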
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
lockdep_assert_held(&vgic_cpu->ap_list_lock);
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
lockdep_assert_held(&vgic_cpu->ap_list_lock);
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
vcpu->arch.vgic_cpu.vgic_v2.used_lrs = count;
vcpu->arch.vgic_cpu.vgic_v3.used_lrs = count;
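
The two used_lrs stores above are the tail of the LR flush: after filling count list registers from the sorted ap_list, the count is recorded on whichever CPU interface is live. Sketch of the selection (the gicv3_cpuif static key is the existing selector in kvm_vgic_global_state):

	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		vcpu->arch.vgic_cpu.vgic_v2.used_lrs = count;
	else
		vcpu->arch.vgic_cpu.vgic_v3.used_lrs = count;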
struct vgic_cpu *cpu_if = &vcpu->arch.vgic_cpu;