mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 06:44:00 -04:00
KVM: arm64: vgic: Split out mapping IRQs and setting irq_ops
Prior to this change, the act of mapping a virtual IRQ to a physical one also set the irq_ops. Unmapping then reset the irq_ops to NULL. So far, this has been fine and hasn't caused any major issues. Now, however, as GICv5 support is being added to KVM, it has become apparent that conflating mapping/unmapping IRQs and setting/clearing irq_ops can cause issues. The reason is that the upcoming GICv5 support introduces a set of default irq_ops for PPIs, and removing this when unmapping will cause things to break rather horribly. Split out the mapping/unmapping of IRQs from the setting/clearing of irq_ops. The arch timer code is updated to set the irq_ops following a successful map. The irq_ops are intentionally not removed again on an unmap as the only irq_op introduced by the arch timer only takes effect if the hw bit in struct vgic_irq is set. Therefore, it is safe to leave this in place, and it avoids additional complexity when GICv5 support is introduced. Signed-off-by: Sascha Bischoff <sascha.bischoff@arm.com> Link: https://patch.msgid.link/20260319154937.3619520-6-sascha.bischoff@arm.com Signed-off-by: Marc Zyngier <maz@kernel.org>
This commit is contained in:
committed by
Marc Zyngier
parent
cbd8c958be
commit
663594aafb
@@ -740,13 +740,11 @@ static void kvm_timer_vcpu_load_nested_switch(struct kvm_vcpu *vcpu,
 			ret = kvm_vgic_map_phys_irq(vcpu,
 						    map->direct_vtimer->host_timer_irq,
-						    timer_irq(map->direct_vtimer),
-						    &arch_timer_irq_ops);
+						    timer_irq(map->direct_vtimer));
 			WARN_ON_ONCE(ret);
 			ret = kvm_vgic_map_phys_irq(vcpu,
 						    map->direct_ptimer->host_timer_irq,
-						    timer_irq(map->direct_ptimer),
-						    &arch_timer_irq_ops);
+						    timer_irq(map->direct_ptimer));
 			WARN_ON_ONCE(ret);
 		}
 	}
@@ -1543,6 +1541,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
 	struct timer_map map;
+	struct irq_ops *ops;
 	int ret;
 
 	if (timer->enabled)
@@ -1563,20 +1562,21 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 
 	get_timer_map(vcpu, &map);
 
+	ops = &arch_timer_irq_ops;
+
+	for (int i = 0; i < nr_timers(vcpu); i++)
+		kvm_vgic_set_irq_ops(vcpu, timer_irq(vcpu_get_timer(vcpu, i)), ops);
+
 	ret = kvm_vgic_map_phys_irq(vcpu,
 				    map.direct_vtimer->host_timer_irq,
-				    timer_irq(map.direct_vtimer),
-				    &arch_timer_irq_ops);
+				    timer_irq(map.direct_vtimer));
 	if (ret)
 		return ret;
 
-	if (map.direct_ptimer) {
+	if (map.direct_ptimer)
 		ret = kvm_vgic_map_phys_irq(vcpu,
 					    map.direct_ptimer->host_timer_irq,
-					    timer_irq(map.direct_ptimer),
-					    &arch_timer_irq_ops);
-	}
+					    timer_irq(map.direct_ptimer));
 
 	if (ret)
 		return ret;
 
@@ -553,10 +553,27 @@ int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+void kvm_vgic_set_irq_ops(struct kvm_vcpu *vcpu, u32 vintid,
+			  struct irq_ops *ops)
+{
+	struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, vintid);
+
+	BUG_ON(!irq);
+
+	scoped_guard(raw_spinlock_irqsave, &irq->irq_lock)
+		irq->ops = ops;
+
+	vgic_put_irq(vcpu->kvm, irq);
+}
+
+void kvm_vgic_clear_irq_ops(struct kvm_vcpu *vcpu, u32 vintid)
+{
+	kvm_vgic_set_irq_ops(vcpu, vintid, NULL);
+}
+
 /* @irq->irq_lock must be held */
 static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
-			    unsigned int host_irq,
-			    struct irq_ops *ops)
+			    unsigned int host_irq)
 {
 	struct irq_desc *desc;
 	struct irq_data *data;
@@ -576,7 +593,6 @@ static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 	irq->hw = true;
 	irq->host_irq = host_irq;
 	irq->hwintid = data->hwirq;
-	irq->ops = ops;
 	return 0;
 }
 
@@ -585,11 +601,10 @@ static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
 {
 	irq->hw = false;
 	irq->hwintid = 0;
-	irq->ops = NULL;
 }
 
 int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
-			  u32 vintid, struct irq_ops *ops)
+			  u32 vintid)
 {
 	struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, vintid);
 	unsigned long flags;
@@ -598,7 +613,7 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
 	BUG_ON(!irq);
 
 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
-	ret = kvm_vgic_map_irq(vcpu, irq, host_irq, ops);
+	ret = kvm_vgic_map_irq(vcpu, irq, host_irq);
 	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 	vgic_put_irq(vcpu->kvm, irq);
 
@@ -397,8 +397,11 @@ void kvm_vgic_init_cpu_hardware(void);
 
 int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
 			unsigned int intid, bool level, void *owner);
+void kvm_vgic_set_irq_ops(struct kvm_vcpu *vcpu, u32 vintid,
+			  struct irq_ops *ops);
+void kvm_vgic_clear_irq_ops(struct kvm_vcpu *vcpu, u32 vintid);
 int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
-			  u32 vintid, struct irq_ops *ops);
+			  u32 vintid);
 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
 int kvm_vgic_get_map(struct kvm_vcpu *vcpu, unsigned int vintid);
 bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);
Reference in New Issue
Block a user