
Commit fc4dafe

oupton authored and Marc Zyngier committed
KVM: arm64: Protect vLPI translation with vgic_irq::irq_lock
Though undocumented, KVM generally protects the translation of a vLPI with the its_lock. While this makes perfectly good sense, as the ITS itself contains the guest translation, an upcoming change will require twiddling the vLPI mapping in an atomic context.

Switch to using the vIRQ's irq_lock to protect the translation. Use of the its_lock in vgic_v4_unset_forwarding() is preserved for now as it still needs to walk the ITS.

Tested-by: Sweet Tea Dorminy <[email protected]>
Signed-off-by: Oliver Upton <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Marc Zyngier <[email protected]>
1 parent 761aabe commit fc4dafe

File tree

arch/arm64/kvm/vgic/vgic-its.c
arch/arm64/kvm/vgic/vgic-v4.c

2 files changed: +47 −42 lines changed
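Both files move to the scope-based locking helpers from include/linux/cleanup.h: guard() holds a lock until the enclosing function scope ends, on any return path, while scoped_guard() confines the critical section to a braced block. Below is a minimal sketch of the pattern; the demo struct and functions are invented for illustration and are not part of this commit:

#include <linux/spinlock.h>
#include <linux/cleanup.h>

struct demo {
	raw_spinlock_t lock;
	bool hw;
};

static int demo_update(struct demo *d)
{
	/* Acquired here, released automatically on every return below. */
	guard(raw_spinlock_irqsave)(&d->lock);

	if (!d->hw)
		return 0;	/* implicit raw_spin_unlock_irqrestore() */

	d->hw = false;
	return 1;		/* implicit raw_spin_unlock_irqrestore() */
}

static void demo_free(struct demo *d)
{
	/* Critical section bounded by the braces, as in its_free_ite(). */
	scoped_guard(raw_spinlock_irqsave, &d->lock) {
		d->hw = false;
	}
	/* Lock already dropped here. */
}

This is why update_affinity() below can return early in the !irq->hw case without an explicit unlock.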

arch/arm64/kvm/vgic/vgic-its.c

Lines changed: 24 additions & 24 deletions
@@ -306,39 +306,34 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
 		}
 	}
 
-	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-
 	if (irq->hw)
-		return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
+		ret = its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
 
-	return 0;
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+	return ret;
 }
 
 static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
 {
-	int ret = 0;
-	unsigned long flags;
+	struct its_vlpi_map map;
+	int ret;
 
-	raw_spin_lock_irqsave(&irq->irq_lock, flags);
+	guard(raw_spinlock_irqsave)(&irq->irq_lock);
 	irq->target_vcpu = vcpu;
-	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
-	if (irq->hw) {
-		struct its_vlpi_map map;
-
-		ret = its_get_vlpi(irq->host_irq, &map);
-		if (ret)
-			return ret;
+	if (!irq->hw)
+		return 0;
 
-		if (map.vpe)
-			atomic_dec(&map.vpe->vlpi_count);
-		map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
-		atomic_inc(&map.vpe->vlpi_count);
+	ret = its_get_vlpi(irq->host_irq, &map);
+	if (ret)
+		return ret;
 
-		ret = its_map_vlpi(irq->host_irq, &map);
-	}
+	if (map.vpe)
+		atomic_dec(&map.vpe->vlpi_count);
 
-	return ret;
+	map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+	atomic_inc(&map.vpe->vlpi_count);
+	return its_map_vlpi(irq->host_irq, &map);
 }
 
 static struct kvm_vcpu *collection_to_vcpu(struct kvm *kvm,
@@ -756,12 +751,17 @@ int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
 /* Requires the its_lock to be held. */
 static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
 {
+	struct vgic_irq *irq = ite->irq;
 	list_del(&ite->ite_list);
 
 	/* This put matches the get in vgic_add_lpi. */
-	if (ite->irq) {
-		if (ite->irq->hw)
-			WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
+	if (irq) {
+		scoped_guard(raw_spinlock_irqsave, &irq->irq_lock) {
+			if (irq->hw)
+				WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
+
+			irq->hw = false;
+		}
 
 		vgic_put_irq(kvm, ite->irq);
 	}

arch/arm64/kvm/vgic/vgic-v4.c

Lines changed: 23 additions & 18 deletions
@@ -457,9 +457,11 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
 				   irq_entry->msi.data, &irq))
 		return 0;
 
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
+
 	/* Silently exit if the vLPI is already mapped */
 	if (irq->hw)
-		return 0;
+		goto out_unlock_irq;
 
 	/*
 	 * Emit the mapping request. If it fails, the ITS probably
@@ -479,30 +481,30 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
 
 	ret = its_map_vlpi(virq, &map);
 	if (ret)
-		return ret;
+		goto out_unlock_irq;
 
 	irq->hw = true;
 	irq->host_irq = virq;
 	atomic_inc(&map.vpe->vlpi_count);
 
 	/* Transfer pending state */
-	raw_spin_lock_irqsave(&irq->irq_lock, flags);
-	if (irq->pending_latch) {
-		ret = irq_set_irqchip_state(irq->host_irq,
-					    IRQCHIP_STATE_PENDING,
-					    irq->pending_latch);
-		WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);
+	if (!irq->pending_latch)
+		goto out_unlock_irq;
 
-		/*
-		 * Clear pending_latch and communicate this state
-		 * change via vgic_queue_irq_unlock.
-		 */
-		irq->pending_latch = false;
-		vgic_queue_irq_unlock(kvm, irq, flags);
-	} else {
-		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-	}
+	ret = irq_set_irqchip_state(irq->host_irq, IRQCHIP_STATE_PENDING,
+				    irq->pending_latch);
+	WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);
+
+	/*
+	 * Clear pending_latch and communicate this state
+	 * change via vgic_queue_irq_unlock.
+	 */
+	irq->pending_latch = false;
+	vgic_queue_irq_unlock(kvm, irq, flags);
+	return ret;
 
+out_unlock_irq:
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 	return ret;
 }
 
@@ -511,7 +513,8 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
 {
 	struct vgic_its *its;
 	struct vgic_irq *irq;
-	int ret;
+	unsigned long flags;
+	int ret = 0;
 
 	if (!vgic_supports_direct_msis(kvm))
 		return 0;
@@ -531,13 +534,15 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
 	if (ret)
 		goto out;
 
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	WARN_ON(irq->hw && irq->host_irq != virq);
 	if (irq->hw) {
 		atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
 		irq->hw = false;
 		ret = its_unmap_vlpi(virq);
 	}
 
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 out:
 	mutex_unlock(&its->its_lock);
 	return ret;
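Per the commit message, vgic_v4_unset_forwarding() keeps taking the its_lock because resolving virq back to a vgic_irq still requires walking the ITS; the vIRQ's raw irq_lock is then nested inside it. A condensed, hypothetical restatement of the resulting lock ordering, assuming KVM's internal vgic types; sketch_unset_forwarding() is an invented name and the resolve step follows the unchanged surrounding code rather than anything shown in this diff:

/* Sketch of the lock nesting after this patch; not the literal upstream code. */
static int sketch_unset_forwarding(struct kvm *kvm, struct vgic_its *its, int virq,
				   struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct vgic_irq *irq;
	unsigned long flags;
	int ret;

	mutex_lock(&its->its_lock);	/* outer: sleepable, guards the ITS walk */
	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
				   irq_entry->msi.data, &irq);
	if (ret)
		goto out;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);	/* inner: atomic */
	if (irq->hw) {
		/* Drop the vPE reference, as in the real code. */
		atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
		irq->hw = false;
		ret = its_unmap_vlpi(virq);
	}
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
out:
	mutex_unlock(&its->its_lock);
	return ret;
}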
