author      Alexey Kardashevskiy <aik@ozlabs.ru>  2021-03-01 17:36:53 +1100
committer   Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2021-05-19 10:08:24 +0200
commit      40d1cb16a5783a2589ba9ad90daf1546131cf9d2 (patch)
tree        5f9037300e458b038eb4665c40dd1dc8d517c586 /arch/powerpc/kernel/iommu.c
parent      d26436a3b913a5a25db29c460d27e6c9673cf741 (diff)
powerpc/iommu: Annotate nested lock for lockdep
[ Upstream commit cc7130bf119add37f36238343a593b71ef6ecc1e ]

The IOMMU table is divided into pools for concurrent mappings, and each
pool has a separate spinlock. When taking ownership of an IOMMU group to
pass a device through to a VM, we lock all of these spinlocks, which
triggers a false-positive recursive-locking warning in lockdep (below).

Fix it by annotating the large pool's spinlock as a nest lock, which
makes lockdep stop complaining about nested locks as long as the nest
lock is already held.

===
WARNING: possible recursive locking detected
5.11.0-le_syzkaller_a+fstn1 #100 Not tainted
--------------------------------------------
qemu-system-ppc/4129 is trying to acquire lock:
c0000000119bddb0 (&(p->lock)/1){....}-{2:2}, at: iommu_take_ownership+0xac/0x1e0

but task is already holding lock:
c0000000119bdd30 (&(p->lock)/1){....}-{2:2}, at: iommu_take_ownership+0xac/0x1e0

other info that might help us debug this:
 Possible unsafe locking scenario:

       CPU0
       ----
  lock(&(p->lock)/1);
  lock(&(p->lock)/1);
===

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210301063653.51003-1-aik@ozlabs.ru
Signed-off-by: Sasha Levin <sashal@kernel.org>
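For readers less familiar with this lockdep annotation, the following is a minimal, self-contained sketch of the pattern the patch applies. The struct and function names (demo_pool, demo_table, demo_quiesce) are hypothetical and only mirror the layout described above: one "large pool" lock taken first, plus several per-pool locks that all belong to the same lock class.

#include <linux/kernel.h>
#include <linux/spinlock.h>

struct demo_pool {
	spinlock_t lock;
};

struct demo_table {
	struct demo_pool large_pool;	/* taken first; acts as the nest lock */
	struct demo_pool pools[4];	/* all four locks share one lock class */
	unsigned int nr_pools;
};

static void demo_table_init(struct demo_table *tbl)
{
	unsigned int i;

	spin_lock_init(&tbl->large_pool.lock);
	for (i = 0; i < ARRAY_SIZE(tbl->pools); i++)
		spin_lock_init(&tbl->pools[i].lock);
	tbl->nr_pools = ARRAY_SIZE(tbl->pools);
}

static void demo_quiesce(struct demo_table *tbl)
{
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);

	/*
	 * Plain spin_lock() in this loop looks like recursive locking to
	 * lockdep because every pools[i].lock belongs to the same class.
	 * Passing the already-held large_pool.lock as the nest lock tells
	 * lockdep that it serializes these acquisitions, so the warning
	 * quoted above is recognised as a false positive and suppressed.
	 */
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);

	/* ... operate on the fully locked table ... */

	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
}

The release side needs no annotation: lockdep only validates acquisitions, and the one requirement of spin_lock_nest_lock() is that the nest lock is already held when each nested lock is taken, which the irqsave/irqrestore bracket above guarantees.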
Diffstat (limited to 'arch/powerpc/kernel/iommu.c')
-rw-r--r--  arch/powerpc/kernel/iommu.c  4
1 file changed, 2 insertions, 2 deletions
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 9704f3f76e63..d7d42bd448c4 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -1057,7 +1057,7 @@ int iommu_take_ownership(struct iommu_table *tbl)
 	spin_lock_irqsave(&tbl->large_pool.lock, flags);
 	for (i = 0; i < tbl->nr_pools; i++)
-		spin_lock(&tbl->pools[i].lock);
+		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
 
 	iommu_table_release_pages(tbl);
 
@@ -1085,7 +1085,7 @@ void iommu_release_ownership(struct iommu_table *tbl)
 	spin_lock_irqsave(&tbl->large_pool.lock, flags);
 	for (i = 0; i < tbl->nr_pools; i++)
-		spin_lock(&tbl->pools[i].lock);
+		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
 
 	memset(tbl->it_map, 0, sz);