dma-mapping: Separate DMA sync issuing and completion waiting

Currently, arch_sync_dma_for_cpu() and arch_sync_dma_for_device()
always wait for the cache maintenance on each DMA buffer to
complete. That is, issuing the DMA sync and waiting for its
completion are done in a single API call.

For scatter-gather lists with multiple entries, this means the
issue-then-wait cycle is repeated for every entry, which hurts
performance. Architectures such as ARM64 can instead issue the
DMA sync operations for all entries first and then wait for their
completion together, as sketched below.
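
On such an architecture, a mapping path walking a scatterlist could
then look roughly like the following, using the standard scatterlist
helpers and the arch_sync_dma_flush() hook introduced below. The loop
is illustrative only, not code from this patch; sgl, nents and dir
are assumed to come from the enclosing mapping routine:

  struct scatterlist *sg;
  int i;

  /* Issue the cache maintenance for every entry... */
  for_each_sg(sgl, sg, nents, i)
          arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);

  /* ...and wait once for all of it to complete. */
  arch_sync_dma_flush();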

To address this, arch_sync_dma_for_*() is changed to only issue the
DMA sync operations; callers then invoke the new arch_sync_dma_flush()
to wait for their completion. On ARM64, arch_sync_dma_flush() is
implemented with a dsb instruction. On other architectures, whose
sync operations still complete synchronously, arch_sync_dma_flush()
is currently a nop.
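
For reference, a minimal sketch of the two variants described above.
The exact barrier scope on ARM64 is an assumption here; the text only
says "a dsb instruction":

  /* ARM64: wait for all previously issued cache maintenance. */
  static inline void arch_sync_dma_flush(void)
  {
          dsb(sy);        /* assumed scope */
  }

  /* Other architectures: sync ops already completed, so a nop. */
  static inline void arch_sync_dma_flush(void)
  {
  }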

Cc: Leon Romanovsky <leon@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Ada Couprie Diaz <ada.coupriediaz@arm.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
Cc: Tangquan Zheng <zhengtangquan@oppo.com>
Reviewed-by: Juergen Gross <jgross@suse.com> # drivers/xen/swiotlb-xen.c
Tested-by: Xueyuan Chen <xueyuan.chen21@gmail.com>
Signed-off-by: Barry Song <baohua@kernel.org>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20260228221316.59934-1-21cnbao@gmail.com

diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c

@@ -262,10 +262,12 @@ static dma_addr_t xen_swiotlb_map_phys(struct device *dev, phys_addr_t phys,
 done:
 	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
-		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
+		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr)))) {
 			arch_sync_dma_for_device(phys, size, dir);
-		else
+			arch_sync_dma_flush();
+		} else {
 			xen_dma_sync_for_device(dev, dev_addr, size, dir);
+		}
 	}
 	return dev_addr;
 }
@@ -287,10 +289,12 @@ static void xen_swiotlb_unmap_phys(struct device *hwdev, dma_addr_t dev_addr,
 	BUG_ON(dir == DMA_NONE);
 
 	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
-		if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
+		if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr)))) {
 			arch_sync_dma_for_cpu(paddr, size, dir);
-		else
+			arch_sync_dma_flush();
+		} else {
 			xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
+		}
 	}
 
 	/* NOTE: We use dev_addr here, not paddr! */
@@ -308,10 +312,12 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
 	struct io_tlb_pool *pool;
 
 	if (!dev_is_dma_coherent(dev)) {
-		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
+		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr)))) {
 			arch_sync_dma_for_cpu(paddr, size, dir);
-		else
+			arch_sync_dma_flush();
+		} else {
 			xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
+		}
 	}
 
 	pool = xen_swiotlb_find_pool(dev, dma_addr);
@@ -331,10 +337,12 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
 		__swiotlb_sync_single_for_device(dev, paddr, size, dir, pool);
 
 	if (!dev_is_dma_coherent(dev)) {
-		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
+		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr)))) {
 			arch_sync_dma_for_device(paddr, size, dir);
-		else
+			arch_sync_dma_flush();
+		} else {
 			xen_dma_sync_for_device(dev, dma_addr, size, dir);
+		}
 	}
 }