Mirror of https://github.com/torvalds/linux.git (synced 2026-04-18 06:44:00 -04:00)
dma-mapping: Separate DMA sync issuing and completion waiting
Currently, arch_sync_dma_for_cpu() and arch_sync_dma_for_device() always wait for the completion of each DMA buffer sync: issuing the sync and waiting for it to complete happen in a single call. For scatter-gather lists with multiple entries, this means issue-and-wait is repeated for every entry, which can hurt performance.

Architectures such as arm64 can instead issue the sync operations for all entries first and then wait for completion once. To support this, arch_sync_dma_for_*() now only issues (batches) the DMA sync operations, and callers perform a single flush afterwards via arch_sync_dma_flush(). On arm64 the flush is implemented with a dsb instruction in arch_sync_dma_flush(); on other architectures arch_sync_dma_flush() is currently a no-op.

Cc: Leon Romanovsky <leon@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Ada Couprie Diaz <ada.coupriediaz@arm.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
Cc: Tangquan Zheng <zhengtangquan@oppo.com>
Reviewed-by: Juergen Gross <jgross@suse.com> # drivers/xen/swiotlb-xen.c
Tested-by: Xueyuan Chen <xueyuan.chen21@gmail.com>
Signed-off-by: Barry Song <baohua@kernel.org>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20260228221316.59934-1-21cnbao@gmail.com
commit d7eafe655b (parent cf875c4b68), committed by Marek Szyprowski
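
As an illustration of the split this commit introduces, here is a minimal sketch (an assumption-based example, not code taken from the diff below; the helper name example_sync_sg_for_device is hypothetical): each scatterlist entry is synced without waiting, then a single arch_sync_dma_flush() waits for all of them.

/*
 * Illustrative sketch only: issue per-entry syncs (now non-waiting), then
 * wait once for all of them. Relies on the kernel helpers shown in the diff.
 */
static void example_sync_sg_for_device(struct device *dev,
                                        struct scatterlist *sgl, int nelems,
                                        enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (dev_is_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);

        /* Single completion wait: dsb(sy) on arm64, a no-op elsewhere. */
        arch_sync_dma_flush();
}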
@@ -55,6 +55,7 @@ config ARM64
         select ARCH_HAS_STRICT_MODULE_RWX
         select ARCH_HAS_SYNC_DMA_FOR_DEVICE
         select ARCH_HAS_SYNC_DMA_FOR_CPU
+        select ARCH_HAS_BATCHED_DMA_SYNC
         select ARCH_HAS_SYSCALL_WRAPPER
         select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
         select ARCH_HAS_ZONE_DMA_SET if EXPERT
@@ -87,6 +87,11 @@ int cache_line_size(void);
 
 #define dma_get_cache_alignment        cache_line_size
 
+static inline void arch_sync_dma_flush(void)
+{
+        dsb(sy);
+}
+
 /* Compress a u64 MPIDR value into 32 bits. */
 static inline u64 arch_compact_of_hwid(u64 id)
 {
@@ -17,7 +17,7 @@ void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
 {
         unsigned long start = (unsigned long)phys_to_virt(paddr);
 
-        dcache_clean_poc(start, start + size);
+        dcache_clean_poc_nosync(start, start + size);
 }
 
 void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,

@@ -28,7 +28,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
         if (dir == DMA_TO_DEVICE)
                 return;
 
-        dcache_inval_poc(start, start + size);
+        dcache_inval_poc_nosync(start, start + size);
 }
 
 void arch_dma_prep_coherent(struct page *page, size_t size)
@@ -1095,8 +1095,10 @@ void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                 return;
 
         phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
-        if (!dev_is_dma_coherent(dev))
+        if (!dev_is_dma_coherent(dev)) {
                 arch_sync_dma_for_cpu(phys, size, dir);
+                arch_sync_dma_flush();
+        }
 
         swiotlb_sync_single_for_cpu(dev, phys, size, dir);
 }

@@ -1112,8 +1114,10 @@ void iommu_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
         phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
         swiotlb_sync_single_for_device(dev, phys, size, dir);
 
-        if (!dev_is_dma_coherent(dev))
+        if (!dev_is_dma_coherent(dev)) {
                 arch_sync_dma_for_device(phys, size, dir);
+                arch_sync_dma_flush();
+        }
 }
 
 void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,

@@ -1122,13 +1126,15 @@ void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
         struct scatterlist *sg;
         int i;
 
-        if (sg_dma_is_swiotlb(sgl))
+        if (sg_dma_is_swiotlb(sgl)) {
                 for_each_sg(sgl, sg, nelems, i)
                         iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
                                                       sg->length, dir);
-        else if (!dev_is_dma_coherent(dev))
+        } else if (!dev_is_dma_coherent(dev)) {
                 for_each_sg(sgl, sg, nelems, i)
                         arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
+                arch_sync_dma_flush();
+        }
 }
 
 void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,

@@ -1137,14 +1143,16 @@ void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
         struct scatterlist *sg;
         int i;
 
-        if (sg_dma_is_swiotlb(sgl))
+        if (sg_dma_is_swiotlb(sgl)) {
                 for_each_sg(sgl, sg, nelems, i)
                         iommu_dma_sync_single_for_device(dev,
                                                          sg_dma_address(sg),
                                                          sg->length, dir);
-        else if (!dev_is_dma_coherent(dev))
+        } else if (!dev_is_dma_coherent(dev)) {
                 for_each_sg(sgl, sg, nelems, i)
                         arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
+                arch_sync_dma_flush();
+        }
 }
 
 static phys_addr_t iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,

@@ -1219,8 +1227,10 @@ dma_addr_t iommu_dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
                 return DMA_MAPPING_ERROR;
         }
 
-        if (!coherent && !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
+        if (!coherent && !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO))) {
                 arch_sync_dma_for_device(phys, size, dir);
+                arch_sync_dma_flush();
+        }
 
         iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
         if (iova == DMA_MAPPING_ERROR && !(attrs & DMA_ATTR_MMIO))

@@ -1242,8 +1252,10 @@ void iommu_dma_unmap_phys(struct device *dev, dma_addr_t dma_handle,
         if (WARN_ON(!phys))
                 return;
 
-        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
+        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev)) {
                 arch_sync_dma_for_cpu(phys, size, dir);
+                arch_sync_dma_flush();
+        }
 
         __iommu_dma_unmap(dev, dma_handle, size);

@@ -1980,6 +1992,8 @@ int dma_iova_sync(struct device *dev, struct dma_iova_state *state,
         dma_addr_t addr = state->addr + offset;
         size_t iova_start_pad = iova_offset(iovad, addr);
 
+        if (!dev_is_dma_coherent(dev))
+                arch_sync_dma_flush();
         return iommu_sync_map(domain, addr - iova_start_pad,
                         iova_align(iovad, size + iova_start_pad));
 }

@@ -1993,6 +2007,8 @@ static void iommu_dma_iova_unlink_range_slow(struct device *dev,
         struct iommu_dma_cookie *cookie = domain->iova_cookie;
         struct iova_domain *iovad = &cookie->iovad;
         size_t iova_start_pad = iova_offset(iovad, addr);
+        bool need_sync_dma = !dev_is_dma_coherent(dev) &&
+                !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO));
         dma_addr_t end = addr + size;
 
         do {

@@ -2016,6 +2032,9 @@ static void iommu_dma_iova_unlink_range_slow(struct device *dev,
                 addr += len;
                 iova_start_pad = 0;
         } while (addr < end);
+
+        if (need_sync_dma)
+                arch_sync_dma_flush();
 }
 
 static void __iommu_dma_iova_unlink(struct device *dev,
@@ -262,10 +262,12 @@ static dma_addr_t xen_swiotlb_map_phys(struct device *dev, phys_addr_t phys,
 
 done:
         if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
-                if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
+                if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr)))) {
                         arch_sync_dma_for_device(phys, size, dir);
-                else
+                        arch_sync_dma_flush();
+                } else {
                         xen_dma_sync_for_device(dev, dev_addr, size, dir);
+                }
         }
         return dev_addr;
 }

@@ -287,10 +289,12 @@ static void xen_swiotlb_unmap_phys(struct device *hwdev, dma_addr_t dev_addr,
         BUG_ON(dir == DMA_NONE);
 
         if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
-                if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
+                if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr)))) {
                         arch_sync_dma_for_cpu(paddr, size, dir);
-                else
+                        arch_sync_dma_flush();
+                } else {
                         xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
+                }
         }
 
         /* NOTE: We use dev_addr here, not paddr! */

@@ -308,10 +312,12 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
         struct io_tlb_pool *pool;
 
         if (!dev_is_dma_coherent(dev)) {
-                if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
+                if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr)))) {
                         arch_sync_dma_for_cpu(paddr, size, dir);
-                else
+                        arch_sync_dma_flush();
+                } else {
                         xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
+                }
         }
 
         pool = xen_swiotlb_find_pool(dev, dma_addr);

@@ -331,10 +337,12 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
         __swiotlb_sync_single_for_device(dev, paddr, size, dir, pool);
 
         if (!dev_is_dma_coherent(dev)) {
-                if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
+                if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr)))) {
                         arch_sync_dma_for_device(paddr, size, dir);
-                else
+                        arch_sync_dma_flush();
+                } else {
                         xen_dma_sync_for_device(dev, dma_addr, size, dir);
+                }
         }
 }
@@ -361,6 +361,12 @@ static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 }
 #endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
 
+#ifndef CONFIG_ARCH_HAS_BATCHED_DMA_SYNC
+static inline void arch_sync_dma_flush(void)
+{
+}
+#endif
+
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
 void arch_sync_dma_for_cpu_all(void);
 #else
@@ -72,6 +72,9 @@ config ARCH_HAS_DMA_PREP_COHERENT
 config ARCH_HAS_FORCE_DMA_UNENCRYPTED
         bool
 
+config ARCH_HAS_BATCHED_DMA_SYNC
+        bool
+
 #
 # Select this option if the architecture assumes DMA devices are coherent
 # by default.
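
For reference, a hedged sketch of what opting in would look like for another architecture (an assumption modelled on the arm64 changes above, not part of this patch): select the new Kconfig symbol and provide an arch-specific flush that overrides the generic no-op stub.

/*
 * Sketch only (assumption): the architecture adds
 *         select ARCH_HAS_BATCHED_DMA_SYNC
 * to its Kconfig entry and defines its own flush in an arch header.
 */
static inline void arch_sync_dma_flush(void)
{
        /* wait here for all previously issued cache maintenance to complete */
}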
@@ -406,6 +406,8 @@ void dma_direct_sync_sg_for_device(struct device *dev,
                         arch_sync_dma_for_device(paddr, sg->length,
                                         dir);
         }
+        if (!dev_is_dma_coherent(dev))
+                arch_sync_dma_flush();
 }
 #endif

@@ -427,8 +429,10 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
                         swiotlb_sync_single_for_cpu(dev, paddr, sg->length, dir);
         }
 
-        if (!dev_is_dma_coherent(dev))
+        if (!dev_is_dma_coherent(dev)) {
+                arch_sync_dma_flush();
                 arch_sync_dma_for_cpu_all();
+        }
 }
 
 /*
@@ -60,8 +60,10 @@ static inline void dma_direct_sync_single_for_device(struct device *dev,
 
         swiotlb_sync_single_for_device(dev, paddr, size, dir);
 
-        if (!dev_is_dma_coherent(dev))
+        if (!dev_is_dma_coherent(dev)) {
                 arch_sync_dma_for_device(paddr, size, dir);
+                arch_sync_dma_flush();
+        }
 }
 
 static inline void dma_direct_sync_single_for_cpu(struct device *dev,

@@ -71,6 +73,7 @@ static inline void dma_direct_sync_single_for_cpu(struct device *dev,
 
         if (!dev_is_dma_coherent(dev)) {
                 arch_sync_dma_for_cpu(paddr, size, dir);
+                arch_sync_dma_flush();
                 arch_sync_dma_for_cpu_all();
         }
 

@@ -106,8 +109,10 @@ static inline dma_addr_t dma_direct_map_phys(struct device *dev,
         }
 
         if (!dev_is_dma_coherent(dev) &&
-            !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
+            !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO))) {
                 arch_sync_dma_for_device(phys, size, dir);
+                arch_sync_dma_flush();
+        }
         return dma_addr;
 
 err_overflow:
@@ -867,6 +867,9 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
         if (orig_addr == INVALID_PHYS_ADDR)
                 return;
 
+        if (dir == DMA_FROM_DEVICE && !dev_is_dma_coherent(dev))
+                arch_sync_dma_flush();
+
         /*
          * It's valid for tlb_offset to be negative. This can happen when the
          * "offset" returned by swiotlb_align_offset() is non-zero, and the

@@ -1595,8 +1598,10 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
                 return DMA_MAPPING_ERROR;
         }
 
-        if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+        if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
                 arch_sync_dma_for_device(swiotlb_addr, size, dir);
+                arch_sync_dma_flush();
+        }
         return dma_addr;
 }
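
At the driver level nothing changes: the streaming DMA API is the same, and the batching happens inside the dma-direct, IOMMU and Xen paths changed above. A hedged usage sketch (dev, sgl and orig_nents are placeholder names):

/*
 * Map once, then reuse the buffers; on arm64 each sg sync below now issues
 * all cache maintenance for the list before a single dsb(sy).
 */
nents = dma_map_sg(dev, sgl, orig_nents, DMA_BIDIRECTIONAL);

/* device DMA completed, CPU wants to look at the data */
dma_sync_sg_for_cpu(dev, sgl, orig_nents, DMA_BIDIRECTIONAL);

/* CPU updated the buffers, hand ownership back to the device */
dma_sync_sg_for_device(dev, sgl, orig_nents, DMA_BIDIRECTIONAL);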