mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 06:44:00 -04:00
dma: compile-out DMA sync op calls when not used
Some platforms do have DMA, but DMA there is always direct and coherent. Currently, even on such platforms DMA sync operations are compiled and called. Add a new hidden Kconfig symbol, DMA_NEED_SYNC, and set it only when either sync operations are needed or there is DMA ops or swiotlb or DMA debug is enabled. Compile global dma_sync_*() and dma_need_sync() only when it's set, otherwise provide empty inline stubs. The change allows for future optimizations of DMA sync calls depending on runtime conditions. Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com> Signed-off-by: Christoph Hellwig <hch@lst.de>
This commit is contained in:
committed by
Christoph Hellwig
parent
2650073f1b
commit
fe7514b149
@@ -329,6 +329,7 @@ void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
|
||||
}
|
||||
EXPORT_SYMBOL(dma_unmap_resource);
|
||||
|
||||
#ifdef CONFIG_DMA_NEED_SYNC
|
||||
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
@@ -385,6 +386,17 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
|
||||
}
|
||||
EXPORT_SYMBOL(dma_sync_sg_for_device);
|
||||
|
||||
/*
 * dma_need_sync - report whether dma_sync_*() calls are required for
 * @dma_addr mapped on @dev.
 *
 * Direct-mapped devices delegate the decision to dma_direct_need_sync();
 * for devices with a dma_map_ops, syncing is assumed to be needed whenever
 * the ops provide either single-buffer sync callback.
 *
 * NOTE(review): this is a rendered diff view — the bare '|' / '||||' lines
 * below are line-number-gutter residue from the web page, not source text.
 * This copy is the function's new location, guarded by CONFIG_DMA_NEED_SYNC
 * (opened at the start of this hunk), so the symbol compiles out entirely
 * on platforms that never need syncs.
 */
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
|
||||
if (dma_map_direct(dev, ops))
|
||||
return dma_direct_need_sync(dev, dma_addr);
|
||||
return ops->sync_single_for_cpu || ops->sync_single_for_device;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dma_need_sync);
|
||||
#endif /* CONFIG_DMA_NEED_SYNC */
|
||||
|
||||
/*
|
||||
* The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
|
||||
* that the intention is to allow exporting memory allocated via the
|
||||
@@ -841,16 +853,6 @@ size_t dma_opt_mapping_size(struct device *dev)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dma_opt_mapping_size);
|
||||
|
||||
/*
 * dma_need_sync - check whether explicit dma_sync_*() operations are
 * required for @dma_addr on @dev: true for direct mappings that
 * dma_direct_need_sync() flags, or for any dma_map_ops exposing a
 * sync_single_for_cpu/sync_single_for_device callback.
 *
 * NOTE(review): rendered diff page — the stray '|' / '||||' lines are
 * web-view gutter residue, not source. This copy is the removal side of
 * the patch (the function's old, unconditional location); the commit
 * moves it under the new CONFIG_DMA_NEED_SYNC guard earlier in the file.
 */
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
|
||||
if (dma_map_direct(dev, ops))
|
||||
return dma_direct_need_sync(dev, dma_addr);
|
||||
return ops->sync_single_for_cpu || ops->sync_single_for_device;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dma_need_sync);
|
||||
|
||||
unsigned long dma_get_merge_boundary(struct device *dev)
|
||||
{
|
||||
const struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
|
||||
Reference in New Issue
Block a user