Merge tag 'dma-mapping-6.8-2024-01-08' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - reduce area lock contention for non-primary IO TLB pools (Petr
   Tesarik)

 - don't store redundant offsets in the dma_ranges structures (Robin
   Murphy)

 - clear dev->dma_mem when freeing per-device pools (Joakim Zhang)

* tag 'dma-mapping-6.8-2024-01-08' of git://git.infradead.org/users/hch/dma-mapping:
  dma-mapping: clear dev->dma_mem to NULL after freeing it
  swiotlb: reduce area lock contention for non-primary IO TLB pools
  dma-mapping: don't store redundant offsets
This commit is contained in:
Linus Torvalds
2024-01-11 13:46:50 -08:00
6 changed files with 70 additions and 46 deletions

View File

@@ -957,7 +957,7 @@ static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
#endif /* CONFIG_DEBUG_FS */
/**
* swiotlb_area_find_slots() - search for slots in one IO TLB memory area
* swiotlb_search_pool_area() - search one memory area in one pool
* @dev: Device which maps the buffer.
* @pool: Memory pool to be searched.
* @area_index: Index of the IO TLB memory area to be searched.
@@ -972,7 +972,7 @@ static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
*
* Return: Index of the first allocated slot, or -1 on error.
*/
static int swiotlb_area_find_slots(struct device *dev, struct io_tlb_pool *pool,
static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool,
int area_index, phys_addr_t orig_addr, size_t alloc_size,
unsigned int alloc_align_mask)
{
@@ -1066,41 +1066,50 @@ found:
return slot_index;
}
#ifdef CONFIG_SWIOTLB_DYNAMIC
/**
* swiotlb_pool_find_slots() - search for slots in one memory pool
* swiotlb_search_area() - search one memory area in all pools
* @dev: Device which maps the buffer.
* @pool: Memory pool to be searched.
* @start_cpu: Start CPU number.
* @cpu_offset: Offset from @start_cpu.
* @orig_addr: Original (non-bounced) IO buffer address.
* @alloc_size: Total requested size of the bounce buffer,
* including initial alignment padding.
* @alloc_align_mask: Required alignment of the allocated buffer.
* @retpool: Used memory pool, updated on return.
*
* Search through one memory pool to find a sequence of slots that match the
* Search one memory area in all pools for a sequence of slots that match the
* allocation constraints.
*
* Return: Index of the first allocated slot, or -1 on error.
*/
static int swiotlb_pool_find_slots(struct device *dev, struct io_tlb_pool *pool,
phys_addr_t orig_addr, size_t alloc_size,
unsigned int alloc_align_mask)
static int swiotlb_search_area(struct device *dev, int start_cpu,
int cpu_offset, phys_addr_t orig_addr, size_t alloc_size,
unsigned int alloc_align_mask, struct io_tlb_pool **retpool)
{
int start = raw_smp_processor_id() & (pool->nareas - 1);
int i = start, index;
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
struct io_tlb_pool *pool;
int area_index;
int index = -1;
do {
index = swiotlb_area_find_slots(dev, pool, i, orig_addr,
alloc_size, alloc_align_mask);
if (index >= 0)
return index;
if (++i >= pool->nareas)
i = 0;
} while (i != start);
return -1;
rcu_read_lock();
list_for_each_entry_rcu(pool, &mem->pools, node) {
if (cpu_offset >= pool->nareas)
continue;
area_index = (start_cpu + cpu_offset) & (pool->nareas - 1);
index = swiotlb_search_pool_area(dev, pool, area_index,
orig_addr, alloc_size,
alloc_align_mask);
if (index >= 0) {
*retpool = pool;
break;
}
}
rcu_read_unlock();
return index;
}
#ifdef CONFIG_SWIOTLB_DYNAMIC
/**
* swiotlb_find_slots() - search for slots in the whole swiotlb
* @dev: Device which maps the buffer.
@@ -1124,18 +1133,17 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
unsigned long nslabs;
unsigned long flags;
u64 phys_limit;
int cpu, i;
int index;
rcu_read_lock();
list_for_each_entry_rcu(pool, &mem->pools, node) {
index = swiotlb_pool_find_slots(dev, pool, orig_addr,
alloc_size, alloc_align_mask);
if (index >= 0) {
rcu_read_unlock();
cpu = raw_smp_processor_id();
for (i = 0; i < default_nareas; ++i) {
index = swiotlb_search_area(dev, cpu, i, orig_addr, alloc_size,
alloc_align_mask, &pool);
if (index >= 0)
goto found;
}
}
rcu_read_unlock();
if (!mem->can_grow)
return -1;
@@ -1148,8 +1156,8 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
if (!pool)
return -1;
index = swiotlb_pool_find_slots(dev, pool, orig_addr,
alloc_size, alloc_align_mask);
index = swiotlb_search_pool_area(dev, pool, 0, orig_addr,
alloc_size, alloc_align_mask);
if (index < 0) {
swiotlb_dyn_free(&pool->rcu);
return -1;
@@ -1192,9 +1200,21 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
size_t alloc_size, unsigned int alloc_align_mask,
struct io_tlb_pool **retpool)
{
*retpool = &dev->dma_io_tlb_mem->defpool;
return swiotlb_pool_find_slots(dev, *retpool,
orig_addr, alloc_size, alloc_align_mask);
struct io_tlb_pool *pool;
int start, i;
int index;
*retpool = pool = &dev->dma_io_tlb_mem->defpool;
i = start = raw_smp_processor_id() & (pool->nareas - 1);
do {
index = swiotlb_search_pool_area(dev, pool, i, orig_addr,
alloc_size, alloc_align_mask);
if (index >= 0)
return index;
if (++i >= pool->nareas)
i = 0;
} while (i != start);
return -1;
}
#endif /* CONFIG_SWIOTLB_DYNAMIC */