diff --git a/include/uapi/linux/map_benchmark.h b/include/uapi/linux/map_benchmark.h
index e076748f2120..4b17829a9f17 100644
--- a/include/uapi/linux/map_benchmark.h
+++ b/include/uapi/linux/map_benchmark.h
@@ -19,6 +19,7 @@
 enum {
 	DMA_MAP_BENCH_SINGLE_MODE,
+	DMA_MAP_BENCH_SG_MODE,
 	DMA_MAP_BENCH_MODE_MAX
 };
 
@@ -33,7 +34,9 @@ struct map_benchmark {
 	__u32 dma_bits;		/* DMA addressing capability */
 	__u32 dma_dir;		/* DMA data direction */
 	__u32 dma_trans_ns;	/* time for DMA transmission in ns */
-	__u32 granule;	/* how many PAGE_SIZE will do map/unmap once a time */
+	__u32 granule;	/* - SINGLE_MODE: number of pages mapped/unmapped per operation
+			 * - SG_MODE: number of scatterlist entries (each maps one page)
+			 */
 	__u8 map_mode; /* the mode of dma map */
 	__u8 expansion[75];	/* For future use */
 };
diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
index b80e0fb399b1..29eeb5fdf199 100644
--- a/kernel/dma/map_benchmark.c
+++ b/kernel/dma/map_benchmark.c
@@ -16,6 +16,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
+#include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/timekeeping.h>
 
@@ -117,8 +118,122 @@ static struct map_benchmark_ops dma_single_map_benchmark_ops = {
 	.do_unmap = dma_single_map_benchmark_do_unmap,
 };
 
+struct dma_sg_map_param {
+	struct sg_table sgt;
+	struct device *dev;
+	void **buf;
+	u32 npages;
+	u32 dma_dir;
+};
+
+static void *dma_sg_map_benchmark_prepare(struct map_benchmark_data *map)
+{
+	struct scatterlist *sg;
+	int i;
+
+	struct dma_sg_map_param *params = kzalloc(sizeof(*params), GFP_KERNEL);
+
+	if (!params)
+		return NULL;
+	/*
+	 * Set the number of scatterlist entries based on the granule.
+	 * In SG mode, 'granule' represents the number of scatterlist entries.
+	 * Each scatterlist entry corresponds to a single page.
+	 */
+	params->npages = map->bparam.granule;
+	params->dma_dir = map->bparam.dma_dir;
+	params->dev = map->dev;
+	params->buf = kmalloc_array(params->npages, sizeof(*params->buf),
+				    GFP_KERNEL);
+	if (!params->buf)
+		goto out;
+
+	if (sg_alloc_table(&params->sgt, params->npages, GFP_KERNEL))
+		goto free_buf;
+
+	for_each_sgtable_sg(&params->sgt, sg, i) {
+		params->buf[i] = (void *)__get_free_page(GFP_KERNEL);
+		if (!params->buf[i])
+			goto free_page;
+
+		sg_set_buf(sg, params->buf[i], PAGE_SIZE);
+	}
+
+	return params;
+
+free_page:
+	while (i-- > 0)
+		free_page((unsigned long)params->buf[i]);
+
+	sg_free_table(&params->sgt);
+free_buf:
+	kfree(params->buf);
+out:
+	kfree(params);
+	return NULL;
+}
+
+static void dma_sg_map_benchmark_unprepare(void *mparam)
+{
+	struct dma_sg_map_param *params = mparam;
+	int i;
+
+	for (i = 0; i < params->npages; i++)
+		free_page((unsigned long)params->buf[i]);
+
+	sg_free_table(&params->sgt);
+
+	kfree(params->buf);
+	kfree(params);
+}
+
+static void dma_sg_map_benchmark_initialize_data(void *mparam)
+{
+	struct dma_sg_map_param *params = mparam;
+	struct scatterlist *sg;
+	int i = 0;
+
+	if (params->dma_dir == DMA_FROM_DEVICE)
+		return;
+
+	for_each_sgtable_sg(&params->sgt, sg, i)
+		memset(params->buf[i], 0x66, PAGE_SIZE);
+}
+
+static int dma_sg_map_benchmark_do_map(void *mparam)
+{
+	struct dma_sg_map_param *params = mparam;
+	int ret = 0;
+
+	int sg_mapped = dma_map_sg(params->dev, params->sgt.sgl,
+				   params->npages, params->dma_dir);
+	if (!sg_mapped) {
+		pr_err("dma_map_sg failed on %s\n", dev_name(params->dev));
+		ret = -ENOMEM;
+	}
+
+	return ret;
+}
+
+static void dma_sg_map_benchmark_do_unmap(void *mparam)
+{
+	struct dma_sg_map_param *params = mparam;
+
+	dma_unmap_sg(params->dev, params->sgt.sgl, params->npages,
+		     params->dma_dir);
+}
+
+static struct map_benchmark_ops dma_sg_map_benchmark_ops = {
+	.prepare = dma_sg_map_benchmark_prepare,
+	.unprepare = dma_sg_map_benchmark_unprepare,
+	.initialize_data = dma_sg_map_benchmark_initialize_data,
+	.do_map = dma_sg_map_benchmark_do_map,
+	.do_unmap = dma_sg_map_benchmark_do_unmap,
+};
+
 static struct map_benchmark_ops *dma_map_benchmark_ops[DMA_MAP_BENCH_MODE_MAX] = {
 	[DMA_MAP_BENCH_SINGLE_MODE] = &dma_single_map_benchmark_ops,
+	[DMA_MAP_BENCH_SG_MODE] = &dma_sg_map_benchmark_ops,
 };
 
 static int map_benchmark_thread(void *data)