of: reserved_mem: switch to ops-based OF_DECLARE()

Move init function from OF_DECLARE() argument to the given reserved
memory region ops structure and then pass that structure to the
OF_DECLARE() initializer. This node_init callback is mandatory for the
reserved mem driver. Such a change makes it possible in the future to add
more functions called by the generic code before a given memory region is
initialized and the rmem object is created.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://patch.msgid.link/20260325090023.3175348-4-m.szyprowski@samsung.com
Signed-off-by: Rob Herring (Arm) <robh@kernel.org>
This commit is contained in:
Marek Szyprowski
2026-03-25 10:00:19 +01:00
committed by Rob Herring (Arm)
parent 9d5149b3f2
commit c640cad6a5
6 changed files with 49 additions and 39 deletions

View File

@@ -362,10 +362,6 @@ static void rmem_dma_device_release(struct reserved_mem *rmem,
dev->dma_mem = NULL;
}
static const struct reserved_mem_ops rmem_dma_ops = {
.device_init = rmem_dma_device_init,
.device_release = rmem_dma_device_release,
};
static int __init rmem_dma_setup(unsigned long node, struct reserved_mem *rmem)
{
@@ -388,7 +384,6 @@ static int __init rmem_dma_setup(unsigned long node, struct reserved_mem *rmem)
}
#endif
rmem->ops = &rmem_dma_ops;
pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
&rmem->base, (unsigned long)rmem->size / SZ_1M);
return 0;
@@ -405,5 +400,11 @@ static int __init dma_init_reserved_memory(void)
core_initcall(dma_init_reserved_memory);
#endif /* CONFIG_DMA_GLOBAL_POOL */
RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
static const struct reserved_mem_ops rmem_dma_ops = {
.node_init = rmem_dma_setup,
.device_init = rmem_dma_device_init,
.device_release = rmem_dma_device_release,
};
RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", &rmem_dma_ops);
#endif

View File

@@ -470,11 +470,6 @@ static void rmem_cma_device_release(struct reserved_mem *rmem,
dev->cma_area = NULL;
}
static const struct reserved_mem_ops rmem_cma_ops = {
.device_init = rmem_cma_device_init,
.device_release = rmem_cma_device_release,
};
static int __init rmem_cma_setup(unsigned long node, struct reserved_mem *rmem)
{
bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
@@ -499,7 +494,6 @@ static int __init rmem_cma_setup(unsigned long node, struct reserved_mem *rmem)
if (default_cma)
dma_contiguous_default_area = cma;
rmem->ops = &rmem_cma_ops;
rmem->priv = cma;
pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
@@ -511,5 +505,12 @@ static int __init rmem_cma_setup(unsigned long node, struct reserved_mem *rmem)
return 0;
}
RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
static const struct reserved_mem_ops rmem_cma_ops = {
.node_init = rmem_cma_setup,
.device_init = rmem_cma_device_init,
.device_release = rmem_cma_device_release,
};
RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", &rmem_cma_ops);
#endif

View File

@@ -1855,11 +1855,6 @@ static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
dev->dma_io_tlb_mem = &io_tlb_default_mem;
}
static const struct reserved_mem_ops rmem_swiotlb_ops = {
.device_init = rmem_swiotlb_device_init,
.device_release = rmem_swiotlb_device_release,
};
static int __init rmem_swiotlb_setup(unsigned long node,
struct reserved_mem *rmem)
{
@@ -1869,11 +1864,16 @@ static int __init rmem_swiotlb_setup(unsigned long node,
of_get_flat_dt_prop(node, "no-map", NULL))
return -EINVAL;
rmem->ops = &rmem_swiotlb_ops;
pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
&rmem->base, (unsigned long)rmem->size / SZ_1M);
return 0;
}
RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
static const struct reserved_mem_ops rmem_swiotlb_ops = {
.node_init = rmem_swiotlb_setup,
.device_init = rmem_swiotlb_device_init,
.device_release = rmem_swiotlb_device_release,
};
RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", &rmem_swiotlb_ops);
#endif /* CONFIG_DMA_RESTRICTED_POOL */