Merge branch 'dt-reserved-mem-cleanups' into dma-mapping-for-next

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>

Committed by Marek Szyprowski on 2026-03-27 08:48:43 +01:00
10 changed files with 297 additions and 223 deletions


@@ -70,19 +70,20 @@ static void tegra210_emc_table_device_release(struct reserved_mem *rmem,
 	memunmap(timings);
 }
 
-static const struct reserved_mem_ops tegra210_emc_table_ops = {
-	.device_init = tegra210_emc_table_device_init,
-	.device_release = tegra210_emc_table_device_release,
-};
-
-static int tegra210_emc_table_init(struct reserved_mem *rmem)
+static int tegra210_emc_table_init(unsigned long node,
+				   struct reserved_mem *rmem)
 {
 	pr_debug("Tegra210 EMC table at %pa, size %lu bytes\n", &rmem->base,
 		 (unsigned long)rmem->size);
-	rmem->ops = &tegra210_emc_table_ops;
 
 	return 0;
 }
 
+static const struct reserved_mem_ops tegra210_emc_table_ops = {
+	.node_init = tegra210_emc_table_init,
+	.device_init = tegra210_emc_table_device_init,
+	.device_release = tegra210_emc_table_device_release,
+};
+
 RESERVEDMEM_OF_DECLARE(tegra210_emc_table, "nvidia,tegra210-emc-table",
-		       tegra210_emc_table_init);
+		       &tegra210_emc_table_ops);
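The hunk above is the template for every RESERVEDMEM_OF_DECLARE() user converted in this merge: the early init callback now receives the FDT node offset as an explicit argument (the rmem->fdt_node field goes away), the callback no longer stores rmem->ops itself, and the macro takes a pointer to a struct reserved_mem_ops instead of a bare init function. A minimal sketch of the new registration pattern for a hypothetical driver (all "myrmem" identifiers are invented for illustration):

#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static int __init myrmem_node_init(unsigned long node, struct reserved_mem *rmem)
{
	/* The FDT node offset is passed in; rmem->fdt_node no longer exists. */
	if (of_get_flat_dt_prop(node, "no-map", NULL))
		return -ENODEV;	/* "not mine": lets the core try further matches */

	pr_info("myrmem: region at %pa, size %lu bytes\n", &rmem->base,
		(unsigned long)rmem->size);
	return 0;	/* on success the core sets rmem->ops = &myrmem_ops */
}

static const struct reserved_mem_ops myrmem_ops = {
	.node_init = myrmem_node_init,
};

RESERVEDMEM_OF_DECLARE(myrmem, "vendor,myrmem", &myrmem_ops);

The ops table must be visible at the RESERVEDMEM_OF_DECLARE() line but may come after the callbacks it points to, which is why the tegra210 hunk moves the structure below the init function.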


@@ -1274,7 +1274,7 @@ void __init unflatten_device_tree(void)
 	void *fdt = initial_boot_params;
 
 	/* Save the statically-placed regions in the reserved_mem array */
-	fdt_scan_reserved_mem_reg_nodes();
+	fdt_scan_reserved_mem_late();
 
 	/* Populate an empty root node when bootloader doesn't provide one */
 	if (!fdt) {


@@ -186,7 +186,7 @@ static inline struct device_node *__of_get_dma_parent(const struct device_node *
 #endif
 
 int fdt_scan_reserved_mem(void);
-void __init fdt_scan_reserved_mem_reg_nodes(void);
+void __init fdt_scan_reserved_mem_late(void);
 bool of_fdt_device_is_available(const void *blob, unsigned long node);


@@ -24,8 +24,6 @@
 #include <linux/slab.h>
 #include <linux/memblock.h>
 #include <linux/kmemleak.h>
-#include <linux/cma.h>
-#include <linux/dma-map-ops.h>
 
 #include "of_private.h"
@@ -104,30 +102,12 @@ static void __init alloc_reserved_mem_array(void)
 	reserved_mem = new_array;
 }
 
-static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem);
-/*
- * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
- */
-static void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
-					      phys_addr_t base, phys_addr_t size)
-{
-	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
-
-	if (reserved_mem_count == total_reserved_mem_cnt) {
-		pr_err("not enough space for all defined regions.\n");
-		return;
-	}
-
-	rmem->fdt_node = node;
-	rmem->name = uname;
-	rmem->base = base;
-	rmem->size = size;
-
-	/* Call the region specific initialization function */
-	fdt_init_reserved_mem_node(rmem);
-
-	reserved_mem_count++;
-}
+static void fdt_init_reserved_mem_node(unsigned long node, const char *uname,
+				       phys_addr_t base, phys_addr_t size);
+static int fdt_validate_reserved_mem_node(unsigned long node,
+					  phys_addr_t *align);
+static int fdt_fixup_reserved_mem_node(unsigned long node,
+				       phys_addr_t base, phys_addr_t size);
 
 static int __init early_init_dt_reserve_memory(phys_addr_t base,
 					       phys_addr_t size, bool nomap)
@@ -154,21 +134,19 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
 					     const char *uname)
 {
 	phys_addr_t base, size;
-	int i, len;
+	int i, len, err;
 	const __be32 *prop;
-	bool nomap, default_cma;
+	bool nomap;
 
 	prop = of_flat_dt_get_addr_size_prop(node, "reg", &len);
 	if (!prop)
 		return -ENOENT;
 
 	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
-	default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
-	if (default_cma && cma_skip_dt_default_reserved_mem()) {
-		pr_err("Skipping dt linux,cma-default for \"cma=\" kernel param.\n");
-		return -EINVAL;
-	}
+	err = fdt_validate_reserved_mem_node(node, NULL);
+	if (err && err != -ENODEV)
+		return err;
 
 	for (i = 0; i < len; i++) {
 		u64 b, s;
@@ -179,10 +157,7 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
 		size = s;
 
 		if (size && early_init_dt_reserve_memory(base, size, nomap) == 0) {
-			/* Architecture specific contiguous memory fixup. */
-			if (of_flat_dt_is_compatible(node, "shared-dma-pool") &&
-			    of_get_flat_dt_prop(node, "reusable", NULL))
-				dma_contiguous_early_fixup(base, size);
+			fdt_fixup_reserved_mem_node(node, base, size);
 			pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
 				 uname, &base, (unsigned long)(size / SZ_1M));
 		} else {
@@ -216,19 +191,66 @@ static int __init __reserved_mem_check_root(unsigned long node)
 	return 0;
 }
 
-static void __init __rmem_check_for_overlap(void);
+static int __init __rmem_cmp(const void *a, const void *b)
+{
+	const struct reserved_mem *ra = a, *rb = b;
+
+	if (ra->base < rb->base)
+		return -1;
+
+	if (ra->base > rb->base)
+		return 1;
+
+	/*
+	 * Put the dynamic allocations (address == 0, size == 0) before static
+	 * allocations at address 0x0 so that overlap detection works
+	 * correctly.
+	 */
+	if (ra->size < rb->size)
+		return -1;
+	if (ra->size > rb->size)
+		return 1;
+
+	return 0;
+}
+
+static void __init __rmem_check_for_overlap(void)
+{
+	int i;
+
+	if (reserved_mem_count < 2)
+		return;
+
+	sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
+	     __rmem_cmp, NULL);
+	for (i = 0; i < reserved_mem_count - 1; i++) {
+		struct reserved_mem *this, *next;
+
+		this = &reserved_mem[i];
+		next = &reserved_mem[i + 1];
+
+		if (this->base + this->size > next->base) {
+			phys_addr_t this_end, next_end;
+
+			this_end = this->base + this->size;
+			next_end = next->base + next->size;
+			pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
+			       this->name, &this->base, &this_end,
+			       next->name, &next->base, &next_end);
+		}
+	}
+}
 
 /**
- * fdt_scan_reserved_mem_reg_nodes() - Store info for the "reg" defined
- * reserved memory regions.
+ * fdt_scan_reserved_mem_late() - Scan FDT and initialize remaining reserved
+ * memory regions.
  *
- * This function is used to scan through the DT and store the
- * information for the reserved memory regions that are defined using
- * the "reg" property. The region node number, name, base address, and
- * size are all stored in the reserved_mem array by calling the
- * fdt_reserved_mem_save_node() function.
+ * This function is used to scan again through the DT and initialize the
+ * "static" reserved memory regions, that are defined using the "reg"
+ * property. Each such region is then initialized with its specific init
+ * function and stored in the global reserved_mem array.
  */
-void __init fdt_scan_reserved_mem_reg_nodes(void)
+void __init fdt_scan_reserved_mem_late(void)
 {
 	const void *fdt = initial_boot_params;
 	phys_addr_t base, size;
@@ -253,23 +275,25 @@ void __init fdt_scan_reserved_mem_reg_nodes(void)
 	fdt_for_each_subnode(child, fdt, node) {
 		const char *uname;
-		bool default_cma = of_get_flat_dt_prop(child, "linux,cma-default", NULL);
 		u64 b, s;
+		int ret;
 
 		if (!of_fdt_device_is_available(fdt, child))
 			continue;
-		if (default_cma && cma_skip_dt_default_reserved_mem())
-			continue;
 		if (!of_flat_dt_get_addr_size(child, "reg", &b, &s))
 			continue;
+		ret = fdt_validate_reserved_mem_node(child, NULL);
+		if (ret && ret != -ENODEV)
+			continue;
 
 		base = b;
 		size = s;
 
 		if (size) {
 			uname = fdt_get_name(fdt, child, NULL);
-			fdt_reserved_mem_save_node(child, uname, base, size);
+			fdt_init_reserved_mem_node(child, uname, base, size);
 		}
 	}
@@ -280,7 +304,14 @@ void __init fdt_scan_reserved_mem_reg_nodes(void)
 
 static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname);
 
 /*
- * fdt_scan_reserved_mem() - scan a single FDT node for reserved memory
+ * fdt_scan_reserved_mem() - reserve and allocate memory occupied by
+ * reserved memory regions.
+ *
+ * This function is used to scan through the FDT and mark memory occupied
+ * by all static (defined by the "reg" property) reserved memory regions.
+ * Then memory for all dynamic regions (defined by size & alignment) is
+ * allocated, a region specific init function is called and region information
+ * is stored in the reserved_mem array.
  */
 int __init fdt_scan_reserved_mem(void)
 {
@@ -397,7 +428,7 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
 	phys_addr_t base = 0, align = 0, size;
 	int i, len;
 	const __be32 *prop;
-	bool nomap, default_cma;
+	bool nomap;
 	int ret;
 
 	prop = of_get_flat_dt_prop(node, "size", &len);
@@ -421,19 +452,10 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
 	}
 
 	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
-	default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
-	if (default_cma && cma_skip_dt_default_reserved_mem()) {
-		pr_err("Skipping dt linux,cma-default for \"cma=\" kernel param.\n");
-		return -EINVAL;
-	}
 
-	/* Need adjust the alignment to satisfy the CMA requirement */
-	if (IS_ENABLED(CONFIG_CMA)
-	    && of_flat_dt_is_compatible(node, "shared-dma-pool")
-	    && of_get_flat_dt_prop(node, "reusable", NULL)
-	    && !nomap)
-		align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);
+	ret = fdt_validate_reserved_mem_node(node, &align);
+	if (ret && ret != -ENODEV)
+		return ret;
 
 	prop = of_flat_dt_get_addr_size_prop(node, "alloc-ranges", &len);
 	if (prop) {
@@ -468,121 +490,151 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
 			uname, (unsigned long)(size / SZ_1M));
 		return -ENOMEM;
 	}
 
-	/* Architecture specific contiguous memory fixup. */
-	if (of_flat_dt_is_compatible(node, "shared-dma-pool") &&
-	    of_get_flat_dt_prop(node, "reusable", NULL))
-		dma_contiguous_early_fixup(base, size);
-
-	/* Save region in the reserved_mem array */
-	fdt_reserved_mem_save_node(node, uname, base, size);
+	fdt_fixup_reserved_mem_node(node, base, size);
+	fdt_init_reserved_mem_node(node, uname, base, size);
 	return 0;
 }
+extern const struct of_device_id __reservedmem_of_table[];
+
 static const struct of_device_id __rmem_of_table_sentinel
 	__used __section("__reservedmem_of_table_end");
 
-/*
- * __reserved_mem_init_node() - call region specific reserved memory init code
- */
-static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
-{
-	extern const struct of_device_id __reservedmem_of_table[];
-	const struct of_device_id *i;
-	int ret = -ENOENT;
-
-	for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
-		reservedmem_of_init_fn initfn = i->data;
-		const char *compat = i->compatible;
-
-		if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
-			continue;
-
-		ret = initfn(rmem);
-		if (ret == 0) {
-			pr_info("initialized node %s, compatible id %s\n",
-				rmem->name, compat);
-			break;
-		}
-	}
-	return ret;
-}
+/**
+ * fdt_fixup_reserved_mem_node() - call fixup function for a reserved memory node
+ * @node: FDT node to fixup
+ * @base: base address of the reserved memory region
+ * @size: size of the reserved memory region
+ *
+ * This function iterates through the reserved memory drivers and calls
+ * the node_fixup callback for the compatible entry matching the node.
+ *
+ * Return: 0 on success, -ENODEV if no compatible match found
+ */
+static int __init fdt_fixup_reserved_mem_node(unsigned long node,
+					      phys_addr_t base, phys_addr_t size)
+{
+	const struct of_device_id *i;
+	int ret = -ENODEV;
+
+	for (i = __reservedmem_of_table; ret == -ENODEV &&
+	     i < &__rmem_of_table_sentinel; i++) {
+		const struct reserved_mem_ops *ops = i->data;
+
+		if (!of_flat_dt_is_compatible(node, i->compatible))
+			continue;
+
+		if (ops->node_fixup)
+			ret = ops->node_fixup(node, base, size);
+	}
+
+	return ret;
+}
+
+/**
+ * fdt_validate_reserved_mem_node() - validate a reserved memory node
+ * @node: FDT node to validate
+ * @align: pointer to store the validated alignment (may be modified by callback)
+ *
+ * This function iterates through the reserved memory drivers and calls
+ * the node_validate callback for the compatible entry matching the node.
+ *
+ * Return: 0 on success, -ENODEV if no compatible match found
+ */
+static int __init fdt_validate_reserved_mem_node(unsigned long node, phys_addr_t *align)
+{
+	const struct of_device_id *i;
+	int ret = -ENODEV;
+
+	for (i = __reservedmem_of_table; ret == -ENODEV &&
+	     i < &__rmem_of_table_sentinel; i++) {
+		const struct reserved_mem_ops *ops = i->data;
+
+		if (!of_flat_dt_is_compatible(node, i->compatible))
+			continue;
+
+		if (ops->node_validate)
+			ret = ops->node_validate(node, align);
+	}
+
+	return ret;
+}
+
+/**
+ * __reserved_mem_init_node() - initialize a reserved memory region
+ * @rmem: reserved_mem structure to initialize
+ * @node: FDT node describing the reserved memory region
+ *
+ * This function iterates through the reserved memory drivers and calls the
+ * node_init callback for the compatible entry matching the node. On success,
+ * the operations pointer is stored in the reserved_mem structure.
+ *
+ * Return: 0 on success, -ENODEV if no compatible match found
+ */
+static int __init __reserved_mem_init_node(struct reserved_mem *rmem,
+					   unsigned long node)
+{
+	const struct of_device_id *i;
+	int ret = -ENODEV;
+
+	for (i = __reservedmem_of_table; ret == -ENODEV &&
+	     i < &__rmem_of_table_sentinel; i++) {
+		const struct reserved_mem_ops *ops = i->data;
+		const char *compat = i->compatible;
+
+		if (!of_flat_dt_is_compatible(node, compat))
+			continue;
+
+		ret = ops->node_init(node, rmem);
+		if (ret == 0) {
+			rmem->ops = ops;
+			pr_info("initialized node %s, compatible id %s\n",
+				rmem->name, compat);
+			return ret;
+		}
+	}
+
+	return ret;
+}
-static int __init __rmem_cmp(const void *a, const void *b)
-{
-	const struct reserved_mem *ra = a, *rb = b;
-
-	if (ra->base < rb->base)
-		return -1;
-
-	if (ra->base > rb->base)
-		return 1;
-
-	/*
-	 * Put the dynamic allocations (address == 0, size == 0) before static
-	 * allocations at address 0x0 so that overlap detection works
-	 * correctly.
-	 */
-	if (ra->size < rb->size)
-		return -1;
-	if (ra->size > rb->size)
-		return 1;
-
-	if (ra->fdt_node < rb->fdt_node)
-		return -1;
-	if (ra->fdt_node > rb->fdt_node)
-		return 1;
-
-	return 0;
-}
-
-static void __init __rmem_check_for_overlap(void)
-{
-	int i;
-
-	if (reserved_mem_count < 2)
-		return;
-
-	sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
-	     __rmem_cmp, NULL);
-	for (i = 0; i < reserved_mem_count - 1; i++) {
-		struct reserved_mem *this, *next;
-
-		this = &reserved_mem[i];
-		next = &reserved_mem[i + 1];
-
-		if (this->base + this->size > next->base) {
-			phys_addr_t this_end, next_end;
-
-			this_end = this->base + this->size;
-			next_end = next->base + next->size;
-			pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
-			       this->name, &this->base, &this_end,
-			       next->name, &next->base, &next_end);
-		}
-	}
-}
 /**
  * fdt_init_reserved_mem_node() - Initialize a reserved memory region
- * @rmem: reserved_mem struct of the memory region to be initialized.
+ * @node: fdt node of the initialized region
+ * @uname: name of the reserved memory node
+ * @base: base address of the reserved memory region
+ * @size: size of the reserved memory region
  *
- * This function is used to call the region specific initialization
- * function for a reserved memory region.
+ * This function calls the region-specific initialization function for a
+ * reserved memory region and saves all region-specific data to the
+ * reserved_mem array to allow of_reserved_mem_lookup() to find it.
  */
-static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem)
+static void __init fdt_init_reserved_mem_node(unsigned long node, const char *uname,
+					      phys_addr_t base, phys_addr_t size)
 {
-	unsigned long node = rmem->fdt_node;
 	int err = 0;
 	bool nomap;
+	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
+
+	if (reserved_mem_count == total_reserved_mem_cnt) {
+		pr_err("not enough space for all defined regions.\n");
+		return;
+	}
+
+	rmem->name = uname;
+	rmem->base = base;
+	rmem->size = size;
 
 	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
 
-	err = __reserved_mem_init_node(rmem);
-	if (err != 0 && err != -ENOENT) {
+	err = __reserved_mem_init_node(rmem, node);
+	if (err != 0 && err != -ENODEV) {
 		pr_info("node %s compatible matching fail\n", rmem->name);
+		rmem->name = NULL;
 		if (nomap)
 			memblock_clear_nomap(rmem->base, rmem->size);
 		else
 			memblock_phys_free(rmem->base, rmem->size);
+		return;
 	} else {
 		phys_addr_t end = rmem->base + rmem->size - 1;
 		bool reusable =
@@ -594,6 +646,8 @@ static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem)
 			reusable ? "reusable" : "non-reusable",
 			rmem->name ? rmem->name : "unknown");
 	}
+
+	reserved_mem_count++;
 }
struct rmem_assigned_device {

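Taken together, the of_reserved_mem.c changes split what used to be a single init callback into three optional hooks invoked at different stages of early boot. A rough outline of the resulting call order, condensed from the code above (error handling elided):

/*
 * Early FDT scan, memblock only:
 *
 * fdt_scan_reserved_mem()
 *   __reserved_mem_reserve_reg()		static ("reg") regions
 *     fdt_validate_reserved_mem_node()		  -> ops->node_validate
 *     early_init_dt_reserve_memory()
 *     fdt_fixup_reserved_mem_node()		  -> ops->node_fixup
 *   __reserved_mem_alloc_size()		dynamic ("size") regions
 *     fdt_validate_reserved_mem_node()		  -> ops->node_validate (may raise align)
 *     fdt_fixup_reserved_mem_node()		  -> ops->node_fixup
 *     fdt_init_reserved_mem_node()		  -> ops->node_init, entry saved
 *
 * Later, from unflatten_device_tree():
 *
 * fdt_scan_reserved_mem_late()			second pass over static regions
 *   fdt_validate_reserved_mem_node()		  -> ops->node_validate
 *   fdt_init_reserved_mem_node()		  -> ops->node_init, entry saved
 */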

@@ -61,14 +61,4 @@ extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
 extern bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end);
 
 extern void cma_reserve_pages_on_error(struct cma *cma);
 
-#ifdef CONFIG_DMA_CMA
-extern bool cma_skip_dt_default_reserved_mem(void);
-#else
-static inline bool cma_skip_dt_default_reserved_mem(void)
-{
-	return false;
-}
-#endif
-
 #endif


@@ -147,9 +147,6 @@ static inline void dma_free_contiguous(struct device *dev, struct page *page,
 {
 	__free_pages(page, get_order(size));
 }
-static inline void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
-{
-}
 #endif /* CONFIG_DMA_CMA*/
 
 #ifdef CONFIG_DMA_DECLARE_COHERENT


@@ -11,7 +11,6 @@ struct resource;
 
 struct reserved_mem {
 	const char *name;
-	unsigned long fdt_node;
 	const struct reserved_mem_ops *ops;
 	phys_addr_t base;
 	phys_addr_t size;
@@ -19,18 +18,20 @@ struct reserved_mem {
 };
 
 struct reserved_mem_ops {
+	int (*node_validate)(unsigned long fdt_node, phys_addr_t *align);
+	int (*node_fixup)(unsigned long fdt_node, phys_addr_t base,
+			  phys_addr_t size);
+	int (*node_init)(unsigned long fdt_node, struct reserved_mem *rmem);
 	int (*device_init)(struct reserved_mem *rmem,
 			   struct device *dev);
 	void (*device_release)(struct reserved_mem *rmem,
 			       struct device *dev);
 };
 
-typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem);
-
 #ifdef CONFIG_OF_RESERVED_MEM
 
-#define RESERVEDMEM_OF_DECLARE(name, compat, init)			\
-	_OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn)
+#define RESERVEDMEM_OF_DECLARE(name, compat, ops)			\
+	_OF_DECLARE(reservedmem, name, compat, ops, struct reserved_mem_ops *)
 
 int of_reserved_mem_device_init_by_idx(struct device *dev,
 				       struct device_node *np, int idx);
@@ -48,8 +49,9 @@ int of_reserved_mem_region_count(const struct device_node *np);
 #else
 
-#define RESERVEDMEM_OF_DECLARE(name, compat, init)			\
-	_OF_DECLARE_STUB(reservedmem, name, compat, init, reservedmem_of_init_fn)
+#define RESERVEDMEM_OF_DECLARE(name, compat, ops)			\
+	_OF_DECLARE_STUB(reservedmem, name, compat, ops,		\
+			 struct reserved_mem_ops *)
 
 static inline int of_reserved_mem_device_init_by_idx(struct device *dev,
 					struct device_node *np, int idx)

@@ -362,17 +362,11 @@ static void rmem_dma_device_release(struct reserved_mem *rmem,
 	dev->dma_mem = NULL;
 }
 
-static const struct reserved_mem_ops rmem_dma_ops = {
-	.device_init = rmem_dma_device_init,
-	.device_release = rmem_dma_device_release,
-};
-
-static int __init rmem_dma_setup(struct reserved_mem *rmem)
+static int __init rmem_dma_setup(unsigned long node, struct reserved_mem *rmem)
 {
-	unsigned long node = rmem->fdt_node;
-
 	if (of_get_flat_dt_prop(node, "reusable", NULL))
-		return -EINVAL;
+		return -ENODEV;
 
 #ifdef CONFIG_ARM
 	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
@@ -390,7 +384,6 @@ static int __init rmem_dma_setup(struct reserved_mem *rmem)
 	}
 #endif
 
-	rmem->ops = &rmem_dma_ops;
 	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
 		&rmem->base, (unsigned long)rmem->size / SZ_1M);
 	return 0;
@@ -407,5 +400,11 @@ static int __init dma_init_reserved_memory(void)
 core_initcall(dma_init_reserved_memory);
 #endif /* CONFIG_DMA_GLOBAL_POOL */
 
-RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
+static const struct reserved_mem_ops rmem_dma_ops = {
+	.node_init = rmem_dma_setup,
+	.device_init = rmem_dma_device_init,
+	.device_release = rmem_dma_device_release,
+};
+
+RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", &rmem_dma_ops);
 #endif
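The switch from -EINVAL to -ENODEV in rmem_dma_setup() matters because two handlers register for the same "shared-dma-pool" compatible (this coherent-pool one and the CMA one below), and the new matching loops only keep iterating while the result is -ENODEV. A condensed illustration of that contract, inferred from the loop in __reserved_mem_init_node() above:

/*
 * Node with the "reusable" property, compatible = "shared-dma-pool":
 *
 *   rmem_dma_setup()  -> -ENODEV	(reusable set: not a coherent pool,
 *					 the walk continues to the next match)
 *   rmem_cma_setup()  -> 0		(region becomes a CMA pool,
 *					 core stores rmem->ops = &rmem_cma_ops)
 *
 * Had rmem_dma_setup() kept returning -EINVAL, the walk would stop there
 * and fdt_init_reserved_mem_node() would free the region as a
 * "compatible matching fail".
 */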


@@ -91,16 +91,6 @@ static int __init early_cma(char *p)
 }
 early_param("cma", early_cma);
 
-/*
- * cma_skip_dt_default_reserved_mem - This is called from the
- * reserved_mem framework to detect if the default cma region is being
- * set by the "cma=" kernel parameter.
- */
-bool __init cma_skip_dt_default_reserved_mem(void)
-{
-	return size_cmdline != -1;
-}
-
 #ifdef CONFIG_DMA_NUMA_CMA
 
 static struct cma *dma_contiguous_numa_area[MAX_NUMNODES];
@@ -470,47 +460,89 @@ static void rmem_cma_device_release(struct reserved_mem *rmem,
 	dev->cma_area = NULL;
 }
 
-static const struct reserved_mem_ops rmem_cma_ops = {
-	.device_init = rmem_cma_device_init,
-	.device_release = rmem_cma_device_release,
-};
-
-static int __init rmem_cma_setup(struct reserved_mem *rmem)
+static int __init __rmem_cma_verify_node(unsigned long node)
 {
-	unsigned long node = rmem->fdt_node;
-	bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
-	struct cma *cma;
-	int err;
-
 	if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
 	    of_get_flat_dt_prop(node, "no-map", NULL))
-		return -EINVAL;
+		return -ENODEV;
+
+	if (size_cmdline != -1 &&
+	    of_get_flat_dt_prop(node, "linux,cma-default", NULL)) {
+		pr_err("Skipping dt linux,cma-default node in favor for \"cma=\" kernel param.\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int __init rmem_cma_validate(unsigned long node, phys_addr_t *align)
+{
+	int ret = __rmem_cma_verify_node(node);
+
+	if (ret)
+		return ret;
+
+	if (align)
+		*align = max_t(phys_addr_t, *align, CMA_MIN_ALIGNMENT_BYTES);
+
+	return 0;
+}
+
+static int __init rmem_cma_fixup(unsigned long node, phys_addr_t base,
+				 phys_addr_t size)
+{
+	int ret = __rmem_cma_verify_node(node);
+
+	if (ret)
+		return ret;
+
+	/* Architecture specific contiguous memory fixup. */
+	dma_contiguous_early_fixup(base, size);
+
+	return 0;
+}
+
+static int __init rmem_cma_setup(unsigned long node, struct reserved_mem *rmem)
+{
+	bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
+	struct cma *cma;
+	int ret;
+
+	ret = __rmem_cma_verify_node(node);
+	if (ret)
+		return ret;
 
 	if (!IS_ALIGNED(rmem->base | rmem->size, CMA_MIN_ALIGNMENT_BYTES)) {
 		pr_err("Reserved memory: incorrect alignment of CMA region\n");
 		return -EINVAL;
 	}
 
-	err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
-	if (err) {
+	ret = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
+	if (ret) {
 		pr_err("Reserved memory: unable to setup CMA region\n");
-		return err;
+		return ret;
 	}
 
 	if (default_cma)
 		dma_contiguous_default_area = cma;
 
-	rmem->ops = &rmem_cma_ops;
 	rmem->priv = cma;
 
 	pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
 		&rmem->base, (unsigned long)rmem->size / SZ_1M);
 
-	err = dma_heap_cma_register_heap(cma);
-	if (err)
+	ret = dma_heap_cma_register_heap(cma);
+	if (ret)
 		pr_warn("Couldn't register CMA heap.");
 
 	return 0;
 }
 
-RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
+static const struct reserved_mem_ops rmem_cma_ops = {
+	.node_validate = rmem_cma_validate,
+	.node_fixup = rmem_cma_fixup,
+	.node_init = rmem_cma_setup,
+	.device_init = rmem_cma_device_init,
+	.device_release = rmem_cma_device_release,
+};
+
+RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", &rmem_cma_ops);
 #endif
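rmem_cma_setup() still re-checks the alignment that rmem_cma_validate() tried to guarantee, because statically placed ("reg") regions take their base and size straight from the device tree and never pass through the allocation path that applies the raised alignment. The `rmem->base | rmem->size` idiom tests both values with a single IS_ALIGNED() call; a self-contained sketch of why that works (the 4 MiB figure is illustrative only, the real CMA_MIN_ALIGNMENT_BYTES is architecture-dependent):

/* OR-ing merges the low-order bits of base and size, so a misaligned
 * bit in either value survives into the masked test. */
#define EXAMPLE_ALIGN	(4UL << 20)	/* illustrative stand-in for CMA_MIN_ALIGNMENT_BYTES */

static bool example_region_aligned(unsigned long base, unsigned long size)
{
	/* equivalent to IS_ALIGNED(base | size, EXAMPLE_ALIGN) */
	return ((base | size) & (EXAMPLE_ALIGN - 1)) == 0;
}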


@@ -1877,26 +1877,25 @@ static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
 	dev->dma_io_tlb_mem = &io_tlb_default_mem;
 }
 
-static const struct reserved_mem_ops rmem_swiotlb_ops = {
-	.device_init = rmem_swiotlb_device_init,
-	.device_release = rmem_swiotlb_device_release,
-};
-
-static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
+static int __init rmem_swiotlb_setup(unsigned long node,
+				     struct reserved_mem *rmem)
 {
-	unsigned long node = rmem->fdt_node;
-
 	if (of_get_flat_dt_prop(node, "reusable", NULL) ||
 	    of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
 	    of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
 	    of_get_flat_dt_prop(node, "no-map", NULL))
 		return -EINVAL;
 
-	rmem->ops = &rmem_swiotlb_ops;
 	pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
 		&rmem->base, (unsigned long)rmem->size / SZ_1M);
 	return 0;
 }
 
-RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
+static const struct reserved_mem_ops rmem_swiotlb_ops = {
+	.node_init = rmem_swiotlb_setup,
+	.device_init = rmem_swiotlb_device_init,
+	.device_release = rmem_swiotlb_device_release,
+};
+
+RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", &rmem_swiotlb_ops);
 #endif	/* CONFIG_DMA_RESTRICTED_POOL */