Files
linux/drivers/dax/hmem/device.c
Dan Williams f8dc1bde18 dax/hmem: Fix singleton confusion between dax_hmem_work and hmem devices
dax_hmem (ab)uses a platform device to allow for a module to autoload in
the presence of "Soft Reserved" resources. The dax_hmem driver had no
dependencies on the "hmem_platform" device being a singleton until the
recent "dax_hmem vs dax_cxl" takeover solution.

Replace the layering violation of dax_hmem_work assuming that there will
never be more than one "hmem_platform" device associated with a global work
item with a dax_hmem local workqueue that can theoretically support any
number of hmem_platform devices.

Fix up the reference counting so the device is only pinned while it is live
in the queue.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Alison Schofield <alison.schofield@intel.com>
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Link: https://patch.msgid.link/20260327052821.440749-7-dan.j.williams@intel.com
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
2026-04-01 08:12:18 -07:00

104 lines
2.2 KiB
C

// SPDX-License-Identifier: GPL-2.0
#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/module.h>
#include <linux/dax.h>
#include <linux/mm.h>
#include "../bus.h"
/* Module parameter: dax_hmem.disable=1 suppresses all hmem registration. */
static bool nohmem;
module_param_named(disable, nohmem, bool, 0444);

/* Set once the "hmem_platform" device has been successfully registered. */
static bool platform_initialized;

/* Serializes hmem_active child walks/insertions and platform device bring-up. */
static DEFINE_MUTEX(hmem_resource_lock);

/*
 * Root of the registered hmem ranges: each child is an active range
 * claimed via __request_region(), with res->desc (re)used to carry the
 * target node id (see __hmem_register_resource()).
 */
static struct resource hmem_active = {
	.name = "HMEM devices",
	.start = 0,
	.end = -1,
	.flags = IORESOURCE_MEM,
};
/*
 * Invoke @fn for every registered hmem range under hmem_resource_lock.
 * The walk terminates early when @fn returns non-zero, and that value
 * is propagated to the caller; 0 means all ranges were visited.
 * res->desc carries the target node id recorded at registration time.
 */
int walk_hmem_resources(struct device *host, walk_hmem_fn fn)
{
	struct resource *res;
	int rc = 0;

	mutex_lock(&hmem_resource_lock);
	res = hmem_active.child;
	while (res) {
		rc = fn(host, (int) res->desc, res);
		if (rc)
			break;
		res = res->sibling;
	}
	mutex_unlock(&hmem_resource_lock);

	return rc;
}
EXPORT_SYMBOL_GPL(walk_hmem_resources);
/*
 * Work handler for hmem_platform.work. Intentionally empty: it only
 * exists so the work item is valid until the dax_hmem driver attaches
 * and supplies the real behavior.
 */
static void hmem_work(struct work_struct *work)
{
}
/*
 * Singleton "hmem_platform" device used to autoload the dax_hmem module
 * when "Soft Reserved" ranges are present. The embedded work item is
 * statically initialized with the hmem_work placeholder handler.
 * NOTE(review): struct hmem_platform_device is declared in ../bus.h —
 * confirm field layout (pdev, work) there.
 */
static struct hmem_platform_device hmem_platform = {
	.pdev = {
		.name = "hmem_platform",
		.id = 0,
	},
	.work = __WORK_INITIALIZER(hmem_platform.work, hmem_work),
};
/*
 * Claim @res under the hmem_active tree (caller must hold
 * hmem_resource_lock), recording @target_nid in the new child's ->desc.
 * On the first successful claim, register the "hmem_platform" device so
 * the dax_hmem driver can autoload. A range that is already active is
 * silently skipped; a failed device registration is reported once and
 * retried on a subsequent call.
 */
static void __hmem_register_resource(int target_nid, struct resource *res)
{
	struct resource *new;

	new = __request_region(&hmem_active, res->start, resource_size(res), "",
			       0);
	if (!new) {
		pr_debug("hmem range %pr already active\n", res);
		return;
	}

	/* Stash the target node for walk_hmem_resources() consumers */
	new->desc = target_nid;

	if (!platform_initialized) {
		if (platform_device_register(&hmem_platform.pdev)) {
			pr_err_once("failed to register device-dax hmem_platform device\n");
			return;
		}
		platform_initialized = true;
	}
}
/*
 * Public entry point: register a "Soft Reserved" range for device-dax
 * consideration, unless registration was disabled via the module
 * parameter (dax_hmem.disable=1). Takes hmem_resource_lock around the
 * actual registration.
 */
void hmem_register_resource(int target_nid, struct resource *res)
{
	if (!nohmem) {
		mutex_lock(&hmem_resource_lock);
		__hmem_register_resource(target_nid, res);
		mutex_unlock(&hmem_resource_lock);
	}
}
/*
 * Per-range callback for the init-time "Soft Reserved" walk: resolve
 * the target node for @res and hand it off for registration. Always
 * returns 0 so the walk continues over all ranges.
 */
static __init int hmem_register_one(struct resource *res, void *data)
{
	int target_nid = phys_to_target_node(res->start);

	hmem_register_resource(target_nid, res);
	return 0;
}
/*
 * Walk every "Soft Reserved" range in [0, -1] (i.e. the whole physical
 * address space) and register each via hmem_register_one(). Always
 * returns 0; individual registration failures are not fatal to init.
 */
static __init int hmem_init(void)
{
	walk_soft_reserve_res(0, -1, NULL, hmem_register_one);
	return 0;
}

/*
 * As this is a fallback for address ranges unclaimed by the ACPI HMAT
 * parsing it must be at an initcall level greater than hmat_init().
 */
device_initcall(hmem_init);