mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 06:44:00 -04:00
When platform firmware is committed to publishing EFI_CONVENTIONAL_MEMORY in the memory map, but CXL fails to assemble the region, dax_hmem can attempt to attach a dax device to the memory range. Take advantage of the new ability to support multiple "hmem_platform" devices, and to enable regression testing of several scenarios: * CXL correctly assembles a region, check dax_hmem fails to attach dax * CXL fails to assemble a region, check dax_hmem successfully attaches dax * Check that loading the dax_cxl driver loads the dax_hmem driver * Attempt to race cxl_mock_mem async probe vs dax_hmem probe flushing. Check both the positive and negative cases. Signed-off-by: Dan Williams <dan.j.williams@intel.com> Reviewed-by: Ira Weiny <ira.weiny@intel.com> Reviewed-by: Dave Jiang <dave.jiang@intel.com> Reviewed-by: Alison Schofield <alison.schofield@intel.com> Tested-by: Alison Schofield <alison.schofield@intel.com> Link: https://patch.msgid.link/20260327052821.440749-10-dan.j.williams@intel.com Signed-off-by: Dave Jiang <dave.jiang@intel.com>
44 lines
1.7 KiB
C
44 lines
1.7 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
|
|
#include <linux/list.h>
|
|
#include <linux/acpi.h>
|
|
#include <linux/dax.h>
|
|
#include <cxl.h>
|
|
|
|
/*
 * Table of operations a cxl_test module interposes in place of the real
 * ACPI/CXL/hmem implementations.  A test module fills one of these in and
 * registers it via register_cxl_mock_ops(); interposed call sites look up
 * the registered ops (get_cxl_mock_ops()) to decide whether a given
 * device/resource is mocked and, if so, which stub to invoke.
 *
 * NOTE(review): each member mirrors the like-named kernel API — confirm
 * the exact interposition points against the mock implementation (mock.c).
 */
struct cxl_mock_ops {
	/* linkage on the list of registered mock-ops tables */
	struct list_head list;
	/* return true if @dev is an ACPI device fabricated by the test */
	bool (*is_mock_adev)(struct acpi_device *dev);
	/* stand-in for acpi_table_parse_cedt(): walk fabricated CEDT entries */
	int (*acpi_table_parse_cedt)(enum acpi_cedt_type id,
				     acpi_tbl_entry_handler_arg handler_arg,
				     void *arg);
	/* return true if @dev is a mocked CXL host bridge */
	bool (*is_mock_bridge)(struct device *dev);
	/* stand-in for acpi_evaluate_integer() on mocked ACPI handles */
	acpi_status (*acpi_evaluate_integer)(acpi_handle handle,
					     acpi_string pathname,
					     struct acpi_object_list *arguments,
					     unsigned long long *data);
	/* stand-in for acpi_pci_find_root() on mocked handles */
	struct acpi_pci_root *(*acpi_pci_find_root)(acpi_handle handle);
	/* return true if @bus is a mocked PCI bus */
	bool (*is_mock_bus)(struct pci_bus *bus);
	/* return true if @dev is a mocked CXL port */
	bool (*is_mock_port)(struct device *dev);
	/* return true if @dev is a mocked device */
	bool (*is_mock_dev)(struct device *dev);
	/* decoder setup stand-ins for mocked switch and endpoint ports */
	int (*devm_cxl_switch_port_decoders_setup)(struct cxl_port *port);
	int (*devm_cxl_endpoint_decoders_setup)(struct cxl_port *port);
	/* stand-in for CDAT parsing on a mocked endpoint port */
	void (*cxl_endpoint_parse_cdat)(struct cxl_port *port);
	/* stand-in for devm_cxl_add_dport_by_dev() on mocked ports */
	struct cxl_dport *(*devm_cxl_add_dport_by_dev)(struct cxl_port *port,
						       struct device *dport_dev);
	/* stand-in for the HMAT extended-linear-cache size lookup */
	int (*hmat_get_extended_linear_cache_size)(struct resource *backing_res,
						   int nid,
						   resource_size_t *cache_size);
	/* invoke @fn for each mocked hmem resource under @host */
	int (*walk_hmem_resources)(struct device *host, walk_hmem_fn fn);
	/* stand-ins for region_intersects() checks against mocked ranges */
	int (*region_intersects)(resource_size_t start, size_t size,
				 unsigned long flags, unsigned long desc);
	int (*region_intersects_soft_reserve)(resource_size_t start,
					      size_t size);
};
|
|
|
|
/* Set up / tear down the hmem (dax_hmem) side of the test environment. */
int hmem_test_init(void);
void hmem_test_exit(void);

/* Install / remove a mock-ops table on the global registration list. */
void register_cxl_mock_ops(struct cxl_mock_ops *ops);
void unregister_cxl_mock_ops(struct cxl_mock_ops *ops);

/*
 * Acquire the currently registered ops (or NULL if none), storing a cookie
 * in *@index that must be passed back to put_cxl_mock_ops() to release the
 * reference.  NOTE(review): the get/put index pairing looks like an
 * SRCU-style read-side cookie — confirm against the mock implementation.
 */
struct cxl_mock_ops *get_cxl_mock_ops(int *index);
void put_cxl_mock_ops(int index);
|