Merge tag 'driver-core-7.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/driver-core/driver-core

Pull driver core updates from Danilo Krummrich:
 "debugfs:
   - Fix NULL pointer dereference in debugfs_create_str()
   - Fix misplaced EXPORT_SYMBOL_GPL for debugfs_create_str()
   - Fix soundwire debugfs NULL pointer dereference from uninitialized
     firmware_file

  device property:
   - Make fwnode flags modifications thread safe; widen the field to
     unsigned long and use set_bit() / clear_bit() based accessors
   - Document how to check for the property presence

  devres:
   - Separate struct devres_node from its "subclasses" (struct devres,
     struct devres_group); give struct devres_node its own release and
     free callbacks for per-type dispatch
   - Introduce struct devres_action for devres actions, avoiding the
     ARCH_DMA_MINALIGN alignment overhead of struct devres
   - Export struct devres_node and its init/add/remove/dbginfo
     primitives for use by Rust Devres<T>
   - Fix missing node debug info in devm_krealloc()
   - Use guard(spinlock_irqsave) where applicable; consolidate unlock
     paths in devres_release_group()

  driver_override:
   - Convert PCI, WMI, vdpa, s390/cio, s390/ap, and fsl-mc to the
     generic driver_override infrastructure, replacing per-bus
     driver_override strings, sysfs attributes, and match logic; fixes a
     potential UAF from unsynchronized access to driver_override in bus
     match() callbacks
   - Simplify __device_set_driver_override() logic

  kernfs:
   - Send IN_DELETE_SELF and IN_IGNORED inotify events on kernfs file
     and directory removal
   - Add corresponding selftests for memcg

  platform:
   - Allow attaching software nodes when creating platform devices via a
     new 'swnode' field in struct platform_device_info
   - Add kerneldoc for struct platform_device_info

  software node:
   - Move software node initialization from postcore_initcall() to
     driver_init(), making it available early in the boot process
   - Move kernel_kobj initialization (ksysfs_init) earlier to support
     the above
   - Remove software_node_exit(); dead code in a built-in unit

  SoC:
   - Introduce of_machine_read_compatible() and of_machine_read_model()
     OF helpers and export soc_attr_read_machine() to replace direct
     accesses to of_root from SoC drivers; also enables
     CONFIG_COMPILE_TEST coverage for these drivers

  sysfs:
   - Constify attribute group array pointers to
     'const struct attribute_group *const *' in sysfs functions,
     device_add_groups() / device_remove_groups(), and struct class

  Rust:
   - Devres:
      - Embed struct devres_node directly in Devres<T> instead of going
        through devm_add_action(), avoiding the extra allocation and the
        unnecessary ARCH_DMA_MINALIGN alignment

   - I/O:
      - Turn IoCapable from a marker trait into a functional trait
        carrying the raw I/O accessor implementation (io_read /
        io_write), providing working defaults for the per-type Io
        methods
      - Add RelaxedMmio wrapper type, making relaxed accessors usable in
        code generic over the Io trait
      - Remove overloaded per-type Io methods and per-backend macros
        from Mmio and PCI ConfigSpace

   - I/O (Register):
      - Add IoLoc trait and generic read/write/update methods to the Io
        trait, making I/O operations parameterizable by typed locations
      - Add register! macro for defining hardware register types with
        typed bitfield accessors backed by Bounded values; supports
        direct, relative, and array register addressing
      - Add write_reg() / try_write_reg() and LocatedRegister trait
      - Update PCI sample driver to demonstrate the register! macro

         Example:

         ```
             register! {
                 /// UART control register.
                 CTRL(u32) @ 0x18 {
                     /// Receiver enable.
                     19:19   rx_enable => bool;
                     /// Parity configuration.
                     14:13   parity ?=> Parity;
                 }

                 /// FIFO watermark and counter register.
                 WATER(u32) @ 0x2c {
                     /// Number of datawords in the receive FIFO.
                     26:24   rx_count;
                     /// RX interrupt threshold.
                     17:16   rx_water;
                 }
             }

             impl WATER {
                 fn rx_above_watermark(&self) -> bool {
                     self.rx_count() > self.rx_water()
                 }
             }

             fn init(bar: &pci::Bar<BAR0_SIZE>) {
                 let water = WATER::zeroed()
                     .with_const_rx_water::<1>(); // > 3 would not compile
                 bar.write_reg(water);

                 let ctrl = CTRL::zeroed()
                     .with_parity(Parity::Even)
                     .with_rx_enable(true);
                 bar.write_reg(ctrl);
             }

             fn handle_rx(bar: &pci::Bar<BAR0_SIZE>) {
                 if bar.read(WATER).rx_above_watermark() {
                     // drain the FIFO
                 }
             }

             fn set_parity(bar: &pci::Bar<BAR0_SIZE>, parity: Parity) {
                 bar.update(CTRL, |r| r.with_parity(parity));
             }
         ```

   - IRQ:
      - Move 'static bounds from where clauses to trait declarations for
        IRQ handler traits

   - Misc:
      - Enable the generic_arg_infer Rust feature
      - Extend Bounded with shift operations, single-bit bool
        conversion, and const get()

  Misc:
   - Make deferred_probe_timeout default a Kconfig option
   - Drop auxiliary_dev_pm_ops; the PM core falls back to driver PM
     callbacks when no bus type PM ops are set
   - Add conditional guard support for device_lock()
   - Add ksysfs.c to the DRIVER CORE MAINTAINERS entry
   - Fix kernel-doc warnings in base.h
   - Fix stale reference to memory_block_add_nid() in documentation"

* tag 'driver-core-7.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/driver-core/driver-core: (67 commits)
  bus: fsl-mc: use generic driver_override infrastructure
  s390/ap: use generic driver_override infrastructure
  s390/cio: use generic driver_override infrastructure
  vdpa: use generic driver_override infrastructure
  platform/wmi: use generic driver_override infrastructure
  PCI: use generic driver_override infrastructure
  driver core: make software nodes available earlier
  software node: remove software_node_exit()
  kernel: ksysfs: initialize kernel_kobj earlier
  MAINTAINERS: add ksysfs.c to the DRIVER CORE entry
  drivers/base/memory: fix stale reference to memory_block_add_nid()
  device property: Document how to check for the property presence
  soundwire: debugfs: initialize firmware_file to empty string
  debugfs: fix placement of EXPORT_SYMBOL_GPL for debugfs_create_str()
  debugfs: check for NULL pointer in debugfs_create_str()
  driver core: Make deferred_probe_timeout default a Kconfig option
  driver core: simplify __device_set_driver_override() clearing logic
  driver core: auxiliary bus: Drop auxiliary_dev_pm_ops
  device property: Make modifications of fwnode "flags" thread safe
  rust: devres: embed struct devres_node directly
  ...
This commit is contained in:
Linus Torvalds
2026-04-13 19:03:11 -07:00
68 changed files with 2847 additions and 1022 deletions

View File

@@ -7807,8 +7807,10 @@ F: include/linux/debugfs.h
F: include/linux/device.h
F: include/linux/fwnode.h
F: include/linux/kobj*
F: include/linux/ksysfs.h
F: include/linux/property.h
F: include/linux/sysfs.h
F: kernel/ksysfs.c
F: lib/kobj*
F: rust/kernel/debugfs.rs
F: rust/kernel/debugfs/

View File

@@ -73,6 +73,15 @@ config DEVTMPFS_SAFE
with the PROT_EXEC flag. This can break, for example, non-KMS
video drivers.
config DRIVER_DEFERRED_PROBE_TIMEOUT
int "Default value for deferred_probe_timeout"
default 0 if !MODULES
default 10 if MODULES
help
Set the default value for the deferred_probe_timeout kernel parameter.
See Documentation/admin-guide/kernel-parameters.txt for a description
of the deferred_probe_timeout kernel parameter.
config STANDALONE
bool "Select only drivers that don't need compile-time external firmware"
default y

View File

@@ -207,11 +207,6 @@ static int auxiliary_uevent(const struct device *dev, struct kobj_uevent_env *en
(int)(p - name), name);
}
static const struct dev_pm_ops auxiliary_dev_pm_ops = {
SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume)
};
static int auxiliary_bus_probe(struct device *dev)
{
const struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
@@ -258,7 +253,6 @@ static const struct bus_type auxiliary_bus_type = {
.shutdown = auxiliary_bus_shutdown,
.match = auxiliary_match,
.uevent = auxiliary_uevent,
.pm = &auxiliary_dev_pm_ops,
};
/**

View File

@@ -13,25 +13,26 @@
#include <linux/notifier.h>
/**
* struct subsys_private - structure to hold the private to the driver core portions of the bus_type/class structure.
*
* @subsys - the struct kset that defines this subsystem
* @devices_kset - the subsystem's 'devices' directory
* @interfaces - list of subsystem interfaces associated
* @mutex - protect the devices, and interfaces lists.
*
* @drivers_kset - the list of drivers associated
* @klist_devices - the klist to iterate over the @devices_kset
* @klist_drivers - the klist to iterate over the @drivers_kset
* @bus_notifier - the bus notifier list for anything that cares about things
* struct subsys_private - structure to hold the private to the driver core
* portions of the bus_type/class structure.
* @subsys: the struct kset that defines this subsystem
* @devices_kset: the subsystem's 'devices' directory
* @interfaces: list of subsystem interfaces associated
* @mutex: protect the devices, and interfaces lists.
* @drivers_kset: the list of drivers associated
* @klist_devices: the klist to iterate over the @devices_kset
* @klist_drivers: the klist to iterate over the @drivers_kset
* @bus_notifier: the bus notifier list for anything that cares about things
* on this bus.
* @bus - pointer back to the struct bus_type that this structure is associated
* @drivers_autoprobe: gate whether new devices are automatically attached to
* registered drivers, or new drivers automatically attach
* to existing devices.
* @bus: pointer back to the struct bus_type that this structure is associated
* with.
* @dev_root: Default device to use as the parent.
*
* @glue_dirs - "glue" directory to put in-between the parent device to
* @glue_dirs: "glue" directory to put in-between the parent device to
* avoid namespace conflicts
* @class - pointer back to the struct class that this structure is associated
* @class: pointer back to the struct class that this structure is associated
* with.
* @lock_key: Lock class key for use by the lock validator
*
@@ -98,22 +99,24 @@ struct driver_type {
#endif
/**
* struct device_private - structure to hold the private to the driver core portions of the device structure.
*
* @klist_children - klist containing all children of this device
* @knode_parent - node in sibling list
* @knode_driver - node in driver list
* @knode_bus - node in bus list
* @knode_class - node in class list
* @deferred_probe - entry in deferred_probe_list which is used to retry the
* binding of drivers which were unable to get all the resources needed by
* the device; typically because it depends on another driver getting
* probed first.
* @async_driver - pointer to device driver awaiting probe via async_probe
* @device - pointer back to the struct device that this structure is
* struct device_private - structure to hold the private to the driver core
* portions of the device structure.
* @klist_children: klist containing all children of this device
* @knode_parent: node in sibling list
* @knode_driver: node in driver list
* @knode_bus: node in bus list
* @knode_class: node in class list
* @deferred_probe: entry in deferred_probe_list which is used to retry the
* binding of drivers which were unable to get all the
* resources needed by the device; typically because it depends
* on another driver getting probed first.
* @async_driver: pointer to device driver awaiting probe via async_probe
* @deferred_probe_reason: capture the -EPROBE_DEFER message emitted with
* dev_err_probe() for later retrieval via debugfs
* @device: pointer back to the struct device that this structure is
* associated with.
* @driver_type - The type of the bound Rust driver.
* @dead - This device is currently either in the process of or has been
* @driver_type: The type of the bound Rust driver.
* @dead: This device is currently either in the process of or has been
* removed from the system. Any asynchronous events scheduled for this
* device should exit without taking any action.
*
@@ -213,6 +216,24 @@ static inline void device_set_driver(struct device *dev, const struct device_dri
WRITE_ONCE(dev->driver, (struct device_driver *)drv);
}
struct devres_node;
typedef void (*dr_node_release_t)(struct device *dev, struct devres_node *node);
typedef void (*dr_node_free_t)(struct devres_node *node);
struct devres_node {
struct list_head entry;
dr_node_release_t release;
dr_node_free_t free_node;
const char *name;
size_t size;
};
void devres_node_init(struct devres_node *node, dr_node_release_t release,
dr_node_free_t free_node);
void devres_node_add(struct device *dev, struct devres_node *node);
bool devres_node_remove(struct device *dev, struct devres_node *node);
void devres_set_node_dbginfo(struct devres_node *node, const char *name,
size_t size);
void devres_for_each_res(struct device *dev, dr_release_t release,
dr_match_t match, void *match_data,
void (*fn)(struct device *, void *, void *),
@@ -291,6 +312,7 @@ static inline int devtmpfs_create_node(struct device *dev) { return 0; }
static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
#endif
void software_node_init(void);
void software_node_notify(struct device *dev);
void software_node_notify_remove(struct device *dev);

View File

@@ -182,7 +182,7 @@ void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
if (fwnode->dev)
return;
fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
fwnode_set_flag(fwnode, FWNODE_FLAG_NOT_DEVICE);
fwnode_links_purge_consumers(fwnode);
fwnode_for_each_available_child_node(fwnode, child)
@@ -228,7 +228,7 @@ static void __fw_devlink_pickup_dangling_consumers(struct fwnode_handle *fwnode,
if (fwnode->dev && fwnode->dev->bus)
return;
fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
fwnode_set_flag(fwnode, FWNODE_FLAG_NOT_DEVICE);
__fwnode_links_move_consumers(fwnode, new_sup);
fwnode_for_each_available_child_node(fwnode, child)
@@ -1012,7 +1012,7 @@ static void device_links_missing_supplier(struct device *dev)
static bool dev_is_best_effort(struct device *dev)
{
return (fw_devlink_best_effort && dev->can_match) ||
(dev->fwnode && (dev->fwnode->flags & FWNODE_FLAG_BEST_EFFORT));
(dev->fwnode && fwnode_test_flag(dev->fwnode, FWNODE_FLAG_BEST_EFFORT));
}
static struct fwnode_handle *fwnode_links_check_suppliers(
@@ -1723,11 +1723,11 @@ bool fw_devlink_is_strict(void)
static void fw_devlink_parse_fwnode(struct fwnode_handle *fwnode)
{
if (fwnode->flags & FWNODE_FLAG_LINKS_ADDED)
if (fwnode_test_flag(fwnode, FWNODE_FLAG_LINKS_ADDED))
return;
fwnode_call_int_op(fwnode, add_links);
fwnode->flags |= FWNODE_FLAG_LINKS_ADDED;
fwnode_set_flag(fwnode, FWNODE_FLAG_LINKS_ADDED);
}
static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
@@ -1885,7 +1885,7 @@ static bool fwnode_init_without_drv(struct fwnode_handle *fwnode)
struct device *dev;
bool ret;
if (!(fwnode->flags & FWNODE_FLAG_INITIALIZED))
if (!fwnode_test_flag(fwnode, FWNODE_FLAG_INITIALIZED))
return false;
dev = get_dev_from_fwnode(fwnode);
@@ -2001,10 +2001,10 @@ static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle,
* We aren't trying to find all cycles. Just a cycle between con and
* sup_handle.
*/
if (sup_handle->flags & FWNODE_FLAG_VISITED)
if (fwnode_test_flag(sup_handle, FWNODE_FLAG_VISITED))
return false;
sup_handle->flags |= FWNODE_FLAG_VISITED;
fwnode_set_flag(sup_handle, FWNODE_FLAG_VISITED);
/* Termination condition. */
if (sup_handle == con_handle) {
@@ -2074,7 +2074,7 @@ static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle,
}
out:
sup_handle->flags &= ~FWNODE_FLAG_VISITED;
fwnode_clear_flag(sup_handle, FWNODE_FLAG_VISITED);
put_device(sup_dev);
put_device(con_dev);
put_device(par_dev);
@@ -2127,7 +2127,7 @@ static int fw_devlink_create_devlink(struct device *con,
* When such a flag is set, we can't create device links where P is the
* supplier of C as that would delay the probe of C.
*/
if (sup_handle->flags & FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD &&
if (fwnode_test_flag(sup_handle, FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD) &&
fwnode_is_ancestor_of(sup_handle, con->fwnode))
return -EINVAL;
@@ -2150,7 +2150,7 @@ static int fw_devlink_create_devlink(struct device *con,
else
flags = FW_DEVLINK_FLAGS_PERMISSIVE;
if (sup_handle->flags & FWNODE_FLAG_NOT_DEVICE)
if (fwnode_test_flag(sup_handle, FWNODE_FLAG_NOT_DEVICE))
sup_dev = fwnode_get_next_parent_dev(sup_handle);
else
sup_dev = get_dev_from_fwnode(sup_handle);
@@ -2162,7 +2162,7 @@ static int fw_devlink_create_devlink(struct device *con,
* supplier device indefinitely.
*/
if (sup_dev->links.status == DL_DEV_NO_DRIVER &&
sup_handle->flags & FWNODE_FLAG_INITIALIZED) {
fwnode_test_flag(sup_handle, FWNODE_FLAG_INITIALIZED)) {
dev_dbg(con,
"Not linking %pfwf - dev might never probe\n",
sup_handle);
@@ -2831,14 +2831,15 @@ static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(removable);
int device_add_groups(struct device *dev, const struct attribute_group **groups)
int device_add_groups(struct device *dev,
const struct attribute_group *const *groups)
{
return sysfs_create_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_add_groups);
void device_remove_groups(struct device *dev,
const struct attribute_group **groups)
const struct attribute_group *const *groups)
{
sysfs_remove_groups(&dev->kobj, groups);
}

View File

@@ -257,11 +257,7 @@ static int deferred_devs_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(deferred_devs);
#ifdef CONFIG_MODULES
static int driver_deferred_probe_timeout = 10;
#else
static int driver_deferred_probe_timeout;
#endif
static int driver_deferred_probe_timeout = CONFIG_DRIVER_DEFERRED_PROBE_TIMEOUT;
static int __init deferred_probe_timeout_setup(char *str)
{
@@ -383,8 +379,7 @@ __exitcall(deferred_probe_exit);
int __device_set_driver_override(struct device *dev, const char *s, size_t len)
{
const char *new, *old;
char *cp;
const char *new = NULL, *old;
if (!s)
return -EINVAL;
@@ -404,37 +399,30 @@ int __device_set_driver_override(struct device *dev, const char *s, size_t len)
*/
len = strlen(s);
if (!len) {
/* Empty string passed - clear override */
spin_lock(&dev->driver_override.lock);
old = dev->driver_override.name;
dev->driver_override.name = NULL;
spin_unlock(&dev->driver_override.lock);
kfree(old);
return 0;
}
/* Handle trailing newline */
if (len) {
char *cp;
cp = strnchr(s, len, '\n');
if (cp)
len = cp - s;
}
/*
* If empty string or "\n" passed, new remains NULL, clearing
* the driver_override.name.
*/
if (len) {
new = kstrndup(s, len, GFP_KERNEL);
if (!new)
return -ENOMEM;
spin_lock(&dev->driver_override.lock);
old = dev->driver_override.name;
if (cp != s) {
dev->driver_override.name = new;
spin_unlock(&dev->driver_override.lock);
} else {
/* "\n" passed - clear override */
dev->driver_override.name = NULL;
spin_unlock(&dev->driver_override.lock);
kfree(new);
}
scoped_guard(spinlock, &dev->driver_override.lock) {
old = dev->driver_override.name;
dev->driver_override.name = new;
}
kfree(old);
return 0;

View File

@@ -16,15 +16,9 @@
#include "base.h"
#include "trace.h"
struct devres_node {
struct list_head entry;
dr_release_t release;
const char *name;
size_t size;
};
struct devres {
struct devres_node node;
dr_release_t release;
/*
* Some archs want to perform DMA into kmalloc caches
* and need a guaranteed alignment larger than
@@ -42,7 +36,21 @@ struct devres_group {
/* -- 8 pointers */
};
static void set_node_dbginfo(struct devres_node *node, const char *name,
void devres_node_init(struct devres_node *node,
dr_node_release_t release,
dr_node_free_t free_node)
{
INIT_LIST_HEAD(&node->entry);
node->release = release;
node->free_node = free_node;
}
static inline void free_node(struct devres_node *node)
{
node->free_node(node);
}
void devres_set_node_dbginfo(struct devres_node *node, const char *name,
size_t size)
{
node->name = name;
@@ -75,12 +83,12 @@ static void devres_log(struct device *dev, struct devres_node *node,
* Release functions for devres group. These callbacks are used only
* for identification.
*/
static void group_open_release(struct device *dev, void *res)
static void group_open_release(struct device *dev, struct devres_node *node)
{
/* noop */
}
static void group_close_release(struct device *dev, void *res)
static void group_close_release(struct device *dev, struct devres_node *node)
{
/* noop */
}
@@ -107,6 +115,20 @@ static bool check_dr_size(size_t size, size_t *tot_size)
return true;
}
static void dr_node_release(struct device *dev, struct devres_node *node)
{
struct devres *dr = container_of(node, struct devres, node);
dr->release(dev, dr->data);
}
static void dr_node_free(struct devres_node *node)
{
struct devres *dr = container_of(node, struct devres, node);
kfree(dr);
}
static __always_inline struct devres *alloc_dr(dr_release_t release,
size_t size, gfp_t gfp, int nid)
{
@@ -124,8 +146,8 @@ static __always_inline struct devres *alloc_dr(dr_release_t release,
if (!(gfp & __GFP_ZERO))
memset(dr, 0, offsetof(struct devres, data));
INIT_LIST_HEAD(&dr->node.entry);
dr->node.release = release;
devres_node_init(&dr->node, dr_node_release, dr_node_free);
dr->release = release;
return dr;
}
@@ -167,7 +189,7 @@ void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
if (unlikely(!dr))
return NULL;
set_node_dbginfo(&dr->node, name, size);
devres_set_node_dbginfo(&dr->node, name, size);
return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc_node);
@@ -194,26 +216,31 @@ void devres_for_each_res(struct device *dev, dr_release_t release,
{
struct devres_node *node;
struct devres_node *tmp;
unsigned long flags;
if (!fn)
return;
spin_lock_irqsave(&dev->devres_lock, flags);
guard(spinlock_irqsave)(&dev->devres_lock);
list_for_each_entry_safe_reverse(node, tmp,
&dev->devres_head, entry) {
struct devres *dr = container_of(node, struct devres, node);
if (node->release != release)
if (node->release != dr_node_release)
continue;
if (dr->release != release)
continue;
if (match && !match(dev, dr->data, match_data))
continue;
fn(dev, dr->data, data);
}
spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_for_each_res);
static inline void free_dr(struct devres *dr)
{
free_node(&dr->node);
}
/**
* devres_free - Free device resource data
* @res: Pointer to devres data to free
@@ -226,11 +253,18 @@ void devres_free(void *res)
struct devres *dr = container_of(res, struct devres, data);
BUG_ON(!list_empty(&dr->node.entry));
kfree(dr);
free_dr(dr);
}
}
EXPORT_SYMBOL_GPL(devres_free);
void devres_node_add(struct device *dev, struct devres_node *node)
{
guard(spinlock_irqsave)(&dev->devres_lock);
add_dr(dev, node);
}
/**
* devres_add - Register device resource
* @dev: Device to add resource to
@@ -243,11 +277,8 @@ EXPORT_SYMBOL_GPL(devres_free);
void devres_add(struct device *dev, void *res)
{
struct devres *dr = container_of(res, struct devres, data);
unsigned long flags;
spin_lock_irqsave(&dev->devres_lock, flags);
add_dr(dev, &dr->node);
spin_unlock_irqrestore(&dev->devres_lock, flags);
devres_node_add(dev, &dr->node);
}
EXPORT_SYMBOL_GPL(devres_add);
@@ -259,7 +290,9 @@ static struct devres *find_dr(struct device *dev, dr_release_t release,
list_for_each_entry_reverse(node, &dev->devres_head, entry) {
struct devres *dr = container_of(node, struct devres, node);
if (node->release != release)
if (node->release != dr_node_release)
continue;
if (dr->release != release)
continue;
if (match && !match(dev, dr->data, match_data))
continue;
@@ -287,14 +320,12 @@ void *devres_find(struct device *dev, dr_release_t release,
dr_match_t match, void *match_data)
{
struct devres *dr;
unsigned long flags;
spin_lock_irqsave(&dev->devres_lock, flags);
guard(spinlock_irqsave)(&dev->devres_lock);
dr = find_dr(dev, release, match, match_data);
spin_unlock_irqrestore(&dev->devres_lock, flags);
if (dr)
return dr->data;
return NULL;
}
EXPORT_SYMBOL_GPL(devres_find);
@@ -321,7 +352,7 @@ void *devres_get(struct device *dev, void *new_res,
unsigned long flags;
spin_lock_irqsave(&dev->devres_lock, flags);
dr = find_dr(dev, new_dr->node.release, match, match_data);
dr = find_dr(dev, new_dr->release, match, match_data);
if (!dr) {
add_dr(dev, &new_dr->node);
dr = new_dr;
@@ -334,6 +365,22 @@ void *devres_get(struct device *dev, void *new_res,
}
EXPORT_SYMBOL_GPL(devres_get);
bool devres_node_remove(struct device *dev, struct devres_node *node)
{
struct devres_node *__node;
guard(spinlock_irqsave)(&dev->devres_lock);
list_for_each_entry_reverse(__node, &dev->devres_head, entry) {
if (__node == node) {
list_del_init(&node->entry);
devres_log(dev, node, "REM");
return true;
}
}
return false;
}
/**
* devres_remove - Find a device resource and remove it
* @dev: Device to find resource from
@@ -353,18 +400,15 @@ void *devres_remove(struct device *dev, dr_release_t release,
dr_match_t match, void *match_data)
{
struct devres *dr;
unsigned long flags;
spin_lock_irqsave(&dev->devres_lock, flags);
guard(spinlock_irqsave)(&dev->devres_lock);
dr = find_dr(dev, release, match, match_data);
if (dr) {
list_del_init(&dr->node.entry);
devres_log(dev, &dr->node, "REM");
}
spin_unlock_irqrestore(&dev->devres_lock, flags);
if (dr)
return dr->data;
}
return NULL;
}
EXPORT_SYMBOL_GPL(devres_remove);
@@ -495,15 +539,12 @@ static int remove_nodes(struct device *dev,
static void release_nodes(struct device *dev, struct list_head *todo)
{
struct devres *dr, *tmp;
struct devres_node *node, *tmp;
/* Release. Note that both devres and devres_group are
* handled as devres in the following loop. This is safe.
*/
list_for_each_entry_safe_reverse(dr, tmp, todo, node.entry) {
devres_log(dev, &dr->node, "REL");
dr->node.release(dev, dr->data);
kfree(dr);
list_for_each_entry_safe_reverse(node, tmp, todo, entry) {
devres_log(dev, node, "REL");
node->release(dev, node);
free_node(node);
}
}
@@ -536,6 +577,13 @@ int devres_release_all(struct device *dev)
return cnt;
}
static void devres_group_free(struct devres_node *node)
{
struct devres_group *grp = container_of(node, struct devres_group, node[0]);
kfree(grp);
}
/**
* devres_open_group - Open a new devres group
* @dev: Device to open devres group for
@@ -552,26 +600,21 @@ int devres_release_all(struct device *dev)
void *devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
struct devres_group *grp;
unsigned long flags;
grp = kmalloc_obj(*grp, gfp);
if (unlikely(!grp))
return NULL;
grp->node[0].release = &group_open_release;
grp->node[1].release = &group_close_release;
INIT_LIST_HEAD(&grp->node[0].entry);
INIT_LIST_HEAD(&grp->node[1].entry);
set_node_dbginfo(&grp->node[0], "grp<", 0);
set_node_dbginfo(&grp->node[1], "grp>", 0);
devres_node_init(&grp->node[0], &group_open_release, devres_group_free);
devres_node_init(&grp->node[1], &group_close_release, NULL);
devres_set_node_dbginfo(&grp->node[0], "grp<", 0);
devres_set_node_dbginfo(&grp->node[1], "grp>", 0);
grp->id = grp;
if (id)
grp->id = id;
grp->color = 0;
spin_lock_irqsave(&dev->devres_lock, flags);
add_dr(dev, &grp->node[0]);
spin_unlock_irqrestore(&dev->devres_lock, flags);
devres_node_add(dev, &grp->node[0]);
return grp->id;
}
EXPORT_SYMBOL_GPL(devres_open_group);
@@ -613,17 +656,13 @@ static struct devres_group *find_group(struct device *dev, void *id)
void devres_close_group(struct device *dev, void *id)
{
struct devres_group *grp;
unsigned long flags;
spin_lock_irqsave(&dev->devres_lock, flags);
guard(spinlock_irqsave)(&dev->devres_lock);
grp = find_group(dev, id);
if (grp)
add_dr(dev, &grp->node[1]);
else
WARN_ON(1);
spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_close_group);
@@ -677,7 +716,6 @@ int devres_release_group(struct device *dev, void *id)
int cnt = 0;
spin_lock_irqsave(&dev->devres_lock, flags);
grp = find_group(dev, id);
if (grp) {
struct list_head *first = &grp->node[0].entry;
@@ -687,20 +725,18 @@ int devres_release_group(struct device *dev, void *id)
end = grp->node[1].entry.next;
cnt = remove_nodes(dev, first, end, &todo);
spin_unlock_irqrestore(&dev->devres_lock, flags);
release_nodes(dev, &todo);
} else if (list_empty(&dev->devres_head)) {
/*
* dev is probably dying via devres_release_all(): groups
* have already been removed and are in the process of
* being released - don't touch and don't warn.
*/
spin_unlock_irqrestore(&dev->devres_lock, flags);
} else {
WARN_ON(1);
spin_unlock_irqrestore(&dev->devres_lock, flags);
}
spin_unlock_irqrestore(&dev->devres_lock, flags);
release_nodes(dev, &todo);
return cnt;
}
@@ -716,20 +752,29 @@ struct action_devres {
void (*action)(void *);
};
static int devm_action_match(struct device *dev, void *res, void *p)
{
struct action_devres *devres = res;
struct action_devres *target = p;
struct devres_action {
struct devres_node node;
struct action_devres action;
};
return devres->action == target->action &&
devres->data == target->data;
static int devm_action_match(struct devres_action *devres, struct action_devres *target)
{
return devres->action.action == target->action &&
devres->action.data == target->data;
}
static void devm_action_release(struct device *dev, void *res)
static void devm_action_release(struct device *dev, struct devres_node *node)
{
struct action_devres *devres = res;
struct devres_action *devres = container_of(node, struct devres_action, node);
devres->action(devres->data);
devres->action.action(devres->action.data);
}
static void devm_action_free(struct devres_node *node)
{
struct devres_action *action = container_of(node, struct devres_action, node);
kfree(action);
}
/**
@@ -744,32 +789,71 @@ static void devm_action_release(struct device *dev, void *res)
*/
int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name)
{
struct action_devres *devres;
struct devres_action *devres;
devres = __devres_alloc_node(devm_action_release, sizeof(struct action_devres),
GFP_KERNEL, NUMA_NO_NODE, name);
devres = kzalloc_obj(*devres);
if (!devres)
return -ENOMEM;
devres->data = data;
devres->action = action;
devres_node_init(&devres->node, devm_action_release, devm_action_free);
devres_set_node_dbginfo(&devres->node, name, sizeof(*devres));
devres_add(dev, devres);
devres->action.data = data;
devres->action.action = action;
devres_node_add(dev, &devres->node);
return 0;
}
EXPORT_SYMBOL_GPL(__devm_add_action);
bool devm_is_action_added(struct device *dev, void (*action)(void *), void *data)
static struct devres_action *devres_action_find(struct device *dev,
void (*action)(void *),
void *data)
{
struct action_devres devres = {
struct devres_node *node;
struct action_devres target = {
.data = data,
.action = action,
};
return devres_find(dev, devm_action_release, devm_action_match, &devres);
list_for_each_entry_reverse(node, &dev->devres_head, entry) {
struct devres_action *dr = container_of(node, struct devres_action, node);
if (node->release != devm_action_release)
continue;
if (devm_action_match(dr, &target))
return dr;
}
return NULL;
}
bool devm_is_action_added(struct device *dev, void (*action)(void *), void *data)
{
guard(spinlock_irqsave)(&dev->devres_lock);
return !!devres_action_find(dev, action, data);
}
EXPORT_SYMBOL_GPL(devm_is_action_added);
static struct devres_action *remove_action(struct device *dev,
void (*action)(void *),
void *data)
{
struct devres_action *dr;
guard(spinlock_irqsave)(&dev->devres_lock);
dr = devres_action_find(dev, action, data);
if (!dr)
return ERR_PTR(-ENOENT);
list_del_init(&dr->node.entry);
devres_log(dev, &dr->node, "REM");
return dr;
}
/**
* devm_remove_action_nowarn() - removes previously added custom action
* @dev: Device that owns the action
@@ -794,13 +878,15 @@ int devm_remove_action_nowarn(struct device *dev,
void (*action)(void *),
void *data)
{
struct action_devres devres = {
.data = data,
.action = action,
};
struct devres_action *dr;
return devres_destroy(dev, devm_action_release, devm_action_match,
&devres);
dr = remove_action(dev, action, data);
if (IS_ERR(dr))
return PTR_ERR(dr);
kfree(dr);
return 0;
}
EXPORT_SYMBOL_GPL(devm_remove_action_nowarn);
@@ -816,14 +902,15 @@ EXPORT_SYMBOL_GPL(devm_remove_action_nowarn);
*/
void devm_release_action(struct device *dev, void (*action)(void *), void *data)
{
struct action_devres devres = {
.data = data,
.action = action,
};
struct devres_action *dr;
WARN_ON(devres_release(dev, devm_action_release, devm_action_match,
&devres));
dr = remove_action(dev, action, data);
if (WARN_ON(IS_ERR(dr)))
return;
dr->action.action(dr->action.data);
kfree(dr);
}
EXPORT_SYMBOL_GPL(devm_release_action);
@@ -869,7 +956,7 @@ void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
* This is named devm_kzalloc_release for historical reasons
* The initial implementation did not support kmalloc, only kzalloc
*/
set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
devres_set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
devres_add(dev, dr->data);
return dr->data;
}
@@ -940,6 +1027,8 @@ void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
if (!new_dr)
return NULL;
devres_set_node_dbginfo(&new_dr->node, "devm_krealloc_release", new_size);
/*
* The spinlock protects the linked list against concurrent
* modifications but not the resource itself.
@@ -949,7 +1038,7 @@ void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
old_dr = find_dr(dev, devm_kmalloc_release, devm_kmalloc_match, ptr);
if (!old_dr) {
spin_unlock_irqrestore(&dev->devres_lock, flags);
kfree(new_dr);
free_dr(new_dr);
WARN(1, "Memory chunk not managed or managed by a different device.");
return NULL;
}
@@ -969,7 +1058,7 @@ void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
* list. This is also the reason why we must not use devm_kfree() - the
* links are no longer valid.
*/
kfree(old_dr);
free_dr(old_dr);
return new_dr->data;
}

View File

@@ -34,6 +34,7 @@ void __init driver_init(void)
*/
faux_bus_init();
of_core_init();
software_node_init();
platform_bus_init();
auxiliary_bus_init();
memory_dev_init();

View File

@@ -815,7 +815,7 @@ static int add_memory_block(unsigned long block_id, int nid, unsigned long state
/*
* MEM_ONLINE at this point implies early memory. With NUMA,
* we'll determine the zone when setting the node id via
* memory_block_add_nid(). Memory hotplug updated the zone
* memory_block_add_nid_early(). Memory hotplug updated the zone
* manually when memory onlining/offlining succeeds.
*/
mem->zone = early_node_zone_for_memory_block(mem, NUMA_NO_NODE);

View File

@@ -172,7 +172,7 @@ static const struct cpumask *get_irq_affinity(struct platform_device *dev,
* @num: interrupt number index
* @affinity: optional cpumask pointer to get the affinity of a per-cpu interrupt
*
* Gets an interupt for a platform device. Device drivers should check the
* Gets an interrupt for a platform device. Device drivers should check the
* return value for errors so as to not pass a negative integer value to
* the request_irq() APIs. Optional affinity information is provided in the
* affinity pointer if available, and NULL otherwise.
@@ -843,12 +843,14 @@ EXPORT_SYMBOL_GPL(platform_device_unregister);
*
* Returns &struct platform_device pointer on success, or ERR_PTR() on error.
*/
struct platform_device *platform_device_register_full(
const struct platform_device_info *pdevinfo)
struct platform_device *platform_device_register_full(const struct platform_device_info *pdevinfo)
{
int ret;
struct platform_device *pdev;
if (pdevinfo->swnode && pdevinfo->properties)
return ERR_PTR(-EINVAL);
pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
if (!pdev)
return ERR_PTR(-ENOMEM);
@@ -864,17 +866,19 @@ struct platform_device *platform_device_register_full(
pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
}
ret = platform_device_add_resources(pdev,
pdevinfo->res, pdevinfo->num_res);
ret = platform_device_add_resources(pdev, pdevinfo->res, pdevinfo->num_res);
if (ret)
goto err;
ret = platform_device_add_data(pdev,
pdevinfo->data, pdevinfo->size_data);
ret = platform_device_add_data(pdev, pdevinfo->data, pdevinfo->size_data);
if (ret)
goto err;
if (pdevinfo->properties) {
if (pdevinfo->swnode) {
ret = device_add_software_node(&pdev->dev, pdevinfo->swnode);
if (ret)
goto err;
} else if (pdevinfo->properties) {
ret = device_create_managed_software_node(&pdev->dev,
pdevinfo->properties, NULL);
if (ret)
@@ -898,8 +902,7 @@ EXPORT_SYMBOL_GPL(platform_device_register_full);
* @drv: platform driver structure
* @owner: owning module/driver
*/
int __platform_driver_register(struct platform_driver *drv,
struct module *owner)
int __platform_driver_register(struct platform_driver *drv, struct module *owner)
{
drv->driver.owner = owner;
drv->driver.bus = &platform_bus_type;
@@ -951,7 +954,8 @@ static int is_bound_to_driver(struct device *dev, void *driver)
* a negative error code and with the driver not registered.
*/
int __init_or_module __platform_driver_probe(struct platform_driver *drv,
int (*probe)(struct platform_device *), struct module *module)
int (*probe)(struct platform_device *),
struct module *module)
{
int retval;
@@ -1013,8 +1017,8 @@ EXPORT_SYMBOL_GPL(__platform_driver_probe);
*
* Returns &struct platform_device pointer on success, or ERR_PTR() on error.
*/
struct platform_device * __init_or_module __platform_create_bundle(
struct platform_driver *driver,
struct platform_device * __init_or_module
__platform_create_bundle(struct platform_driver *driver,
int (*probe)(struct platform_device *),
struct resource *res, unsigned int n_res,
const void *data, size_t size, struct module *module)
@@ -1116,9 +1120,8 @@ void platform_unregister_drivers(struct platform_driver * const *drivers,
}
EXPORT_SYMBOL_GPL(platform_unregister_drivers);
static const struct platform_device_id *platform_match_id(
const struct platform_device_id *id,
struct platform_device *pdev)
static const struct platform_device_id *
platform_match_id(const struct platform_device_id *id, struct platform_device *pdev)
{
while (id->name[0]) {
if (strcmp(pdev->name, id->name) == 0) {
@@ -1311,13 +1314,12 @@ static struct attribute *platform_dev_attrs[] = {
NULL,
};
static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a,
int n)
static umode_t platform_dev_attrs_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = container_of(kobj, typeof(*dev), kobj);
if (a == &dev_attr_numa_node.attr &&
dev_to_node(dev) == NUMA_NO_NODE)
if (a == &dev_attr_numa_node.attr && dev_to_node(dev) == NUMA_NO_NODE)
return 0;
return a->mode;
@@ -1329,7 +1331,6 @@ static const struct attribute_group platform_dev_group = {
};
__ATTRIBUTE_GROUPS(platform_dev);
/**
* platform_match - bind platform device to platform driver.
* @dev: device.
@@ -1384,8 +1385,7 @@ static int platform_uevent(const struct device *dev, struct kobj_uevent_env *env
if (rc != -ENODEV)
return rc;
add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
pdev->name);
add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX, pdev->name);
return 0;
}

View File

@@ -38,6 +38,8 @@ EXPORT_SYMBOL_GPL(__dev_fwnode_const);
* @propname: Name of the property
*
* Check if property @propname is present in the device firmware description.
* This function is the unambiguous way to check that given property is present
* in the device firmware description.
*
* Return: true if property @propname is present. Otherwise, returns false.
*/
@@ -52,6 +54,10 @@ EXPORT_SYMBOL_GPL(device_property_present);
* @fwnode: Firmware node whose property to check
* @propname: Name of the property
*
* Check if property @propname is present in the firmware node description.
* This function is the unambiguous way to check that given property is present
* in the firmware node description.
*
* Return: true if property @propname is present. Otherwise, returns false.
*/
bool fwnode_property_present(const struct fwnode_handle *fwnode,
@@ -75,9 +81,9 @@ EXPORT_SYMBOL_GPL(fwnode_property_present);
* @dev: Device whose property is being checked
* @propname: Name of the property
*
* Return if property @propname is true or false in the device firmware description.
* Use device_property_present() to check for the property presence.
*
* Return: true if property @propname is present. Otherwise, returns false.
* Return: if property @propname is true or false in the device firmware description.
*/
bool device_property_read_bool(const struct device *dev, const char *propname)
{
@@ -90,7 +96,9 @@ EXPORT_SYMBOL_GPL(device_property_read_bool);
* @fwnode: Firmware node whose property to check
* @propname: Name of the property
*
* Return if property @propname is true or false in the firmware description.
* Use fwnode_property_present() to check for the property presence.
*
* Return: if property @propname is true or false in the firmware node description.
*/
bool fwnode_property_read_bool(const struct fwnode_handle *fwnode,
const char *propname)

View File

@@ -5,16 +5,16 @@
* Author: Lee Jones <lee.jones@linaro.org> for ST-Ericsson.
*/
#include <linux/sysfs.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/err.h>
#include <linux/glob.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include <linux/sys_soc.h>
static DEFINE_IDA(soc_ida);
@@ -111,17 +111,14 @@ static void soc_release(struct device *dev)
kfree(soc_dev);
}
static void soc_device_get_machine(struct soc_device_attribute *soc_dev_attr)
int soc_attr_read_machine(struct soc_device_attribute *soc_dev_attr)
{
struct device_node *np;
if (soc_dev_attr->machine)
return;
return -EBUSY;
np = of_find_node_by_path("/");
of_property_read_string(np, "model", &soc_dev_attr->machine);
of_node_put(np);
return of_machine_read_model(&soc_dev_attr->machine);
}
EXPORT_SYMBOL_GPL(soc_attr_read_machine);
static struct soc_device_attribute *early_soc_dev_attr;
@@ -131,7 +128,7 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr
const struct attribute_group **soc_attr_groups;
int ret;
soc_device_get_machine(soc_dev_attr);
soc_attr_read_machine(soc_dev_attr);
if (!soc_bus_registered) {
if (early_soc_dev_attr)

View File

@@ -1127,18 +1127,9 @@ void software_node_notify_remove(struct device *dev)
}
}
static int __init software_node_init(void)
void __init software_node_init(void)
{
swnode_kset = kset_create_and_add("software_nodes", NULL, kernel_kobj);
if (!swnode_kset)
return -ENOMEM;
return 0;
pr_err("failed to register software nodes\n");
}
postcore_initcall(software_node_init);
static void __exit software_node_exit(void)
{
ida_destroy(&swnode_root_ids);
kset_unregister(swnode_kset);
}
__exitcall(software_node_exit);

View File

@@ -86,12 +86,16 @@ static int fsl_mc_bus_match(struct device *dev, const struct device_driver *drv)
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
const struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
bool found = false;
int ret;
/* When driver_override is set, only bind to the matching driver */
if (mc_dev->driver_override) {
found = !strcmp(mc_dev->driver_override, mc_drv->driver.name);
ret = device_match_driver_override(dev, drv);
if (ret > 0) {
found = true;
goto out;
}
if (ret == 0)
goto out;
if (!mc_drv->match_id_table)
goto out;
@@ -210,39 +214,8 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(modalias);
static ssize_t driver_override_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
int ret;
if (WARN_ON(dev->bus != &fsl_mc_bus_type))
return -EINVAL;
ret = driver_set_override(dev, &mc_dev->driver_override, buf, count);
if (ret)
return ret;
return count;
}
static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
ssize_t len;
device_lock(dev);
len = sysfs_emit(buf, "%s\n", mc_dev->driver_override);
device_unlock(dev);
return len;
}
static DEVICE_ATTR_RW(driver_override);
static struct attribute *fsl_mc_dev_attrs[] = {
&dev_attr_modalias.attr,
&dev_attr_driver_override.attr,
NULL,
};
@@ -345,6 +318,7 @@ ATTRIBUTE_GROUPS(fsl_mc_bus);
const struct bus_type fsl_mc_bus_type = {
.name = "fsl-mc",
.driver_override = true,
.match = fsl_mc_bus_match,
.uevent = fsl_mc_bus_uevent,
.probe = fsl_mc_probe,
@@ -910,9 +884,6 @@ static struct notifier_block fsl_mc_nb;
*/
void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
{
kfree(mc_dev->driver_override);
mc_dev->driver_override = NULL;
/*
* The device-specific remove callback will get invoked by device_del()
*/

View File

@@ -332,7 +332,7 @@ static int of_weim_notify(struct notifier_block *nb, unsigned long action,
* fw_devlink doesn't skip adding consumers to this
* device.
*/
rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
fwnode_clear_flag(&rd->dn->fwnode, FWNODE_FLAG_NOT_DEVICE);
if (!of_platform_device_create(rd->dn, NULL, &pdev->dev)) {
dev_err(&pdev->dev,
"Failed to create child device '%pOF'\n",

View File

@@ -180,7 +180,7 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
* Clear the flag before adding the device so that fw_devlink
* doesn't skip adding consumers to this device.
*/
rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
fwnode_clear_flag(&rd->dn->fwnode, FWNODE_FLAG_NOT_DEVICE);
client = of_i2c_register_device(adap, rd->dn);
if (IS_ERR(client)) {
dev_err(&adap->dev, "failed to create client for '%pOF'\n",

View File

@@ -294,8 +294,8 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
return -EINVAL;
if (bus->parent && bus->parent->of_node)
bus->parent->of_node->fwnode.flags |=
FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD;
fwnode_set_flag(&bus->parent->of_node->fwnode,
FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD);
WARN(bus->state != MDIOBUS_ALLOCATED &&
bus->state != MDIOBUS_UNREGISTERED,

View File

@@ -434,6 +434,34 @@ bool of_machine_compatible_match(const char *const *compats)
}
EXPORT_SYMBOL(of_machine_compatible_match);
/**
* of_machine_read_compatible - Get the compatible string of this machine
* @compatible: address at which the address of the compatible string will be
* stored
* @index: index of the compatible entry in the list
*
* Returns:
* 0 on success, negative error number on failure.
*/
int of_machine_read_compatible(const char **compatible, unsigned int index)
{
return of_property_read_string_index(of_root, "compatible", index, compatible);
}
EXPORT_SYMBOL_GPL(of_machine_read_compatible);
/**
* of_machine_read_model - Get the model string of this machine
* @model: address at which the address of the model string will be stored
*
* Returns:
* 0 on success, negative error number on failure.
*/
int of_machine_read_model(const char **model)
{
return of_property_read_string(of_root, "model", model);
}
EXPORT_SYMBOL_GPL(of_machine_read_model);
/**
* of_machine_device_match - Test root of device tree against a of_device_id array
* @matches: NULL terminated array of of_device_id match structures to search in
@@ -1915,7 +1943,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
if (name)
of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
if (of_stdout)
of_stdout->fwnode.flags |= FWNODE_FLAG_BEST_EFFORT;
fwnode_set_flag(&of_stdout->fwnode, FWNODE_FLAG_BEST_EFFORT);
}
if (!of_aliases)

View File

@@ -225,7 +225,7 @@ static void __of_attach_node(struct device_node *np)
np->sibling = np->parent->child;
np->parent->child = np;
of_node_clear_flag(np, OF_DETACHED);
np->fwnode.flags |= FWNODE_FLAG_NOT_DEVICE;
fwnode_set_flag(&np->fwnode, FWNODE_FLAG_NOT_DEVICE);
raw_spin_unlock_irqrestore(&devtree_lock, flags);

View File

@@ -742,7 +742,7 @@ static int of_platform_notify(struct notifier_block *nb,
* Clear the flag before adding the device so that fw_devlink
* doesn't skip adding consumers to this device.
*/
rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
fwnode_clear_flag(&rd->dn->fwnode, FWNODE_FLAG_NOT_DEVICE);
/* pdev_parent may be NULL when no bus platform device */
pdev_parent = of_find_device_by_node(parent);
pdev = of_platform_device_create(rd->dn, NULL,

View File

@@ -138,9 +138,11 @@ static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
{
struct pci_dynid *dynid;
const struct pci_device_id *found_id = NULL, *ids;
int ret;
/* When driver_override is set, only bind to the matching driver */
if (dev->driver_override && strcmp(dev->driver_override, drv->name))
ret = device_match_driver_override(&dev->dev, &drv->driver);
if (ret == 0)
return NULL;
/* Look at the dynamic ids first, before the static ones */
@@ -164,7 +166,7 @@ static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
* matching.
*/
if (found_id->override_only) {
if (dev->driver_override)
if (ret > 0)
return found_id;
} else {
return found_id;
@@ -172,7 +174,7 @@ static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
}
/* driver_override will always match, send a dummy id */
if (dev->driver_override)
if (ret > 0)
return &pci_device_id_any;
return NULL;
}
@@ -452,7 +454,7 @@ static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
static inline bool pci_device_can_probe(struct pci_dev *pdev)
{
return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe ||
pdev->driver_override);
device_has_driver_override(&pdev->dev));
}
#else
static inline bool pci_device_can_probe(struct pci_dev *pdev)
@@ -1722,6 +1724,7 @@ static const struct cpumask *pci_device_irq_get_affinity(struct device *dev,
const struct bus_type pci_bus_type = {
.name = "pci",
.driver_override = true,
.match = pci_bus_match,
.uevent = pci_uevent,
.probe = pci_device_probe,

View File

@@ -615,33 +615,6 @@ static ssize_t devspec_show(struct device *dev,
static DEVICE_ATTR_RO(devspec);
#endif
static ssize_t driver_override_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
int ret;
ret = driver_set_override(dev, &pdev->driver_override, buf, count);
if (ret)
return ret;
return count;
}
static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
ssize_t len;
device_lock(dev);
len = sysfs_emit(buf, "%s\n", pdev->driver_override);
device_unlock(dev);
return len;
}
static DEVICE_ATTR_RW(driver_override);
static struct attribute *pci_dev_attrs[] = {
&dev_attr_power_state.attr,
&dev_attr_resource.attr,
@@ -669,7 +642,6 @@ static struct attribute *pci_dev_attrs[] = {
#ifdef CONFIG_OF
&dev_attr_devspec.attr,
#endif
&dev_attr_driver_override.attr,
&dev_attr_ari_enabled.attr,
NULL,
};

View File

@@ -2488,7 +2488,6 @@ static void pci_release_dev(struct device *dev)
pci_release_of_node(pci_dev);
pcibios_release_device(pci_dev);
pci_bus_put(pci_dev->bus);
kfree(pci_dev->driver_override);
bitmap_free(pci_dev->dma_alias_mask);
dev_dbg(dev, "device released\n");
kfree(pci_dev);

View File

@@ -842,39 +842,11 @@ static ssize_t expensive_show(struct device *dev,
}
static DEVICE_ATTR_RO(expensive);
static ssize_t driver_override_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct wmi_device *wdev = to_wmi_device(dev);
ssize_t ret;
device_lock(dev);
ret = sysfs_emit(buf, "%s\n", wdev->driver_override);
device_unlock(dev);
return ret;
}
static ssize_t driver_override_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct wmi_device *wdev = to_wmi_device(dev);
int ret;
ret = driver_set_override(dev, &wdev->driver_override, buf, count);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR_RW(driver_override);
static struct attribute *wmi_attrs[] = {
&dev_attr_modalias.attr,
&dev_attr_guid.attr,
&dev_attr_instance_count.attr,
&dev_attr_expensive.attr,
&dev_attr_driver_override.attr,
NULL
};
ATTRIBUTE_GROUPS(wmi);
@@ -943,7 +915,6 @@ static void wmi_dev_release(struct device *dev)
{
struct wmi_block *wblock = dev_to_wblock(dev);
kfree(wblock->dev.driver_override);
kfree(wblock);
}
@@ -952,10 +923,12 @@ static int wmi_dev_match(struct device *dev, const struct device_driver *driver)
const struct wmi_driver *wmi_driver = to_wmi_driver(driver);
struct wmi_block *wblock = dev_to_wblock(dev);
const struct wmi_device_id *id = wmi_driver->id_table;
int ret;
/* When driver_override is set, only bind to the matching driver */
if (wblock->dev.driver_override)
return !strcmp(wblock->dev.driver_override, driver->name);
ret = device_match_driver_override(dev, driver);
if (ret >= 0)
return ret;
if (id == NULL)
return 0;
@@ -1076,6 +1049,7 @@ static struct class wmi_bus_class = {
static const struct bus_type wmi_bus_type = {
.name = "wmi",
.dev_groups = wmi_groups,
.driver_override = true,
.match = wmi_dev_match,
.uevent = wmi_dev_uevent,
.probe = wmi_dev_probe,

View File

@@ -103,11 +103,6 @@ struct subchannel {
struct work_struct todo_work;
struct schib_config config;
u64 dma_mask;
/*
* Driver name to force a match. Do not set directly, because core
* frees it. Use driver_set_override() to set or clear it.
*/
const char *driver_override;
} __attribute__ ((aligned(8)));
DECLARE_PER_CPU_ALIGNED(struct irb, cio_irb);

View File

@@ -159,7 +159,6 @@ static void css_subchannel_release(struct device *dev)
sch->config.intparm = 0;
cio_commit_config(sch);
kfree(sch->driver_override);
kfree(sch);
}
@@ -323,37 +322,9 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(modalias);
static ssize_t driver_override_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct subchannel *sch = to_subchannel(dev);
int ret;
ret = driver_set_override(dev, &sch->driver_override, buf, count);
if (ret)
return ret;
return count;
}
static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct subchannel *sch = to_subchannel(dev);
ssize_t len;
device_lock(dev);
len = sysfs_emit(buf, "%s\n", sch->driver_override);
device_unlock(dev);
return len;
}
static DEVICE_ATTR_RW(driver_override);
static struct attribute *subch_attrs[] = {
&dev_attr_type.attr,
&dev_attr_modalias.attr,
&dev_attr_driver_override.attr,
NULL,
};
@@ -1356,9 +1327,11 @@ static int css_bus_match(struct device *dev, const struct device_driver *drv)
struct subchannel *sch = to_subchannel(dev);
const struct css_driver *driver = to_cssdriver(drv);
struct css_device_id *id;
int ret;
/* When driver_override is set, only bind to the matching driver */
if (sch->driver_override && strcmp(sch->driver_override, drv->name))
ret = device_match_driver_override(dev, drv);
if (ret == 0)
return 0;
for (id = driver->subchannel_type; id->match_flags; id++) {
@@ -1415,6 +1388,7 @@ static int css_uevent(const struct device *dev, struct kobj_uevent_env *env)
static const struct bus_type css_bus_type = {
.name = "css",
.driver_override = true,
.match = css_bus_match,
.probe = css_probe,
.remove = css_remove,

View File

@@ -859,26 +859,25 @@ static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
static int __ap_revise_reserved(struct device *dev, void *dummy)
{
int rc, card, queue, devres, drvres;
int rc, card, queue, devres, drvres, ovrd;
if (is_queue_dev(dev)) {
struct ap_driver *ap_drv = to_ap_drv(dev->driver);
struct ap_queue *aq = to_ap_queue(dev);
struct ap_device *ap_dev = &aq->ap_dev;
card = AP_QID_CARD(aq->qid);
queue = AP_QID_QUEUE(aq->qid);
if (ap_dev->driver_override) {
if (strcmp(ap_dev->driver_override,
ap_drv->driver.name)) {
ovrd = device_match_driver_override(dev, &ap_drv->driver);
if (ovrd > 0) {
/* override set and matches, nothing to do */
} else if (ovrd == 0) {
pr_debug("reprobing queue=%02x.%04x\n", card, queue);
rc = device_reprobe(dev);
if (rc) {
AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n",
__func__, card, queue);
}
}
} else {
mutex_lock(&ap_attr_mutex);
devres = test_bit_inv(card, ap_perms.apm) &&
@@ -928,7 +927,7 @@ int ap_owned_by_def_drv(int card, int queue)
if (aq) {
const struct device_driver *drv = aq->ap_dev.device.driver;
const struct ap_driver *ap_drv = to_ap_drv(drv);
bool override = !!aq->ap_dev.driver_override;
bool override = device_has_driver_override(&aq->ap_dev.device);
if (override && drv && ap_drv->flags & AP_DRIVER_FLAG_DEFAULT)
rc = 1;
@@ -977,7 +976,7 @@ static int ap_device_probe(struct device *dev)
{
struct ap_device *ap_dev = to_ap_dev(dev);
struct ap_driver *ap_drv = to_ap_drv(dev->driver);
int card, queue, devres, drvres, rc = -ENODEV;
int card, queue, devres, drvres, rc = -ENODEV, ovrd;
if (!get_device(dev))
return rc;
@@ -991,9 +990,10 @@ static int ap_device_probe(struct device *dev)
*/
card = AP_QID_CARD(to_ap_queue(dev)->qid);
queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
if (ap_dev->driver_override) {
if (strcmp(ap_dev->driver_override,
ap_drv->driver.name))
ovrd = device_match_driver_override(dev, &ap_drv->driver);
if (ovrd > 0) {
/* override set and matches, nothing to do */
} else if (ovrd == 0) {
goto out;
} else {
mutex_lock(&ap_attr_mutex);

View File

@@ -166,7 +166,6 @@ void ap_driver_unregister(struct ap_driver *);
struct ap_device {
struct device device;
int device_type; /* AP device type. */
const char *driver_override;
};
#define to_ap_dev(x) container_of((x), struct ap_device, device)

View File

@@ -734,26 +734,14 @@ static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
struct ap_device *ap_dev = &aq->ap_dev;
int rc;
device_lock(dev);
if (ap_dev->driver_override)
rc = sysfs_emit(buf, "%s\n", ap_dev->driver_override);
else
rc = sysfs_emit(buf, "\n");
device_unlock(dev);
return rc;
guard(spinlock)(&dev->driver_override.lock);
return sysfs_emit(buf, "%s\n", dev->driver_override.name ?: "");
}
static ssize_t driver_override_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ap_queue *aq = to_ap_queue(dev);
struct ap_device *ap_dev = &aq->ap_dev;
int rc = -EINVAL;
bool old_value;
@@ -764,13 +752,13 @@ static ssize_t driver_override_store(struct device *dev,
if (ap_apmask_aqmask_in_use)
goto out;
old_value = ap_dev->driver_override ? true : false;
rc = driver_set_override(dev, &ap_dev->driver_override, buf, count);
old_value = device_has_driver_override(dev);
rc = __device_set_driver_override(dev, buf, count);
if (rc)
goto out;
if (old_value && !ap_dev->driver_override)
if (old_value && !device_has_driver_override(dev))
--ap_driver_override_ctr;
else if (!old_value && ap_dev->driver_override)
else if (!old_value && device_has_driver_override(dev))
++ap_driver_override_ctr;
rc = count;

View File

@@ -186,7 +186,6 @@ static int __init fsl_guts_init(void)
const struct fsl_soc_data *soc_data;
const struct of_device_id *match;
struct ccsr_guts __iomem *regs;
const char *machine = NULL;
struct device_node *np;
bool little_endian;
u64 soc_uid = 0;
@@ -217,13 +216,9 @@ static int __init fsl_guts_init(void)
if (!soc_dev_attr)
return -ENOMEM;
if (of_property_read_string(of_root, "model", &machine))
of_property_read_string_index(of_root, "compatible", 0, &machine);
if (machine) {
soc_dev_attr->machine = kstrdup(machine, GFP_KERNEL);
if (!soc_dev_attr->machine)
goto err_nomem;
}
ret = soc_attr_read_machine(soc_dev_attr);
if (ret)
of_machine_read_compatible(&soc_dev_attr->machine, 0);
soc_die = fsl_soc_die_match(svr, fsl_soc_die);
if (soc_die) {
@@ -267,7 +262,6 @@ static int __init fsl_guts_init(void)
err_nomem:
ret = -ENOMEM;
err:
kfree(soc_dev_attr->machine);
kfree(soc_dev_attr->family);
kfree(soc_dev_attr->soc_id);
kfree(soc_dev_attr->revision);

View File

@@ -226,7 +226,6 @@ static int imx8m_soc_probe(struct platform_device *pdev)
const struct imx8_soc_data *data;
struct imx8_soc_drvdata *drvdata;
struct device *dev = &pdev->dev;
const struct of_device_id *id;
struct soc_device *soc_dev;
u32 soc_rev = 0;
u64 soc_uid[2] = {0, 0};
@@ -244,15 +243,11 @@ static int imx8m_soc_probe(struct platform_device *pdev)
soc_dev_attr->family = "Freescale i.MX";
ret = of_property_read_string(of_root, "model", &soc_dev_attr->machine);
ret = soc_attr_read_machine(soc_dev_attr);
if (ret)
return ret;
id = of_match_node(imx8_soc_match, of_root);
if (!id)
return -ENODEV;
data = id->data;
data = device_get_match_data(dev);
if (data) {
soc_dev_attr->soc_id = data->name;
ret = imx8m_soc_prepare(pdev, data->ocotp_compatible);
@@ -326,7 +321,7 @@ static int __init imx8_soc_init(void)
int ret;
/* No match means this is non-i.MX8M hardware, do nothing. */
if (!of_match_node(imx8_soc_match, of_root))
if (!of_machine_device_match(imx8_soc_match))
return 0;
ret = platform_driver_register(&imx8m_soc_driver);

View File

@@ -30,7 +30,7 @@ static int imx9_soc_probe(struct platform_device *pdev)
if (!attr)
return -ENOMEM;
err = of_property_read_string(of_root, "model", &attr->machine);
err = soc_attr_read_machine(attr);
if (err)
return dev_err_probe(dev, err, "%s: missing model property\n", __func__);
@@ -89,7 +89,7 @@ static int __init imx9_soc_init(void)
struct platform_device *pdev;
/* No match means it is not an i.MX 9 series SoC, do nothing. */
if (!of_match_node(imx9_soc_match, of_root))
if (!of_machine_device_match(imx9_soc_match))
return 0;
ret = platform_driver_register(&imx9_soc_driver);

View File

@@ -118,7 +118,7 @@ static const char * const sunxi_mbus_platforms[] __initconst = {
static int __init sunxi_mbus_init(void)
{
if (!of_device_compatible_match(of_root, sunxi_mbus_platforms))
if (!of_machine_compatible_match(sunxi_mbus_platforms))
return 0;
bus_register_notifier(&platform_bus_type, &sunxi_mbus_nb);

View File

@@ -358,7 +358,7 @@ void sdw_slave_debugfs_init(struct sdw_slave *slave)
debugfs_create_file("go", 0200, d, slave, &cmd_go_fops);
debugfs_create_file("read_buffer", 0400, d, slave, &read_buffer_fops);
firmware_file = NULL;
if (firmware_file)
debugfs_create_str("firmware_file", 0200, d, &firmware_file);
slave->debugfs = d;
@@ -371,10 +371,15 @@ void sdw_slave_debugfs_exit(struct sdw_slave *slave)
void sdw_debugfs_init(void)
{
if (!firmware_file)
firmware_file = kstrdup("", GFP_KERNEL);
sdw_debugfs_root = debugfs_create_dir("soundwire", NULL);
}
void sdw_debugfs_exit(void)
{
debugfs_remove_recursive(sdw_debugfs_root);
kfree(firmware_file);
firmware_file = NULL;
}

View File

@@ -4943,7 +4943,7 @@ static int of_spi_notify(struct notifier_block *nb, unsigned long action,
* Clear the flag before adding the device so that fw_devlink
* doesn't skip adding consumers to this device.
*/
rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
fwnode_clear_flag(&rd->dn->fwnode, FWNODE_FLAG_NOT_DEVICE);
spi = of_register_spi_device(ctlr, rd->dn);
put_device(&ctlr->dev);

View File

@@ -67,57 +67,20 @@ static void vdpa_dev_remove(struct device *d)
static int vdpa_dev_match(struct device *dev, const struct device_driver *drv)
{
struct vdpa_device *vdev = dev_to_vdpa(dev);
int ret;
/* Check override first, and if set, only use the named driver */
if (vdev->driver_override)
return strcmp(vdev->driver_override, drv->name) == 0;
ret = device_match_driver_override(dev, drv);
if (ret >= 0)
return ret;
/* Currently devices must be supported by all vDPA bus drivers */
return 1;
}
static ssize_t driver_override_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct vdpa_device *vdev = dev_to_vdpa(dev);
int ret;
ret = driver_set_override(dev, &vdev->driver_override, buf, count);
if (ret)
return ret;
return count;
}
static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct vdpa_device *vdev = dev_to_vdpa(dev);
ssize_t len;
device_lock(dev);
len = sysfs_emit(buf, "%s\n", vdev->driver_override);
device_unlock(dev);
return len;
}
static DEVICE_ATTR_RW(driver_override);
static struct attribute *vdpa_dev_attrs[] = {
&dev_attr_driver_override.attr,
NULL,
};
static const struct attribute_group vdpa_dev_group = {
.attrs = vdpa_dev_attrs,
};
__ATTRIBUTE_GROUPS(vdpa_dev);
static const struct bus_type vdpa_bus = {
.name = "vdpa",
.dev_groups = vdpa_dev_groups,
.driver_override = true,
.match = vdpa_dev_match,
.probe = vdpa_dev_probe,
.remove = vdpa_dev_remove,
@@ -132,7 +95,6 @@ static void vdpa_release_dev(struct device *d)
ops->free(vdev);
ida_free(&vdpa_index_ida, vdev->index);
kfree(vdev->driver_override);
kfree(vdev);
}

View File

@@ -424,9 +424,7 @@ static int vfio_fsl_mc_bus_notifier(struct notifier_block *nb,
if (action == BUS_NOTIFY_ADD_DEVICE &&
vdev->mc_dev == mc_cont) {
mc_dev->driver_override = kasprintf(GFP_KERNEL, "%s",
vfio_fsl_mc_ops.name);
if (!mc_dev->driver_override)
if (device_set_driver_override(dev, vfio_fsl_mc_ops.name))
dev_warn(dev, "VFIO_FSL_MC: Setting driver override for device in dprc %s failed\n",
dev_name(&mc_cont->dev));
else

View File

@@ -1987,9 +1987,8 @@ static int vfio_pci_bus_notifier(struct notifier_block *nb,
pdev->is_virtfn && physfn == vdev->pdev) {
pci_info(vdev->pdev, "Captured SR-IOV VF %s driver_override\n",
pci_name(pdev));
pdev->driver_override = kasprintf(GFP_KERNEL, "%s",
vdev->vdev.ops->name);
WARN_ON(!pdev->driver_override);
WARN_ON(device_set_driver_override(&pdev->dev,
vdev->vdev.ops->name));
} else if (action == BUS_NOTIFY_BOUND_DRIVER &&
pdev->is_virtfn && physfn == vdev->pdev) {
struct pci_driver *drv = pci_dev_driver(pdev);

View File

@@ -598,6 +598,8 @@ static int pcistub_seize(struct pci_dev *dev,
return err;
}
static struct pci_driver xen_pcibk_pci_driver;
/* Called when 'bind'. This means we must _NOT_ call pci_reset_function or
* other functions that take the sysfs lock. */
static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id)
@@ -609,8 +611,8 @@ static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id)
match = pcistub_match(dev);
if ((dev->driver_override &&
!strcmp(dev->driver_override, PCISTUB_DRIVER_NAME)) ||
if (device_match_driver_override(&dev->dev,
&xen_pcibk_pci_driver.driver) > 0 ||
match) {
if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL

View File

@@ -1047,7 +1047,6 @@ ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf,
return ret;
}
EXPORT_SYMBOL_GPL(debugfs_create_str);
static ssize_t debugfs_write_file_str(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -1127,7 +1126,7 @@ static const struct file_operations fops_str_wo = {
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
* @value: a pointer to the variable that the file should read to and write
* from.
* from. This pointer and the string it points to must not be %NULL.
*
* This function creates a file in debugfs with the given name that
* contains the value of the variable @value. If the @mode variable is so
@@ -1136,9 +1135,13 @@ static const struct file_operations fops_str_wo = {
void debugfs_create_str(const char *name, umode_t mode,
struct dentry *parent, char **value)
{
if (WARN_ON(!value || !*value))
return;
debugfs_create_mode_unsafe(name, mode, parent, value, &fops_str,
&fops_str_ro, &fops_str_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_str);
static ssize_t read_file_blob(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)

View File

@@ -498,12 +498,14 @@ void kernfs_put_active(struct kernfs_node *kn)
/**
* kernfs_drain - drain kernfs_node
* @kn: kernfs_node to drain
* @drop_supers: Set to true if this function is called with the
* kernfs_supers_rwsem locked.
*
* Drain existing usages and nuke all existing mmaps of @kn. Multiple
* removers may invoke this function concurrently on @kn and all will
* return after draining is complete.
*/
static void kernfs_drain(struct kernfs_node *kn)
static void kernfs_drain(struct kernfs_node *kn, bool drop_supers)
__releases(&kernfs_root(kn)->kernfs_rwsem)
__acquires(&kernfs_root(kn)->kernfs_rwsem)
{
@@ -523,6 +525,8 @@ static void kernfs_drain(struct kernfs_node *kn)
return;
up_write(&root->kernfs_rwsem);
if (drop_supers)
up_read(&root->kernfs_supers_rwsem);
if (kernfs_lockdep(kn)) {
rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
@@ -541,6 +545,8 @@ static void kernfs_drain(struct kernfs_node *kn)
if (kernfs_should_drain_open_files(kn))
kernfs_drain_open_files(kn);
if (drop_supers)
down_read(&root->kernfs_supers_rwsem);
down_write(&root->kernfs_rwsem);
}
@@ -1492,12 +1498,43 @@ void kernfs_show(struct kernfs_node *kn, bool show)
kn->flags |= KERNFS_HIDDEN;
if (kernfs_active(kn))
atomic_add(KN_DEACTIVATED_BIAS, &kn->active);
kernfs_drain(kn);
kernfs_drain(kn, false);
}
up_write(&root->kernfs_rwsem);
}
/*
* This function enables VFS to send fsnotify events for deletions.
* There is gap in this implementation for certain file removals due their
* unique nature in kernfs. Directory removals that trigger file removals occur
* through vfs_rmdir, which shrinks the dcache and emits fsnotify events after
* the rmdir operation; there is no issue here. However kernfs writes to
* particular files (e.g. cgroup.subtree_control) can also cause file removal,
* but vfs_write does not attempt to emit fsnotify events after the write
* operation, even if i_nlink counts are 0. As a usecase for monitoring this
* category of file removals is not known, they are left without having
* IN_DELETE or IN_DELETE_SELF events generated.
* Fanotify recursive monitoring also does not work for kernfs nodes that do not
* have inodes attached, as they are created on-demand in kernfs.
*/
static void kernfs_clear_inode_nlink(struct kernfs_node *kn)
{
struct kernfs_root *root = kernfs_root(kn);
struct kernfs_super_info *info;
lockdep_assert_held_read(&root->kernfs_supers_rwsem);
list_for_each_entry(info, &root->supers, node) {
struct inode *inode = ilookup(info->sb, kernfs_ino(kn));
if (inode) {
clear_nlink(inode);
iput(inode);
}
}
}
static void __kernfs_remove(struct kernfs_node *kn)
{
struct kernfs_node *pos, *parent;
@@ -1506,6 +1543,7 @@ static void __kernfs_remove(struct kernfs_node *kn)
if (!kn)
return;
lockdep_assert_held_read(&kernfs_root(kn)->kernfs_supers_rwsem);
lockdep_assert_held_write(&kernfs_root(kn)->kernfs_rwsem);
/*
@@ -1518,12 +1556,14 @@ static void __kernfs_remove(struct kernfs_node *kn)
pr_debug("kernfs %s: removing\n", kernfs_rcu_name(kn));
/* prevent new usage by marking all nodes removing and deactivating */
down_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
pos = NULL;
while ((pos = kernfs_next_descendant_post(pos, kn))) {
pos->flags |= KERNFS_REMOVING;
if (kernfs_active(pos))
atomic_add(KN_DEACTIVATED_BIAS, &pos->active);
}
up_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
/* deactivate and unlink the subtree node-by-node */
do {
@@ -1537,7 +1577,7 @@ static void __kernfs_remove(struct kernfs_node *kn)
*/
kernfs_get(pos);
kernfs_drain(pos);
kernfs_drain(pos, true);
parent = kernfs_parent(pos);
/*
* kernfs_unlink_sibling() succeeds once per node. Use it
@@ -1547,9 +1587,11 @@ static void __kernfs_remove(struct kernfs_node *kn)
struct kernfs_iattrs *ps_iattr =
parent ? parent->iattr : NULL;
/* update timestamps on the parent */
down_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
kernfs_clear_inode_nlink(pos);
/* update timestamps on the parent */
if (ps_iattr) {
ktime_get_real_ts64(&ps_iattr->ia_ctime);
ps_iattr->ia_mtime = ps_iattr->ia_ctime;
@@ -1578,9 +1620,11 @@ void kernfs_remove(struct kernfs_node *kn)
root = kernfs_root(kn);
down_read(&root->kernfs_supers_rwsem);
down_write(&root->kernfs_rwsem);
__kernfs_remove(kn);
up_write(&root->kernfs_rwsem);
up_read(&root->kernfs_supers_rwsem);
}
/**
@@ -1671,6 +1715,7 @@ bool kernfs_remove_self(struct kernfs_node *kn)
bool ret;
struct kernfs_root *root = kernfs_root(kn);
down_read(&root->kernfs_supers_rwsem);
down_write(&root->kernfs_rwsem);
kernfs_break_active_protection(kn);
@@ -1700,7 +1745,9 @@ bool kernfs_remove_self(struct kernfs_node *kn)
break;
up_write(&root->kernfs_rwsem);
up_read(&root->kernfs_supers_rwsem);
schedule();
down_read(&root->kernfs_supers_rwsem);
down_write(&root->kernfs_rwsem);
}
finish_wait(waitq, &wait);
@@ -1715,6 +1762,7 @@ bool kernfs_remove_self(struct kernfs_node *kn)
kernfs_unbreak_active_protection(kn);
up_write(&root->kernfs_rwsem);
up_read(&root->kernfs_supers_rwsem);
return ret;
}
@@ -1741,6 +1789,7 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
}
root = kernfs_root(parent);
down_read(&root->kernfs_supers_rwsem);
down_write(&root->kernfs_rwsem);
kn = kernfs_find_ns(parent, name, ns);
@@ -1751,6 +1800,7 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
}
up_write(&root->kernfs_rwsem);
up_read(&root->kernfs_supers_rwsem);
if (kn)
return 0;

View File

@@ -177,7 +177,7 @@ static void kernfs_refresh_inode(struct kernfs_node *kn, struct inode *inode)
*/
set_inode_attr(inode, attrs);
if (kernfs_type(kn) == KERNFS_DIR)
if (kernfs_type(kn) == KERNFS_DIR && !(kn->flags & KERNFS_REMOVING))
set_nlink(inode, kn->dir.subdirs + 2);
}

View File

@@ -217,7 +217,7 @@ int sysfs_create_group(struct kobject *kobj,
EXPORT_SYMBOL_GPL(sysfs_create_group);
static int internal_create_groups(struct kobject *kobj, int update,
const struct attribute_group **groups)
const struct attribute_group *const *groups)
{
int error = 0;
int i;
@@ -250,7 +250,7 @@ static int internal_create_groups(struct kobject *kobj, int update,
* Returns 0 on success or error code from sysfs_create_group on failure.
*/
int sysfs_create_groups(struct kobject *kobj,
const struct attribute_group **groups)
const struct attribute_group *const *groups)
{
return internal_create_groups(kobj, 0, groups);
}
@@ -268,7 +268,7 @@ EXPORT_SYMBOL_GPL(sysfs_create_groups);
* Returns 0 on success or error code from sysfs_update_group on failure.
*/
int sysfs_update_groups(struct kobject *kobj,
const struct attribute_group **groups)
const struct attribute_group *const *groups)
{
return internal_create_groups(kobj, 1, groups);
}
@@ -342,7 +342,7 @@ EXPORT_SYMBOL_GPL(sysfs_remove_group);
* If groups is not NULL, remove the specified groups from the kobject.
*/
void sysfs_remove_groups(struct kobject *kobj,
const struct attribute_group **groups)
const struct attribute_group *const *groups)
{
int i;
@@ -613,7 +613,7 @@ EXPORT_SYMBOL_GPL(sysfs_group_change_owner);
* Returns 0 on success or error code on failure.
*/
int sysfs_groups_change_owner(struct kobject *kobj,
const struct attribute_group **groups,
const struct attribute_group *const *groups,
kuid_t kuid, kgid_t kgid)
{
int error = 0, i;

View File

@@ -965,6 +965,7 @@ static inline void device_unlock(struct device *dev)
}
DEFINE_GUARD(device, struct device *, device_lock(_T), device_unlock(_T))
DEFINE_GUARD_COND(device, _intr, device_lock_interruptible(_T), _RET == 0)
static inline void device_lock_assert(struct device *dev)
{
@@ -1185,9 +1186,9 @@ device_create_with_groups(const struct class *cls, struct device *parent, dev_t
void device_destroy(const struct class *cls, dev_t devt);
int __must_check device_add_groups(struct device *dev,
const struct attribute_group **groups);
const struct attribute_group *const *groups);
void device_remove_groups(struct device *dev,
const struct attribute_group **groups);
const struct attribute_group *const *groups);
static inline int __must_check device_add_group(struct device *dev,
const struct attribute_group *grp)

View File

@@ -50,8 +50,8 @@ struct fwnode_handle;
struct class {
const char *name;
const struct attribute_group **class_groups;
const struct attribute_group **dev_groups;
const struct attribute_group *const *class_groups;
const struct attribute_group *const *dev_groups;
int (*dev_uevent)(const struct device *dev, struct kobj_uevent_env *env);
char *(*devnode)(const struct device *dev, umode_t *mode);

View File

@@ -178,9 +178,6 @@ struct fsl_mc_obj_desc {
* @regions: pointer to array of MMIO region entries
* @irqs: pointer to array of pointers to interrupts allocated to this device
* @resource: generic resource associated with this MC object device, if any.
* @driver_override: driver name to force a match; do not set directly,
* because core frees it; use driver_set_override() to
* set or clear it.
*
* Generic device object for MC object devices that are "attached" to a
* MC bus.
@@ -214,7 +211,6 @@ struct fsl_mc_device {
struct fsl_mc_device_irq **irqs;
struct fsl_mc_resource *resource;
struct device_link *consumer_link;
const char *driver_override;
};
#define to_fsl_mc_device(_dev) \

View File

@@ -15,6 +15,7 @@
#define _LINUX_FWNODE_H_
#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/types.h>
@@ -42,12 +43,12 @@ struct device;
* suppliers. Only enforce ordering with suppliers that have
* drivers.
*/
#define FWNODE_FLAG_LINKS_ADDED BIT(0)
#define FWNODE_FLAG_NOT_DEVICE BIT(1)
#define FWNODE_FLAG_INITIALIZED BIT(2)
#define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD BIT(3)
#define FWNODE_FLAG_BEST_EFFORT BIT(4)
#define FWNODE_FLAG_VISITED BIT(5)
#define FWNODE_FLAG_LINKS_ADDED 0
#define FWNODE_FLAG_NOT_DEVICE 1
#define FWNODE_FLAG_INITIALIZED 2
#define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD 3
#define FWNODE_FLAG_BEST_EFFORT 4
#define FWNODE_FLAG_VISITED 5
struct fwnode_handle {
struct fwnode_handle *secondary;
@@ -57,7 +58,7 @@ struct fwnode_handle {
struct device *dev;
struct list_head suppliers;
struct list_head consumers;
u8 flags;
unsigned long flags;
};
/*
@@ -212,16 +213,37 @@ static inline void fwnode_init(struct fwnode_handle *fwnode,
INIT_LIST_HEAD(&fwnode->suppliers);
}
static inline void fwnode_set_flag(struct fwnode_handle *fwnode,
unsigned int bit)
{
set_bit(bit, &fwnode->flags);
}
static inline void fwnode_clear_flag(struct fwnode_handle *fwnode,
unsigned int bit)
{
clear_bit(bit, &fwnode->flags);
}
static inline void fwnode_assign_flag(struct fwnode_handle *fwnode,
unsigned int bit, bool value)
{
assign_bit(bit, &fwnode->flags, value);
}
static inline bool fwnode_test_flag(struct fwnode_handle *fwnode,
unsigned int bit)
{
return test_bit(bit, &fwnode->flags);
}
static inline void fwnode_dev_initialized(struct fwnode_handle *fwnode,
bool initialized)
{
if (IS_ERR_OR_NULL(fwnode))
return;
if (initialized)
fwnode->flags |= FWNODE_FLAG_INITIALIZED;
else
fwnode->flags &= ~FWNODE_FLAG_INITIALIZED;
fwnode_assign_flag(fwnode, FWNODE_FLAG_INITIALIZED, initialized);
}
int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup,

8
include/linux/ksysfs.h Normal file
View File

@@ -0,0 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KSYSFS_H_
#define _KSYSFS_H_
void ksysfs_init(void);
#endif /* _KSYSFS_H_ */

View File

@@ -426,6 +426,9 @@ static inline bool of_machine_is_compatible(const char *compat)
return of_machine_compatible_match(compats);
}
int of_machine_read_compatible(const char **compatible, unsigned int index);
int of_machine_read_model(const char **model);
extern int of_add_property(struct device_node *np, struct property *prop);
extern int of_remove_property(struct device_node *np, struct property *prop);
extern int of_update_property(struct device_node *np, struct property *newprop);
@@ -851,6 +854,17 @@ static inline int of_machine_is_compatible(const char *compat)
return 0;
}
static inline int of_machine_read_compatible(const char **compatible,
unsigned int index)
{
return -ENOSYS;
}
static inline int of_machine_read_model(const char **model)
{
return -ENOSYS;
}
static inline int of_add_property(struct device_node *np, struct property *prop)
{
return 0;

View File

@@ -575,12 +575,6 @@ struct pci_dev {
u8 supported_speeds; /* Supported Link Speeds Vector */
phys_addr_t rom; /* Physical address if not from BAR */
size_t romlen; /* Length if not from BAR */
/*
* Driver name to force a match. Do not set directly, because core
* frees it. Use driver_set_override() to set or clear it.
*/
const char *driver_override;
unsigned long priv_flags; /* Private flags for the PCI driver */
/* These methods index pci_reset_fn_methods[] */

View File

@@ -113,6 +113,41 @@ extern int platform_get_irq_byname_optional(struct platform_device *dev,
const char *name);
extern int platform_add_devices(struct platform_device **, int);
/**
* struct platform_device_info - set of parameters for creating a platform device
* @parent: parent device for the new platform device.
* @fwnode: firmware node associated with the device.
* @of_node_reused: indicates that device tree node associated with the device
* is shared with another device, typically its ancestor. Setting this to
* %true prevents the device from being matched via the OF match table,
* and stops the device core from automatically binding pinctrl
* configuration to avoid disrupting the other device.
* @name: name of the device.
* @id: instance ID of the device. Use %PLATFORM_DEVID_NONE if there is only
* one instance of the device, or %PLATFORM_DEVID_AUTO to let the
* kernel automatically assign a unique instance ID.
* @res: set of resources to attach to the device.
* @num_res: number of entries in @res.
* @data: device-specific data for this platform device.
* @size_data: size of device-specific data.
* @dma_mask: DMA mask for the device.
* @swnode: a secondary software node to be attached to the device. The node
* will be automatically registered and its lifetime tied to the platform
* device if it is not registered yet.
* @properties: a set of software properties for the device. If provided,
* a managed software node will be automatically created and
* assigned to the device. The properties array must be terminated
* with a sentinel entry. Specifying both @properties and @swnode is not
* allowed.
*
* This structure is used to hold information needed to create and register
* a platform device using platform_device_register_full().
*
* platform_device_register_full() makes deep copies of @name, @res, @data and
* @properties, so the caller does not need to keep them after registration.
* If the registration is performed during initialization, these can be marked
* as __initconst.
*/
struct platform_device_info {
struct device *parent;
struct fwnode_handle *fwnode;
@@ -128,6 +163,7 @@ struct platform_device_info {
size_t size_data;
u64 dma_mask;
const struct software_node *swnode;
const struct property_entry *properties;
};
extern struct platform_device *platform_device_register_full(

View File

@@ -37,6 +37,16 @@ void soc_device_unregister(struct soc_device *soc_dev);
*/
struct device *soc_device_to_device(struct soc_device *soc);
/**
* soc_attr_read_machine - retrieve the machine model and store it in
* the soc_device_attribute structure
* @soc_dev_attr: SoC attribute structure to store the model in
*
* Returns:
* 0 on success, negative error number on failure.
*/
int soc_attr_read_machine(struct soc_device_attribute *soc_dev_attr);
#ifdef CONFIG_SOC_BUS
const struct soc_device_attribute *soc_device_match(
const struct soc_device_attribute *matches);

View File

@@ -445,15 +445,15 @@ void sysfs_delete_link(struct kobject *dir, struct kobject *targ,
int __must_check sysfs_create_group(struct kobject *kobj,
const struct attribute_group *grp);
int __must_check sysfs_create_groups(struct kobject *kobj,
const struct attribute_group **groups);
const struct attribute_group *const *groups);
int __must_check sysfs_update_groups(struct kobject *kobj,
const struct attribute_group **groups);
const struct attribute_group *const *groups);
int sysfs_update_group(struct kobject *kobj,
const struct attribute_group *grp);
void sysfs_remove_group(struct kobject *kobj,
const struct attribute_group *grp);
void sysfs_remove_groups(struct kobject *kobj,
const struct attribute_group **groups);
const struct attribute_group *const *groups);
int sysfs_add_file_to_group(struct kobject *kobj,
const struct attribute *attr, const char *group);
void sysfs_remove_file_from_group(struct kobject *kobj,
@@ -486,7 +486,7 @@ int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t kgid);
int sysfs_link_change_owner(struct kobject *kobj, struct kobject *targ,
const char *name, kuid_t kuid, kgid_t kgid);
int sysfs_groups_change_owner(struct kobject *kobj,
const struct attribute_group **groups,
const struct attribute_group *const *groups,
kuid_t kuid, kgid_t kgid);
int sysfs_group_change_owner(struct kobject *kobj,
const struct attribute_group *groups, kuid_t kuid,
@@ -629,13 +629,13 @@ static inline int sysfs_create_group(struct kobject *kobj,
}
static inline int sysfs_create_groups(struct kobject *kobj,
const struct attribute_group **groups)
const struct attribute_group *const *groups)
{
return 0;
}
static inline int sysfs_update_groups(struct kobject *kobj,
const struct attribute_group **groups)
const struct attribute_group *const *groups)
{
return 0;
}
@@ -652,7 +652,7 @@ static inline void sysfs_remove_group(struct kobject *kobj,
}
static inline void sysfs_remove_groups(struct kobject *kobj,
const struct attribute_group **groups)
const struct attribute_group *const *groups)
{
}
@@ -733,7 +733,7 @@ static inline int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t k
}
static inline int sysfs_groups_change_owner(struct kobject *kobj,
const struct attribute_group **groups,
const struct attribute_group *const *groups,
kuid_t kuid, kgid_t kgid)
{
return 0;

View File

@@ -72,9 +72,6 @@ struct vdpa_mgmt_dev;
* struct vdpa_device - representation of a vDPA device
* @dev: underlying device
* @vmap: the metadata passed to upper layer to be used for mapping
* @driver_override: driver name to force a match; do not set directly,
* because core frees it; use driver_set_override() to
* set or clear it.
* @config: the configuration ops for this device.
* @map: the map ops for this device
* @cf_lock: Protects get and set access to configuration layout.
@@ -90,7 +87,6 @@ struct vdpa_mgmt_dev;
struct vdpa_device {
struct device dev;
union virtio_map vmap;
const char *driver_override;
const struct vdpa_config_ops *config;
const struct virtio_map_ops *map;
struct rw_semaphore cf_lock; /* Protects get/set config */

View File

@@ -18,16 +18,12 @@
* struct wmi_device - WMI device structure
* @dev: Device associated with this WMI device
* @setable: True for devices implementing the Set Control Method
* @driver_override: Driver name to force a match; do not set directly,
* because core frees it; use driver_set_override() to
* set or clear it.
*
* This represents WMI devices discovered by the WMI driver core.
*/
struct wmi_device {
struct device dev;
bool setable;
const char *driver_override;
};
/**

View File

@@ -36,6 +36,7 @@
#include <linux/kmod.h>
#include <linux/kprobes.h>
#include <linux/kmsan.h>
#include <linux/ksysfs.h>
#include <linux/vmalloc.h>
#include <linux/kernel_stat.h>
#include <linux/start_kernel.h>
@@ -1480,6 +1481,7 @@ static void __init do_initcalls(void)
static void __init do_basic_setup(void)
{
cpuset_init_smp();
ksysfs_init();
driver_init();
init_irq_proc();
do_ctors();

View File

@@ -8,6 +8,7 @@
#include <asm/byteorder.h>
#include <linux/kobject.h>
#include <linux/ksysfs.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/export.h>
@@ -213,7 +214,7 @@ static const struct attribute_group kernel_attr_group = {
.attrs = kernel_attrs,
};
static int __init ksysfs_init(void)
void __init ksysfs_init(void)
{
int error;
@@ -234,14 +235,12 @@ static int __init ksysfs_init(void)
goto group_exit;
}
return 0;
return;
group_exit:
sysfs_remove_group(kernel_kobj, &kernel_attr_group);
kset_exit:
kobject_put(kernel_kobj);
exit:
return error;
pr_err("failed to initialize the kernel kobject: %d\n", error);
}
core_initcall(ksysfs_init);

View File

@@ -23,9 +23,22 @@ use crate::{
rcu,
Arc, //
},
types::ForeignOwnable,
types::{
ForeignOwnable,
Opaque, //
},
};
/// Inner type that embeds a `struct devres_node` and the `Revocable<T>`.
#[repr(C)]
#[pin_data]
struct Inner<T> {
#[pin]
node: Opaque<bindings::devres_node>,
#[pin]
data: Revocable<T>,
}
/// This abstraction is meant to be used by subsystems to containerize [`Device`] bound resources to
/// manage their lifetime.
///
@@ -111,12 +124,64 @@ use crate::{
/// ```
pub struct Devres<T: Send> {
dev: ARef<Device>,
/// Pointer to [`Self::devres_callback`].
///
/// Has to be stored, since Rust does not guarantee to always return the same address for a
/// function. However, the C API uses the address as a key.
callback: unsafe extern "C" fn(*mut c_void),
data: Arc<Revocable<T>>,
inner: Arc<Inner<T>>,
}
// Calling the FFI functions from the `base` module directly from the `Devres<T>` impl may result in
// them being called directly from driver modules. This happens since the Rust compiler will use
// monomorphisation, so it might happen that functions are instantiated within the calling driver
// module. For now, work around this with `#[inline(never)]` helpers.
//
// TODO: Remove once a more generic solution has been implemented. For instance, we may be able to
// leverage `bindgen` to take care of this depending on whether a symbol is (already) exported.
mod base {
use kernel::{
bindings,
prelude::*, //
};
#[inline(never)]
#[allow(clippy::missing_safety_doc)]
pub(super) unsafe fn devres_node_init(
node: *mut bindings::devres_node,
release: bindings::dr_node_release_t,
free: bindings::dr_node_free_t,
) {
// SAFETY: Safety requirements are the same as `bindings::devres_node_init`.
unsafe { bindings::devres_node_init(node, release, free) }
}
#[inline(never)]
#[allow(clippy::missing_safety_doc)]
pub(super) unsafe fn devres_set_node_dbginfo(
node: *mut bindings::devres_node,
name: *const c_char,
size: usize,
) {
// SAFETY: Safety requirements are the same as `bindings::devres_set_node_dbginfo`.
unsafe { bindings::devres_set_node_dbginfo(node, name, size) }
}
#[inline(never)]
#[allow(clippy::missing_safety_doc)]
pub(super) unsafe fn devres_node_add(
dev: *mut bindings::device,
node: *mut bindings::devres_node,
) {
// SAFETY: Safety requirements are the same as `bindings::devres_node_add`.
unsafe { bindings::devres_node_add(dev, node) }
}
#[must_use]
#[inline(never)]
#[allow(clippy::missing_safety_doc)]
pub(super) unsafe fn devres_node_remove(
dev: *mut bindings::device,
node: *mut bindings::devres_node,
) -> bool {
// SAFETY: Safety requirements are the same as `bindings::devres_node_remove`.
unsafe { bindings::devres_node_remove(dev, node) }
}
}
impl<T: Send> Devres<T> {
@@ -128,58 +193,86 @@ impl<T: Send> Devres<T> {
where
Error: From<E>,
{
let callback = Self::devres_callback;
let data = Arc::pin_init(Revocable::new(data), GFP_KERNEL)?;
let devres_data = data.clone();
let inner = Arc::pin_init::<Error>(
try_pin_init!(Inner {
node <- Opaque::ffi_init(|node: *mut bindings::devres_node| {
// SAFETY: `node` is a valid pointer to an uninitialized `struct devres_node`.
unsafe {
base::devres_node_init(
node,
Some(Self::devres_node_release),
Some(Self::devres_node_free_node),
)
};
// SAFETY: `node` is a valid pointer to an uninitialized `struct devres_node`.
unsafe {
base::devres_set_node_dbginfo(
node,
// TODO: Use `core::any::type_name::<T>()` once it is a `const fn`,
// such that we can convert the `&str` to a `&CStr` at compile-time.
c"Devres<T>".as_char_ptr(),
core::mem::size_of::<Revocable<T>>(),
)
};
}),
data <- Revocable::new(data),
}),
GFP_KERNEL,
)?;
// SAFETY:
// - `dev.as_raw()` is a pointer to a valid bound device.
// - `data` is guaranteed to be a valid for the duration of the lifetime of `Self`.
// - `devm_add_action()` is guaranteed not to call `callback` for the entire lifetime of
// `dev`.
to_result(unsafe {
bindings::devm_add_action(
dev.as_raw(),
Some(callback),
Arc::as_ptr(&data).cast_mut().cast(),
)
})?;
// - `dev` is a valid pointer to a bound `struct device`.
// - `node` is a valid pointer to a `struct devres_node`.
// - `devres_node_add()` is guaranteed not to call `devres_node_release()` for the entire
// lifetime of `dev`.
unsafe { base::devres_node_add(dev.as_raw(), inner.node.get()) };
// `devm_add_action()` was successful and has consumed the reference count.
core::mem::forget(devres_data);
// Take additional reference count for `devres_node_add()`.
core::mem::forget(inner.clone());
Ok(Self {
dev: dev.into(),
callback,
data,
inner,
})
}
fn data(&self) -> &Revocable<T> {
&self.data
&self.inner.data
}
#[allow(clippy::missing_safety_doc)]
unsafe extern "C" fn devres_callback(ptr: *mut kernel::ffi::c_void) {
// SAFETY: In `Self::new` we've passed a valid pointer of `Revocable<T>` to
// `devm_add_action()`, hence `ptr` must be a valid pointer to `Revocable<T>`.
let data = unsafe { Arc::from_raw(ptr.cast::<Revocable<T>>()) };
unsafe extern "C" fn devres_node_release(
_dev: *mut bindings::device,
node: *mut bindings::devres_node,
) {
let node = Opaque::cast_from(node);
data.revoke();
// SAFETY: `node` is in the same allocation as its container.
let inner = unsafe { kernel::container_of!(node, Inner<T>, node) };
// SAFETY: `inner` is a valid `Inner<T>` pointer.
let inner = unsafe { &*inner };
inner.data.revoke();
}
fn remove_action(&self) -> bool {
#[allow(clippy::missing_safety_doc)]
unsafe extern "C" fn devres_node_free_node(node: *mut bindings::devres_node) {
let node = Opaque::cast_from(node);
// SAFETY: `node` is in the same allocation as its container.
let inner = unsafe { kernel::container_of!(node, Inner<T>, node) };
// SAFETY: `inner` points to the entire `Inner<T>` allocation.
drop(unsafe { Arc::from_raw(inner) });
}
fn remove_node(&self) -> bool {
// SAFETY:
// - `self.dev` is a valid `Device`,
// - the `action` and `data` pointers are the exact same ones as given to
// `devm_add_action()` previously,
(unsafe {
bindings::devm_remove_action_nowarn(
self.dev.as_raw(),
Some(self.callback),
core::ptr::from_ref(self.data()).cast_mut().cast(),
)
} == 0)
// - `self.device().as_raw()` is a valid pointer to a bound `struct device`.
// - `self.inner.node.get()` is a valid pointer to a `struct devres_node`.
unsafe { base::devres_node_remove(self.device().as_raw(), self.inner.node.get()) }
}
/// Return a reference of the [`Device`] this [`Devres`] instance has been created with.
@@ -261,12 +354,12 @@ impl<T: Send> Drop for Devres<T> {
// SAFETY: When `drop` runs, it is guaranteed that nobody is accessing the revocable data
// anymore, hence it is safe not to wait for the grace period to finish.
if unsafe { self.data().revoke_nosync() } {
// We revoked `self.data` before the devres action did, hence try to remove it.
if self.remove_action() {
// We revoked `self.data` before devres did, hence try to remove it.
if self.remove_node() {
// SAFETY: In `Self::new` we have taken an additional reference count of `self.data`
// for `devm_add_action()`. Since `remove_action()` was successful, we have to drop
// for `devres_node_add()`. Since `remove_node()` was successful, we have to drop
// this additional reference count.
drop(unsafe { Arc::from_raw(Arc::as_ptr(&self.data)) });
drop(unsafe { Arc::from_raw(Arc::as_ptr(&self.inner)) });
}
}
}

View File

@@ -11,10 +11,14 @@ use crate::{
pub mod mem;
pub mod poll;
pub mod register;
pub mod resource;
pub use crate::register;
pub use resource::Resource;
use register::LocatedRegister;
/// Physical address type.
///
/// This is a type alias to either `u32` or `u64` depending on the config option
@@ -137,177 +141,6 @@ impl<const SIZE: usize> MmioRaw<SIZE> {
#[repr(transparent)]
pub struct Mmio<const SIZE: usize = 0>(MmioRaw<SIZE>);
/// Internal helper macros used to invoke C MMIO read functions.
///
/// This macro is intended to be used by higher-level MMIO access macros (io_define_read) and
/// provides a unified expansion for infallible vs. fallible read semantics. It emits a direct call
/// into the corresponding C helper and performs the required cast to the Rust return type.
///
/// # Parameters
///
/// * `$c_fn` The C function performing the MMIO read.
/// * `$self` The I/O backend object.
/// * `$ty` The type of the value to be read.
/// * `$addr` The MMIO address to read.
///
/// This macro does not perform any validation; all invariants must be upheld by the higher-level
/// abstraction invoking it.
macro_rules! call_mmio_read {
// `infallible` arm: expands to the raw value directly; the invoking abstraction has already
// proven the address in bounds (e.g. via `io_addr_assert`).
(infallible, $c_fn:ident, $self:ident, $type:ty, $addr:expr) => {
// SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
unsafe { bindings::$c_fn($addr as *const c_void) as $type }
};
// `fallible` arm: same access, but wrapped in `Ok(..)` so the surrounding method can return
// `Result` after its own runtime bounds check (e.g. via `io_addr`).
// NOTE(review): `$self` is accepted for interface uniformity but is unused in both arms.
(fallible, $c_fn:ident, $self:ident, $type:ty, $addr:expr) => {{
// SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
Ok(unsafe { bindings::$c_fn($addr as *const c_void) as $type })
}};
}
/// Internal helper macros used to invoke C MMIO write functions.
///
/// This macro is intended to be used by higher-level MMIO access macros (io_define_write) and
/// provides a unified expansion for infallible vs. fallible write semantics. It emits a direct call
/// into the corresponding C helper and performs the required cast to the Rust return type.
///
/// # Parameters
///
/// * `$c_fn` The C function performing the MMIO write.
/// * `$self` The I/O backend object.
/// * `$ty` The type of the written value.
/// * `$addr` The MMIO address to write.
/// * `$value` The value to write.
///
/// This macro does not perform any validation; all invariants must be upheld by the higher-level
/// abstraction invoking it.
macro_rules! call_mmio_write {
// `infallible` arm: performs the write directly; bounds were proven by the invoking
// abstraction (e.g. via `io_addr_assert`).
(infallible, $c_fn:ident, $self:ident, $ty:ty, $addr:expr, $value:expr) => {
// SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
unsafe { bindings::$c_fn($value, $addr as *mut c_void) }
};
// `fallible` arm: same write, followed by `Ok(())` so the surrounding method can return
// `Result` after its own runtime bounds check (e.g. via `io_addr`).
// NOTE(review): `$self` is accepted for interface uniformity but is unused in both arms.
(fallible, $c_fn:ident, $self:ident, $ty:ty, $addr:expr, $value:expr) => {{
// SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
unsafe { bindings::$c_fn($value, $addr as *mut c_void) };
Ok(())
}};
}
/// Generates an accessor method for reading from an I/O backend.
///
/// This macro reduces boilerplate by automatically generating either compile-time bounds-checked
/// (infallible) or runtime bounds-checked (fallible) read methods. It abstracts the address
/// calculation and bounds checking, and delegates the actual I/O read operation to a specified
/// helper macro, making it generic over different I/O backends.
///
/// # Parameters
///
/// * `infallible` / `fallible` - Determines the bounds-checking strategy. `infallible` relies on
/// `IoKnownSize` for compile-time checks and returns the value directly. `fallible` performs
/// runtime checks against `maxsize()` and returns a `Result<T>`.
/// * `$(#[$attr:meta])*` - Optional attributes to apply to the generated method (e.g.,
/// `#[cfg(CONFIG_64BIT)]` or inline directives).
/// * `$vis:vis` - The visibility of the generated method (e.g., `pub`).
/// * `$name:ident` / `$try_name:ident` - The name of the generated method (e.g., `read32`,
/// `try_read8`).
/// * `$call_macro:ident` - The backend-specific helper macro used to emit the actual I/O call
/// (e.g., `call_mmio_read`).
/// * `$c_fn:ident` - The backend-specific C function or identifier to be passed into the
/// `$call_macro`.
/// * `$type_name:ty` - The Rust type of the value being read (e.g., `u8`, `u32`).
#[macro_export]
macro_rules! io_define_read {
// Compile-time-checked variant: `io_addr_assert` rejects out-of-bounds offsets at build
// time, so the generated method returns the value directly with no error path.
(infallible, $(#[$attr:meta])* $vis:vis $name:ident, $call_macro:ident($c_fn:ident) ->
$type_name:ty) => {
/// Read IO data from a given offset known at compile time.
///
/// Bound checks are performed on compile time, hence if the offset is not known at compile
/// time, the build will fail.
$(#[$attr])*
// Always inline to optimize out error path of `io_addr_assert`.
#[inline(always)]
$vis fn $name(&self, offset: usize) -> $type_name {
let addr = self.io_addr_assert::<$type_name>(offset);
// SAFETY: By the type invariant `addr` is a valid address for IO operations.
$call_macro!(infallible, $c_fn, self, $type_name, addr)
}
};
// Runtime-checked variant: `io_addr` validates the offset (plus the access width) against
// `maxsize()` and the generated method returns `Result`.
(fallible, $(#[$attr:meta])* $vis:vis $try_name:ident, $call_macro:ident($c_fn:ident) ->
$type_name:ty) => {
/// Read IO data from a given offset.
///
/// Bound checks are performed on runtime, it fails if the offset (plus the type size) is
/// out of bounds.
$(#[$attr])*
$vis fn $try_name(&self, offset: usize) -> Result<$type_name> {
let addr = self.io_addr::<$type_name>(offset)?;
// SAFETY: By the type invariant `addr` is a valid address for IO operations.
$call_macro!(fallible, $c_fn, self, $type_name, addr)
}
};
}
// Re-export so users of the crate can invoke the `#[macro_export]`ed macro through this module.
pub use io_define_read;
/// Generates an accessor method for writing to an I/O backend.
///
/// This macro reduces boilerplate by automatically generating either compile-time bounds-checked
/// (infallible) or runtime bounds-checked (fallible) write methods. It abstracts the address
/// calculation and bounds checking, and delegates the actual I/O write operation to a specified
/// helper macro, making it generic over different I/O backends.
///
/// # Parameters
///
/// * `infallible` / `fallible` - Determines the bounds-checking strategy. `infallible` relies on
/// `IoKnownSize` for compile-time checks and returns `()`. `fallible` performs runtime checks
/// against `maxsize()` and returns a `Result`.
/// * `$(#[$attr:meta])*` - Optional attributes to apply to the generated method (e.g.,
/// `#[cfg(CONFIG_64BIT)]` or inline directives).
/// * `$vis:vis` - The visibility of the generated method (e.g., `pub`).
/// * `$name:ident` / `$try_name:ident` - The name of the generated method (e.g., `write32`,
/// `try_write8`).
/// * `$call_macro:ident` - The backend-specific helper macro used to emit the actual I/O call
/// (e.g., `call_mmio_write`).
/// * `$c_fn:ident` - The backend-specific C function or identifier to be passed into the
/// `$call_macro`.
/// * `$type_name:ty` - The Rust type of the value being written (e.g., `u8`, `u32`). Note the use
/// of `<-` before the type to denote a write operation.
#[macro_export]
macro_rules! io_define_write {
// Compile-time-checked variant: `io_addr_assert` rejects out-of-bounds offsets at build
// time, so the generated method returns `()` with no error path.
(infallible, $(#[$attr:meta])* $vis:vis $name:ident, $call_macro:ident($c_fn:ident) <-
$type_name:ty) => {
/// Write IO data from a given offset known at compile time.
///
/// Bound checks are performed on compile time, hence if the offset is not known at compile
/// time, the build will fail.
$(#[$attr])*
// Always inline to optimize out error path of `io_addr_assert`.
#[inline(always)]
$vis fn $name(&self, value: $type_name, offset: usize) {
let addr = self.io_addr_assert::<$type_name>(offset);
$call_macro!(infallible, $c_fn, self, $type_name, addr, value);
}
};
// Runtime-checked variant: `io_addr` validates the offset (plus the access width) against
// `maxsize()` and the generated method returns `Result`.
(fallible, $(#[$attr:meta])* $vis:vis $try_name:ident, $call_macro:ident($c_fn:ident) <-
$type_name:ty) => {
/// Write IO data from a given offset.
///
/// Bound checks are performed on runtime, it fails if the offset (plus the type size) is
/// out of bounds.
$(#[$attr])*
$vis fn $try_name(&self, value: $type_name, offset: usize) -> Result {
let addr = self.io_addr::<$type_name>(offset)?;
$call_macro!(fallible, $c_fn, self, $type_name, addr, value)
}
};
}
// Re-export so users of the crate can invoke the `#[macro_export]`ed macro through this module.
pub use io_define_write;
/// Checks whether an access of type `U` at the given `offset`
/// is valid within this region.
#[inline]
@@ -320,14 +153,74 @@ const fn offset_valid<U>(offset: usize, size: usize) -> bool {
}
}
/// Marker trait indicating that an I/O backend supports operations of a certain type.
/// Trait indicating that an I/O backend supports operations of a certain type and providing an
/// implementation for these operations.
///
/// Different I/O backends can implement this trait to expose only the operations they support.
///
/// For example, a PCI configuration space may implement `IoCapable<u8>`, `IoCapable<u16>`,
/// and `IoCapable<u32>`, but not `IoCapable<u64>`, while an MMIO region on a 64-bit
/// system might implement all four.
pub trait IoCapable<T> {}
pub trait IoCapable<T> {
// Implementations supply only the raw, width-specific access primitives; bounds checking is
// entirely the caller's responsibility, as stated in the `# Safety` sections below.
/// Performs an I/O read of type `T` at `address` and returns the result.
///
/// # Safety
///
/// The range `[address..address + size_of::<T>()]` must be within the bounds of `Self`.
unsafe fn io_read(&self, address: usize) -> T;
/// Performs an I/O write of `value` at `address`.
///
/// # Safety
///
/// The range `[address..address + size_of::<T>()]` must be within the bounds of `Self`.
unsafe fn io_write(&self, value: T, address: usize);
}
/// Describes a given I/O location: its offset, width, and type to convert the raw value from and
/// into.
///
/// This trait is the key abstraction allowing [`Io::read`], [`Io::write`], and [`Io::update`] (and
/// their fallible [`try_read`](Io::try_read), [`try_write`](Io::try_write) and
/// [`try_update`](Io::try_update) counterparts) to work uniformly with both raw [`usize`] offsets
/// (for primitive types like [`u32`]) and typed ones (like those generated by the [`register!`]
/// macro).
///
/// An `IoLoc<T>` carries three pieces of information:
///
/// - The offset to access (returned by [`IoLoc::offset`]),
/// - The width of the access (determined by [`IoLoc::IoType`]),
/// - The type `T` in which the raw data is returned or provided.
///
/// `T` and `IoLoc::IoType` may differ: for instance, a typed register has `T` = the register type
/// with its bitfields, and `IoType` = its backing primitive (e.g. `u32`).
pub trait IoLoc<T> {
/// Size ([`u8`], [`u16`], etc) of the I/O performed on the returned [`offset`](IoLoc::offset).
// The two-way `Into<T>`/`From<T>` bound lets the `Io` accessors convert between the
// user-facing type `T` and the primitive moved over the bus in both directions.
type IoType: Into<T> + From<T>;
/// Consumes `self` and returns the offset of this location.
fn offset(self) -> usize;
}
/// Implements [`IoLoc<$ty>`] for [`usize`], allowing [`usize`] to be used as a parameter of
/// [`Io::read`] and [`Io::write`].
macro_rules! impl_usize_ioloc {
($($ty:ty),*) => {
$(
// For a bare offset the access width equals the primitive type itself, so
// `IoType == T` and the `Into`/`From` bounds hold trivially.
impl IoLoc<$ty> for usize {
type IoType = $ty;
#[inline(always)]
fn offset(self) -> usize {
// A plain `usize` is its own offset.
self
}
}
)*
}
}
// Provide the ability to read any primitive type from a [`usize`].
impl_usize_ioloc!(u8, u16, u32, u64);
/// Types implementing this trait (e.g. MMIO BARs or PCI config regions)
/// can perform I/O operations on regions of memory.
@@ -369,146 +262,445 @@ pub trait Io {
/// Fallible 8-bit read with runtime bounds check.
#[inline(always)]
fn try_read8(&self, _offset: usize) -> Result<u8>
fn try_read8(&self, offset: usize) -> Result<u8>
where
Self: IoCapable<u8>,
{
build_error!("Backend does not support fallible 8-bit read")
self.try_read(offset)
}
/// Fallible 16-bit read with runtime bounds check.
#[inline(always)]
fn try_read16(&self, _offset: usize) -> Result<u16>
fn try_read16(&self, offset: usize) -> Result<u16>
where
Self: IoCapable<u16>,
{
build_error!("Backend does not support fallible 16-bit read")
self.try_read(offset)
}
/// Fallible 32-bit read with runtime bounds check.
#[inline(always)]
fn try_read32(&self, _offset: usize) -> Result<u32>
fn try_read32(&self, offset: usize) -> Result<u32>
where
Self: IoCapable<u32>,
{
build_error!("Backend does not support fallible 32-bit read")
self.try_read(offset)
}
/// Fallible 64-bit read with runtime bounds check.
#[inline(always)]
fn try_read64(&self, _offset: usize) -> Result<u64>
fn try_read64(&self, offset: usize) -> Result<u64>
where
Self: IoCapable<u64>,
{
build_error!("Backend does not support fallible 64-bit read")
self.try_read(offset)
}
/// Fallible 8-bit write with runtime bounds check.
#[inline(always)]
fn try_write8(&self, _value: u8, _offset: usize) -> Result
fn try_write8(&self, value: u8, offset: usize) -> Result
where
Self: IoCapable<u8>,
{
build_error!("Backend does not support fallible 8-bit write")
self.try_write(offset, value)
}
/// Fallible 16-bit write with runtime bounds check.
#[inline(always)]
fn try_write16(&self, _value: u16, _offset: usize) -> Result
fn try_write16(&self, value: u16, offset: usize) -> Result
where
Self: IoCapable<u16>,
{
build_error!("Backend does not support fallible 16-bit write")
self.try_write(offset, value)
}
/// Fallible 32-bit write with runtime bounds check.
#[inline(always)]
fn try_write32(&self, _value: u32, _offset: usize) -> Result
fn try_write32(&self, value: u32, offset: usize) -> Result
where
Self: IoCapable<u32>,
{
build_error!("Backend does not support fallible 32-bit write")
self.try_write(offset, value)
}
/// Fallible 64-bit write with runtime bounds check.
#[inline(always)]
fn try_write64(&self, _value: u64, _offset: usize) -> Result
fn try_write64(&self, value: u64, offset: usize) -> Result
where
Self: IoCapable<u64>,
{
build_error!("Backend does not support fallible 64-bit write")
self.try_write(offset, value)
}
/// Infallible 8-bit read with compile-time bounds check.
#[inline(always)]
fn read8(&self, _offset: usize) -> u8
fn read8(&self, offset: usize) -> u8
where
Self: IoKnownSize + IoCapable<u8>,
{
build_error!("Backend does not support infallible 8-bit read")
self.read(offset)
}
/// Infallible 16-bit read with compile-time bounds check.
#[inline(always)]
fn read16(&self, _offset: usize) -> u16
fn read16(&self, offset: usize) -> u16
where
Self: IoKnownSize + IoCapable<u16>,
{
build_error!("Backend does not support infallible 16-bit read")
self.read(offset)
}
/// Infallible 32-bit read with compile-time bounds check.
#[inline(always)]
fn read32(&self, _offset: usize) -> u32
fn read32(&self, offset: usize) -> u32
where
Self: IoKnownSize + IoCapable<u32>,
{
build_error!("Backend does not support infallible 32-bit read")
self.read(offset)
}
/// Infallible 64-bit read with compile-time bounds check.
#[inline(always)]
fn read64(&self, _offset: usize) -> u64
fn read64(&self, offset: usize) -> u64
where
Self: IoKnownSize + IoCapable<u64>,
{
build_error!("Backend does not support infallible 64-bit read")
self.read(offset)
}
/// Infallible 8-bit write with compile-time bounds check.
#[inline(always)]
fn write8(&self, _value: u8, _offset: usize)
fn write8(&self, value: u8, offset: usize)
where
Self: IoKnownSize + IoCapable<u8>,
{
build_error!("Backend does not support infallible 8-bit write")
self.write(offset, value)
}
/// Infallible 16-bit write with compile-time bounds check.
#[inline(always)]
fn write16(&self, _value: u16, _offset: usize)
fn write16(&self, value: u16, offset: usize)
where
Self: IoKnownSize + IoCapable<u16>,
{
build_error!("Backend does not support infallible 16-bit write")
self.write(offset, value)
}
/// Infallible 32-bit write with compile-time bounds check.
#[inline(always)]
fn write32(&self, _value: u32, _offset: usize)
fn write32(&self, value: u32, offset: usize)
where
Self: IoKnownSize + IoCapable<u32>,
{
build_error!("Backend does not support infallible 32-bit write")
self.write(offset, value)
}
/// Infallible 64-bit write with compile-time bounds check.
#[inline(always)]
fn write64(&self, _value: u64, _offset: usize)
fn write64(&self, value: u64, offset: usize)
where
Self: IoKnownSize + IoCapable<u64>,
{
build_error!("Backend does not support infallible 64-bit write")
self.write(offset, value)
}
/// Generic fallible read with runtime bounds check.
///
/// # Examples
///
/// Read a primitive type from an I/O address:
///
/// ```no_run
/// use kernel::io::{
/// Io,
/// Mmio,
/// };
///
/// fn do_reads(io: &Mmio) -> Result {
/// // 32-bit read from address `0x10`.
/// let v: u32 = io.try_read(0x10)?;
///
/// // 8-bit read from address `0xfff`.
/// let v: u8 = io.try_read(0xfff)?;
///
/// Ok(())
/// }
/// ```
#[inline(always)]
fn try_read<T, L>(&self, location: L) -> Result<T>
where
L: IoLoc<T>,
Self: IoCapable<L::IoType>,
{
// `io_addr` performs the runtime bounds check; an out-of-range offset returns an error.
let address = self.io_addr::<L::IoType>(location.offset())?;
// SAFETY: `address` has been validated by `io_addr`.
// The raw `L::IoType` value is converted into the caller-facing `T` via the `Into<T>`
// bound on `IoLoc::IoType`.
Ok(unsafe { self.io_read(address) }.into())
}
/// Generic fallible write with runtime bounds check.
///
/// The offset carried by `location` is validated against `maxsize()` before the access is
/// performed; an out-of-bounds write fails with an error instead of being executed.
///
/// # Examples
///
/// Write a primitive type to an I/O address:
///
/// ```no_run
/// use kernel::io::{
///     Io,
///     Mmio,
/// };
///
/// fn do_writes(io: &Mmio) -> Result {
///     // 32-bit write of value `1` at address `0x10`.
///     io.try_write(0x10, 1u32)?;
///
///     // 8-bit write of value `0xff` at address `0xfff`.
///     io.try_write(0xfff, 0xffu8)?;
///
///     Ok(())
/// }
/// ```
#[inline(always)]
fn try_write<T, L>(&self, location: L, value: T) -> Result
where
    L: IoLoc<T>,
    Self: IoCapable<L::IoType>,
{
    let addr = self.io_addr::<L::IoType>(location.offset())?;
    // SAFETY: `addr` has been validated by `io_addr` above.
    unsafe { self.io_write(value.into(), addr) };
    Ok(())
}
/// Generic fallible write of a fully-located register value.
///
/// # Examples
///
/// Tuples carrying a location and a value can be used with this method:
///
/// ```no_run
/// use kernel::io::{
/// register,
/// Io,
/// Mmio,
/// };
///
/// register! {
/// VERSION(u32) @ 0x100 {
/// 15:8 major;
/// 7:0 minor;
/// }
/// }
///
/// impl VERSION {
/// fn new(major: u8, minor: u8) -> Self {
/// VERSION::zeroed().with_major(major).with_minor(minor)
/// }
/// }
///
/// fn do_write_reg(io: &Mmio) -> Result {
///
/// io.try_write_reg(VERSION::new(1, 0))
/// }
/// ```
#[inline(always)]
fn try_write_reg<T, L, V>(&self, value: V) -> Result
where
L: IoLoc<T>,
V: LocatedRegister<Location = L, Value = T>,
Self: IoCapable<L::IoType>,
{
// Split the fully-located value into its location and raw value, then defer to the
// generic `try_write` path for bounds checking and the actual access.
let (location, value) = value.into_io_op();
self.try_write(location, value)
}
/// Generic fallible update with runtime bounds check.
///
/// Note: this does not perform any synchronization. The caller is responsible for ensuring
/// exclusive access if required.
///
/// # Examples
///
/// Read the u32 value at address `0x10`, increment it, and store the updated value back:
///
/// ```no_run
/// use kernel::io::{
/// Io,
/// Mmio,
/// };
///
/// fn do_update(io: &Mmio<0x1000>) -> Result {
/// io.try_update(0x10, |v: u32| {
/// v + 1
/// })
/// }
/// ```
#[inline(always)]
fn try_update<T, L, F>(&self, location: L, f: F) -> Result
where
L: IoLoc<T>,
Self: IoCapable<L::IoType>,
F: FnOnce(T) -> T,
{
// Read-modify-write performed as two separate bus accesses; per the note above nothing
// prevents a concurrent access in between.
let address = self.io_addr::<L::IoType>(location.offset())?;
// SAFETY: `address` has been validated by `io_addr`.
let value: T = unsafe { self.io_read(address) }.into();
let io_value = f(value).into();
// SAFETY: `address` has been validated by `io_addr`.
unsafe { self.io_write(io_value, address) }
Ok(())
}
/// Generic infallible read with compile-time bounds check.
///
/// # Examples
///
/// Read a primitive type from an I/O address:
///
/// ```no_run
/// use kernel::io::{
/// Io,
/// Mmio,
/// };
///
/// fn do_reads(io: &Mmio<0x1000>) {
/// // 32-bit read from address `0x10`.
/// let v: u32 = io.read(0x10);
///
/// // 8-bit read from the top of the I/O space.
/// let v: u8 = io.read(0xfff);
/// }
/// ```
#[inline(always)]
fn read<T, L>(&self, location: L) -> T
where
L: IoLoc<T>,
Self: IoKnownSize + IoCapable<L::IoType>,
{
// `io_addr_assert` performs the bounds check at build time (enabled by the `IoKnownSize`
// bound), so no runtime error path exists here.
let address = self.io_addr_assert::<L::IoType>(location.offset());
// SAFETY: `address` has been validated by `io_addr_assert`.
unsafe { self.io_read(address) }.into()
}
/// Generic infallible write with compile-time bounds check.
///
/// The offset carried by `location` is checked against the statically known size of the
/// region; an out-of-bounds offset fails the build rather than producing a runtime error.
///
/// # Examples
///
/// Write a primitive type to an I/O address:
///
/// ```no_run
/// use kernel::io::{
///     Io,
///     Mmio,
/// };
///
/// fn do_writes(io: &Mmio<0x1000>) {
///     // 32-bit write of value `1` at address `0x10`.
///     io.write(0x10, 1u32);
///
///     // 8-bit write of value `0xff` at the top of the I/O space.
///     io.write(0xfff, 0xffu8);
/// }
/// ```
#[inline(always)]
fn write<T, L>(&self, location: L, value: T)
where
    L: IoLoc<T>,
    Self: IoKnownSize + IoCapable<L::IoType>,
{
    // Convert the caller-facing value into the primitive actually moved over the bus.
    let io_value: L::IoType = value.into();
    let addr = self.io_addr_assert::<L::IoType>(location.offset());
    // SAFETY: `addr` has been validated by `io_addr_assert` above.
    unsafe { self.io_write(io_value, addr) }
}
/// Generic infallible write of a fully-located register value.
///
/// # Examples
///
/// Tuples carrying a location and a value can be used with this method:
///
/// ```no_run
/// use kernel::io::{
/// register,
/// Io,
/// Mmio,
/// };
///
/// register! {
/// VERSION(u32) @ 0x100 {
/// 15:8 major;
/// 7:0 minor;
/// }
/// }
///
/// impl VERSION {
/// fn new(major: u8, minor: u8) -> Self {
/// VERSION::zeroed().with_major(major).with_minor(minor)
/// }
/// }
///
/// fn do_write_reg(io: &Mmio<0x1000>) {
/// io.write_reg(VERSION::new(1, 0));
/// }
/// ```
#[inline(always)]
fn write_reg<T, L, V>(&self, value: V)
where
L: IoLoc<T>,
V: LocatedRegister<Location = L, Value = T>,
Self: IoKnownSize + IoCapable<L::IoType>,
{
// Split the fully-located value into its location and raw value, then defer to the
// generic `write` path for the compile-time bounds check and the actual access.
let (location, value) = value.into_io_op();
self.write(location, value)
}
/// Generic infallible update with compile-time bounds check.
///
/// Note: this does not perform any synchronization. The caller is responsible for ensuring
/// exclusive access if required.
///
/// # Examples
///
/// Read the u32 value at address `0x10`, increment it, and store the updated value back:
///
/// ```no_run
/// use kernel::io::{
/// Io,
/// Mmio,
/// };
///
/// fn do_update(io: &Mmio<0x1000>) {
/// io.update(0x10, |v: u32| {
/// v + 1
/// })
/// }
/// ```
#[inline(always)]
fn update<T, L, F>(&self, location: L, f: F)
where
L: IoLoc<T>,
Self: IoKnownSize + IoCapable<L::IoType> + Sized,
F: FnOnce(T) -> T,
{
// Read-modify-write performed as two separate bus accesses; per the note above nothing
// prevents a concurrent access in between.
let address = self.io_addr_assert::<L::IoType>(location.offset());
// SAFETY: `address` has been validated by `io_addr_assert`.
let value: T = unsafe { self.io_read(address) }.into();
let io_value = f(value).into();
// SAFETY: `address` has been validated by `io_addr_assert`.
unsafe { self.io_write(io_value, address) }
}
}
@@ -534,14 +726,36 @@ pub trait IoKnownSize: Io {
}
}
// MMIO regions support 8, 16, and 32-bit accesses.
impl<const SIZE: usize> IoCapable<u8> for Mmio<SIZE> {}
impl<const SIZE: usize> IoCapable<u16> for Mmio<SIZE> {}
impl<const SIZE: usize> IoCapable<u32> for Mmio<SIZE> {}
/// Implements [`IoCapable`] on `$mmio` for `$ty` using `$read_fn` and `$write_fn`.
macro_rules! impl_mmio_io_capable {
($mmio:ident, $(#[$attr:meta])* $ty:ty, $read_fn:ident, $write_fn:ident) => {
// `$(#[$attr])*` lets a caller gate the generated impl, e.g. behind a config option.
$(#[$attr])*
impl<const SIZE: usize> IoCapable<$ty> for $mmio<SIZE> {
// Delegate straight to the corresponding C accessor; bounds are the caller's
// responsibility per the `IoCapable` safety contract.
unsafe fn io_read(&self, address: usize) -> $ty {
// SAFETY: By the trait invariant `address` is a valid address for MMIO operations.
unsafe { bindings::$read_fn(address as *const c_void) }
}
unsafe fn io_write(&self, value: $ty, address: usize) {
// SAFETY: By the trait invariant `address` is a valid address for MMIO operations.
unsafe { bindings::$write_fn(value, address as *mut c_void) }
}
}
};
}
// MMIO regions support 8, 16, and 32-bit accesses.
impl_mmio_io_capable!(Mmio, u8, readb, writeb);
impl_mmio_io_capable!(Mmio, u16, readw, writew);
impl_mmio_io_capable!(Mmio, u32, readl, writel);
// MMIO regions on 64-bit systems also support 64-bit accesses.
impl_mmio_io_capable!(
Mmio,
#[cfg(CONFIG_64BIT)]
impl<const SIZE: usize> IoCapable<u64> for Mmio<SIZE> {}
u64,
readq,
writeq
);
impl<const SIZE: usize> Io for Mmio<SIZE> {
/// Returns the base address of this mapping.
@@ -555,46 +769,6 @@ impl<const SIZE: usize> Io for Mmio<SIZE> {
fn maxsize(&self) -> usize {
self.0.maxsize()
}
io_define_read!(fallible, try_read8, call_mmio_read(readb) -> u8);
io_define_read!(fallible, try_read16, call_mmio_read(readw) -> u16);
io_define_read!(fallible, try_read32, call_mmio_read(readl) -> u32);
io_define_read!(
fallible,
#[cfg(CONFIG_64BIT)]
try_read64,
call_mmio_read(readq) -> u64
);
io_define_write!(fallible, try_write8, call_mmio_write(writeb) <- u8);
io_define_write!(fallible, try_write16, call_mmio_write(writew) <- u16);
io_define_write!(fallible, try_write32, call_mmio_write(writel) <- u32);
io_define_write!(
fallible,
#[cfg(CONFIG_64BIT)]
try_write64,
call_mmio_write(writeq) <- u64
);
io_define_read!(infallible, read8, call_mmio_read(readb) -> u8);
io_define_read!(infallible, read16, call_mmio_read(readw) -> u16);
io_define_read!(infallible, read32, call_mmio_read(readl) -> u32);
io_define_read!(
infallible,
#[cfg(CONFIG_64BIT)]
read64,
call_mmio_read(readq) -> u64
);
io_define_write!(infallible, write8, call_mmio_write(writeb) <- u8);
io_define_write!(infallible, write16, call_mmio_write(writew) <- u16);
io_define_write!(infallible, write32, call_mmio_write(writel) <- u32);
io_define_write!(
infallible,
#[cfg(CONFIG_64BIT)]
write64,
call_mmio_write(writeq) <- u64
);
}
impl<const SIZE: usize> IoKnownSize for Mmio<SIZE> {
@@ -612,44 +786,70 @@ impl<const SIZE: usize> Mmio<SIZE> {
// SAFETY: `Mmio` is a transparent wrapper around `MmioRaw`.
unsafe { &*core::ptr::from_ref(raw).cast() }
}
io_define_read!(infallible, pub read8_relaxed, call_mmio_read(readb_relaxed) -> u8);
io_define_read!(infallible, pub read16_relaxed, call_mmio_read(readw_relaxed) -> u16);
io_define_read!(infallible, pub read32_relaxed, call_mmio_read(readl_relaxed) -> u32);
io_define_read!(
infallible,
#[cfg(CONFIG_64BIT)]
pub read64_relaxed,
call_mmio_read(readq_relaxed) -> u64
);
io_define_read!(fallible, pub try_read8_relaxed, call_mmio_read(readb_relaxed) -> u8);
io_define_read!(fallible, pub try_read16_relaxed, call_mmio_read(readw_relaxed) -> u16);
io_define_read!(fallible, pub try_read32_relaxed, call_mmio_read(readl_relaxed) -> u32);
io_define_read!(
fallible,
#[cfg(CONFIG_64BIT)]
pub try_read64_relaxed,
call_mmio_read(readq_relaxed) -> u64
);
io_define_write!(infallible, pub write8_relaxed, call_mmio_write(writeb_relaxed) <- u8);
io_define_write!(infallible, pub write16_relaxed, call_mmio_write(writew_relaxed) <- u16);
io_define_write!(infallible, pub write32_relaxed, call_mmio_write(writel_relaxed) <- u32);
io_define_write!(
infallible,
#[cfg(CONFIG_64BIT)]
pub write64_relaxed,
call_mmio_write(writeq_relaxed) <- u64
);
io_define_write!(fallible, pub try_write8_relaxed, call_mmio_write(writeb_relaxed) <- u8);
io_define_write!(fallible, pub try_write16_relaxed, call_mmio_write(writew_relaxed) <- u16);
io_define_write!(fallible, pub try_write32_relaxed, call_mmio_write(writel_relaxed) <- u32);
io_define_write!(
fallible,
#[cfg(CONFIG_64BIT)]
pub try_write64_relaxed,
call_mmio_write(writeq_relaxed) <- u64
);
}
/// [`Mmio`] wrapper using relaxed accessors.
///
/// This type provides an implementation of [`Io`] that uses relaxed I/O MMIO operands instead of
/// the regular ones.
///
/// See [`Mmio::relaxed`] for a usage example.
#[repr(transparent)]
pub struct RelaxedMmio<const SIZE: usize = 0>(Mmio<SIZE>);
// Base address and size are simply forwarded to the wrapped `Mmio`; the relaxed access
// behavior comes from `RelaxedMmio`'s own `IoCapable` implementations, not from this impl.
impl<const SIZE: usize> Io for RelaxedMmio<SIZE> {
#[inline]
fn addr(&self) -> usize {
self.0.addr()
}
#[inline]
fn maxsize(&self) -> usize {
self.0.maxsize()
}
}
impl<const SIZE: usize> IoKnownSize for RelaxedMmio<SIZE> {
const MIN_SIZE: usize = SIZE;
}
impl<const SIZE: usize> Mmio<SIZE> {
/// Returns a [`RelaxedMmio`] reference that performs relaxed I/O operations.
///
/// Relaxed accessors do not provide ordering guarantees with respect to DMA or memory accesses
/// and can be used when such ordering is not required.
///
/// The returned reference borrows `self`, so relaxed and ordered accesses can be mixed on the
/// same mapping.
///
/// # Examples
///
/// ```no_run
/// use kernel::io::{
/// Io,
/// Mmio,
/// RelaxedMmio,
/// };
///
/// fn do_io(io: &Mmio<0x100>) {
/// // The access is performed using `readl_relaxed` instead of `readl`.
/// let v = io.relaxed().read32(0x10);
/// }
///
/// ```
pub fn relaxed(&self) -> &RelaxedMmio<SIZE> {
// SAFETY: `RelaxedMmio` is `#[repr(transparent)]` over `Mmio`, so `Mmio<SIZE>` and
// `RelaxedMmio<SIZE>` have identical layout.
unsafe { core::mem::transmute(self) }
}
}
// MMIO regions support 8, 16, and 32-bit accesses.
impl_mmio_io_capable!(RelaxedMmio, u8, readb_relaxed, writeb_relaxed);
impl_mmio_io_capable!(RelaxedMmio, u16, readw_relaxed, writew_relaxed);
impl_mmio_io_capable!(RelaxedMmio, u32, readl_relaxed, writel_relaxed);
// MMIO regions on 64-bit systems also support 64-bit accesses.
impl_mmio_io_capable!(
RelaxedMmio,
#[cfg(CONFIG_64BIT)]
u64,
readq_relaxed,
writeq_relaxed
);

View File

@@ -54,6 +54,7 @@ impl<'a> IoRequest<'a> {
/// use kernel::{
/// bindings,
/// device::Core,
/// io::Io,
/// of,
/// platform,
/// };
@@ -78,9 +79,9 @@ impl<'a> IoRequest<'a> {
/// let io = iomem.access(pdev.as_ref())?;
///
/// // Read and write a 32-bit value at `offset`.
/// let data = io.read32_relaxed(offset);
/// let data = io.read32(offset);
///
/// io.write32_relaxed(data, offset);
/// io.write32(data, offset);
///
/// # Ok(SampleDriver)
/// }
@@ -117,6 +118,7 @@ impl<'a> IoRequest<'a> {
/// use kernel::{
/// bindings,
/// device::Core,
/// io::Io,
/// of,
/// platform,
/// };
@@ -141,9 +143,9 @@ impl<'a> IoRequest<'a> {
///
/// let io = iomem.access(pdev.as_ref())?;
///
/// let data = io.try_read32_relaxed(offset)?;
/// let data = io.try_read32(offset)?;
///
/// io.try_write32_relaxed(data, offset)?;
/// io.try_write32(data, offset)?;
///
/// # Ok(SampleDriver)
/// }

1260
rust/kernel/io/register.rs Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -27,7 +27,7 @@ pub enum IrqReturn {
}
/// Callbacks for an IRQ handler.
pub trait Handler: Sync {
pub trait Handler: Sync + 'static {
/// The hard IRQ handler.
///
/// This is executed in interrupt context, hence all corresponding
@@ -45,7 +45,7 @@ impl<T: ?Sized + Handler + Send> Handler for Arc<T> {
}
}
impl<T: ?Sized + Handler, A: Allocator> Handler for Box<T, A> {
impl<T: ?Sized + Handler, A: Allocator + 'static> Handler for Box<T, A> {
fn handle(&self, device: &Device<Bound>) -> IrqReturn {
T::handle(self, device)
}
@@ -181,7 +181,7 @@ impl<'a> IrqRequest<'a> {
///
/// * We own an irq handler whose cookie is a pointer to `Self`.
#[pin_data]
pub struct Registration<T: Handler + 'static> {
pub struct Registration<T: Handler> {
#[pin]
inner: Devres<RegistrationInner>,
@@ -194,7 +194,7 @@ pub struct Registration<T: Handler + 'static> {
_pin: PhantomPinned,
}
impl<T: Handler + 'static> Registration<T> {
impl<T: Handler> Registration<T> {
/// Registers the IRQ handler with the system for the given IRQ number.
pub fn new<'a>(
request: IrqRequest<'a>,
@@ -260,10 +260,7 @@ impl<T: Handler + 'static> Registration<T> {
/// # Safety
///
/// This function should be only used as the callback in `request_irq`.
unsafe extern "C" fn handle_irq_callback<T: Handler + 'static>(
_irq: i32,
ptr: *mut c_void,
) -> c_uint {
unsafe extern "C" fn handle_irq_callback<T: Handler>(_irq: i32, ptr: *mut c_void) -> c_uint {
// SAFETY: `ptr` is a pointer to `Registration<T>` set in `Registration::new`
let registration = unsafe { &*(ptr as *const Registration<T>) };
// SAFETY: The irq callback is removed before the device is unbound, so the fact that the irq
@@ -287,7 +284,7 @@ pub enum ThreadedIrqReturn {
}
/// Callbacks for a threaded IRQ handler.
pub trait ThreadedHandler: Sync {
pub trait ThreadedHandler: Sync + 'static {
/// The hard IRQ handler.
///
/// This is executed in interrupt context, hence all corresponding
@@ -318,7 +315,7 @@ impl<T: ?Sized + ThreadedHandler + Send> ThreadedHandler for Arc<T> {
}
}
impl<T: ?Sized + ThreadedHandler, A: Allocator> ThreadedHandler for Box<T, A> {
impl<T: ?Sized + ThreadedHandler, A: Allocator + 'static> ThreadedHandler for Box<T, A> {
fn handle(&self, device: &Device<Bound>) -> ThreadedIrqReturn {
T::handle(self, device)
}
@@ -401,7 +398,7 @@ impl<T: ?Sized + ThreadedHandler, A: Allocator> ThreadedHandler for Box<T, A> {
///
/// * We own an irq handler whose cookie is a pointer to `Self`.
#[pin_data]
pub struct ThreadedRegistration<T: ThreadedHandler + 'static> {
pub struct ThreadedRegistration<T: ThreadedHandler> {
#[pin]
inner: Devres<RegistrationInner>,
@@ -414,7 +411,7 @@ pub struct ThreadedRegistration<T: ThreadedHandler + 'static> {
_pin: PhantomPinned,
}
impl<T: ThreadedHandler + 'static> ThreadedRegistration<T> {
impl<T: ThreadedHandler> ThreadedRegistration<T> {
/// Registers the IRQ handler with the system for the given IRQ number.
pub fn new<'a>(
request: IrqRequest<'a>,
@@ -481,7 +478,7 @@ impl<T: ThreadedHandler + 'static> ThreadedRegistration<T> {
/// # Safety
///
/// This function should be only used as the callback in `request_threaded_irq`.
unsafe extern "C" fn handle_threaded_irq_callback<T: ThreadedHandler + 'static>(
unsafe extern "C" fn handle_threaded_irq_callback<T: ThreadedHandler>(
_irq: i32,
ptr: *mut c_void,
) -> c_uint {
@@ -497,10 +494,7 @@ unsafe extern "C" fn handle_threaded_irq_callback<T: ThreadedHandler + 'static>(
/// # Safety
///
/// This function should be only used as the callback in `request_threaded_irq`.
unsafe extern "C" fn thread_fn_callback<T: ThreadedHandler + 'static>(
_irq: i32,
ptr: *mut c_void,
) -> c_uint {
unsafe extern "C" fn thread_fn_callback<T: ThreadedHandler>(_irq: i32, ptr: *mut c_void) -> c_uint {
// SAFETY: `ptr` is a pointer to `ThreadedRegistration<T>` set in `ThreadedRegistration::new`
let registration = unsafe { &*(ptr as *const ThreadedRegistration<T>) };
// SAFETY: The irq callback is removed before the device is unbound, so the fact that the irq

View File

@@ -16,6 +16,9 @@
// Please see https://github.com/Rust-for-Linux/linux/issues/2 for details on
// the unstable features in use.
//
// Stable since Rust 1.89.0.
#![feature(generic_arg_infer)]
//
// Expected to become stable.
#![feature(arbitrary_self_types)]
#![feature(derive_coerce_pointee)]

View File

@@ -375,6 +375,9 @@ where
/// Returns the wrapped value as the backing type.
///
/// This is similar to the [`Deref`] implementation, but doesn't enforce the size invariant of
/// the [`Bounded`], which might produce slightly less optimal code.
///
/// # Examples
///
/// ```
@@ -383,8 +386,8 @@ where
/// let v = Bounded::<u32, 4>::new::<7>();
/// assert_eq!(v.get(), 7u32);
/// ```
pub fn get(self) -> T {
*self.deref()
pub const fn get(self) -> T {
self.0
}
/// Increases the number of bits usable for `self`.
@@ -467,6 +470,48 @@ where
// `N` bits, and with the same signedness.
unsafe { Bounded::__new(value) }
}
/// Right-shifts `self` by `SHIFT` and returns the result as a `Bounded<_, RES>`, where `RES >=
/// N - SHIFT`.
///
/// # Examples
///
/// ```
/// use kernel::num::Bounded;
///
/// let v = Bounded::<u32, 16>::new::<0xff00>();
/// let v_shifted: Bounded::<u32, 8> = v.shr::<8, _>();
///
/// assert_eq!(v_shifted.get(), 0xff);
/// ```
pub fn shr<const SHIFT: u32, const RES: u32>(self) -> Bounded<T, RES> {
    // Compile-time check that the destination width is large enough. Written as
    // `RES + SHIFT >= N` (rather than `RES >= N - SHIFT`) so the unsigned
    // subtraction cannot underflow when `SHIFT > N`.
    const { assert!(RES + SHIFT >= N) }
    // SAFETY: We shift the value right by `SHIFT`, reducing the number of bits needed to
    // represent the shifted value by as much, and just asserted that `RES >= N - SHIFT`.
    unsafe { Bounded::__new(self.0 >> SHIFT) }
}
/// Left-shifts `self` by `SHIFT` and returns the result as a `Bounded<_, RES>`, where `RES >=
/// N + SHIFT`.
///
/// # Examples
///
/// ```
/// use kernel::num::Bounded;
///
/// let v = Bounded::<u32, 8>::new::<0xff>();
/// let v_shifted: Bounded::<u32, 16> = v.shl::<8, _>();
///
/// assert_eq!(v_shifted.get(), 0xff00);
/// ```
pub fn shl<const SHIFT: u32, const RES: u32>(self) -> Bounded<T, RES> {
    // Compile-time check that the destination type has room for the `SHIFT`
    // extra bits the left shift introduces.
    const { assert!(RES >= N + SHIFT) }
    // SAFETY: We shift the value left by `SHIFT`, augmenting the number of bits needed to
    // represent the shifted value by as much, and just asserted that `RES >= N + SHIFT`.
    unsafe { Bounded::__new(self.0 << SHIFT) }
}
}
impl<T, const N: u32> Deref for Bounded<T, N>
@@ -1053,3 +1098,24 @@ where
unsafe { Self::__new(T::from(value)) }
}
}
impl<T> Bounded<T, 1>
where
    T: Integer + Zeroable,
{
    /// Converts this [`Bounded`] into a [`bool`].
    ///
    /// Equivalent to `bool::from(self)`, but shorter to write at call sites.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::num::Bounded;
    ///
    /// assert_eq!(Bounded::<u8, 1>::new::<0>().into_bool(), false);
    /// assert_eq!(Bounded::<u8, 1>::new::<1>().into_bool(), true);
    /// ```
    pub fn into_bool(self) -> bool {
        bool::from(self)
    }
}

View File

@@ -8,8 +8,6 @@ use crate::{
device,
devres::Devres,
io::{
io_define_read,
io_define_write,
Io,
IoCapable,
IoKnownSize,
@@ -85,67 +83,41 @@ pub struct ConfigSpace<'a, S: ConfigSpaceKind = Extended> {
_marker: PhantomData<S>,
}
/// Internal helper macros used to invoke C PCI configuration space read functions.
///
/// This macro is intended to be used by higher-level PCI configuration space access macros
/// (io_define_read) and provides a unified expansion for infallible vs. fallible read semantics. It
/// emits a direct call into the corresponding C helper and performs the required cast to the Rust
/// return type.
///
/// # Parameters
///
/// * `$c_fn` The C function performing the PCI configuration space write.
/// * `$self` The I/O backend object.
/// * `$ty` The type of the value to read.
/// * `$addr` The PCI configuration space offset to read.
///
/// This macro does not perform any validation; all invariants must be upheld by the higher-level
/// abstraction invoking it.
macro_rules! call_config_read {
(infallible, $c_fn:ident, $self:ident, $ty:ty, $addr:expr) => {{
/// Implements [`IoCapable`] on [`ConfigSpace`] for `$ty` using `$read_fn` and `$write_fn`.
macro_rules! impl_config_space_io_capable {
($ty:ty, $read_fn:ident, $write_fn:ident) => {
impl<'a, S: ConfigSpaceKind> IoCapable<$ty> for ConfigSpace<'a, S> {
unsafe fn io_read(&self, address: usize) -> $ty {
let mut val: $ty = 0;
// SAFETY: By the type invariant `$self.pdev` is a valid address.
// CAST: The offset is cast to `i32` because the C functions expect a 32-bit signed offset
// parameter. PCI configuration space size is at most 4096 bytes, so the value always fits
// within `i32` without truncation or sign change.
// Return value from C function is ignored in infallible accessors.
let _ret = unsafe { bindings::$c_fn($self.pdev.as_raw(), $addr as i32, &mut val) };
let _ret =
// SAFETY: By the type invariant `self.pdev` is a valid address.
// CAST: The offset is cast to `i32` because the C functions expect a 32-bit
// signed offset parameter. PCI configuration space size is at most 4096 bytes,
// so the value always fits within `i32` without truncation or sign change.
unsafe { bindings::$read_fn(self.pdev.as_raw(), address as i32, &mut val) };
val
}};
}
/// Internal helper macros used to invoke C PCI configuration space write functions.
///
/// This macro is intended to be used by higher-level PCI configuration space access macros
/// (io_define_write) and provides a unified expansion for infallible vs. fallible read semantics.
/// It emits a direct call into the corresponding C helper and performs the required cast to the
/// Rust return type.
///
/// # Parameters
///
/// * `$c_fn` The C function performing the PCI configuration space write.
/// * `$self` The I/O backend object.
/// * `$ty` The type of the written value.
/// * `$addr` The configuration space offset to write.
/// * `$value` The value to write.
///
/// This macro does not perform any validation; all invariants must be upheld by the higher-level
/// abstraction invoking it.
macro_rules! call_config_write {
(infallible, $c_fn:ident, $self:ident, $ty:ty, $addr:expr, $value:expr) => {
// SAFETY: By the type invariant `$self.pdev` is a valid address.
// CAST: The offset is cast to `i32` because the C functions expect a 32-bit signed offset
// parameter. PCI configuration space size is at most 4096 bytes, so the value always fits
// within `i32` without truncation or sign change.
unsafe fn io_write(&self, value: $ty, address: usize) {
// Return value from C function is ignored in infallible accessors.
let _ret = unsafe { bindings::$c_fn($self.pdev.as_raw(), $addr as i32, $value) };
let _ret =
// SAFETY: By the type invariant `self.pdev` is a valid address.
// CAST: The offset is cast to `i32` because the C functions expect a 32-bit
// signed offset parameter. PCI configuration space size is at most 4096 bytes,
// so the value always fits within `i32` without truncation or sign change.
unsafe { bindings::$write_fn(self.pdev.as_raw(), address as i32, value) };
}
}
};
}
// PCI configuration space supports 8, 16, and 32-bit accesses.
impl<'a, S: ConfigSpaceKind> IoCapable<u8> for ConfigSpace<'a, S> {}
impl<'a, S: ConfigSpaceKind> IoCapable<u16> for ConfigSpace<'a, S> {}
impl<'a, S: ConfigSpaceKind> IoCapable<u32> for ConfigSpace<'a, S> {}
impl_config_space_io_capable!(u8, pci_read_config_byte, pci_write_config_byte);
impl_config_space_io_capable!(u16, pci_read_config_word, pci_write_config_word);
impl_config_space_io_capable!(u32, pci_read_config_dword, pci_write_config_dword);
impl<'a, S: ConfigSpaceKind> Io for ConfigSpace<'a, S> {
/// Returns the base address of the I/O region. It is always 0 for configuration space.
@@ -159,17 +131,6 @@ impl<'a, S: ConfigSpaceKind> Io for ConfigSpace<'a, S> {
fn maxsize(&self) -> usize {
self.pdev.cfg_size().into_raw()
}
// PCI configuration space does not support fallible operations.
// The default implementations from the Io trait are not used.
io_define_read!(infallible, read8, call_config_read(pci_read_config_byte) -> u8);
io_define_read!(infallible, read16, call_config_read(pci_read_config_word) -> u16);
io_define_read!(infallible, read32, call_config_read(pci_read_config_dword) -> u32);
io_define_write!(infallible, write8, call_config_write(pci_write_config_byte) <- u8);
io_define_write!(infallible, write16, call_config_write(pci_write_config_word) <- u16);
io_define_write!(infallible, write32, call_config_write(pci_write_config_dword) <- u32);
}
impl<'a, S: ConfigSpaceKind> IoKnownSize for ConfigSpace<'a, S> {

View File

@@ -5,30 +5,63 @@
//! To make this driver probe, QEMU must be run with `-device pci-testdev`.
use kernel::{
device::Bound,
device::Core,
device::{
Bound,
Core, //
},
devres::Devres,
io::Io,
io::{
register,
register::Array,
Io, //
},
num::Bounded,
pci,
prelude::*,
sync::aref::ARef, //
};
struct Regs;
mod regs {
use super::*;
impl Regs {
const TEST: usize = 0x0;
const OFFSET: usize = 0x4;
const DATA: usize = 0x8;
const COUNT: usize = 0xC;
const END: usize = 0x10;
register! {
pub(super) TEST(u8) @ 0x0 {
7:0 index => TestIndex;
}
type Bar0 = pci::Bar<{ Regs::END }>;
pub(super) OFFSET(u32) @ 0x4 {
31:0 offset;
}
pub(super) DATA(u8) @ 0x8 {
7:0 data;
}
pub(super) COUNT(u32) @ 0xC {
31:0 count;
}
}
pub(super) const END: usize = 0x10;
}
type Bar0 = pci::Bar<{ regs::END }>;
/// Index of a `pci-testdev` test, as written to the `TEST` register to select
/// which test the device runs.
#[derive(Copy, Clone, Debug)]
struct TestIndex(u8);

// Conversions so `TestIndex` can be used directly as the 8-bit `index` field
// of the `TEST` register declared via `register!`.
impl From<Bounded<u8, 8>> for TestIndex {
    fn from(value: Bounded<u8, 8>) -> Self {
        Self(value.into())
    }
}

impl From<TestIndex> for Bounded<u8, 8> {
    fn from(value: TestIndex) -> Self {
        value.0.into()
    }
}

impl TestIndex {
    // Test index 0 — presumably the variant that does not involve an eventfd;
    // TODO(review): confirm against the QEMU pci-testdev register layout.
    const NO_EVENTFD: Self = Self(0);
}
@@ -54,40 +87,53 @@ kernel::pci_device_table!(
impl SampleDriver {
fn testdev(index: &TestIndex, bar: &Bar0) -> Result<u32> {
// Select the test.
bar.write8(index.0, Regs::TEST);
bar.write_reg(regs::TEST::zeroed().with_index(*index));
let offset = bar.read32(Regs::OFFSET) as usize;
let data = bar.read8(Regs::DATA);
let offset = bar.read(regs::OFFSET).into_raw() as usize;
let data = bar.read(regs::DATA).into();
// Write `data` to `offset` to increase `count` by one.
//
// Note that we need `try_write8`, since `offset` can't be checked at compile-time.
bar.try_write8(data, offset)?;
Ok(bar.read32(Regs::COUNT))
Ok(bar.read(regs::COUNT).into())
}
fn config_space(pdev: &pci::Device<Bound>) {
let config = pdev.config_space();
// TODO: use the register!() macro for defining PCI configuration space registers once it
// has been move out of nova-core.
// Some PCI configuration space registers.
register! {
VENDOR_ID(u16) @ 0x0 {
15:0 vendor_id;
}
REVISION_ID(u8) @ 0x8 {
7:0 revision_id;
}
BAR(u32)[6] @ 0x10 {
31:0 value;
}
}
dev_info!(
pdev,
"pci-testdev config space read8 rev ID: {:x}\n",
config.read8(0x8)
config.read(REVISION_ID).revision_id()
);
dev_info!(
pdev,
"pci-testdev config space read16 vendor ID: {:x}\n",
config.read16(0)
config.read(VENDOR_ID).vendor_id()
);
dev_info!(
pdev,
"pci-testdev config space read32 BAR 0: {:x}\n",
config.read32(0x10)
config.read(BAR::at(0)).value()
);
}
}
@@ -111,7 +157,7 @@ impl pci::Driver for SampleDriver {
pdev.set_master();
Ok(try_pin_init!(Self {
bar <- pdev.iomap_region_sized::<{ Regs::END }>(0, c"rust_driver_pci"),
bar <- pdev.iomap_region_sized::<{ regs::END }>(0, c"rust_driver_pci"),
index: *info,
_: {
let bar = bar.access(pdev.as_ref())?;
@@ -131,7 +177,7 @@ impl pci::Driver for SampleDriver {
fn unbind(pdev: &pci::Device<Core>, this: Pin<&Self>) {
if let Ok(bar) = this.bar.access(pdev.as_ref()) {
// Reset pci-testdev by writing a new test index.
bar.write8(this.index.0, Regs::TEST);
bar.write_reg(regs::TEST::zeroed().with_index(this.index));
}
}
}

View File

@@ -311,12 +311,13 @@ $(obj)/%.lst: $(obj)/%.c FORCE
# The features in this list are the ones allowed for non-`rust/` code.
#
# - Stable since Rust 1.87.0: `feature(asm_goto)`.
# - Stable since Rust 1.89.0: `feature(generic_arg_infer)`.
# - Expected to become stable: `feature(arbitrary_self_types)`.
# - To be determined: `feature(used_with_arg)`.
#
# Please see https://github.com/Rust-for-Linux/linux/issues/2 for details on
# the unstable features in use.
rust_allowed_features := arbitrary_self_types,asm_goto,used_with_arg
rust_allowed_features := arbitrary_self_types,asm_goto,generic_arg_infer,used_with_arg
# `--out-dir` is required to avoid temporaries being created by `rustc` in the
# current working directory, which may be not accessible in the out-of-tree

View File

@@ -10,6 +10,7 @@
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <sys/inotify.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <arpa/inet.h>
@@ -1643,6 +1644,115 @@ cleanup:
return ret;
}
/*
 * Read a single inotify event from @inotify_fd and check that its mask and
 * watch descriptor match @expected_event and @expected_wd.
 *
 * Returns 0 when the next event matches, -1 on a short read or a mismatch
 * (a mismatch is additionally reported on stderr).
 */
static int read_event(int inotify_fd, int expected_event, int expected_wd)
{
	struct inotify_event event;
	ssize_t len;

	len = read(inotify_fd, &event, sizeof(event));
	if (len < (ssize_t)sizeof(event))
		return -1;

	if (event.mask == expected_event && event.wd == expected_wd)
		return 0;

	fprintf(stderr,
		"event does not match expected values: mask %d (expected %d) wd %d (expected %d)\n",
		event.mask, expected_event, event.wd, expected_wd);
	return -1;
}
/*
 * Verify that destroying a cgroup delivers IN_DELETE_SELF followed by
 * IN_IGNORED on an inotify watch placed on one of its control files
 * (memory.events).
 *
 * Returns KSFT_PASS on success, KSFT_FAIL otherwise.
 */
static int test_memcg_inotify_delete_file(const char *root)
{
	int ret = KSFT_FAIL;
	char *memcg = NULL;
	/*
	 * fd must start at -1: the early `goto cleanup` paths (cg_name or
	 * cg_create failing) run before inotify_init1(), and cleanup tests
	 * `fd >= 0` — an uninitialized fd could close a random descriptor.
	 */
	int fd = -1, wd;

	memcg = cg_name(root, "memcg_test_0");
	if (!memcg)
		goto cleanup;

	if (cg_create(memcg))
		goto cleanup;

	fd = inotify_init1(0);
	if (fd == -1)
		goto cleanup;

	/*
	 * NOTE(review): if cg_control() returns heap-allocated memory, its
	 * result is leaked here — confirm against cgroup_util.
	 */
	wd = inotify_add_watch(fd, cg_control(memcg, "memory.events"), IN_DELETE_SELF);
	if (wd == -1)
		goto cleanup;

	if (cg_destroy(memcg))
		goto cleanup;
	free(memcg);
	memcg = NULL;

	/* Removal must emit IN_DELETE_SELF, then IN_IGNORED, for the watch. */
	if (read_event(fd, IN_DELETE_SELF, wd))
		goto cleanup;
	if (read_event(fd, IN_IGNORED, wd))
		goto cleanup;

	ret = KSFT_PASS;
cleanup:
	if (fd >= 0)
		close(fd);
	if (memcg)
		cg_destroy(memcg);
	free(memcg);
	return ret;
}
/*
 * Verify that destroying a cgroup delivers IN_DELETE_SELF followed by
 * IN_IGNORED on an inotify watch placed on the cgroup directory itself.
 *
 * Returns KSFT_PASS on success, KSFT_FAIL otherwise.
 */
static int test_memcg_inotify_delete_dir(const char *root)
{
	int ret = KSFT_FAIL;
	char *memcg = NULL;
	/*
	 * fd must start at -1: the early `goto cleanup` paths (cg_name or
	 * cg_create failing) run before inotify_init1(), and cleanup tests
	 * `fd >= 0` — an uninitialized fd could close a random descriptor.
	 */
	int fd = -1, wd;

	memcg = cg_name(root, "memcg_test_0");
	if (!memcg)
		goto cleanup;

	if (cg_create(memcg))
		goto cleanup;

	fd = inotify_init1(0);
	if (fd == -1)
		goto cleanup;

	wd = inotify_add_watch(fd, memcg, IN_DELETE_SELF);
	if (wd == -1)
		goto cleanup;

	if (cg_destroy(memcg))
		goto cleanup;
	free(memcg);
	memcg = NULL;

	/* Removal must emit IN_DELETE_SELF, then IN_IGNORED, for the watch. */
	if (read_event(fd, IN_DELETE_SELF, wd))
		goto cleanup;
	if (read_event(fd, IN_IGNORED, wd))
		goto cleanup;

	ret = KSFT_PASS;
cleanup:
	if (fd >= 0)
		close(fd);
	if (memcg)
		cg_destroy(memcg);
	free(memcg);
	return ret;
}
#define T(x) { x, #x }
struct memcg_test {
int (*fn)(const char *root);
@@ -1662,6 +1772,8 @@ struct memcg_test {
T(test_memcg_oom_group_leaf_events),
T(test_memcg_oom_group_parent_events),
T(test_memcg_oom_group_score_events),
T(test_memcg_inotify_delete_file),
T(test_memcg_inotify_delete_dir),
};
#undef T