treewide: Replace kmalloc with kmalloc_obj for non-scalar types

This is the result of running the Coccinelle script from
scripts/coccinelle/api/kmalloc_objs.cocci. The script is designed to
avoid scalar types (which need careful case-by-case checking), and
instead replace kmalloc-family calls that allocate struct or union
object instances:

Single allocations:	kmalloc(sizeof(TYPE), ...)
are replaced with:	kmalloc_obj(TYPE, ...)

Array allocations:	kmalloc_array(COUNT, sizeof(TYPE), ...)
are replaced with:	kmalloc_objs(TYPE, COUNT, ...)

Flex array allocations:	kmalloc(struct_size(PTR, FAM, COUNT), ...)
are replaced with:	kmalloc_flex(*PTR, FAM, COUNT, ...)

(where TYPE may also be *VAR)

The resulting allocations no longer return "void *", instead returning
"TYPE *".

Signed-off-by: Kees Cook <kees@kernel.org>
This commit is contained in:
Kees Cook
2026-02-20 23:49:23 -08:00
parent d39a1d7486
commit 69050f8d6d
8016 changed files with 20055 additions and 20913 deletions

View File

@@ -1527,8 +1527,7 @@ static CLOSURE_CALLBACK(flash_dev_flush)
static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
{
int err = -ENOMEM;
-	struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
-					  GFP_KERNEL);
+	struct bcache_device *d = kzalloc_obj(struct bcache_device, GFP_KERNEL);
if (!d)
goto err_ret;
@@ -1864,7 +1863,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
int iter_size;
struct cache *ca = container_of(sb, struct cache, sb);
-	struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
+	struct cache_set *c = kzalloc_obj(struct cache_set, GFP_KERNEL);
if (!c)
return NULL;
@@ -2543,8 +2542,8 @@ static void register_device_async(struct async_reg_args *args)
static void *alloc_holder_object(struct cache_sb *sb)
{
if (SB_IS_BDEV(sb))
-		return kzalloc(sizeof(struct cached_dev), GFP_KERNEL);
-	return kzalloc(sizeof(struct cache), GFP_KERNEL);
+		return kzalloc_obj(struct cached_dev, GFP_KERNEL);
+	return kzalloc_obj(struct cache, GFP_KERNEL);
}
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
@@ -2581,7 +2580,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!path)
goto out_module_put;
-	sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
+	sb = kmalloc_obj(struct cache_sb, GFP_KERNEL);
if (!sb)
goto out_free_path;
@@ -2633,7 +2632,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (async_registration) {
/* register in asynchronous way */
struct async_reg_args *args =
-			kzalloc(sizeof(struct async_reg_args), GFP_KERNEL);
+			kzalloc_obj(struct async_reg_args, GFP_KERNEL);
if (!args) {
ret = -ENOMEM;
@@ -2710,7 +2709,7 @@ static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
mutex_lock(&bch_register_lock);
list_for_each_entry_safe(dc, tdc, &uncached_devices, list) {
-		pdev = kmalloc(sizeof(struct pdev), GFP_KERNEL);
+		pdev = kmalloc_obj(struct pdev, GFP_KERNEL);
if (!pdev)
break;
pdev->dc = dc;