mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 06:44:00 -04:00
treewide: Replace kmalloc with kmalloc_obj for non-scalar types
This is the result of running the Coccinelle script from scripts/coccinelle/api/kmalloc_objs.cocci. The script is designed to avoid scalar types (which need careful case-by-case checking), and instead replaces kmalloc-family calls that allocate struct or union object instances:

- Single allocations: kmalloc(sizeof(TYPE), ...) is replaced with kmalloc_obj(TYPE, ...)
- Array allocations: kmalloc_array(COUNT, sizeof(TYPE), ...) is replaced with kmalloc_objs(TYPE, COUNT, ...)
- Flexible-array allocations: kmalloc(struct_size(PTR, FAM, COUNT), ...) is replaced with kmalloc_flex(*PTR, FAM, COUNT, ...)

(In each case, TYPE may also be written as *VAR.) The resulting allocations no longer return "void *", instead returning "TYPE *".

Signed-off-by: Kees Cook <kees@kernel.org>
This commit is contained in:
@@ -795,7 +795,7 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
|
||||
struct flat_binder_object *fp)
|
||||
{
|
||||
struct binder_node *node;
|
||||
struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
|
||||
struct binder_node *new_node = kzalloc_obj(*node, GFP_KERNEL);
|
||||
|
||||
if (!new_node)
|
||||
return NULL;
|
||||
@@ -1469,7 +1469,7 @@ static int binder_inc_ref_for_node(struct binder_proc *proc,
|
||||
ref = binder_get_ref_for_node_olocked(proc, node, NULL);
|
||||
if (!ref) {
|
||||
binder_proc_unlock(proc);
|
||||
new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
|
||||
new_ref = kzalloc_obj(*ref, GFP_KERNEL);
|
||||
if (!new_ref)
|
||||
return -ENOMEM;
|
||||
binder_proc_lock(proc);
|
||||
@@ -2009,7 +2009,7 @@ static void binder_deferred_fd_close(int fd)
|
||||
{
|
||||
struct binder_task_work_cb *twcb;
|
||||
|
||||
twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
|
||||
twcb = kzalloc_obj(*twcb, GFP_KERNEL);
|
||||
if (!twcb)
|
||||
return;
|
||||
init_task_work(&twcb->twork, binder_do_fd_close);
|
||||
@@ -2386,7 +2386,7 @@ static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
|
||||
* of the fd in the target needs to be done from a
|
||||
* target thread.
|
||||
*/
|
||||
fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
|
||||
fixup = kzalloc_obj(*fixup, GFP_KERNEL);
|
||||
if (!fixup) {
|
||||
ret = -ENOMEM;
|
||||
goto err_alloc;
|
||||
@@ -2579,7 +2579,7 @@ static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
|
||||
static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
|
||||
const void __user *sender_uaddr, size_t length)
|
||||
{
|
||||
struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
|
||||
struct binder_sg_copy *bc = kzalloc_obj(*bc, GFP_KERNEL);
|
||||
|
||||
if (!bc)
|
||||
return -ENOMEM;
|
||||
@@ -2622,7 +2622,7 @@ static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
|
||||
static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
|
||||
binder_uintptr_t fixup, size_t skip_size)
|
||||
{
|
||||
struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
|
||||
struct binder_ptr_fixup *pf = kzalloc_obj(*pf, GFP_KERNEL);
|
||||
struct binder_ptr_fixup *tmppf;
|
||||
|
||||
if (!pf)
|
||||
@@ -3101,7 +3101,7 @@ static void binder_transaction(struct binder_proc *proc,
|
||||
binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
|
||||
binder_inner_proc_unlock(proc);
|
||||
|
||||
t = kzalloc(sizeof(*t), GFP_KERNEL);
|
||||
t = kzalloc_obj(*t, GFP_KERNEL);
|
||||
if (!t) {
|
||||
binder_txn_error("%d:%d cannot allocate transaction\n",
|
||||
thread->pid, proc->pid);
|
||||
@@ -3320,7 +3320,7 @@ static void binder_transaction(struct binder_proc *proc,
|
||||
e->to_thread = target_thread->pid;
|
||||
e->to_proc = target_proc->pid;
|
||||
|
||||
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
|
||||
tcomplete = kzalloc_obj(*tcomplete, GFP_KERNEL);
|
||||
if (tcomplete == NULL) {
|
||||
binder_txn_error("%d:%d cannot allocate work for transaction\n",
|
||||
thread->pid, proc->pid);
|
||||
@@ -3926,7 +3926,7 @@ binder_request_freeze_notification(struct binder_proc *proc,
|
||||
struct binder_ref_freeze *freeze;
|
||||
struct binder_ref *ref;
|
||||
|
||||
freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
|
||||
freeze = kzalloc_obj(*freeze, GFP_KERNEL);
|
||||
if (!freeze)
|
||||
return -ENOMEM;
|
||||
binder_proc_lock(proc);
|
||||
@@ -4394,7 +4394,7 @@ static int binder_thread_write(struct binder_proc *proc,
|
||||
* Allocate memory for death notification
|
||||
* before taking lock
|
||||
*/
|
||||
death = kzalloc(sizeof(*death), GFP_KERNEL);
|
||||
death = kzalloc_obj(*death, GFP_KERNEL);
|
||||
if (death == NULL) {
|
||||
WARN_ON(thread->return_error.cmd !=
|
||||
BR_OK);
|
||||
@@ -5293,7 +5293,7 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
|
||||
thread = binder_get_thread_ilocked(proc, NULL);
|
||||
binder_inner_proc_unlock(proc);
|
||||
if (!thread) {
|
||||
new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
|
||||
new_thread = kzalloc_obj(*thread, GFP_KERNEL);
|
||||
if (new_thread == NULL)
|
||||
return NULL;
|
||||
binder_inner_proc_lock(proc);
|
||||
@@ -5902,9 +5902,8 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
goto err;
|
||||
}
|
||||
|
||||
target_procs = kcalloc(target_procs_count,
|
||||
sizeof(struct binder_proc *),
|
||||
GFP_KERNEL);
|
||||
target_procs = kzalloc_objs(struct binder_proc *,
|
||||
target_procs_count, GFP_KERNEL);
|
||||
|
||||
if (!target_procs) {
|
||||
mutex_unlock(&binder_procs_lock);
|
||||
@@ -6061,7 +6060,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
|
||||
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
|
||||
current->tgid, current->pid);
|
||||
|
||||
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
|
||||
proc = kzalloc_obj(*proc, GFP_KERNEL);
|
||||
if (proc == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -7065,7 +7064,7 @@ static int __init init_binder_device(const char *name)
|
||||
int ret;
|
||||
struct binder_device *binder_device;
|
||||
|
||||
binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
|
||||
binder_device = kzalloc_obj(*binder_device, GFP_KERNEL);
|
||||
if (!binder_device)
|
||||
return -ENOMEM;
|
||||
|
||||
|
||||
@@ -145,7 +145,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
|
||||
mutex_unlock(&binderfs_minors_mutex);
|
||||
|
||||
ret = -ENOMEM;
|
||||
device = kzalloc(sizeof(*device), GFP_KERNEL);
|
||||
device = kzalloc_obj(*device, GFP_KERNEL);
|
||||
if (!device)
|
||||
goto err;
|
||||
|
||||
@@ -387,7 +387,7 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
|
||||
bool use_reserve = true;
|
||||
#endif
|
||||
|
||||
device = kzalloc(sizeof(*device), GFP_KERNEL);
|
||||
device = kzalloc_obj(*device, GFP_KERNEL);
|
||||
if (!device)
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -642,7 +642,7 @@ static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
|
||||
sb->s_op = &binderfs_super_ops;
|
||||
sb->s_time_gran = 1;
|
||||
|
||||
sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
|
||||
sb->s_fs_info = kzalloc_obj(struct binderfs_info, GFP_KERNEL);
|
||||
if (!sb->s_fs_info)
|
||||
return -ENOMEM;
|
||||
info = sb->s_fs_info;
|
||||
@@ -721,7 +721,7 @@ static int binderfs_init_fs_context(struct fs_context *fc)
|
||||
{
|
||||
struct binderfs_mount_opts *ctx;
|
||||
|
||||
ctx = kzalloc(sizeof(struct binderfs_mount_opts), GFP_KERNEL);
|
||||
ctx = kzalloc_obj(struct binderfs_mount_opts, GFP_KERNEL);
|
||||
if (!ctx)
|
||||
return -ENOMEM;
|
||||
|
||||
|
||||
@@ -289,7 +289,7 @@ static struct page *binder_page_alloc(struct binder_alloc *alloc,
|
||||
return NULL;
|
||||
|
||||
/* allocate and install shrinker metadata under page->private */
|
||||
mdata = kzalloc(sizeof(*mdata), GFP_KERNEL);
|
||||
mdata = kzalloc_obj(*mdata, GFP_KERNEL);
|
||||
if (!mdata) {
|
||||
__free_page(page);
|
||||
return NULL;
|
||||
@@ -672,7 +672,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
|
||||
}
|
||||
|
||||
/* Preallocate the next buffer */
|
||||
next = kzalloc(sizeof(*next), GFP_KERNEL);
|
||||
next = kzalloc_obj(*next, GFP_KERNEL);
|
||||
if (!next)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
@@ -916,16 +916,15 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
|
||||
|
||||
alloc->vm_start = vma->vm_start;
|
||||
|
||||
alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
|
||||
sizeof(alloc->pages[0]),
|
||||
GFP_KERNEL);
|
||||
alloc->pages = kvzalloc_objs(alloc->pages[0],
|
||||
alloc->buffer_size / PAGE_SIZE, GFP_KERNEL);
|
||||
if (!alloc->pages) {
|
||||
ret = -ENOMEM;
|
||||
failure_string = "alloc page array";
|
||||
goto err_alloc_pages_failed;
|
||||
}
|
||||
|
||||
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
|
||||
buffer = kzalloc_obj(*buffer, GFP_KERNEL);
|
||||
if (!buffer) {
|
||||
ret = -ENOMEM;
|
||||
failure_string = "alloc buffer struct";
|
||||
|
||||
@@ -145,7 +145,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
|
||||
mutex_unlock(&binderfs_minors_mutex);
|
||||
|
||||
ret = -ENOMEM;
|
||||
device = kzalloc(sizeof(*device), GFP_KERNEL);
|
||||
device = kzalloc_obj(*device, GFP_KERNEL);
|
||||
if (!device)
|
||||
goto err;
|
||||
|
||||
@@ -396,7 +396,7 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
|
||||
bool use_reserve = true;
|
||||
#endif
|
||||
|
||||
device = kzalloc(sizeof(*device), GFP_KERNEL);
|
||||
device = kzalloc_obj(*device, GFP_KERNEL);
|
||||
if (!device)
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -638,7 +638,7 @@ static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
|
||||
sb->s_op = &binderfs_super_ops;
|
||||
sb->s_time_gran = 1;
|
||||
|
||||
sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
|
||||
sb->s_fs_info = kzalloc_obj(struct binderfs_info, GFP_KERNEL);
|
||||
if (!sb->s_fs_info)
|
||||
return -ENOMEM;
|
||||
info = sb->s_fs_info;
|
||||
@@ -717,7 +717,7 @@ static int binderfs_init_fs_context(struct fs_context *fc)
|
||||
{
|
||||
struct binderfs_mount_opts *ctx;
|
||||
|
||||
ctx = kzalloc(sizeof(struct binderfs_mount_opts), GFP_KERNEL);
|
||||
ctx = kzalloc_obj(struct binderfs_mount_opts, GFP_KERNEL);
|
||||
if (!ctx)
|
||||
return -ENOMEM;
|
||||
|
||||
|
||||
Reference in New Issue
Block a user