treewide: Replace kmalloc with kmalloc_obj for non-scalar types

This is the result of running the Coccinelle script from
scripts/coccinelle/api/kmalloc_objs.cocci. The script is designed to
avoid scalar types (which need careful case-by-case checking) and to
instead replace kmalloc-family calls that allocate struct or union
object instances:

Single allocations:	kmalloc(sizeof(TYPE), ...)
are replaced with:	kmalloc_obj(TYPE, ...)

Array allocations:	kmalloc_array(COUNT, sizeof(TYPE), ...)
are replaced with:	kmalloc_objs(TYPE, COUNT, ...)

Flex array allocations:	kmalloc(struct_size(PTR, FAM, COUNT), ...)
are replaced with:	kmalloc_flex(*PTR, FAM, COUNT, ...)

(where TYPE may also be *VAR)

The resulting allocations no longer return "void *", instead returning
"TYPE *".

Signed-off-by: Kees Cook <kees@kernel.org>
This commit is contained in:
Kees Cook
2026-02-20 23:49:23 -08:00
parent d39a1d7486
commit 69050f8d6d
8016 changed files with 20055 additions and 20913 deletions

View File

@@ -447,7 +447,7 @@ int aldebaran_reset_init(struct amdgpu_device *adev)
{
struct amdgpu_reset_control *reset_ctl;
reset_ctl = kzalloc(sizeof(*reset_ctl), GFP_KERNEL);
reset_ctl = kzalloc_obj(*reset_ctl, GFP_KERNEL);
if (!reset_ctl)
return -ENOMEM;

View File

@@ -52,7 +52,7 @@ static int aca_banks_add_bank(struct aca_banks *banks, struct aca_bank *bank)
if (!bank)
return -EINVAL;
node = kvzalloc(sizeof(*node), GFP_KERNEL);
node = kvzalloc_obj(*node, GFP_KERNEL);
if (!node)
return -ENOMEM;
@@ -230,7 +230,7 @@ static struct aca_bank_error *new_bank_error(struct aca_error *aerr, struct aca_
{
struct aca_bank_error *bank_error;
bank_error = kvzalloc(sizeof(*bank_error), GFP_KERNEL);
bank_error = kvzalloc_obj(*bank_error, GFP_KERNEL);
if (!bank_error)
return NULL;

View File

@@ -246,7 +246,7 @@ static int acp_hw_init(struct amdgpu_ip_block *ip_block)
return -EINVAL;
acp_base = adev->rmmio_base;
adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
adev->acp.acp_genpd = kzalloc_obj(struct acp_pm_domain, GFP_KERNEL);
if (!adev->acp.acp_genpd)
return -ENOMEM;
@@ -260,20 +260,21 @@ static int acp_hw_init(struct amdgpu_ip_block *ip_block)
switch (acp_machine_id) {
case ST_JADEITE:
{
adev->acp.acp_cell = kcalloc(2, sizeof(struct mfd_cell),
GFP_KERNEL);
adev->acp.acp_cell = kzalloc_objs(struct mfd_cell, 2,
GFP_KERNEL);
if (!adev->acp.acp_cell) {
r = -ENOMEM;
goto failure;
}
adev->acp.acp_res = kcalloc(3, sizeof(struct resource), GFP_KERNEL);
adev->acp.acp_res = kzalloc_objs(struct resource, 3, GFP_KERNEL);
if (!adev->acp.acp_res) {
r = -ENOMEM;
goto failure;
}
i2s_pdata = kcalloc(1, sizeof(struct i2s_platform_data), GFP_KERNEL);
i2s_pdata = kzalloc_objs(struct i2s_platform_data, 1,
GFP_KERNEL);
if (!i2s_pdata) {
r = -ENOMEM;
goto failure;
@@ -324,21 +325,22 @@ static int acp_hw_init(struct amdgpu_ip_block *ip_block)
break;
}
default:
adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
GFP_KERNEL);
adev->acp.acp_cell = kzalloc_objs(struct mfd_cell, ACP_DEVS,
GFP_KERNEL);
if (!adev->acp.acp_cell) {
r = -ENOMEM;
goto failure;
}
adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
adev->acp.acp_res = kzalloc_objs(struct resource, 5, GFP_KERNEL);
if (!adev->acp.acp_res) {
r = -ENOMEM;
goto failure;
}
i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
i2s_pdata = kzalloc_objs(struct i2s_platform_data, 3,
GFP_KERNEL);
if (!i2s_pdata) {
r = -ENOMEM;
goto failure;

View File

@@ -897,7 +897,7 @@ static struct amdgpu_numa_info *amdgpu_acpi_get_numa_info(uint32_t pxm)
if (!numa_info) {
struct sysinfo info;
numa_info = kzalloc(sizeof(*numa_info), GFP_KERNEL);
numa_info = kzalloc_obj(*numa_info, GFP_KERNEL);
if (!numa_info)
return NULL;
@@ -1016,7 +1016,7 @@ static int amdgpu_acpi_dev_init(struct amdgpu_acpi_dev_info **dev_info,
int ret = -ENOENT;
*dev_info = NULL;
tmp = kzalloc(sizeof(struct amdgpu_acpi_dev_info), GFP_KERNEL);
tmp = kzalloc_obj(struct amdgpu_acpi_dev_info, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
@@ -1166,8 +1166,7 @@ int amdgpu_acpi_enumerate_xcc(void)
break;
}
xcc_info = kzalloc(sizeof(struct amdgpu_acpi_xcc_info),
GFP_KERNEL);
xcc_info = kzalloc_obj(struct amdgpu_acpi_xcc_info, GFP_KERNEL);
if (!xcc_info)
return -ENOMEM;

View File

@@ -829,11 +829,11 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
if (!kiq_ring->sched.ready || amdgpu_in_reset(adev))
return 0;
ring_funcs = kzalloc(sizeof(*ring_funcs), GFP_KERNEL);
ring_funcs = kzalloc_obj(*ring_funcs, GFP_KERNEL);
if (!ring_funcs)
return -ENOMEM;
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
ring = kzalloc_obj(*ring, GFP_KERNEL);
if (!ring) {
r = -ENOMEM;
goto free_ring_funcs;

View File

@@ -199,7 +199,7 @@ int kgd_arcturus_hqd_sdma_dump(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
*dump = kmalloc_objs(**dump, HQD_N_REGS, GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;

View File

@@ -67,7 +67,7 @@ struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
{
struct amdgpu_amdkfd_fence *fence;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
fence = kzalloc_obj(*fence, GFP_KERNEL);
if (fence == NULL)
return NULL;

View File

@@ -141,7 +141,7 @@ static int kgd_gfx_v9_4_3_hqd_sdma_dump(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
*dump = kmalloc_objs(**dump, HQD_N_REGS, GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;

View File

@@ -352,7 +352,7 @@ static int kgd_hqd_dump(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \
} while (0)
*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
*dump = kmalloc_objs(**dump, HQD_N_REGS, GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -449,7 +449,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
*dump = kmalloc_objs(**dump, HQD_N_REGS, GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;

View File

@@ -338,7 +338,7 @@ static int hqd_dump_v10_3(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \
} while (0)
*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
*dump = kmalloc_objs(**dump, HQD_N_REGS, GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -435,7 +435,7 @@ static int hqd_sdma_dump_v10_3(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+12)
*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
*dump = kmalloc_objs(**dump, HQD_N_REGS, GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;

View File

@@ -323,7 +323,7 @@ static int hqd_dump_v11(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
*dump = kmalloc_objs(**dump, HQD_N_REGS, GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -420,7 +420,7 @@ static int hqd_sdma_dump_v11(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (7+11+1+12+12)
*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
*dump = kmalloc_objs(**dump, HQD_N_REGS, GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;

View File

@@ -115,7 +115,7 @@ static int hqd_dump_v12(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
*dump = kmalloc_objs(**dump, HQD_N_REGS, GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -146,7 +146,7 @@ static int hqd_sdma_dump_v12(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (last_reg - first_reg + 1)
*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
*dump = kmalloc_objs(**dump, HQD_N_REGS, GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;

View File

@@ -214,7 +214,7 @@ static int kgd_hqd_dump(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
*dump = kmalloc_objs(**dump, HQD_N_REGS, GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -301,7 +301,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (19+4)
*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
*dump = kmalloc_objs(**dump, HQD_N_REGS, GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;

View File

@@ -238,7 +238,7 @@ static int kgd_hqd_dump(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
*dump = kmalloc_objs(**dump, HQD_N_REGS, GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -324,7 +324,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (19+4+2+3+7)
*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
*dump = kmalloc_objs(**dump, HQD_N_REGS, GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;

View File

@@ -363,7 +363,7 @@ int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
*dump = kmalloc_objs(**dump, HQD_N_REGS, GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -460,7 +460,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
*dump = kmalloc_objs(**dump, HQD_N_REGS, GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;

View File

@@ -540,7 +540,7 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct amdgpu_vm *vm,
*/
static struct sg_table *create_sg_table(uint64_t addr, uint32_t size)
{
struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
struct sg_table *sg = kmalloc_obj(*sg, GFP_KERNEL);
if (!sg)
return NULL;
@@ -573,7 +573,7 @@ kfd_mem_dmamap_userptr(struct kgd_mem *mem,
if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
return -EINVAL;
ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
ttm->sg = kmalloc_obj(*ttm->sg, GFP_KERNEL);
if (unlikely(!ttm->sg))
return -ENOMEM;
@@ -1409,7 +1409,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
process = container_of(process_info, struct kfd_process, kgd_process_info);
if (!*process_info) {
info = kzalloc(sizeof(*info), GFP_KERNEL);
info = kzalloc_obj(*info, GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -1773,7 +1773,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED)
alloc_flags |= AMDGPU_GEM_CREATE_UNCACHED;
*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
*mem = kzalloc_obj(struct kgd_mem, GFP_KERNEL);
if (!*mem) {
ret = -ENOMEM;
goto err;
@@ -2374,7 +2374,7 @@ static int import_obj_create(struct amdgpu_device *adev,
/* Only VRAM and GTT BOs are supported */
return -EINVAL;
*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
*mem = kzalloc_obj(struct kgd_mem, GFP_KERNEL);
if (!*mem)
return -ENOMEM;
@@ -3129,7 +3129,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
if (!info || !gws)
return -EINVAL;
*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
*mem = kzalloc_obj(struct kgd_mem, GFP_KERNEL);
if (!*mem)
return -ENOMEM;

View File

@@ -1897,7 +1897,7 @@ void amdgpu_atombios_fini(struct amdgpu_device *adev)
int amdgpu_atombios_init(struct amdgpu_device *adev)
{
struct card_info *atom_card_info =
kzalloc(sizeof(struct card_info), GFP_KERNEL);
kzalloc_obj(struct card_info, GFP_KERNEL);
if (!atom_card_info)
return -ENOMEM;

View File

@@ -76,7 +76,7 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
unsigned i;
int r;
list = kvzalloc(struct_size(list, entries, num_entries), GFP_KERNEL);
list = kvzalloc_flex(*list, entries, num_entries, GFP_KERNEL);
if (!list)
return -ENOMEM;

View File

@@ -399,8 +399,8 @@ static const struct cgs_ops amdgpu_cgs_ops = {
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
struct amdgpu_cgs_device *cgs_device =
kmalloc(sizeof(*cgs_device), GFP_KERNEL);
struct amdgpu_cgs_device *cgs_device = kmalloc_obj(*cgs_device,
GFP_KERNEL);
if (!cgs_device) {
drm_err(adev_to_drm(adev), "Couldn't allocate CGS device structure\n");

View File

@@ -1652,7 +1652,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
}
}
amdgpu_connector = kzalloc(sizeof(struct amdgpu_connector), GFP_KERNEL);
amdgpu_connector = kzalloc_obj(struct amdgpu_connector, GFP_KERNEL);
if (!amdgpu_connector)
return;
@@ -1673,7 +1673,8 @@ amdgpu_connector_add(struct amdgpu_device *adev,
}
if (is_dp_bridge) {
amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig,
GFP_KERNEL);
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
@@ -1828,7 +1829,8 @@ amdgpu_connector_add(struct amdgpu_device *adev,
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig,
GFP_KERNEL);
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
@@ -1885,7 +1887,8 @@ amdgpu_connector_add(struct amdgpu_device *adev,
break;
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig,
GFP_KERNEL);
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
@@ -1934,7 +1937,8 @@ amdgpu_connector_add(struct amdgpu_device *adev,
connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig,
GFP_KERNEL);
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
@@ -1983,7 +1987,8 @@ amdgpu_connector_add(struct amdgpu_device *adev,
connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_eDP:
amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig,
GFP_KERNEL);
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
@@ -2010,7 +2015,8 @@ amdgpu_connector_add(struct amdgpu_device *adev,
connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_LVDS:
amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
amdgpu_dig_connector = kzalloc_obj(struct amdgpu_connector_atom_dig,
GFP_KERNEL);
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;

View File

@@ -192,8 +192,8 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
return PTR_ERR(chunk_array);
p->nchunks = cs->in.num_chunks;
p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
GFP_KERNEL);
p->chunks = kvmalloc_objs(struct amdgpu_cs_chunk, p->nchunks,
GFP_KERNEL);
if (!p->chunks) {
ret = -ENOMEM;
goto free_chunk;
@@ -523,8 +523,7 @@ static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
if (p->post_deps)
return -EINVAL;
p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
GFP_KERNEL);
p->post_deps = kmalloc_objs(*p->post_deps, num_deps, GFP_KERNEL);
p->num_post_deps = 0;
if (!p->post_deps)
@@ -557,8 +556,7 @@ static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
if (p->post_deps)
return -EINVAL;
p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
GFP_KERNEL);
p->post_deps = kmalloc_objs(*p->post_deps, num_deps, GFP_KERNEL);
p->num_post_deps = 0;
if (!p->post_deps)
@@ -1691,7 +1689,7 @@ static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
long r;
/* Prepare the fence array */
array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
array = kzalloc_objs(struct dma_fence *, fence_count, GFP_KERNEL);
if (array == NULL)
return -ENOMEM;

View File

@@ -212,8 +212,7 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
int32_t ctx_prio;
int r;
entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs),
GFP_KERNEL);
entity = kzalloc_flex(*entity, fences, amdgpu_sched_jobs, GFP_KERNEL);
if (!entity)
return -ENOMEM;
@@ -483,7 +482,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
struct amdgpu_ctx *ctx;
int r;
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
ctx = kmalloc_obj(*ctx, GFP_KERNEL);
if (!ctx)
return -ENOMEM;

View File

@@ -206,7 +206,7 @@ static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file)
{
struct amdgpu_debugfs_regs2_data *rd;
rd = kzalloc(sizeof(*rd), GFP_KERNEL);
rd = kzalloc_obj(*rd, GFP_KERNEL);
if (!rd)
return -ENOMEM;
rd->adev = file_inode(file)->i_private;
@@ -371,7 +371,7 @@ static int amdgpu_debugfs_gprwave_open(struct inode *inode, struct file *file)
{
struct amdgpu_debugfs_gprwave_data *rd;
rd = kzalloc(sizeof(*rd), GFP_KERNEL);
rd = kzalloc_obj(*rd, GFP_KERNEL);
if (!rd)
return -ENOMEM;
rd->adev = file_inode(file)->i_private;

View File

@@ -332,7 +332,7 @@ void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
struct amdgpu_coredump_info *coredump;
struct drm_sched_job *s_job;
coredump = kzalloc(sizeof(*coredump), GFP_NOWAIT);
coredump = kzalloc_obj(*coredump, GFP_NOWAIT);
if (!coredump)
return;

View File

@@ -2596,7 +2596,7 @@ out:
static void amdgpu_uid_init(struct amdgpu_device *adev)
{
/* Initialize the UID for the device */
adev->uid_info = kzalloc(sizeof(struct amdgpu_uid), GFP_KERNEL);
adev->uid_info = kzalloc_obj(struct amdgpu_uid, GFP_KERNEL);
if (!adev->uid_info) {
dev_warn(adev->dev, "Failed to allocate memory for UID\n");
return;

View File

@@ -1149,7 +1149,7 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
* block if not yet registered.
*/
if (!ip_hw_id) {
ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
ip_hw_id = kzalloc_obj(*ip_hw_id, GFP_KERNEL);
if (!ip_hw_id)
return -ENOMEM;
ip_hw_id->hw_id = ii;
@@ -1177,10 +1177,10 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
/* Now register its instance.
*/
ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
base_addr,
ip->num_base_address),
GFP_KERNEL);
ip_hw_instance = kzalloc_flex(*ip_hw_instance,
base_addr,
ip->num_base_address,
GFP_KERNEL);
if (!ip_hw_instance) {
DRM_ERROR("no memory for ip_hw_instance");
return -ENOMEM;
@@ -1255,7 +1255,7 @@ static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
* amdgpu_discovery_reg_base_init().
*/
ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
ip_die_entry = kzalloc_obj(*ip_die_entry, GFP_KERNEL);
if (!ip_die_entry)
return -ENOMEM;
@@ -1287,7 +1287,7 @@ static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
if (!discovery_bin)
return -EINVAL;
ip_top = kzalloc(sizeof(*ip_top), GFP_KERNEL);
ip_top = kzalloc_obj(*ip_top, GFP_KERNEL);
if (!ip_top)
return -ENOMEM;
@@ -1931,9 +1931,8 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
switch (le16_to_cpu(nps_info->v1.header.version_major)) {
case 1:
mem_ranges = kvcalloc(nps_info->v1.count,
sizeof(*mem_ranges),
GFP_KERNEL);
mem_ranges = kvzalloc_objs(*mem_ranges, nps_info->v1.count,
GFP_KERNEL);
if (!mem_ranges)
return -ENOMEM;
*nps_type = nps_info->v1.nps_type;

View File

@@ -204,7 +204,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
u64 tiling_flags;
int i, r;
work = kzalloc(sizeof(*work), GFP_KERNEL);
work = kzalloc_obj(*work, GFP_KERNEL);
if (work == NULL)
return -ENOMEM;
@@ -1323,7 +1323,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-EINVAL);
}
amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
amdgpu_fb = kzalloc_obj(*amdgpu_fb, GFP_KERNEL);
if (amdgpu_fb == NULL) {
drm_gem_object_put(obj);
return ERR_PTR(-ENOMEM);

View File

@@ -161,7 +161,7 @@ amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr)
{
struct amdgpu_eviction_fence *ev_fence;
ev_fence = kzalloc(sizeof(*ev_fence), GFP_KERNEL);
ev_fence = kzalloc_obj(*ev_fence, GFP_KERNEL);
if (!ev_fence)
return NULL;

View File

@@ -130,7 +130,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
return 0;
if (!adev->fru_info) {
adev->fru_info = kzalloc(sizeof(*adev->fru_info), GFP_KERNEL);
adev->fru_info = kzalloc_obj(*adev->fru_info, GFP_KERNEL);
if (!adev->fru_info)
return -ENOMEM;
}

View File

@@ -153,7 +153,7 @@ int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
dev_info(adev->dev, "%s dma_addr:%pad\n", __func__, &dma_addr);
/* Create SG table */
sg = kmalloc(sizeof(*sg), GFP_KERNEL);
sg = kmalloc_obj(*sg, GFP_KERNEL);
if (!sg) {
ret = -ENOMEM;
goto error;

View File

@@ -1183,7 +1183,7 @@ int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
return 0;
}
bo_entries = kvcalloc(num_bos, sizeof(*bo_entries), GFP_KERNEL);
bo_entries = kvzalloc_objs(*bo_entries, num_bos, GFP_KERNEL);
if (!bo_entries)
return -ENOMEM;

View File

@@ -1727,9 +1727,9 @@ int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev)
{
bool valid;
adev->gmc.mem_partitions = kcalloc(AMDGPU_MAX_MEM_RANGES,
sizeof(struct amdgpu_mem_partition_info),
GFP_KERNEL);
adev->gmc.mem_partitions = kzalloc_objs(struct amdgpu_mem_partition_info,
AMDGPU_MAX_MEM_RANGES,
GFP_KERNEL);
if (!adev->gmc.mem_partitions)
return -ENOMEM;

View File

@@ -122,7 +122,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
struct ttm_range_mgr_node *node;
int r;
node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
node = kzalloc_flex(*node, mm_nodes, 1, GFP_KERNEL);
if (!node)
return -ENOMEM;

View File

@@ -265,7 +265,7 @@ struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo)
{
struct amdgpu_hmm_range *range;
range = kzalloc(sizeof(*range), GFP_KERNEL);
range = kzalloc_obj(*range, GFP_KERNEL);
if (!range)
return NULL;

View File

@@ -168,7 +168,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
if (rec->mm_i2c && (amdgpu_hw_i2c == 0))
return NULL;
i2c = kzalloc(sizeof(struct amdgpu_i2c_chan), GFP_KERNEL);
i2c = kzalloc_obj(struct amdgpu_i2c_chan, GFP_KERNEL);
if (i2c == NULL)
return NULL;

View File

@@ -169,7 +169,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
csa_va = 0;
gds_va = 0;
init_shadow = false;
af = kzalloc(sizeof(*af), GFP_ATOMIC);
af = kzalloc_obj(*af, GFP_ATOMIC);
if (!af)
return -ENOMEM;
}

View File

@@ -119,7 +119,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
return;
}
cb = kmalloc(sizeof(*cb), GFP_KERNEL);
cb = kmalloc_obj(*cb, GFP_KERNEL);
if (!cb) {
/* Last resort when we are OOM */
dma_fence_wait(fence, false);

View File

@@ -436,9 +436,8 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
if (!adev->irq.client[client_id].sources) {
adev->irq.client[client_id].sources =
kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
sizeof(struct amdgpu_irq_src *),
GFP_KERNEL);
kzalloc_objs(struct amdgpu_irq_src *,
AMDGPU_MAX_IRQ_SRC_ID, GFP_KERNEL);
if (!adev->irq.client[client_id].sources)
return -ENOMEM;
}
@@ -449,8 +448,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
if (source->num_types && !source->enabled_types) {
atomic_t *types;
types = kcalloc(source->num_types, sizeof(atomic_t),
GFP_KERNEL);
types = kzalloc_objs(atomic_t, source->num_types, GFP_KERNEL);
if (!types)
return -ENOMEM;

View File

@@ -198,18 +198,18 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (num_ibs == 0)
return -EINVAL;
*job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL);
*job = kzalloc_flex(**job, ibs, num_ibs, GFP_KERNEL);
if (!*job)
return -ENOMEM;
af = kzalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
af = kzalloc_obj(struct amdgpu_fence, GFP_KERNEL);
if (!af) {
r = -ENOMEM;
goto err_job;
}
(*job)->hw_fence = af;
af = kzalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
af = kzalloc_obj(struct amdgpu_fence, GFP_KERNEL);
if (!af) {
r = -ENOMEM;
goto err_fence;

View File

@@ -942,7 +942,7 @@ out:
uint64_t vm_size;
uint32_t pcie_gen_mask, pcie_width_mask;
dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
dev_info = kzalloc_obj(*dev_info, GFP_KERNEL);
if (!dev_info)
return -ENOMEM;
@@ -1329,7 +1329,7 @@ out:
return -EINVAL;
}
caps = kzalloc(sizeof(*caps), GFP_KERNEL);
caps = kzalloc_obj(*caps, GFP_KERNEL);
if (!caps)
return -ENOMEM;

View File

@@ -169,7 +169,7 @@ static int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mc
if (!entry)
return -EINVAL;
node = kvzalloc(sizeof(*node), GFP_KERNEL);
node = kvzalloc_obj(*node, GFP_KERNEL);
if (!node)
return -ENOMEM;

View File

@@ -446,25 +446,25 @@ static int amdgpu_pmu_alloc_pmu_attrs(
struct amdgpu_pmu_event_attribute **evt_attr,
struct amdgpu_pmu_config *config)
{
*fmt_attr = kcalloc(config->num_formats, sizeof(**fmt_attr),
GFP_KERNEL);
*fmt_attr = kzalloc_objs(**fmt_attr, config->num_formats, GFP_KERNEL);
if (!(*fmt_attr))
return -ENOMEM;
fmt_attr_group->attrs = kcalloc(config->num_formats + 1,
sizeof(*fmt_attr_group->attrs), GFP_KERNEL);
fmt_attr_group->attrs = kzalloc_objs(*fmt_attr_group->attrs,
config->num_formats + 1,
GFP_KERNEL);
if (!fmt_attr_group->attrs)
goto err_fmt_attr_grp;
*evt_attr = kcalloc(config->num_events, sizeof(**evt_attr), GFP_KERNEL);
*evt_attr = kzalloc_objs(**evt_attr, config->num_events, GFP_KERNEL);
if (!(*evt_attr))
goto err_evt_attr;
evt_attr_group->attrs = kcalloc(config->num_events + 1,
sizeof(*evt_attr_group->attrs), GFP_KERNEL);
evt_attr_group->attrs = kzalloc_objs(*evt_attr_group->attrs,
config->num_events + 1, GFP_KERNEL);
if (!evt_attr_group->attrs)
goto err_evt_attr_grp;
@@ -599,7 +599,7 @@ static struct amdgpu_pmu_entry *create_pmu_entry(struct amdgpu_device *adev,
{
struct amdgpu_pmu_entry *pmu_entry;
pmu_entry = kzalloc(sizeof(struct amdgpu_pmu_entry), GFP_KERNEL);
pmu_entry = kzalloc_obj(struct amdgpu_pmu_entry, GFP_KERNEL);
if (!pmu_entry)
return pmu_entry;

View File

@@ -61,7 +61,7 @@ static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man,
const struct ttm_place *place,
struct ttm_resource **res)
{
*res = kzalloc(sizeof(**res), GFP_KERNEL);
*res = kzalloc_obj(**res, GFP_KERNEL);
if (!*res)
return -ENOMEM;

View File

@@ -457,7 +457,7 @@ static int psp_sw_init(struct amdgpu_ip_block *ip_block)
struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
struct psp_runtime_scpm_entry scpm_entry;
psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
psp->cmd = kzalloc_obj(struct psp_gfx_cmd_resp, GFP_KERNEL);
if (!psp->cmd) {
dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
return -ENOMEM;
@@ -4384,7 +4384,7 @@ static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp)
return -EBUSY;
}
bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL);
bo_triplet = kzalloc_obj(struct spirom_bo, GFP_KERNEL);
if (!bo_triplet) {
mutex_unlock(&adev->psp.mutex);
return -ENOMEM;

View File

@@ -891,7 +891,7 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
if (head->block == AMDGPU_RAS_BLOCK__GFX &&
!amdgpu_sriov_vf(adev) &&
!amdgpu_ras_intr_triggered()) {
info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
info = kzalloc_obj(union ta_ras_cmd_input, GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -1904,7 +1904,7 @@ static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
memset(buf, 0, count);
bps_count = end - start;
bps = kmalloc_array(bps_count, sizeof(*bps), GFP_KERNEL);
bps = kmalloc_objs(*bps, bps_count, GFP_KERNEL);
if (!bps)
return 0;
@@ -2811,7 +2811,7 @@ static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev,
if (!bps || !count)
return -EINVAL;
output = kmalloc(sizeof(*output), GFP_KERNEL);
output = kmalloc_obj(*output, GFP_KERNEL);
if (!output)
return -ENOMEM;
@@ -2991,7 +2991,7 @@ static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
unsigned int old_space = data->count + data->space_left;
unsigned int new_space = old_space + pages;
unsigned int align_space = ALIGN(new_space, 512);
void *bps = kmalloc_array(align_space, sizeof(*data->bps), GFP_KERNEL);
void *bps = kmalloc_objs(*data->bps, align_space, GFP_KERNEL);
if (!bps) {
return -ENOMEM;
@@ -3238,8 +3238,8 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
if (from_rom) {
err_data.err_addr =
kcalloc(adev->umc.retire_unit,
sizeof(struct eeprom_table_record), GFP_KERNEL);
kzalloc_objs(struct eeprom_table_record,
adev->umc.retire_unit, GFP_KERNEL);
if (!err_data.err_addr) {
dev_warn(adev->dev, "Failed to alloc UMC error address record in mca2pa conversion!\n");
return -ENOMEM;
@@ -3375,7 +3375,7 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
return 0;
bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
bps = kzalloc_objs(*bps, control->ras_num_recs, GFP_KERNEL);
if (!bps)
return -ENOMEM;
@@ -3863,7 +3863,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
return 0;
data = &con->eh_data;
*data = kzalloc(sizeof(**data), GFP_KERNEL);
*data = kzalloc_obj(**data, GFP_KERNEL);
if (!*data) {
ret = -ENOMEM;
goto out;
@@ -4499,7 +4499,7 @@ int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
/* Those are the cached values at init.
*/
query_info = kzalloc(sizeof(*query_info), GFP_KERNEL);
query_info = kzalloc_obj(*query_info, GFP_KERNEL);
if (!query_info)
return -ENOMEM;
memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
@@ -5188,7 +5188,7 @@ int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
if (!adev || !ras_block_obj)
return -EINVAL;
ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
ras_node = kzalloc_obj(*ras_node, GFP_KERNEL);
if (!ras_node)
return -ENOMEM;
@@ -5389,7 +5389,7 @@ static struct ras_err_node *amdgpu_ras_error_node_new(void)
{
struct ras_err_node *err_node;
err_node = kvzalloc(sizeof(*err_node), GFP_KERNEL);
err_node = kvzalloc_obj(*err_node, GFP_KERNEL);
if (!err_node)
return NULL;
@@ -5682,7 +5682,7 @@ int amdgpu_ras_add_critical_region(struct amdgpu_device *adev,
/* Record new critical amdgpu bo */
list_for_each_entry(block, &vres->blocks, link) {
region = kzalloc(sizeof(*region), GFP_KERNEL);
region = kzalloc_obj(*region, GFP_KERNEL);
if (!region) {
ret = -ENOMEM;
goto out;

View File

@@ -276,7 +276,7 @@ struct amdgpu_reset_domain *amdgpu_reset_create_reset_domain(enum amdgpu_reset_d
{
struct amdgpu_reset_domain *reset_domain;
reset_domain = kvzalloc(sizeof(struct amdgpu_reset_domain), GFP_KERNEL);
reset_domain = kvzalloc_obj(struct amdgpu_reset_domain, GFP_KERNEL);
if (!reset_domain) {
DRM_ERROR("Failed to allocate amdgpu_reset_domain!");
return NULL;

View File

@@ -507,13 +507,13 @@ static ssize_t amdgpu_ras_cper_debugfs_read(struct file *f, char __user *buf,
const uint8_t ring_header_size = 12;
struct amdgpu_ring *ring = file_inode(f)->i_private;
struct ras_cmd_cper_snapshot_req *snapshot_req __free(kfree) =
kzalloc(sizeof(struct ras_cmd_cper_snapshot_req), GFP_KERNEL);
kzalloc_obj(struct ras_cmd_cper_snapshot_req, GFP_KERNEL);
struct ras_cmd_cper_snapshot_rsp *snapshot_rsp __free(kfree) =
kzalloc(sizeof(struct ras_cmd_cper_snapshot_rsp), GFP_KERNEL);
kzalloc_obj(struct ras_cmd_cper_snapshot_rsp, GFP_KERNEL);
struct ras_cmd_cper_record_req *record_req __free(kfree) =
kzalloc(sizeof(struct ras_cmd_cper_record_req), GFP_KERNEL);
kzalloc_obj(struct ras_cmd_cper_record_req, GFP_KERNEL);
struct ras_cmd_cper_record_rsp *record_rsp __free(kfree) =
kzalloc(sizeof(struct ras_cmd_cper_record_rsp), GFP_KERNEL);
kzalloc_obj(struct ras_cmd_cper_record_rsp, GFP_KERNEL);
uint8_t *ring_header __free(kfree) =
kzalloc(ring_header_size, GFP_KERNEL);
uint32_t total_cper_num;

View File

@@ -153,7 +153,8 @@ int amdgpu_ring_mux_init(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring,
mux->real_ring = ring;
mux->num_ring_entries = 0;
mux->ring_entry = kcalloc(entry_size, sizeof(struct amdgpu_mux_entry), GFP_KERNEL);
mux->ring_entry = kzalloc_objs(struct amdgpu_mux_entry, entry_size,
GFP_KERNEL);
if (!mux->ring_entry)
return -ENOMEM;

View File

@@ -1122,7 +1122,7 @@ int amdgpu_ttm_mmio_remap_alloc_sgt(struct amdgpu_device *adev,
phys = adev->rmmio_remap.bus_addr + cur.start;
/* Build a single-entry sg_table mapped as I/O (no struct page backing). */
*sgt = kzalloc(sizeof(**sgt), GFP_KERNEL);
*sgt = kzalloc_obj(**sgt, GFP_KERNEL);
if (!*sgt)
return -ENOMEM;
r = sg_alloc_table(*sgt, 1, GFP_KERNEL);
@@ -1172,7 +1172,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
struct amdgpu_ttm_tt *gtt;
enum ttm_caching caching;
gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
gtt = kzalloc_obj(struct amdgpu_ttm_tt, GFP_KERNEL);
if (!gtt)
return NULL;
@@ -1213,7 +1213,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
if (gtt->userptr) {
ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
ttm->sg = kzalloc_obj(struct sg_table, GFP_KERNEL);
if (!ttm->sg)
return -ENOMEM;
return 0;
@@ -1880,9 +1880,9 @@ static int amdgpu_ttm_pools_init(struct amdgpu_device *adev)
if (!adev->gmc.is_app_apu || !adev->gmc.num_mem_partitions)
return 0;
adev->mman.ttm_pools = kcalloc(adev->gmc.num_mem_partitions,
sizeof(*adev->mman.ttm_pools),
GFP_KERNEL);
adev->mman.ttm_pools = kzalloc_objs(*adev->mman.ttm_pools,
adev->gmc.num_mem_partitions,
GFP_KERNEL);
if (!adev->mman.ttm_pools)
return -ENOMEM;

View File

@@ -58,8 +58,8 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
return ret;
err_data.err_addr =
kcalloc(adev->umc.max_ras_err_cnt_per_query,
sizeof(struct eeprom_table_record), GFP_KERNEL);
kzalloc_objs(struct eeprom_table_record,
adev->umc.max_ras_err_cnt_per_query, GFP_KERNEL);
if (!err_data.err_addr) {
dev_warn(adev->dev,
"Failed to alloc memory for umc error record in MCA notifier!\n");
@@ -105,8 +105,8 @@ void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
amdgpu_ras_get_error_query_mode(adev, &error_query_mode);
err_data->err_addr =
kcalloc(adev->umc.max_ras_err_cnt_per_query,
sizeof(struct eeprom_table_record), GFP_KERNEL);
kzalloc_objs(struct eeprom_table_record,
adev->umc.max_ras_err_cnt_per_query, GFP_KERNEL);
/* still call query_ras_error_address to clear error status
* even NOMEM error is encountered
@@ -131,8 +131,9 @@ void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
adev->umc.ras->ras_block.hw_ops->query_ras_error_address &&
adev->umc.max_ras_err_cnt_per_query) {
err_data->err_addr =
kcalloc(adev->umc.max_ras_err_cnt_per_query,
sizeof(struct eeprom_table_record), GFP_KERNEL);
kzalloc_objs(struct eeprom_table_record,
adev->umc.max_ras_err_cnt_per_query,
GFP_KERNEL);
/* still call query_ras_error_address to clear error status
* even NOMEM error is encountered
@@ -161,8 +162,9 @@ void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
adev->umc.ras->ecc_info_query_ras_error_address &&
adev->umc.max_ras_err_cnt_per_query) {
err_data->err_addr =
kcalloc(adev->umc.max_ras_err_cnt_per_query,
sizeof(struct eeprom_table_record), GFP_KERNEL);
kzalloc_objs(struct eeprom_table_record,
adev->umc.max_ras_err_cnt_per_query,
GFP_KERNEL);
/* still call query_ras_error_address to clear error status
* even NOMEM error is encountered
@@ -551,8 +553,8 @@ int amdgpu_umc_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
int i, ret;
struct ras_err_data err_data;
err_data.err_addr = kcalloc(adev->umc.retire_unit,
sizeof(struct eeprom_table_record), GFP_KERNEL);
err_data.err_addr = kzalloc_objs(struct eeprom_table_record,
adev->umc.retire_unit, GFP_KERNEL);
if (!err_data.err_addr) {
dev_warn(adev->dev, "Failed to alloc memory in bad page lookup!\n");
return 0;

View File

@@ -217,7 +217,7 @@ static int amdgpu_userq_buffer_va_list_add(struct amdgpu_usermode_queue *queue,
struct amdgpu_userq_va_cursor *va_cursor;
struct userq_va_list;
va_cursor = kzalloc(sizeof(*va_cursor), GFP_KERNEL);
va_cursor = kzalloc_obj(*va_cursor, GFP_KERNEL);
if (!va_cursor)
return -ENOMEM;
@@ -781,7 +781,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
goto unlock;
}
queue = kzalloc(sizeof(struct amdgpu_usermode_queue), GFP_KERNEL);
queue = kzalloc_obj(struct amdgpu_usermode_queue, GFP_KERNEL);
if (!queue) {
drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
r = -ENOMEM;

View File

@@ -82,7 +82,7 @@ int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
unsigned long flags;
int r;
fence_drv = kzalloc(sizeof(*fence_drv), GFP_KERNEL);
fence_drv = kzalloc_obj(*fence_drv, GFP_KERNEL);
if (!fence_drv)
return -ENOMEM;
@@ -266,9 +266,8 @@ static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
count++;
userq_fence->fence_drv_array =
kvmalloc_array(count,
sizeof(struct amdgpu_userq_fence_driver *),
GFP_ATOMIC);
kvmalloc_objs(struct amdgpu_userq_fence_driver *, count,
GFP_ATOMIC);
if (userq_fence->fence_drv_array) {
xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv) {

View File

@@ -294,15 +294,15 @@ static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
void *bps = NULL;
struct amdgpu_bo **bps_bo = NULL;
*data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL);
*data = kmalloc_obj(struct amdgpu_virt_ras_err_handler_data, GFP_KERNEL);
if (!*data)
goto data_failure;
bps = kmalloc_array(align_space, sizeof(*(*data)->bps), GFP_KERNEL);
bps = kmalloc_objs(*(*data)->bps, align_space, GFP_KERNEL);
if (!bps)
goto bps_failure;
bps_bo = kmalloc_array(align_space, sizeof(*(*data)->bps_bo), GFP_KERNEL);
bps_bo = kmalloc_objs(*(*data)->bps_bo, align_space, GFP_KERNEL);
if (!bps_bo)
goto bps_bo_failure;
@@ -966,7 +966,8 @@ int amdgpu_virt_init_critical_region(struct amdgpu_device *adev)
}
/* Allocate for init_data_hdr */
init_data_hdr = kzalloc(sizeof(struct amd_sriov_msg_init_data_header), GFP_KERNEL);
init_data_hdr = kzalloc_obj(struct amd_sriov_msg_init_data_header,
GFP_KERNEL);
if (!init_data_hdr)
return -ENOMEM;

View File

@@ -411,7 +411,7 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
struct drm_plane *plane;
int ret;
plane = kzalloc(sizeof(*plane), GFP_KERNEL);
plane = kzalloc_obj(*plane, GFP_KERNEL);
if (!plane)
return ERR_PTR(-ENOMEM);
@@ -499,8 +499,9 @@ static int amdgpu_vkms_sw_init(struct amdgpu_ip_block *ip_block)
int r, i;
struct amdgpu_device *adev = ip_block->adev;
adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc,
sizeof(struct amdgpu_vkms_output), GFP_KERNEL);
adev->amdgpu_vkms_output = kzalloc_objs(struct amdgpu_vkms_output,
adev->mode_info.num_crtc,
GFP_KERNEL);
if (!adev->amdgpu_vkms_output)
return -ENOMEM;

View File

@@ -1118,7 +1118,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (!drm_dev_enter(adev_to_drm(adev), &idx))
return -ENODEV;
tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
tlb_cb = kmalloc_obj(*tlb_cb, GFP_KERNEL);
if (!tlb_cb) {
drm_dev_exit(idx);
return -ENOMEM;
@@ -1471,7 +1471,7 @@ static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
if (!adev->gmc.gmc_funcs->set_prt)
return;
cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
cb = kmalloc_obj(struct amdgpu_prt_cb, GFP_KERNEL);
if (!cb) {
/* Last resort when we are OOM */
if (fence)
@@ -1737,7 +1737,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
amdgpu_vm_assert_locked(vm);
bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
bo_va = kzalloc_obj(struct amdgpu_bo_va, GFP_KERNEL);
if (bo_va == NULL) {
return NULL;
}
@@ -1866,7 +1866,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
return -EINVAL;
}
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
mapping = kmalloc_obj(*mapping, GFP_KERNEL);
if (!mapping)
return -ENOMEM;
@@ -1913,7 +1913,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
return r;
/* Allocate all the needed memory */
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
mapping = kmalloc_obj(*mapping, GFP_KERNEL);
if (!mapping)
return -ENOMEM;
@@ -2033,12 +2033,12 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
/* Allocate all the needed memory */
before = kzalloc(sizeof(*before), GFP_KERNEL);
before = kzalloc_obj(*before, GFP_KERNEL);
if (!before)
return -ENOMEM;
INIT_LIST_HEAD(&before->list);
after = kzalloc(sizeof(*after), GFP_KERNEL);
after = kzalloc_obj(*after, GFP_KERNEL);
if (!after) {
kfree(before);
return -ENOMEM;
@@ -2533,7 +2533,7 @@ amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid)
static int amdgpu_vm_create_task_info(struct amdgpu_vm *vm)
{
vm->task_info = kzalloc(sizeof(struct amdgpu_task_info), GFP_KERNEL);
vm->task_info = kzalloc_obj(struct amdgpu_task_info, GFP_KERNEL);
if (!vm->task_info)
return -ENOMEM;

View File

@@ -80,7 +80,7 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm
{
struct amdgpu_tlb_fence *f;
f = kmalloc(sizeof(*f), GFP_KERNEL);
f = kmalloc_obj(*f, GFP_KERNEL);
if (!f) {
/*
* We can't fail since the PDEs and PTEs are already updated, so

View File

@@ -340,7 +340,7 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
{
struct amdgpu_vram_reservation *rsv;
rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
rsv = kzalloc_obj(*rsv, GFP_KERNEL);
if (!rsv)
return -ENOMEM;
@@ -478,7 +478,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
tbo->page_alignment);
}
vres = kzalloc(sizeof(*vres), GFP_KERNEL);
vres = kzalloc_obj(*vres, GFP_KERNEL);
if (!vres)
return -ENOMEM;
@@ -684,7 +684,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
int num_entries = 0;
int i, r;
*sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
*sgt = kmalloc_obj(**sgt, GFP_KERNEL);
if (!*sgt)
return -ENOMEM;

View File

@@ -334,7 +334,7 @@ int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
if (!xcp_funcs || !xcp_funcs->get_ip_details)
return -EINVAL;
xcp_mgr = kzalloc(sizeof(*xcp_mgr), GFP_KERNEL);
xcp_mgr = kzalloc_obj(*xcp_mgr, GFP_KERNEL);
if (!xcp_mgr)
return -ENOMEM;
@@ -907,7 +907,7 @@ static void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev)
if (!adev->xcp_mgr)
return;
xcp_cfg = kzalloc(sizeof(*xcp_cfg), GFP_KERNEL);
xcp_cfg = kzalloc_obj(*xcp_cfg, GFP_KERNEL);
if (!xcp_cfg)
return;
xcp_cfg->xcp_mgr = adev->xcp_mgr;

View File

@@ -690,7 +690,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
goto pro_end;
}
hive = kzalloc(sizeof(*hive), GFP_KERNEL);
hive = kzalloc_obj(*hive, GFP_KERNEL);
if (!hive) {
dev_err(adev->dev, "XGMI: allocation failed\n");
ret = -ENOMEM;

View File

@@ -1524,7 +1524,7 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
{
int base;
struct atom_context *ctx =
kzalloc(sizeof(struct atom_context), GFP_KERNEL);
kzalloc_obj(struct atom_context, GFP_KERNEL);
struct _ATOM_ROM_HEADER *atom_rom_header;
struct _ATOM_MASTER_DATA_TABLE *master_table;
struct _ATOM_FIRMWARE_INFO *atom_fw_info;

View File

@@ -191,7 +191,7 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode
goto register_acpi_backlight;
}
pdata = kmalloc(sizeof(struct amdgpu_backlight_privdata), GFP_KERNEL);
pdata = kmalloc_obj(struct amdgpu_backlight_privdata, GFP_KERNEL);
if (!pdata) {
DRM_ERROR("Memory allocation failed\n");
goto error;
@@ -1980,7 +1980,7 @@ amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder)
lvds_info =
(union lvds_info *)(mode_info->atom_context->bios + data_offset);
lvds =
kzalloc(sizeof(struct amdgpu_encoder_atom_dig), GFP_KERNEL);
kzalloc_obj(struct amdgpu_encoder_atom_dig, GFP_KERNEL);
if (!lvds)
return NULL;
@@ -2107,7 +2107,8 @@ struct amdgpu_encoder_atom_dig *
amdgpu_atombios_encoder_get_dig_info(struct amdgpu_encoder *amdgpu_encoder)
{
int encoder_enum = (amdgpu_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
struct amdgpu_encoder_atom_dig *dig = kzalloc(sizeof(struct amdgpu_encoder_atom_dig), GFP_KERNEL);
struct amdgpu_encoder_atom_dig *dig = kzalloc_obj(struct amdgpu_encoder_atom_dig,
GFP_KERNEL);
if (!dig)
return NULL;

View File

@@ -1775,7 +1775,8 @@ static int dce_v10_0_afmt_init(struct amdgpu_device *adev)
/* DCE10 has audio blocks tied to DIG encoders */
for (i = 0; i < adev->mode_info.num_dig; i++) {
adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
adev->mode_info.afmt[i] = kzalloc_obj(struct amdgpu_afmt,
GFP_KERNEL);
if (adev->mode_info.afmt[i]) {
adev->mode_info.afmt[i]->offset = dig_offsets[i];
adev->mode_info.afmt[i]->id = i;
@@ -3516,7 +3517,7 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
}
/* add a new one */
amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
amdgpu_encoder = kzalloc_obj(struct amdgpu_encoder, GFP_KERNEL);
if (!amdgpu_encoder)
return;

View File

@@ -1818,7 +1818,8 @@ static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
/* DCE6 has audio blocks tied to DIG encoders */
for (i = 0; i < adev->mode_info.num_dig; i++) {
adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
adev->mode_info.afmt[i] = kzalloc_obj(struct amdgpu_afmt,
GFP_KERNEL);
if (adev->mode_info.afmt[i]) {
adev->mode_info.afmt[i]->offset = dig_offsets[i];
adev->mode_info.afmt[i]->id = i;
@@ -3413,7 +3414,7 @@ static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
}
/* add a new one */
amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
amdgpu_encoder = kzalloc_obj(struct amdgpu_encoder, GFP_KERNEL);
if (!amdgpu_encoder)
return;

View File

@@ -1722,7 +1722,8 @@ static int dce_v8_0_afmt_init(struct amdgpu_device *adev)
/* DCE8 has audio blocks tied to DIG encoders */
for (i = 0; i < adev->mode_info.num_dig; i++) {
adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
adev->mode_info.afmt[i] = kzalloc_obj(struct amdgpu_afmt,
GFP_KERNEL);
if (adev->mode_info.afmt[i]) {
adev->mode_info.afmt[i]->offset = dig_offsets[i];
adev->mode_info.afmt[i]->id = i;
@@ -3424,7 +3425,7 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
}
/* add a new one */
amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
amdgpu_encoder = kzalloc_obj(struct amdgpu_encoder, GFP_KERNEL);
if (!amdgpu_encoder)
return;

View File

@@ -1064,8 +1064,8 @@ static int gmc_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vm_manager.vram_base_offset = 0;
}
adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
GFP_KERNEL);
adev->gmc.vm_fault_info = kmalloc_obj(struct kfd_vm_fault_info,
GFP_KERNEL);
if (!adev->gmc.vm_fault_info)
return -ENOMEM;
atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);

View File

@@ -1179,8 +1179,8 @@ static int gmc_v8_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vm_manager.vram_base_offset = 0;
}
adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
GFP_KERNEL);
adev->gmc.vm_fault_info = kmalloc_obj(struct kfd_vm_fault_info,
GFP_KERNEL);
if (!adev->gmc.vm_fault_info)
return -ENOMEM;
atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);

View File

@@ -50,7 +50,7 @@ static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp)
isp_base = adev->rmmio_base;
isp->isp_cell = kcalloc(3, sizeof(struct mfd_cell), GFP_KERNEL);
isp->isp_cell = kzalloc_objs(struct mfd_cell, 3, GFP_KERNEL);
if (!isp->isp_cell) {
r = -ENOMEM;
drm_err(&adev->ddev,
@@ -59,8 +59,7 @@ static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp)
}
num_res = MAX_ISP410_MEM_RES + MAX_ISP410_INT_SRC;
isp->isp_res = kcalloc(num_res, sizeof(struct resource),
GFP_KERNEL);
isp->isp_res = kzalloc_objs(struct resource, num_res, GFP_KERNEL);
if (!isp->isp_res) {
r = -ENOMEM;
drm_err(&adev->ddev,
@@ -68,7 +67,7 @@ static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp)
goto failure;
}
isp->isp_pdata = kzalloc(sizeof(*isp->isp_pdata), GFP_KERNEL);
isp->isp_pdata = kzalloc_obj(*isp->isp_pdata, GFP_KERNEL);
if (!isp->isp_pdata) {
r = -ENOMEM;
drm_err(&adev->ddev,
@@ -107,7 +106,7 @@ static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp)
isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data);
/* initialize isp i2c platform data */
isp->isp_i2c_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
isp->isp_i2c_res = kzalloc_objs(struct resource, 1, GFP_KERNEL);
if (!isp->isp_i2c_res) {
r = -ENOMEM;
drm_err(&adev->ddev,
@@ -127,7 +126,7 @@ static int isp_v4_1_0_hw_init(struct amdgpu_isp *isp)
isp->isp_cell[1].pdata_size = sizeof(struct isp_platform_data);
/* initialize isp gpiochip platform data */
isp->isp_gpio_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
isp->isp_gpio_res = kzalloc_objs(struct resource, 1, GFP_KERNEL);
if (!isp->isp_gpio_res) {
r = -ENOMEM;
drm_err(&adev->ddev,

View File

@@ -259,7 +259,7 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
return -EINVAL;
}
isp->isp_cell = kcalloc(3, sizeof(struct mfd_cell), GFP_KERNEL);
isp->isp_cell = kzalloc_objs(struct mfd_cell, 3, GFP_KERNEL);
if (!isp->isp_cell) {
r = -ENOMEM;
drm_err(&adev->ddev, "isp mfd cell alloc failed (%d)\n", r);
@@ -268,15 +268,14 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
num_res = MAX_ISP411_MEM_RES + MAX_ISP411_INT_SRC;
isp->isp_res = kcalloc(num_res, sizeof(struct resource),
GFP_KERNEL);
isp->isp_res = kzalloc_objs(struct resource, num_res, GFP_KERNEL);
if (!isp->isp_res) {
r = -ENOMEM;
drm_err(&adev->ddev, "isp mfd resource alloc failed (%d)\n", r);
goto failure;
}
isp->isp_pdata = kzalloc(sizeof(*isp->isp_pdata), GFP_KERNEL);
isp->isp_pdata = kzalloc_obj(*isp->isp_pdata, GFP_KERNEL);
if (!isp->isp_pdata) {
r = -ENOMEM;
drm_err(&adev->ddev, "isp platform data alloc failed (%d)\n", r);
@@ -318,7 +317,7 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data);
/* initialize isp i2c platform data */
isp->isp_i2c_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
isp->isp_i2c_res = kzalloc_objs(struct resource, 1, GFP_KERNEL);
if (!isp->isp_i2c_res) {
r = -ENOMEM;
drm_err(&adev->ddev, "isp mfd res alloc failed (%d)\n", r);
@@ -337,7 +336,7 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_cell[1].pdata_size = sizeof(struct isp_platform_data);
/* initialize isp gpiochip platform data */
isp->isp_gpio_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
isp->isp_gpio_res = kzalloc_objs(struct resource, 1, GFP_KERNEL);
if (!isp->isp_gpio_res) {
r = -ENOMEM;
drm_err(&adev->ddev, "isp gpio resource alloc failed (%d)\n", r);

View File

@@ -283,7 +283,7 @@ static int mes_userq_mqd_create(struct amdgpu_usermode_queue *queue,
int r;
/* Structure to initialize MQD for userqueue using generic MQD init function */
userq_props = kzalloc(sizeof(struct amdgpu_mqd_prop), GFP_KERNEL);
userq_props = kzalloc_obj(struct amdgpu_mqd_prop, GFP_KERNEL);
if (!userq_props) {
DRM_ERROR("Failed to allocate memory for userq_props\n");
return -ENOMEM;

View File

@@ -273,7 +273,7 @@ int sienna_cichlid_reset_init(struct amdgpu_device *adev)
{
struct amdgpu_reset_control *reset_ctl;
reset_ctl = kzalloc(sizeof(*reset_ctl), GFP_KERNEL);
reset_ctl = kzalloc_obj(*reset_ctl, GFP_KERNEL);
if (!reset_ctl)
return -ENOMEM;

View File

@@ -270,7 +270,7 @@ int smu_v13_0_10_reset_init(struct amdgpu_device *adev)
{
struct amdgpu_reset_control *reset_ctl;
reset_ctl = kzalloc(sizeof(*reset_ctl), GFP_KERNEL);
reset_ctl = kzalloc_obj(*reset_ctl, GFP_KERNEL);
if (!reset_ctl)
return -ENOMEM;

View File

@@ -567,7 +567,7 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
if (ret)
return ret;
ecc_err = kzalloc(sizeof(*ecc_err), GFP_KERNEL);
ecc_err = kzalloc_obj(*ecc_err, GFP_KERNEL);
if (!ecc_err)
return -ENOMEM;