aio: Stop using i_private_data and i_private_lock

Instead of using i_private_data and i_private_lock, just create aio
inodes carrying the necessary aio-specific fields.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jan Kara <jack@suse.cz>
Link: https://patch.msgid.link/20260326095354.16340-67-jack@suse.cz
Tested-by: syzbot@syzkaller.appspotmail.com
Signed-off-by: Christian Brauner <brauner@kernel.org>
This commit is contained in:
Jan Kara
2026-03-26 10:54:19 +01:00
committed by Christian Brauner
parent 2811f2a82f
commit 3833d335d7

View File

@@ -218,6 +218,17 @@ struct aio_kiocb {
struct eventfd_ctx *ki_eventfd;
};
/*
 * Per-inode state for inodes on the aio pseudo filesystem.  Each aio
 * ring-buffer file gets one of these, replacing the previous use of the
 * generic inode's i_private_data / i_private_lock.
 */
struct aio_inode_info {
	struct inode vfs_inode;
	/* Serializes page migration against kioctx teardown (guards ->ctx). */
	spinlock_t migrate_lock;
	/* Owning kioctx; set to NULL when the context is torn down. */
	struct kioctx *ctx;
};
/* Map a VFS inode embedded in an aio_inode_info back to its container. */
static inline struct aio_inode_info *AIO_I(struct inode *inode)
{
	return container_of(inode, struct aio_inode_info, vfs_inode);
}
/* ------ sysctl variables ------ */
static DEFINE_SPINLOCK(aio_nr_lock);	/* protects updates of aio_nr */
static unsigned long aio_nr;		/* current system wide number of aio requests */
@@ -251,6 +262,7 @@ static void __init aio_sysctl_init(void)
/* Slab caches for kiocbs, kioctxs and aio inodes; created during early boot. */
static struct kmem_cache *kiocb_cachep;
static struct kmem_cache *kioctx_cachep;
static struct kmem_cache *aio_inode_cachep;

/* Internal mount of the aio pseudo filesystem that backs the ring files. */
static struct vfsmount *aio_mnt;
@@ -261,11 +273,12 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
{
struct file *file;
struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
if (IS_ERR(inode))
return ERR_CAST(inode);
inode->i_mapping->a_ops = &aio_ctx_aops;
inode->i_mapping->i_private_data = ctx;
AIO_I(inode)->ctx = ctx;
inode->i_size = PAGE_SIZE * nr_pages;
file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
@@ -275,14 +288,49 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
return file;
}
/*
 * Allocate an inode for the aio pseudo filesystem out of
 * aio_inode_cachep.  The embedded ->ctx starts out NULL; it is set later
 * when the inode is bound to a kioctx.
 *
 * Returns the new VFS inode, or NULL on allocation failure.
 */
static struct inode *aio_alloc_inode(struct super_block *sb)
{
	struct aio_inode_info *info = alloc_inode_sb(sb, aio_inode_cachep, GFP_KERNEL);

	if (!info)
		return NULL;
	info->ctx = NULL;
	return &info->vfs_inode;
}
static void aio_free_inode(struct inode *inode)
{
kmem_cache_free(aio_inode_cachep, AIO_I(inode));
}
/*
 * Superblock operations for the aio pseudo filesystem: inodes are
 * allocated from and freed to aio_inode_cachep so that each inode
 * carries the aio-specific fields of struct aio_inode_info.
 */
static const struct super_operations aio_super_operations = {
	.alloc_inode = aio_alloc_inode,
	.free_inode = aio_free_inode,
	.statfs = simple_statfs,
};
static int aio_init_fs_context(struct fs_context *fc)
{
if (!init_pseudo(fc, AIO_RING_MAGIC))
struct pseudo_fs_context *pfc;
pfc = init_pseudo(fc, AIO_RING_MAGIC);
if (!pfc)
return -ENOMEM;
fc->s_iflags |= SB_I_NOEXEC;
pfc->ops = &aio_super_operations;
return 0;
}
/*
 * Slab constructor for aio_inode_cachep: called when an object is first
 * created in the cache, so only state that must survive free/re-alloc
 * cycles is initialized here.
 */
static void init_once(void *obj)
{
	struct aio_inode_info *info = obj;

	inode_init_once(&info->vfs_inode);
	spin_lock_init(&info->migrate_lock);
}
/* aio_setup
* Creates the slab caches used by the aio routines, panic on
* failure as this is done early during the boot sequence.
@@ -294,6 +342,11 @@ static int __init aio_setup(void)
.init_fs_context = aio_init_fs_context,
.kill_sb = kill_anon_super,
};
aio_inode_cachep = kmem_cache_create("aio_inode_cache",
sizeof(struct aio_inode_info), 0,
(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_ACCOUNT),
init_once);
aio_mnt = kern_mount(&aio_fs);
if (IS_ERR(aio_mnt))
panic("Failed to create aio fs mount.");
@@ -308,17 +361,17 @@ __initcall(aio_setup);
static void put_aio_ring_file(struct kioctx *ctx)
{
struct file *aio_ring_file = ctx->aio_ring_file;
struct address_space *i_mapping;
if (aio_ring_file) {
truncate_setsize(file_inode(aio_ring_file), 0);
struct inode *inode = file_inode(aio_ring_file);
truncate_setsize(inode, 0);
/* Prevent further access to the kioctx from migratepages */
i_mapping = aio_ring_file->f_mapping;
spin_lock(&i_mapping->i_private_lock);
i_mapping->i_private_data = NULL;
spin_lock(&AIO_I(inode)->migrate_lock);
AIO_I(inode)->ctx = NULL;
ctx->aio_ring_file = NULL;
spin_unlock(&i_mapping->i_private_lock);
spin_unlock(&AIO_I(inode)->migrate_lock);
fput(aio_ring_file);
}
@@ -408,13 +461,14 @@ static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
struct folio *src, enum migrate_mode mode)
{
struct kioctx *ctx;
struct aio_inode_info *ai = AIO_I(mapping->host);
unsigned long flags;
pgoff_t idx;
int rc = 0;
/* mapping->i_private_lock here protects against the kioctx teardown. */
spin_lock(&mapping->i_private_lock);
ctx = mapping->i_private_data;
/* ai->migrate_lock here protects against the kioctx teardown. */
spin_lock(&ai->migrate_lock);
ctx = ai->ctx;
if (!ctx) {
rc = -EINVAL;
goto out;
@@ -467,7 +521,7 @@ static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
out_unlock:
mutex_unlock(&ctx->ring_lock);
out:
spin_unlock(&mapping->i_private_lock);
spin_unlock(&ai->migrate_lock);
return rc;
}
#else