Merge tag 'fs_for_v7.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

Pull ext2, udf, quota updates from Jan Kara:

 - A fix for a race in quota code that can expose ocfs2 to
   use-after-free issues

 - UDF fix to avoid memory corruption in the face of a corrupted format

 - Couple of ext2 fixes for better handling of fs corruption

 - Some more various code cleanups in UDF & ext2

* tag 'fs_for_v7.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
  ext2: reject inodes with zero i_nlink and valid mode in ext2_iget()
  ext2: use get_random_u32() where appropriate
  quota: Fix race of dquot_scan_active() with quota deactivation
  udf: fix partition descriptor append bookkeeping
  ext2: avoid drop_nlink() during unlink of zero-nlink inode in ext2_unlink()
  ext2: guard reservation window dump with EXT2FS_DEBUG
  ext2: replace BUG_ON with WARN_ON_ONCE in ext2_get_blocks
  ext2: remove stale TODO about kmap
  fs: udf: avoid assignment in condition when selecting allocation goal
This commit is contained in:
Linus Torvalds
2026-04-15 19:22:16 -07:00
8 changed files with 59 additions and 33 deletions

View File

@@ -201,7 +201,7 @@ static void group_adjust_blocks(struct super_block *sb, int group_no,
* windows(start, end). Otherwise, it will only print out the "bad" windows,
* those windows that overlap with their immediate neighbors.
*/
#if 1
#ifdef EXT2FS_DEBUG
static void __rsv_window_dump(struct rb_root *root, int verbose,
const char *fn)
{
@@ -248,7 +248,7 @@ restart:
__rsv_window_dump((root), (verbose), __func__)
#else
#define rsv_window_dump(root, verbose) do {} while (0)
#endif
#endif /* EXT2FS_DEBUG */
/**
* goal_in_my_reservation()

View File

@@ -639,7 +639,8 @@ static int ext2_get_blocks(struct inode *inode,
int count = 0;
ext2_fsblk_t first_block = 0;
BUG_ON(maxblocks == 0);
if (WARN_ON_ONCE(maxblocks == 0))
return -EINVAL;
depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
@@ -1433,9 +1434,17 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
* the test is that same one that e2fsck uses
* NeilBrown 1999oct15
*/
if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) {
/* this inode is deleted */
ret = -ESTALE;
if (inode->i_nlink == 0) {
if (inode->i_mode == 0 || ei->i_dtime) {
/* this inode is deleted */
ret = -ESTALE;
} else {
ext2_error(sb, __func__,
"inode %lu has zero i_nlink with mode 0%o and no dtime, "
"filesystem may be corrupt",
ino, inode->i_mode);
ret = -EFSCORRUPTED;
}
goto bad_inode;
}
inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);

View File

@@ -14,8 +14,6 @@
*
* The only non-static object here is ext2_dir_inode_operations.
*
* TODO: get rid of kmap() use, add readahead.
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
@@ -293,7 +291,10 @@ static int ext2_unlink(struct inode *dir, struct dentry *dentry)
goto out;
inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
inode_dec_link_count(inode);
if (inode->i_nlink)
inode_dec_link_count(inode);
err = 0;
out:
return err;

View File

@@ -1152,7 +1152,7 @@ static int ext2_fill_super(struct super_block *sb, struct fs_context *fc)
goto failed_mount2;
}
sbi->s_gdb_count = db_count;
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
sbi->s_next_generation = get_random_u32();
spin_lock_init(&sbi->s_next_gen_lock);
/* per filesystem reservation list head & lock */

View File

@@ -363,6 +363,31 @@ static inline int dquot_active(struct dquot *dquot)
return test_bit(DQ_ACTIVE_B, &dquot->dq_flags);
}
static struct dquot *__dqgrab(struct dquot *dquot)
{
lockdep_assert_held(&dq_list_lock);
if (!atomic_read(&dquot->dq_count))
remove_free_dquot(dquot);
atomic_inc(&dquot->dq_count);
return dquot;
}
/*
* Get reference to dquot when we got pointer to it by some other means. The
* dquot has to be active and the caller has to make sure it cannot get
* deactivated under our hands.
*/
struct dquot *dqgrab(struct dquot *dquot)
{
spin_lock(&dq_list_lock);
WARN_ON_ONCE(!dquot_active(dquot));
dquot = __dqgrab(dquot);
spin_unlock(&dq_list_lock);
return dquot;
}
EXPORT_SYMBOL_GPL(dqgrab);
static inline int dquot_dirty(struct dquot *dquot)
{
return test_bit(DQ_MOD_B, &dquot->dq_flags);
@@ -641,15 +666,14 @@ int dquot_scan_active(struct super_block *sb,
continue;
if (dquot->dq_sb != sb)
continue;
/* Now we have active dquot so we can just increase use count */
atomic_inc(&dquot->dq_count);
__dqgrab(dquot);
spin_unlock(&dq_list_lock);
dqput(old_dquot);
old_dquot = dquot;
/*
* ->release_dquot() can be racing with us. Our reference
* protects us from new calls to it so just wait for any
* outstanding call and recheck the DQ_ACTIVE_B after that.
* protects us from dquot_release() proceeding so just wait for
* any outstanding call and recheck the DQ_ACTIVE_B after that.
*/
wait_on_dquot(dquot);
if (dquot_active(dquot)) {
@@ -717,7 +741,7 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
/* Now we have active dquot from which someone is
* holding reference so we can safely just increase
* use count */
dqgrab(dquot);
__dqgrab(dquot);
spin_unlock(&dq_list_lock);
err = dquot_write_dquot(dquot);
if (err && !ret)
@@ -963,9 +987,7 @@ we_slept:
spin_unlock(&dq_list_lock);
dqstats_inc(DQST_LOOKUPS);
} else {
if (!atomic_read(&dquot->dq_count))
remove_free_dquot(dquot);
atomic_inc(&dquot->dq_count);
__dqgrab(dquot);
spin_unlock(&dq_list_lock);
dqstats_inc(DQST_CACHE_HITS);
dqstats_inc(DQST_LOOKUPS);

View File

@@ -733,7 +733,7 @@ static int inode_getblk(struct inode *inode, struct udf_map_rq *map)
sector_t offset = 0;
int8_t etype, tmpetype;
struct udf_inode_info *iinfo = UDF_I(inode);
udf_pblk_t goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
udf_pblk_t goal = 0, pgoal = 0;
int lastblock = 0;
bool isBeyondEOF = false;
int ret = 0;
@@ -892,11 +892,10 @@ static int inode_getblk(struct inode *inode, struct udf_map_rq *map)
else { /* otherwise, allocate a new block */
if (iinfo->i_next_alloc_block == map->lblk)
goal = iinfo->i_next_alloc_goal;
if (!goal) {
if (!(goal = pgoal))
goal = iinfo->i_location.logicalBlockNum + 1;
}
if (!goal)
goal = pgoal;
if (!goal)
goal = iinfo->i_location.logicalBlockNum + 1;
newblocknum = udf_new_block(inode->i_sb, inode,
iinfo->i_location.partitionReferenceNum,

View File

@@ -1695,8 +1695,9 @@ static struct udf_vds_record *handle_partition_descriptor(
return &(data->part_descs_loc[i].rec);
if (data->num_part_descs >= data->size_part_descs) {
struct part_desc_seq_scan_data *new_loc;
unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP);
unsigned int new_size;
new_size = data->num_part_descs + PART_DESC_ALLOC_STEP;
new_loc = kzalloc_objs(*new_loc, new_size);
if (!new_loc)
return ERR_PTR(-ENOMEM);
@@ -1706,6 +1707,7 @@ static struct udf_vds_record *handle_partition_descriptor(
data->part_descs_loc = new_loc;
data->size_part_descs = new_size;
}
data->part_descs_loc[data->num_part_descs].partnum = partnum;
return &(data->part_descs_loc[data->num_part_descs++].rec);
}

View File

@@ -44,14 +44,7 @@ int dquot_initialize(struct inode *inode);
bool dquot_initialize_needed(struct inode *inode);
void dquot_drop(struct inode *inode);
struct dquot *dqget(struct super_block *sb, struct kqid qid);
static inline struct dquot *dqgrab(struct dquot *dquot)
{
/* Make sure someone else has active reference to dquot */
WARN_ON_ONCE(!atomic_read(&dquot->dq_count));
WARN_ON_ONCE(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags));
atomic_inc(&dquot->dq_count);
return dquot;
}
struct dquot *dqgrab(struct dquot *dquot);
static inline bool dquot_is_busy(struct dquot *dquot)
{