block: refactor blkdev_zone_mgmt_ioctl

Split the zone reset case into a separate helper so that the conditional
locking goes away.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://patch.msgid.link/20260327090032.3722065-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Christoph Hellwig
2026-03-27 10:00:32 +01:00
committed by Jens Axboe
parent b774765fb8
commit 539fb773a3

View File

@@ -417,20 +417,32 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
return 0; return 0;
} }
/*
 * Reset a range of zones on @bdev.
 *
 * Takes the inode lock and the invalidate lock unconditionally so that the
 * page cache over the zone range can be invalidated (including dirty pages)
 * before the zones are reset, without racing against page faults re-filling
 * the cache.
 *
 * Returns 0 on success or a negative errno: -EINVAL if the range overflows
 * or extends past the device capacity, otherwise the error from
 * truncate_bdev_range() or blkdev_zone_mgmt().
 */
static int blkdev_reset_zone(struct block_device *bdev, blk_mode_t mode,
		struct blk_zone_range *zrange)
{
	loff_t start, end;
	int ret = -EINVAL;

	inode_lock(bdev->bd_mapping->host);
	filemap_invalidate_lock(bdev->bd_mapping);

	/*
	 * The first check also catches sector + nr_sectors wrapping around,
	 * since the sum is compared against the start sector itself.
	 */
	if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
	    zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
		/* Out of range */
		goto out_unlock;

	/* Invalidate the page cache, including dirty pages. */
	start = zrange->sector << SECTOR_SHIFT;
	end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;
	ret = truncate_bdev_range(bdev, mode, start, end);
	if (ret)
		goto out_unlock;

	ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, zrange->sector,
			zrange->nr_sectors);

out_unlock:
	filemap_invalidate_unlock(bdev->bd_mapping);
	inode_unlock(bdev->bd_mapping->host);
	return ret;
}
/* /*
@@ -443,7 +455,6 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
void __user *argp = (void __user *)arg; void __user *argp = (void __user *)arg;
struct blk_zone_range zrange; struct blk_zone_range zrange;
enum req_op op; enum req_op op;
int ret;
if (!argp) if (!argp)
return -EINVAL; return -EINVAL;
@@ -459,15 +470,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
switch (cmd) { switch (cmd) {
case BLKRESETZONE: case BLKRESETZONE:
op = REQ_OP_ZONE_RESET; return blkdev_reset_zone(bdev, mode, &zrange);
/* Invalidate the page cache, including dirty pages. */
inode_lock(bdev->bd_mapping->host);
filemap_invalidate_lock(bdev->bd_mapping);
ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
if (ret)
goto fail;
break;
case BLKOPENZONE: case BLKOPENZONE:
op = REQ_OP_ZONE_OPEN; op = REQ_OP_ZONE_OPEN;
break; break;
@@ -481,15 +484,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
return -ENOTTY; return -ENOTTY;
} }
ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors); return blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors);
fail:
if (cmd == BLKRESETZONE) {
filemap_invalidate_unlock(bdev->bd_mapping);
inode_unlock(bdev->bd_mapping->host);
}
return ret;
} }
static bool disk_zone_is_last(struct gendisk *disk, struct blk_zone *zone) static bool disk_zone_is_last(struct gendisk *disk, struct blk_zone *zone)