Merge tag 'md-6.15-20250312' of https://git.kernel.org/pub/scm/linux/kernel/git/mdraid/linux into for-6.15/block
Merge MD changes from Yu:

"- fix recovery can preempt resync (Li Nan)
 - fix md-bitmap IO limit (Su Yue)
 - fix raid10 discard with REQ_NOWAIT (Xiao Ni)
 - fix raid1 memory leak (Zheng Qixing)
 - fix mddev uaf (Yu Kuai)
 - fix raid1,raid10 IO flags (Yu Kuai)
 - some refactor and cleanup (Yu Kuai)"

* tag 'md-6.15-20250312' of https://git.kernel.org/pub/scm/linux/kernel/git/mdraid/linux:
  md/raid10: wait barrier before returning discard request with REQ_NOWAIT
  md/md-bitmap: fix wrong bitmap_limit for clustermd when write sb
  md/raid1,raid10: don't ignore IO flags
  md/raid5: merge reshape_progress checking inside get_reshape_loc()
  md: fix mddev uaf while iterating all_mddevs list
  md: switch md-cluster to use md_submodle_head
  md: don't export md_cluster_ops
  md/md-cluster: cleanup md_cluster_ops reference
  md: switch personalities to use md_submodule_head
  md: introduce struct md_submodule_head and APIs
  md: only include md-cluster.h if necessary
  md: merge common code into find_pers()
  md/raid1: fix memory leak in raid1_run() if no active rdev
  md: ensure resync is prioritized over recovery
@@ -24,6 +24,7 @@
 #include "raid10.h"
 #include "raid0.h"
 #include "md-bitmap.h"
+#include "md-cluster.h"
 
 /*
  * RAID10 provides a combination of RAID0 and RAID1 functionality.
@@ -1146,8 +1147,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 {
 	struct r10conf *conf = mddev->private;
 	struct bio *read_bio;
-	const enum req_op op = bio_op(bio);
-	const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
 	int max_sectors;
 	struct md_rdev *rdev;
 	char b[BDEVNAME_SIZE];
@@ -1228,7 +1227,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 	read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
 		choose_data_offset(r10_bio, rdev);
 	read_bio->bi_end_io = raid10_end_read_request;
-	read_bio->bi_opf = op | do_sync;
 	if (test_bit(FailFast, &rdev->flags) &&
 	    test_bit(R10BIO_FailFast, &r10_bio->state))
 		read_bio->bi_opf |= MD_FAILFAST;
@@ -1247,10 +1245,6 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
 				  struct bio *bio, bool replacement,
 				  int n_copy)
 {
-	const enum req_op op = bio_op(bio);
-	const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
-	const blk_opf_t do_fua = bio->bi_opf & REQ_FUA;
-	const blk_opf_t do_atomic = bio->bi_opf & REQ_ATOMIC;
 	unsigned long flags;
 	struct r10conf *conf = mddev->private;
 	struct md_rdev *rdev;
@@ -1269,7 +1263,6 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
 	mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +
 			   choose_data_offset(r10_bio, rdev));
 	mbio->bi_end_io = raid10_end_write_request;
-	mbio->bi_opf = op | do_sync | do_fua | do_atomic;
 	if (!replacement && test_bit(FailFast,
 				     &conf->mirrors[devnum].rdev->flags)
 			 && enough(conf, devnum))
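Note on the four hunks above (from "md/raid1,raid10: don't ignore IO flags"): the per-disk read and write bios are clones of the incoming bio, and a clone already carries the caller's full bi_opf, so rebuilding the field from bio_op() plus a few masked flags silently dropped everything else. A minimal sketch of the resulting pattern, assuming the clone is created with bio_alloc_clone() as elsewhere in this driver; the wrapper function itself is made up for illustration:

/* Sketch only, not the mainline code: a per-disk write bio inheriting the
 * caller's flags.  The md identifiers are the ones visible in the hunks. */
static void sketch_clone_keeps_flags(struct mddev *mddev, struct md_rdev *rdev,
				     struct bio *bio)
{
	/* bio_alloc_clone() copies bi_opf from the original bio ... */
	struct bio *mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
					   &mddev->bio_set);

	/* ... so only OR in the flags md adds on top, never rebuild bi_opf. */
	if (test_bit(FailFast, &rdev->flags))
		mbio->bi_opf |= MD_FAILFAST;

	submit_bio_noacct(mbio);
}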
@@ -1355,9 +1348,9 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
 	int error;
 
 	if ((mddev_is_clustered(mddev) &&
-	     md_cluster_ops->area_resyncing(mddev, WRITE,
-					    bio->bi_iter.bi_sector,
-					    bio_end_sector(bio)))) {
+	     mddev->cluster_ops->area_resyncing(mddev, WRITE,
+					    bio->bi_iter.bi_sector,
+					    bio_end_sector(bio)))) {
 		DEFINE_WAIT(w);
 		/* Bail out if REQ_NOWAIT is set for the bio */
 		if (bio->bi_opf & REQ_NOWAIT) {
@@ -1367,7 +1360,7 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
 		for (;;) {
 			prepare_to_wait(&conf->wait_barrier,
 					&w, TASK_IDLE);
-			if (!md_cluster_ops->area_resyncing(mddev, WRITE,
+			if (!mddev->cluster_ops->area_resyncing(mddev, WRITE,
 				 bio->bi_iter.bi_sector, bio_end_sector(bio)))
 				break;
 			schedule();
@@ -1631,11 +1624,10 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
 		return -EAGAIN;
 
-	if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT)) {
+	if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
 		bio_wouldblock_error(bio);
 		return 0;
 	}
-	wait_barrier(conf, false);
 
 	/*
 	 * Check reshape again to avoid reshape happens after checking
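The discard hunk above drops the old "warn and complete" special case: wait_barrier() already takes a nowait argument and returns false rather than sleeping, so a REQ_NOWAIT discard is now failed with bio_wouldblock_error() (BLK_STS_AGAIN) only when the barrier genuinely cannot be taken without blocking. A hedged sketch of that shape; the wrapper name is invented, while wait_barrier(), allow_barrier() and bio_wouldblock_error() are the helpers this file uses:

/* Illustrative only: the general REQ_NOWAIT handling pattern used above. */
static int sketch_handle_discard(struct r10conf *conf, struct bio *bio)
{
	/* With REQ_NOWAIT set, wait_barrier() fails instead of sleeping. */
	if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
		/* Completes the bio with BLK_STS_AGAIN for the submitter. */
		bio_wouldblock_error(bio);
		return 0;
	}

	/* ... barrier held: safe to split and issue the discard ... */

	allow_barrier(conf);
	return 0;
}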
@@ -3716,7 +3708,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 			conf->cluster_sync_low = mddev->curr_resync_completed;
 			raid10_set_cluster_sync_high(conf);
 			/* Send resync message */
-			md_cluster_ops->resync_info_update(mddev,
+			mddev->cluster_ops->resync_info_update(mddev,
 						conf->cluster_sync_low,
 						conf->cluster_sync_high);
 		}
@@ -3749,7 +3741,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 		}
 		if (broadcast_msg) {
 			raid10_set_cluster_sync_high(conf);
-			md_cluster_ops->resync_info_update(mddev,
+			mddev->cluster_ops->resync_info_update(mddev,
 					conf->cluster_sync_low,
 					conf->cluster_sync_high);
 		}
@@ -4543,7 +4535,7 @@ static int raid10_start_reshape(struct mddev *mddev)
 		if (ret)
 			goto abort;
 
-		ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
+		ret = mddev->cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
 		if (ret) {
 			mddev->bitmap_ops->resize(mddev, oldsize, 0, false);
 			goto abort;
@@ -4834,7 +4826,7 @@ read_more:
 			conf->cluster_sync_low = sb_reshape_pos;
 		}
 
-		md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
+		mddev->cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
							  conf->cluster_sync_high);
 	}
 
@@ -4979,7 +4971,7 @@ static void raid10_update_reshape_pos(struct mddev *mddev)
 	struct r10conf *conf = mddev->private;
 	sector_t lo, hi;
 
-	md_cluster_ops->resync_info_get(mddev, &lo, &hi);
+	mddev->cluster_ops->resync_info_get(mddev, &lo, &hi);
 	if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo))
 	    || mddev->reshape_position == MaxSector)
 		conf->reshape_progress = mddev->reshape_position;
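The cluster hunks above are all the same mechanical substitution: callers reach the cluster operations through mddev->cluster_ops instead of the module-global md_cluster_ops pointer, which is what lets the series stop exporting that symbol. A small sketch of the resulting call pattern; the wrapper function is illustrative only, while the guard and the ops call are as in the hunks:

/* Sketch, not md code: per-mddev cluster ops access after this series. */
static void sketch_update_cluster_resync(struct mddev *mddev,
					 sector_t lo, sector_t hi)
{
	/* Non-clustered arrays have no cluster ops; guard before the call. */
	if (!mddev_is_clustered(mddev))
		return;

	mddev->cluster_ops->resync_info_update(mddev, lo, hi);
}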
@@ -5125,9 +5117,13 @@ static void raid10_finish_reshape(struct mddev *mddev)
 
 static struct md_personality raid10_personality =
 {
-	.name		= "raid10",
-	.level		= 10,
-	.owner		= THIS_MODULE,
+	.head = {
+		.type	= MD_PERSONALITY,
+		.id	= ID_RAID10,
+		.name	= "raid10",
+		.owner	= THIS_MODULE,
+	},
+
 	.make_request	= raid10_make_request,
 	.run		= raid10_run,
 	.free		= raid10_free,
@@ -5147,18 +5143,18 @@ static struct md_personality raid10_personality =
 	.update_reshape_pos = raid10_update_reshape_pos,
 };
 
-static int __init raid_init(void)
+static int __init raid10_init(void)
 {
-	return register_md_personality(&raid10_personality);
+	return register_md_submodule(&raid10_personality.head);
 }
 
-static void raid_exit(void)
+static void __exit raid10_exit(void)
 {
-	unregister_md_personality(&raid10_personality);
+	unregister_md_submodule(&raid10_personality.head);
 }
 
-module_init(raid_init);
-module_exit(raid_exit);
+module_init(raid10_init);
+module_exit(raid10_exit);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
 MODULE_ALIAS("md-personality-9"); /* RAID10 */
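For the md_submodule_head conversion shown in the last two hunks: a personality now embeds the common header and hands its address to the new registration helpers instead of calling register_md_personality(). A hedged sketch with a made-up "raid_foo" personality; only the .head layout, MD_PERSONALITY, ID_RAID10, register_md_submodule() and unregister_md_submodule() are taken from the diff itself:

/* Sketch only: the shape a personality takes after this series. */
static struct md_personality raid_foo_personality = {
	.head = {
		.type	= MD_PERSONALITY,	/* submodule type, per the hunk above */
		.id	= ID_RAID10,		/* the submodule's own id value */
		.name	= "raid_foo",
		.owner	= THIS_MODULE,
	},
	/* .make_request, .run, ... personality callbacks as before */
};

static int __init raid_foo_init(void)
{
	/* Registration now goes through the embedded header. */
	return register_md_submodule(&raid_foo_personality.head);
}

static void __exit raid_foo_exit(void)
{
	unregister_md_submodule(&raid_foo_personality.head);
}

module_init(raid_foo_init);
module_exit(raid_foo_exit);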