md: remove recovery_disabled

'recovery_disabled' logic is complex and confusing, originally intended to
preserve the raid in extreme scenarios. It was used in the following cases:
- When sync fails and setting badblocks also fails, kick out non-In_sync
  rdev and block spare rdev from joining to preserve raid [1]
- When the last backup is unavailable, prevent repeated add-remove of spares
  from triggering recovery [2]

The original issues are now resolved:
- Error handlers in all raid types prevent the last rdev from being kicked out
- Disks with failed recovery are marked Faulty and can't re-join

Therefore, remove 'recovery_disabled' as it's no longer needed.

[1] 5389042ffa ("md: change managed of recovery_disabled.")
[2] 4044ba58dd ("md: don't retry recovery of raid1 that fails due to error on source drive.")

Link: https://lore.kernel.org/linux-raid/20260105110300.1442509-13-linan666@huaweicloud.com
Signed-off-by: Li Nan <linan122@huawei.com>
Signed-off-by: Yu Kuai <yukuai@fnnas.com>
This commit is contained in:
Li Nan
2026-01-05 19:03:00 +08:00
committed by Yu Kuai
parent 7435b73f05
commit 5d1dd57929
8 changed files with 4 additions and 51 deletions

View File

@@ -1760,7 +1760,6 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
set_bit(MD_BROKEN, &mddev->flags);
if (!test_bit(MD_FAILLAST_DEV, &mddev->flags)) {
conf->recovery_disabled = mddev->recovery_disabled;
spin_unlock_irqrestore(&conf->device_lock, flags);
return;
}
@@ -1904,7 +1903,6 @@ static bool raid1_remove_conf(struct r1conf *conf, int disk)
/* Only remove non-faulty devices if recovery is not possible. */
if (!test_bit(Faulty, &rdev->flags) &&
rdev->mddev->recovery_disabled != conf->recovery_disabled &&
rdev->mddev->degraded < conf->raid_disks)
return false;
@@ -1924,9 +1922,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
int first = 0;
int last = conf->raid_disks - 1;
if (mddev->recovery_disabled == conf->recovery_disabled)
return -EBUSY;
if (rdev->raid_disk >= 0)
first = last = rdev->raid_disk;
@@ -2346,7 +2341,6 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
*/
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) ||
!fix_sync_read_error(r1_bio)) {
conf->recovery_disabled = mddev->recovery_disabled;
md_done_sync(mddev, r1_bio->sectors);
md_sync_error(mddev);
put_buf(r1_bio);
@@ -2948,16 +2942,12 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
*skipped = 1;
put_buf(r1_bio);
if (!ok) {
/* Cannot record the badblocks, so need to
if (!ok)
/* Cannot record the badblocks, md_error has set INTR,
* abort the resync.
* If there are multiple read targets, could just
* fail the really bad ones ???
*/
conf->recovery_disabled = mddev->recovery_disabled;
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
return 0;
} else
else
return min_bad;
}
@@ -3144,7 +3134,6 @@ static struct r1conf *setup_conf(struct mddev *mddev)
init_waitqueue_head(&conf->wait_barrier);
bio_list_init(&conf->pending_bio_list);
conf->recovery_disabled = mddev->recovery_disabled - 1;
err = -EIO;
for (i = 0; i < conf->raid_disks * 2; i++) {