Diffstat (limited to 'drivers/md/raid10.c')
-rw-r--r-- | drivers/md/raid10.c | 102 |
1 file changed, 57 insertions(+), 45 deletions(-)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 6c66357f92f5..4fcfcb350d2b 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -952,7 +952,9 @@ static void flush_pending_writes(struct r10conf *conf)
 static void raise_barrier(struct r10conf *conf, int force)
 {
 	write_seqlock_irq(&conf->resync_lock);
-	BUG_ON(force && !conf->barrier);
+
+	if (WARN_ON_ONCE(force && !conf->barrier))
+		force = false;
 
 	/* Wait until no block IO is waiting (unless 'force') */
 	wait_event_barrier(conf, force || !conf->nr_waiting);
@@ -995,11 +997,15 @@ static bool stop_waiting_barrier(struct r10conf *conf)
 	    (!bio_list_empty(&bio_list[0]) || !bio_list_empty(&bio_list[1])))
 		return true;
 
-	/* move on if recovery thread is blocked by us */
-	if (conf->mddev->thread->tsk == current &&
-	    test_bit(MD_RECOVERY_RUNNING, &conf->mddev->recovery) &&
-	    conf->nr_queued > 0)
+	/*
+	 * move on if io is issued from raid10d(), nr_pending is not released
+	 * from original io(see handle_read_error()). All raise barrier is
+	 * blocked until this io is done.
+	 */
+	if (conf->mddev->thread->tsk == current) {
+		WARN_ON_ONCE(atomic_read(&conf->nr_pending) == 0);
 		return true;
+	}
 
 	return false;
 }
@@ -1244,7 +1250,8 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 	}
 	slot = r10_bio->read_slot;
 
-	if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
+	if (!r10_bio->start_time &&
+	    blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
 		r10_bio->start_time = bio_start_io_acct(bio);
 	read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set);
 
@@ -1574,6 +1581,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
 	r10_bio->sector = bio->bi_iter.bi_sector;
 	r10_bio->state = 0;
 	r10_bio->read_slot = -1;
+	r10_bio->start_time = 0;
 	memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) *
 	       conf->geo.raid_disks);
 
@@ -1626,7 +1634,7 @@ static void raid10_end_discard_request(struct bio *bio)
 	/*
 	 * raid10_remove_disk uses smp_mb to make sure rdev is set to
 	 * replacement before setting replacement to NULL. It can read
-	 * rdev first without barrier protect even replacment is NULL
+	 * rdev first without barrier protect even replacement is NULL
 	 */
 	smp_rmb();
 	rdev = conf->mirrors[dev].rdev;
@@ -2609,11 +2617,22 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 {
 	struct r10conf *conf = mddev->private;
 	int d;
-	struct bio *wbio, *wbio2;
+	struct bio *wbio = r10_bio->devs[1].bio;
+	struct bio *wbio2 = r10_bio->devs[1].repl_bio;
+
+	/* Need to test wbio2->bi_end_io before we call
+	 * submit_bio_noacct as if the former is NULL,
+	 * the latter is free to free wbio2.
+	 */
+	if (wbio2 && !wbio2->bi_end_io)
+		wbio2 = NULL;
 
 	if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
 		fix_recovery_read_error(r10_bio);
-		end_sync_request(r10_bio);
+		if (wbio->bi_end_io)
+			end_sync_request(r10_bio);
+		if (wbio2)
+			end_sync_request(r10_bio);
 		return;
 	}
 
@@ -2622,14 +2641,6 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 	 * and submit the write request
 	 */
 	d = r10_bio->devs[1].devnum;
-	wbio = r10_bio->devs[1].bio;
-	wbio2 = r10_bio->devs[1].repl_bio;
-	/* Need to test wbio2->bi_end_io before we call
-	 * submit_bio_noacct as if the former is NULL,
-	 * the latter is free to free wbio2.
-	 */
-	if (wbio2 && !wbio2->bi_end_io)
-		wbio2 = NULL;
 	if (wbio->bi_end_io) {
 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
 		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
@@ -2978,9 +2989,13 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
 		md_error(mddev, rdev);
 
 	rdev_dec_pending(rdev, mddev);
-	allow_barrier(conf);
 	r10_bio->state = 0;
 	raid10_read_request(mddev, r10_bio->master_bio, r10_bio);
+	/*
+	 * allow_barrier after re-submit to ensure no sync io
+	 * can be issued while regular io pending.
+	 */
+	allow_barrier(conf);
 }
 
 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
@@ -3289,10 +3304,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 	sector_t chunk_mask = conf->geo.chunk_mask;
 	int page_idx = 0;
 
-	if (!mempool_initialized(&conf->r10buf_pool))
-		if (init_resync(conf))
-			return 0;
-
 	/*
 	 * Allow skipping a full rebuild for incremental assembly
 	 * of a clean array, like RAID1 does.
@@ -3308,6 +3319,10 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 		return mddev->dev_sectors - sector_nr;
 	}
 
+	if (!mempool_initialized(&conf->r10buf_pool))
+		if (init_resync(conf))
+			return 0;
+
  skipped:
 	max_sector = mddev->dev_sectors;
 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
@@ -4004,6 +4019,20 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
 	return nc*fc;
 }
 
+static void raid10_free_conf(struct r10conf *conf)
+{
+	if (!conf)
+		return;
+
+	mempool_exit(&conf->r10bio_pool);
+	kfree(conf->mirrors);
+	kfree(conf->mirrors_old);
+	kfree(conf->mirrors_new);
+	safe_put_page(conf->tmppage);
+	bioset_exit(&conf->bio_split);
+	kfree(conf);
+}
+
 static struct r10conf *setup_conf(struct mddev *mddev)
 {
 	struct r10conf *conf = NULL;
@@ -4086,13 +4115,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
 	return conf;
 
  out:
-	if (conf) {
-		mempool_exit(&conf->r10bio_pool);
-		kfree(conf->mirrors);
-		safe_put_page(conf->tmppage);
-		bioset_exit(&conf->bio_split);
-		kfree(conf);
-	}
+	raid10_free_conf(conf);
 	return ERR_PTR(err);
 }
 
@@ -4129,6 +4152,9 @@ static int raid10_run(struct mddev *mddev)
 	if (!conf)
 		goto out;
 
+	mddev->thread = conf->thread;
+	conf->thread = NULL;
+
 	if (mddev_is_clustered(conf->mddev)) {
 		int fc, fo;
 
@@ -4141,9 +4167,6 @@ static int raid10_run(struct mddev *mddev)
 		}
 	}
 
-	mddev->thread = conf->thread;
-	conf->thread = NULL;
-
 	if (mddev->queue) {
 		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
 		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
@@ -4283,10 +4306,7 @@ static int raid10_run(struct mddev *mddev)
 
 out_free_conf:
 	md_unregister_thread(&mddev->thread);
-	mempool_exit(&conf->r10bio_pool);
-	safe_put_page(conf->tmppage);
-	kfree(conf->mirrors);
-	kfree(conf);
+	raid10_free_conf(conf);
 	mddev->private = NULL;
 out:
 	return -EIO;
@@ -4294,15 +4314,7 @@ out:
 
 static void raid10_free(struct mddev *mddev, void *priv)
 {
-	struct r10conf *conf = priv;
-
-	mempool_exit(&conf->r10bio_pool);
-	safe_put_page(conf->tmppage);
-	kfree(conf->mirrors);
-	kfree(conf->mirrors_old);
-	kfree(conf->mirrors_new);
-	bioset_exit(&conf->bio_split);
-	kfree(conf);
+	raid10_free_conf(priv);
 }
 
 static void raid10_quiesce(struct mddev *mddev, int quiesce)
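The largest structural change in this patch is the new raid10_free_conf() helper, which folds three slightly different teardown sequences (the setup_conf() error path, the raid10_run() error path, and raid10_free()) into one NULL-tolerant function, so members such as mirrors_old, mirrors_new and the bio_split bioset are released on every path. The standalone C sketch below only illustrates that pattern outside the kernel; the names (demo_conf, demo_setup_conf, demo_free_conf) and fields are hypothetical stand-ins, not part of raid10.

#include <stdlib.h>

/* Hypothetical stand-in for struct r10conf: a context object with
 * several independently allocated members, any subset of which may
 * have been set up by the time an error occurs. */
struct demo_conf {
	int *mirrors;
	int *mirrors_old;
	int *mirrors_new;
	void *tmppage;
};

/* Single teardown helper shared by error paths and the normal free
 * path -- the same idea as raid10_free_conf(). It tolerates NULL and
 * partially built objects, since free(NULL) is a no-op. */
static void demo_free_conf(struct demo_conf *conf)
{
	if (!conf)
		return;

	free(conf->mirrors);
	free(conf->mirrors_old);
	free(conf->mirrors_new);
	free(conf->tmppage);
	free(conf);
}

/* Constructor whose error path reuses the helper instead of
 * duplicating (and possibly forgetting) individual frees. */
static struct demo_conf *demo_setup_conf(size_t nr_mirrors)
{
	struct demo_conf *conf = calloc(1, sizeof(*conf));

	if (!conf)
		return NULL;

	conf->mirrors = calloc(nr_mirrors, sizeof(*conf->mirrors));
	conf->tmppage = malloc(4096);
	if (!conf->mirrors || !conf->tmppage)
		goto out;

	return conf;

out:
	demo_free_conf(conf);
	return NULL;
}

int main(void)
{
	struct demo_conf *conf = demo_setup_conf(4);

	/* Normal teardown goes through the very same helper. */
	demo_free_conf(conf);
	return 0;
}

Keeping the NULL check inside the helper is what lets the patch also simplify raid10_free(), which receives the private pointer directly and no longer needs its own field-by-field cleanup.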