/*
 * Copyright (C) 2010-2011 Neil Brown
 * Copyright (C) 2010-2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/slab.h>
#include <linux/module.h>

#include "md.h"
#include "raid1.h"
#include "raid5.h"
#include "raid10.h"
#include "bitmap.h"

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "raid"
#define MAX_RAID_DEVICES 253 /* md-raid kernel limit */
/*
 * Minimum sectors of free reshape space per raid device
 */
#define MIN_FREE_RESHAPE_SPACE to_sector(4*4096)
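/*
 * Worked example (illustrative): to_sector() converts bytes to
 * 512-byte sectors, so 4 * 4096 bytes = 16384 bytes = 32 sectors
 * of out-of-place reshape headroom per raid device.
 */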
static bool devices_handle_discard_safely = false;
/*
 * The following flags are used by dm-raid.c to set up the array state.
 * They must be cleared before md_run is called.
 */
#define FirstUse 10 /* rdev flag */
struct raid_dev {
        /*
         * Two DM devices, one to hold metadata and one to hold the
         * actual data/parity. The reason for this is to not confuse
         * ti->len and give more flexibility in altering size and
         * characteristics.
         *
         * While it is possible for this device to be associated
         * with a different physical device than the data_dev, it
         * is intended for it to be the same.
         *    |--------- Physical Device ---------|
         *    |- meta_dev -|------ data_dev ------|
         */
        struct dm_dev *meta_dev;
        struct dm_dev *data_dev;
        struct md_rdev rdev;
};
/*
 * Bits for establishing rs->ctr_flags
 *
 * 1 = no flag value
 * 2 = flag with value
 */
#define __CTR_FLAG_SYNC 0 /* 1 */ /* Not with raid0! */
#define __CTR_FLAG_NOSYNC 1 /* 1 */ /* Not with raid0! */
#define __CTR_FLAG_REBUILD 2 /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_DAEMON_SLEEP 3 /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_MIN_RECOVERY_RATE 4 /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_MAX_RECOVERY_RATE 5 /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_MAX_WRITE_BEHIND 6 /* 2 */ /* Only with raid1! */
#define __CTR_FLAG_WRITE_MOSTLY 7 /* 2 */ /* Only with raid1! */
#define __CTR_FLAG_STRIPE_CACHE 8 /* 2 */ /* Only with raid4/5/6! */
#define __CTR_FLAG_REGION_SIZE 9 /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_RAID10_COPIES 10 /* 2 */ /* Only with raid10 */
#define __CTR_FLAG_RAID10_FORMAT 11 /* 2 */ /* Only with raid10 */
/* New for v1.9.0 */
#define __CTR_FLAG_DELTA_DISKS 12 /* 2 */ /* Only with reshapable raid4/5/6/10! */
#define __CTR_FLAG_DATA_OFFSET 13 /* 2 */ /* Only with reshapable raid4/5/6/10! */
#define __CTR_FLAG_RAID10_USE_NEAR_SETS 14 /* 2 */ /* Only with raid10! */
/*
 * Flags for rs->ctr_flags field.
 */
#define CTR_FLAG_SYNC (1 << __CTR_FLAG_SYNC)
#define CTR_FLAG_NOSYNC (1 << __CTR_FLAG_NOSYNC)
#define CTR_FLAG_REBUILD (1 << __CTR_FLAG_REBUILD)
#define CTR_FLAG_DAEMON_SLEEP (1 << __CTR_FLAG_DAEMON_SLEEP)
#define CTR_FLAG_MIN_RECOVERY_RATE (1 << __CTR_FLAG_MIN_RECOVERY_RATE)
#define CTR_FLAG_MAX_RECOVERY_RATE (1 << __CTR_FLAG_MAX_RECOVERY_RATE)
#define CTR_FLAG_MAX_WRITE_BEHIND (1 << __CTR_FLAG_MAX_WRITE_BEHIND)
#define CTR_FLAG_WRITE_MOSTLY (1 << __CTR_FLAG_WRITE_MOSTLY)
#define CTR_FLAG_STRIPE_CACHE (1 << __CTR_FLAG_STRIPE_CACHE)
#define CTR_FLAG_REGION_SIZE (1 << __CTR_FLAG_REGION_SIZE)
#define CTR_FLAG_RAID10_COPIES (1 << __CTR_FLAG_RAID10_COPIES)
#define CTR_FLAG_RAID10_FORMAT (1 << __CTR_FLAG_RAID10_FORMAT)
#define CTR_FLAG_DELTA_DISKS (1 << __CTR_FLAG_DELTA_DISKS)
#define CTR_FLAG_DATA_OFFSET (1 << __CTR_FLAG_DATA_OFFSET)
#define CTR_FLAG_RAID10_USE_NEAR_SETS (1 << __CTR_FLAG_RAID10_USE_NEAR_SETS)
/*
 * Definitions of various constructor flags to
 * be used in checks of valid / invalid flags
 * per raid level.
 */

/* Define any sync flags */
#define CTR_FLAGS_ANY_SYNC (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)

/* Define flags for options without argument (e.g. 'nosync') */
#define CTR_FLAG_OPTIONS_NO_ARGS (CTR_FLAGS_ANY_SYNC | \
                                  CTR_FLAG_RAID10_USE_NEAR_SETS)
/* Define flags for options with one argument (e.g. 'delta_disks +2') */
#define CTR_FLAG_OPTIONS_ONE_ARG (CTR_FLAG_REBUILD | \
                                  CTR_FLAG_WRITE_MOSTLY | \
                                  CTR_FLAG_DAEMON_SLEEP | \
                                  CTR_FLAG_MIN_RECOVERY_RATE | \
                                  CTR_FLAG_MAX_RECOVERY_RATE | \
                                  CTR_FLAG_MAX_WRITE_BEHIND | \
                                  CTR_FLAG_STRIPE_CACHE | \
                                  CTR_FLAG_REGION_SIZE | \
                                  CTR_FLAG_RAID10_COPIES | \
                                  CTR_FLAG_RAID10_FORMAT | \
                                  CTR_FLAG_DELTA_DISKS | \
                                  CTR_FLAG_DATA_OFFSET)
/* Valid options definitions per raid level... */

/* "raid0" does only accept data offset */
#define RAID0_VALID_FLAGS (CTR_FLAG_DATA_OFFSET)

/* "raid1" does not accept stripe cache, delta_disks or any raid10 options */
#define RAID1_VALID_FLAGS (CTR_FLAGS_ANY_SYNC | \
                           CTR_FLAG_REBUILD | \
                           CTR_FLAG_WRITE_MOSTLY | \
                           CTR_FLAG_DAEMON_SLEEP | \
                           CTR_FLAG_MIN_RECOVERY_RATE | \
                           CTR_FLAG_MAX_RECOVERY_RATE | \
                           CTR_FLAG_MAX_WRITE_BEHIND | \
                           CTR_FLAG_REGION_SIZE | \
                           CTR_FLAG_DATA_OFFSET)

/* "raid10" does not accept any raid1 or stripe cache options */
#define RAID10_VALID_FLAGS (CTR_FLAGS_ANY_SYNC | \
                            CTR_FLAG_REBUILD | \
                            CTR_FLAG_DAEMON_SLEEP | \
                            CTR_FLAG_MIN_RECOVERY_RATE | \
                            CTR_FLAG_MAX_RECOVERY_RATE | \
                            CTR_FLAG_REGION_SIZE | \
                            CTR_FLAG_RAID10_COPIES | \
                            CTR_FLAG_RAID10_FORMAT | \
                            CTR_FLAG_DELTA_DISKS | \
                            CTR_FLAG_DATA_OFFSET | \
                            CTR_FLAG_RAID10_USE_NEAR_SETS)
151 * "raid4/5/6" do not accept any raid1 or raid10 specific options
153 * "raid6" does not accept "nosync", because it is not guaranteed
154 * that both parity and q-syndrome are being written properly with
157 #define RAID45_VALID_FLAGS (CTR_FLAGS_ANY_SYNC | \
159 CTR_FLAG_DAEMON_SLEEP | \
160 CTR_FLAG_MIN_RECOVERY_RATE | \
161 CTR_FLAG_MAX_RECOVERY_RATE | \
162 CTR_FLAG_MAX_WRITE_BEHIND | \
163 CTR_FLAG_STRIPE_CACHE | \
164 CTR_FLAG_REGION_SIZE | \
165 CTR_FLAG_DELTA_DISKS | \
166 CTR_FLAG_DATA_OFFSET)
168 #define RAID6_VALID_FLAGS (CTR_FLAG_SYNC | \
170 CTR_FLAG_DAEMON_SLEEP | \
171 CTR_FLAG_MIN_RECOVERY_RATE | \
172 CTR_FLAG_MAX_RECOVERY_RATE | \
173 CTR_FLAG_MAX_WRITE_BEHIND | \
174 CTR_FLAG_STRIPE_CACHE | \
175 CTR_FLAG_REGION_SIZE | \
176 CTR_FLAG_DELTA_DISKS | \
177 CTR_FLAG_DATA_OFFSET)
178 /* ...valid options definitions per raid level */
/*
 * Flags for rs->runtime_flags field
 * (RT_FLAG prefix meaning "runtime flag")
 *
 * These are all internal and used to define runtime state,
 * e.g. to prevent another resume from preresume processing
 * the raid set all over again.
 */
#define RT_FLAG_RS_PRERESUMED 0
#define RT_FLAG_RS_RESUMED 1
#define RT_FLAG_RS_BITMAP_LOADED 2
#define RT_FLAG_UPDATE_SBS 3
#define RT_FLAG_RESHAPE_RS 4
#define RT_FLAG_KEEP_RS_FROZEN 5

/* Array elements of 64 bit needed for rebuild/write_mostly bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
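/*
 * Worked example (illustrative): with MAX_RAID_DEVICES == 253 and
 * 64 bits per uint64_t, (253 + 63) / 8 / 8 == 4 array elements,
 * i.e. the same result as DIV_ROUND_UP(253, 64).
 */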
/*
 * raid set level, layout and chunk sectors backup/restore
 */
struct rs_layout {
        int new_level;
        int new_layout;
        int new_chunk_sectors;
};

struct raid_set {
        struct dm_target *ti;

        uint32_t bitmap_loaded;
        uint32_t stripe_cache_entries;
        unsigned long ctr_flags;
        unsigned long runtime_flags;

        uint64_t rebuild_disks[DISKS_ARRAY_ELEMS];

        int raid_disks;
        int delta_disks;
        int data_offset;
        int raid10_copies;
        int requested_bitmap_chunk_sectors;

        struct mddev md;
        struct raid_type *raid_type;
        struct dm_target_callbacks callbacks;

        struct raid_dev dev[0];
};
static void rs_config_backup(struct raid_set *rs, struct rs_layout *l)
{
        struct mddev *mddev = &rs->md;

        l->new_level = mddev->new_level;
        l->new_layout = mddev->new_layout;
        l->new_chunk_sectors = mddev->new_chunk_sectors;
}

static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
{
        struct mddev *mddev = &rs->md;

        mddev->new_level = l->new_level;
        mddev->new_layout = l->new_layout;
        mddev->new_chunk_sectors = l->new_chunk_sectors;
}
/* raid10 algorithms (i.e. formats) */
#define ALGORITHM_RAID10_DEFAULT 0
#define ALGORITHM_RAID10_NEAR 1
#define ALGORITHM_RAID10_OFFSET 2
#define ALGORITHM_RAID10_FAR 3

/* Supported raid types and properties. */
static struct raid_type {
        const char *name;            /* RAID algorithm. */
        const char *descr;           /* Descriptor text for logging. */
        const unsigned parity_devs;  /* # of parity devices. */
        const unsigned minimal_devs; /* minimal # of devices in set. */
        const unsigned level;        /* RAID level. */
        const unsigned algorithm;    /* RAID algorithm. */
} raid_types[] = {
        {"raid0", "raid0 (striping)", 0, 2, 0, 0 /* NONE */},
        {"raid1", "raid1 (mirroring)", 0, 2, 1, 0 /* NONE */},
        {"raid10_far", "raid10 far (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_FAR},
        {"raid10_offset", "raid10 offset (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_OFFSET},
        {"raid10_near", "raid10 near (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_NEAR},
        {"raid10", "raid10 (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_DEFAULT},
        {"raid4", "raid4 (dedicated last parity disk)", 1, 2, 4, ALGORITHM_PARITY_N}, /* raid4 layout = raid5_n */
        {"raid5_n", "raid5 (dedicated last parity disk)", 1, 2, 5, ALGORITHM_PARITY_N},
        {"raid5_ls", "raid5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
        {"raid5_rs", "raid5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
        {"raid5_la", "raid5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
        {"raid5_ra", "raid5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
        {"raid6_zr", "raid6 (zero restart)", 2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART},
        {"raid6_nr", "raid6 (N restart)", 2, 4, 6, ALGORITHM_ROTATING_N_RESTART},
        {"raid6_nc", "raid6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE},
        {"raid6_n_6", "raid6 (dedicated parity/Q n/6)", 2, 4, 6, ALGORITHM_PARITY_N_6},
        {"raid6_ls_6", "raid6 (left symmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_LEFT_SYMMETRIC_6},
        {"raid6_rs_6", "raid6 (right symmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_RIGHT_SYMMETRIC_6},
        {"raid6_la_6", "raid6 (left asymmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_LEFT_ASYMMETRIC_6},
        {"raid6_ra_6", "raid6 (right asymmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_RIGHT_ASYMMETRIC_6}
};
/* True, if @v is in inclusive range [@min, @max] */
static bool __within_range(long v, long min, long max)
{
        return v >= min && v <= max;
}
/* All table line arguments are defined here */
static struct arg_name_flag {
        const unsigned long flag;
        const char *name;
} __arg_name_flags[] = {
        { CTR_FLAG_SYNC, "sync"},
        { CTR_FLAG_NOSYNC, "nosync"},
        { CTR_FLAG_REBUILD, "rebuild"},
        { CTR_FLAG_DAEMON_SLEEP, "daemon_sleep"},
        { CTR_FLAG_MIN_RECOVERY_RATE, "min_recovery_rate"},
        { CTR_FLAG_MAX_RECOVERY_RATE, "max_recovery_rate"},
        { CTR_FLAG_MAX_WRITE_BEHIND, "max_write_behind"},
        { CTR_FLAG_WRITE_MOSTLY, "writemostly"},
        { CTR_FLAG_STRIPE_CACHE, "stripe_cache"},
        { CTR_FLAG_REGION_SIZE, "region_size"},
        { CTR_FLAG_RAID10_COPIES, "raid10_copies"},
        { CTR_FLAG_RAID10_FORMAT, "raid10_format"},
        { CTR_FLAG_DATA_OFFSET, "data_offset"},
        { CTR_FLAG_DELTA_DISKS, "delta_disks"},
        { CTR_FLAG_RAID10_USE_NEAR_SETS, "raid10_use_near_sets"},
};
/* Return argument name string for given @flag */
static const char *dm_raid_arg_name_by_flag(const uint32_t flag)
{
        if (hweight32(flag) == 1) {
                /* Walk the table backwards looking for the single set bit */
                struct arg_name_flag *anf = __arg_name_flags + ARRAY_SIZE(__arg_name_flags);

                while (anf-- > __arg_name_flags)
                        if (flag & anf->flag)
                                return anf->name;

        } else
                DMERR("%s called with more than one flag!", __func__);

        return NULL;
}
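/*
 * Illustrative usage: dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD)
 * walks the table above and yields "rebuild", whereas a multi-bit
 * mask such as CTR_FLAGS_ANY_SYNC logs the error and yields NULL.
 */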
/*
 * bool helpers to test for various raid levels of a raid set,
 * i.e. its level as reported by the superblock rather than
 * the requested raid_type passed to the constructor.
 */

/* Return true, if raid set in @rs is raid0 */
static bool rs_is_raid0(struct raid_set *rs)
{
        return !rs->md.level;
}

/* Return true, if raid set in @rs is raid1 */
static bool rs_is_raid1(struct raid_set *rs)
{
        return rs->md.level == 1;
}

/* Return true, if raid set in @rs is raid10 */
static bool rs_is_raid10(struct raid_set *rs)
{
        return rs->md.level == 10;
}

/* Return true, if raid set in @rs is level 4, 5 or 6 */
static bool rs_is_raid456(struct raid_set *rs)
{
        return __within_range(rs->md.level, 4, 6);
}

/* Return true, if raid set in @rs is reshapable */
static unsigned int __is_raid10_far(int layout);
static bool rs_is_reshapable(struct raid_set *rs)
{
        return rs_is_raid456(rs) ||
               (rs_is_raid10(rs) && !__is_raid10_far(rs->md.new_layout));
}

/* Return true, if raid set in @rs is recovering */
static bool rs_is_recovering(struct raid_set *rs)
{
        return rs->md.recovery_cp != MaxSector;
}

/* Return true, if raid set in @rs is reshaping */
static bool rs_is_reshaping(struct raid_set *rs)
{
        return rs->md.reshape_position != MaxSector;
}
/*
 * bool helpers to test for various raid levels of a raid type @rt
 */

/* Return true, if raid type in @rt is raid0 */
static bool rt_is_raid0(struct raid_type *rt)
{
        return !rt->level;
}

/* Return true, if raid type in @rt is raid1 */
static bool rt_is_raid1(struct raid_type *rt)
{
        return rt->level == 1;
}

/* Return true, if raid type in @rt is raid10 */
static bool rt_is_raid10(struct raid_type *rt)
{
        return rt->level == 10;
}

/* Return true, if raid type in @rt is raid4/5 */
static bool rt_is_raid45(struct raid_type *rt)
{
        return __within_range(rt->level, 4, 5);
}

/* Return true, if raid type in @rt is raid6 */
static bool rt_is_raid6(struct raid_type *rt)
{
        return rt->level == 6;
}

/* Return true, if raid type in @rt is raid4/5/6 */
static bool rt_is_raid456(struct raid_type *rt)
{
        return __within_range(rt->level, 4, 6);
}
/* END: raid level bools */
/* Return valid ctr flags for the raid level of @rs */
static unsigned long __valid_flags(struct raid_set *rs)
{
        if (rt_is_raid0(rs->raid_type))
                return RAID0_VALID_FLAGS;
        else if (rt_is_raid1(rs->raid_type))
                return RAID1_VALID_FLAGS;
        else if (rt_is_raid10(rs->raid_type))
                return RAID10_VALID_FLAGS;
        else if (rt_is_raid45(rs->raid_type))
                return RAID45_VALID_FLAGS;
        else if (rt_is_raid6(rs->raid_type))
                return RAID6_VALID_FLAGS;

        return 0;
}

/*
 * Check for valid flags set on @rs
 *
 * Has to be called after parsing of the ctr flags!
 */
static int rs_check_for_valid_flags(struct raid_set *rs)
{
        if (rs->ctr_flags & ~__valid_flags(rs)) {
                rs->ti->error = "Invalid flags combination";
                return -EINVAL;
        }

        return 0;
}
/* MD raid10 bit definitions and helpers */
#define RAID10_OFFSET (1 << 16) /* stripes with data copies are adjacent on devices */
#define RAID10_BROCKEN_USE_FAR_SETS (1 << 17) /* Broken in raid10.c: use sets instead of whole stripe rotation */
#define RAID10_USE_FAR_SETS (1 << 18) /* Use sets instead of whole stripe rotation */
#define RAID10_FAR_COPIES_SHIFT 8 /* raid10 # far copies shift (2nd byte of layout) */
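/*
 * Layout word sketch (derived from the helpers below; illustrative):
 *   bits  0-7  : # of near copies
 *   bits  8-15 : # of far copies
 *   bit  16    : "offset" format
 *   bits 17-18 : use_far_sets variants
 * e.g. layout 0x102 encodes 2 near copies and 1 far copy.
 */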
/* Return md raid10 near copies for @layout */
static unsigned int __raid10_near_copies(int layout)
{
        return layout & 0xFF;
}

/* Return md raid10 far copies for @layout */
static unsigned int __raid10_far_copies(int layout)
{
        return __raid10_near_copies(layout >> RAID10_FAR_COPIES_SHIFT);
}

/* Return true if md raid10 offset for @layout */
static unsigned int __is_raid10_offset(int layout)
{
        return layout & RAID10_OFFSET;
}

/* Return true if md raid10 near for @layout */
static unsigned int __is_raid10_near(int layout)
{
        return !__is_raid10_offset(layout) && __raid10_near_copies(layout) > 1;
}

/* Return true if md raid10 far for @layout */
static unsigned int __is_raid10_far(int layout)
{
        return !__is_raid10_offset(layout) && __raid10_far_copies(layout) > 1;
}
/* Return md raid10 layout string for @layout */
static const char *raid10_md_layout_to_format(int layout)
{
        /*
         * Bit 16 stands for "offset"
         * (i.e. adjacent stripes hold copies)
         *
         * Refer to MD's raid10.c for details
         */
        if (__is_raid10_offset(layout))
                return "offset";

        if (__raid10_near_copies(layout) > 1)
                return "near";

        WARN_ON(__raid10_far_copies(layout) < 2);

        return "far";
}

/* Return md raid10 algorithm for @name */
static int raid10_name_to_format(const char *name)
{
        if (!strcasecmp(name, "near"))
                return ALGORITHM_RAID10_NEAR;
        else if (!strcasecmp(name, "offset"))
                return ALGORITHM_RAID10_OFFSET;
        else if (!strcasecmp(name, "far"))
                return ALGORITHM_RAID10_FAR;

        return -EINVAL;
}
/* Return md raid10 copies for @layout */
static unsigned int raid10_md_layout_to_copies(int layout)
{
        return __raid10_near_copies(layout) > 1 ?
               __raid10_near_copies(layout) : __raid10_far_copies(layout);
}
/* Return md raid10 format id for @format string */
static int raid10_format_to_md_layout(struct raid_set *rs,
                                      unsigned int algorithm,
                                      unsigned int copies)
{
        unsigned int n = 1, f = 1, r = 0;

        /*
         * MD resilience flaw:
         *
         * enabling use_far_sets for far/offset formats causes copies
         * to be colocated on the same devs together with their origins!
         *
         * -> disable it for now in the definition above
         */
        if (algorithm == ALGORITHM_RAID10_DEFAULT ||
            algorithm == ALGORITHM_RAID10_NEAR)
                n = copies;

        else if (algorithm == ALGORITHM_RAID10_OFFSET) {
                f = copies;
                r = RAID10_OFFSET;
                if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
                        r |= RAID10_USE_FAR_SETS;

        } else if (algorithm == ALGORITHM_RAID10_FAR) {
                f = copies;
                if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
                        r |= RAID10_USE_FAR_SETS;

        } else
                return -EINVAL;

        return r | (f << RAID10_FAR_COPIES_SHIFT) | n;
}
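/*
 * Worked examples (illustrative):
 *   "near",   2 copies -> 0x00102 (n=2, f=1)
 *   "far",    2 copies -> 0x40201 (f=2, use_far_sets)
 *   "offset", 2 copies -> 0x50201 (f=2, offset, use_far_sets)
 */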
/* END: MD raid10 bit definitions and helpers */

/* Check for any of the raid10 algorithms */
static int __got_raid10(struct raid_type *rtp, const int layout)
{
        if (rtp->level == 10) {
                switch (rtp->algorithm) {
                case ALGORITHM_RAID10_DEFAULT:
                case ALGORITHM_RAID10_NEAR:
                        return __is_raid10_near(layout);
                case ALGORITHM_RAID10_OFFSET:
                        return __is_raid10_offset(layout);
                case ALGORITHM_RAID10_FAR:
                        return __is_raid10_far(layout);
                default:
                        break;
                }
        }

        return 0;
}
/* Return raid_type for @name */
static struct raid_type *get_raid_type(const char *name)
{
        struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);

        while (rtp-- > raid_types)
                if (!strcasecmp(rtp->name, name))
                        return rtp;

        return NULL;
}

/* Return raid_type derived from @level and @layout */
static struct raid_type *get_raid_type_by_ll(const int level, const int layout)
{
        struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);

        while (rtp-- > raid_types) {
                /* RAID10 special checks based on @layout flags/properties */
                if (rtp->level == level &&
                    (__got_raid10(rtp, layout) || rtp->algorithm == layout))
                        return rtp;
        }

        return NULL;
}
/*
 * Conditionally change bdev capacity of @rs
 * in case of a disk add/remove reshape
 */
static void rs_set_capacity(struct raid_set *rs)
{
        struct mddev *mddev = &rs->md;
        struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));

        set_capacity(gendisk, mddev->array_sectors);
        revalidate_disk(gendisk);
}

/*
 * Set the mddev properties in @rs to the current
 * ones retrieved from the freshest superblock
 */
static void rs_set_cur(struct raid_set *rs)
{
        struct mddev *mddev = &rs->md;

        mddev->new_level = mddev->level;
        mddev->new_layout = mddev->layout;
        mddev->new_chunk_sectors = mddev->chunk_sectors;
}

/*
 * Set the mddev properties in @rs to the new
 * ones requested by the ctr
 */
static void rs_set_new(struct raid_set *rs)
{
        struct mddev *mddev = &rs->md;

        mddev->level = mddev->new_level;
        mddev->layout = mddev->new_layout;
        mddev->chunk_sectors = mddev->new_chunk_sectors;
        mddev->raid_disks = rs->raid_disks;
        mddev->delta_disks = 0;
}
static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *raid_type,
                                       unsigned raid_devs)
{
        unsigned i;
        struct raid_set *rs;

        if (raid_devs <= raid_type->parity_devs) {
                ti->error = "Insufficient number of devices";
                return ERR_PTR(-EINVAL);
        }

        rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
        if (!rs) {
                ti->error = "Cannot allocate raid context";
                return ERR_PTR(-ENOMEM);
        }

        mddev_init(&rs->md);

        rs->raid_disks = raid_devs;
        rs->delta_disks = 0;

        rs->ti = ti;
        rs->raid_type = raid_type;
        rs->stripe_cache_entries = 256;
        rs->md.raid_disks = raid_devs;
        rs->md.level = raid_type->level;
        rs->md.new_level = rs->md.level;
        rs->md.layout = raid_type->algorithm;
        rs->md.new_layout = rs->md.layout;
        rs->md.delta_disks = 0;
        rs->md.recovery_cp = rs_is_raid0(rs) ? MaxSector : 0;

        for (i = 0; i < raid_devs; i++)
                md_rdev_init(&rs->dev[i].rdev);

        /*
         * Remaining items to be initialized by further RAID params:
         *  rs->md.persistent
         *  rs->md.external
         *  rs->md.chunk_sectors
         *  rs->md.new_chunk_sectors
         *  rs->md.dev_sectors
         */

        return rs;
}
static void raid_set_free(struct raid_set *rs)
{
        int i;

        for (i = 0; i < rs->md.raid_disks; i++) {
                if (rs->dev[i].meta_dev)
                        dm_put_device(rs->ti, rs->dev[i].meta_dev);
                md_rdev_clear(&rs->dev[i].rdev);
                if (rs->dev[i].data_dev)
                        dm_put_device(rs->ti, rs->dev[i].data_dev);
        }

        kfree(rs);
}
/*
 * For every device we have two words
 *  <meta_dev>: meta device name or '-' if missing
 *  <data_dev>: data device name or '-' if missing
 *
 * The following are permitted:
 *    - -
 *    - <data_dev>
 *    <meta_dev> <data_dev>
 *
 * The following is not allowed:
 *    <meta_dev> -
 *
 * This code parses those words. If there is a failure,
 * the caller must use raid_set_free() to unwind the operations.
 */
static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
{
        int i;
        int rebuild = 0;
        int metadata_available = 0;
        int r = 0;
        const char *arg;

        /* Put off the number of raid devices argument to get to dev pairs */
        arg = dm_shift_arg(as);
        if (!arg)
                return -EINVAL;

        for (i = 0; i < rs->md.raid_disks; i++) {
                rs->dev[i].rdev.raid_disk = i;

                rs->dev[i].meta_dev = NULL;
                rs->dev[i].data_dev = NULL;

                /*
                 * There are no offsets, since there is a separate device
                 * for data and metadata.
                 */
                rs->dev[i].rdev.data_offset = 0;
                rs->dev[i].rdev.mddev = &rs->md;

                arg = dm_shift_arg(as);
                if (!arg)
                        return -EINVAL;

                if (strcmp(arg, "-")) {
                        r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
                                          &rs->dev[i].meta_dev);
                        if (r) {
                                rs->ti->error = "RAID metadata device lookup failure";
                                return r;
                        }

                        rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
                        if (!rs->dev[i].rdev.sb_page) {
                                rs->ti->error = "Failed to allocate superblock page";
                                return -ENOMEM;
                        }
                }

                arg = dm_shift_arg(as);
                if (!arg)
                        return -EINVAL;

                if (!strcmp(arg, "-")) {
                        if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
                            (!rs->dev[i].rdev.recovery_offset)) {
                                rs->ti->error = "Drive designated for rebuild not specified";
                                return -EINVAL;
                        }
                        if (rs->dev[i].meta_dev) {
                                rs->ti->error = "No data device supplied with metadata device";
                                return -EINVAL;
                        }
                        continue;
                }

                r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
                                  &rs->dev[i].data_dev);
                if (r) {
                        rs->ti->error = "RAID device lookup failure";
                        return r;
                }

                if (rs->dev[i].meta_dev) {
                        metadata_available = 1;
                        rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
                }
                rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
                list_add_tail(&rs->dev[i].rdev.same_set, &rs->md.disks);
                if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
                        rebuild++;
        }

        if (metadata_available) {
                rs->md.external = 0;
                rs->md.persistent = 1;
                rs->md.major_version = 2;
        } else if (rebuild && !rs->md.recovery_cp) {
                /*
                 * Without metadata, we will not be able to tell if the array
                 * is in-sync or not - we must assume it is not. Therefore,
                 * it is impossible to rebuild a drive.
                 *
                 * Even if there is metadata, the on-disk information may
                 * indicate that the array is not in-sync and it will then
                 * fail at that time.
                 *
                 * User could specify 'nosync' option if desperate.
                 */
                rs->ti->error = "Unable to rebuild drive while array is not in-sync";
                return -EINVAL;
        }

        return 0;
}
/*
 * validate_region_size
 * @rs
 * @region_size: region size in sectors. If 0, pick a size (4MiB default).
 *
 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
 * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_region_size(struct raid_set *rs, unsigned long region_size)
{
        unsigned long min_region_size = rs->ti->len / (1 << 21);

        if (!region_size) {
                /*
                 * Choose a reasonable default. All figures in sectors.
                 */
                if (min_region_size > (1 << 13)) {
                        /* If not a power of 2, make it the next power of 2 */
                        region_size = roundup_pow_of_two(min_region_size);
                        DMINFO("Choosing default region size of %lu sectors",
                               region_size);
                } else {
                        DMINFO("Choosing default region size of 4MiB");
                        region_size = 1 << 13; /* sectors */
                }
        } else {
                /* Validate user-supplied value. */
                if (region_size > rs->ti->len) {
                        rs->ti->error = "Supplied region size is too large";
                        return -EINVAL;
                }
                if (region_size < min_region_size) {
                        DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
                              region_size, min_region_size);
                        rs->ti->error = "Supplied region size is too small";
                        return -EINVAL;
                }
                if (!is_power_of_2(region_size)) {
                        rs->ti->error = "Region size is not a power of 2";
                        return -EINVAL;
                }
                if (region_size < rs->md.chunk_sectors) {
                        rs->ti->error = "Region size is smaller than the chunk size";
                        return -EINVAL;
                }
        }

        /*
         * Convert sectors to bytes.
         */
        rs->md.bitmap_info.chunksize = (region_size << 9);

        return 0;
}
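/*
 * Worked example (illustrative): for a 1 TiB target, ti->len is
 * 2^31 sectors, so min_region_size = 2^31 / 2^21 = 1024 sectors;
 * that is below 2^13, so the 4MiB default (8192 sectors) is used.
 */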
/*
 * validate_raid_redundancy
 * @rs
 *
 * Determine if there are enough devices in the array that haven't
 * failed (or are being rebuilt) to form a usable array.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_raid_redundancy(struct raid_set *rs)
{
        unsigned i, rebuild_cnt = 0;
        unsigned rebuilds_per_group = 0, copies;
        unsigned group_size, last_group_start;

        for (i = 0; i < rs->md.raid_disks; i++)
                if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
                    !rs->dev[i].rdev.sb_page)
                        rebuild_cnt++;

        switch (rs->raid_type->level) {
        case 1:
                if (rebuild_cnt >= rs->md.raid_disks)
                        goto too_many;
                break;
        case 4:
        case 5:
        case 6:
                if (rebuild_cnt > rs->raid_type->parity_devs)
                        goto too_many;
                break;
        case 10:
                copies = raid10_md_layout_to_copies(rs->md.new_layout);
                if (rebuild_cnt < copies)
                        break;

                /*
                 * It is possible to have a higher rebuild count for RAID10,
                 * as long as the failed devices occur in different mirror
                 * groups (i.e. different stripes).
                 *
                 * When checking "near" format, make sure no adjacent devices
                 * have failed beyond what can be handled. In addition to the
                 * simple case where the number of devices is a multiple of the
                 * number of copies, we must also handle cases where the number
                 * of devices is not a multiple of the number of copies.
                 * E.g. dev1 dev2 dev3 dev4 dev5
                 *       A    A    B    B    C
                 *       C    D    D    E    E
                 */
                if (__is_raid10_near(rs->md.new_layout)) {
                        for (i = 0; i < rs->raid_disks; i++) {
                                if (!(i % copies))
                                        rebuilds_per_group = 0;
                                if ((!rs->dev[i].rdev.sb_page ||
                                     !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
                                    (++rebuilds_per_group >= copies))
                                        goto too_many;
                        }
                        break;
                }

                /*
                 * When checking "far" and "offset" formats, we need to ensure
                 * that the device that holds its copy is not also dead or
                 * being rebuilt. (Note that "far" and "offset" formats only
                 * support two copies right now. These formats also only ever
                 * use the 'use_far_sets' variant.)
                 *
                 * This check is somewhat complicated by the need to account
                 * for arrays that are not a multiple of (far) copies. This
                 * results in the need to treat the last (potentially larger)
                 * set differently.
                 */
                group_size = (rs->md.raid_disks / copies);
                last_group_start = (rs->md.raid_disks / group_size) - 1;
                last_group_start *= group_size;
                for (i = 0; i < rs->md.raid_disks; i++) {
                        if (!(i % copies) && !(i > last_group_start))
                                rebuilds_per_group = 0;
                        if ((!rs->dev[i].rdev.sb_page ||
                             !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
                            (++rebuilds_per_group >= copies))
                                goto too_many;
                }
                break;
        default:
                if (rebuild_cnt)
                        return -EINVAL;
        }

        return 0;

too_many:
        return -EINVAL;
}
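/*
 * Worked example (illustrative): 5 devices with 2 far copies give
 * group_size = 5 / 2 = 2 and last_group_start = (5 / 2 - 1) * 2 = 2,
 * so the final set spans devices 2..4 and is one device larger.
 */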
/*
 * Possible arguments are...
 *    <chunk_size> [optional_args]
 *
 * Argument definitions
 *    <chunk_size>                      The number of sectors per disk that
 *                                      will form the "stripe"
 *    [[no]sync]                        Force or prevent recovery of the
 *                                      entire array
 *    [rebuild <idx>]                   Rebuild the drive indicated by the index
 *    [daemon_sleep <ms>]               Time between bitmap daemon work to
 *                                      clear bits
 *    [min_recovery_rate <kB/sec/disk>] Throttle RAID initialization
 *    [max_recovery_rate <kB/sec/disk>] Throttle RAID initialization
 *    [write_mostly <idx>]              Indicate a write mostly drive via index
 *    [max_write_behind <sectors>]      See '-write-behind=' (man mdadm)
 *    [stripe_cache <sectors>]          Stripe cache size for higher RAIDs
 *    [region_size <sectors>]           Defines granularity of bitmap
 *
 * RAID10-only options:
 *    [raid10_copies <# copies>]        Number of copies. (Default: 2)
 *    [raid10_format <near|far|offset>] Layout algorithm. (Default: near)
 */
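/*
 * Illustrative table line using these parameters (format per
 * Documentation/device-mapper/dm-raid.txt; device paths assumed):
 *
 *   0 41943040 raid raid10 3 64 raid10_copies 2 \
 *     3 /dev/meta0 /dev/data0 /dev/meta1 /dev/data1 /dev/meta2 /dev/data2
 *
 * i.e. <start> <len> raid <raid_type> <#raid_params> <raid_params...>
 *      <#raid_devs> <meta_dev1> <dev1> ...
 */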
static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
                             unsigned num_raid_params)
{
        int value, raid10_format = ALGORITHM_RAID10_DEFAULT;
        unsigned raid10_copies = 2;
        unsigned i, write_mostly = 0;
        unsigned region_size = 0;
        sector_t max_io_len;
        const char *arg, *key;
        struct raid_dev *rd;
        struct raid_type *rt = rs->raid_type;

        arg = dm_shift_arg(as);
        num_raid_params--; /* Account for chunk_size argument */

        if (kstrtoint(arg, 10, &value) < 0) {
                rs->ti->error = "Bad numerical argument given for chunk_size";
                return -EINVAL;
        }

        /*
         * First, parse the in-order required arguments
         * "chunk_size" is the only argument of this type.
         */
        if (rt_is_raid1(rt)) {
                if (value)
                        DMERR("Ignoring chunk size parameter for RAID 1");
                value = 0;
        } else if (!is_power_of_2(value)) {
                rs->ti->error = "Chunk size must be a power of 2";
                return -EINVAL;
        } else if (value < 8) {
                rs->ti->error = "Chunk size value is too small";
                return -EINVAL;
        }

        rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
        /*
         * We set each individual device as In_sync with a completed
         * 'recovery_offset'. If there has been a device failure or
         * replacement then one of the following cases applies:
         *
         * 1) User specifies 'rebuild'.
         *    - Device is reset when param is read.
         * 2) A new device is supplied.
         *    - No matching superblock found, resets device.
         * 3) Device failure was transient and returns on reload.
         *    - Failure noticed, resets device for bitmap replay.
         * 4) Device hadn't completed recovery after previous failure.
         *    - Superblock is read and overrides recovery_offset.
         *
         * What is found in the superblocks of the devices is always
         * authoritative, unless 'rebuild' or '[no]sync' was specified.
         */
        for (i = 0; i < rs->md.raid_disks; i++) {
                set_bit(In_sync, &rs->dev[i].rdev.flags);
                rs->dev[i].rdev.recovery_offset = MaxSector;
        }
        /*
         * Second, parse the unordered optional arguments
         */
        for (i = 0; i < num_raid_params; i++) {
                key = dm_shift_arg(as);
                if (!key) {
                        rs->ti->error = "Not enough raid parameters given";
                        return -EINVAL;
                }

                if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC))) {
                        if (test_and_set_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
                                rs->ti->error = "Only one 'nosync' argument allowed";
                                return -EINVAL;
                        }
                        rs->md.recovery_cp = MaxSector;
                        continue;
                }
                if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_SYNC))) {
                        if (test_and_set_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) {
                                rs->ti->error = "Only one 'sync' argument allowed";
                                return -EINVAL;
                        }
                        rs->md.recovery_cp = 0;
                        continue;
                }
                if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_USE_NEAR_SETS))) {
                        if (test_and_set_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
                                rs->ti->error = "Only one 'raid10_use_near_sets' argument allowed";
                                return -EINVAL;
                        }
                        continue;
                }

                arg = dm_shift_arg(as);
                i++; /* Account for the argument pairs */
                if (!arg) {
                        rs->ti->error = "Wrong number of raid parameters given";
                        return -EINVAL;
                }
                /*
                 * Parameters that take a string value are checked here.
                 */
                if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT))) {
                        if (test_and_set_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) {
                                rs->ti->error = "Only one 'raid10_format' argument pair allowed";
                                return -EINVAL;
                        }
                        if (!rt_is_raid10(rt)) {
                                rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
                                return -EINVAL;
                        }
                        raid10_format = raid10_name_to_format(arg);
                        if (raid10_format < 0) {
                                rs->ti->error = "Invalid 'raid10_format' value given";
                                return raid10_format;
                        }
                        continue;
                }

                if (kstrtoint(arg, 10, &value) < 0) {
                        rs->ti->error = "Bad numerical argument given in raid params";
                        return -EINVAL;
                }
                if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD))) {
                        /*
                         * "rebuild" is being passed in by userspace to provide
                         * indexes of replaced devices and to set up additional
                         * devices on raid level takeover.
                         */
                        if (!__within_range(value, 0, rs->raid_disks - 1)) {
                                rs->ti->error = "Invalid rebuild index given";
                                return -EINVAL;
                        }

                        if (test_and_set_bit(value, (void *) rs->rebuild_disks)) {
                                rs->ti->error = "rebuild for this index already given";
                                return -EINVAL;
                        }

                        rd = rs->dev + value;
                        clear_bit(In_sync, &rd->rdev.flags);
                        clear_bit(Faulty, &rd->rdev.flags);
                        rd->rdev.recovery_offset = 0;
                        set_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags);
                } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY))) {
                        if (!rt_is_raid1(rt)) {
                                rs->ti->error = "write_mostly option is only valid for RAID1";
                                return -EINVAL;
                        }

                        if (!__within_range(value, 0, rs->md.raid_disks - 1)) {
                                rs->ti->error = "Invalid write_mostly index given";
                                return -EINVAL;
                        }

                        write_mostly++;
                        set_bit(WriteMostly, &rs->dev[value].rdev.flags);
                        set_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags);
                } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND))) {
                        if (!rt_is_raid1(rt)) {
                                rs->ti->error = "max_write_behind option is only valid for RAID1";
                                return -EINVAL;
                        }
                        if (test_and_set_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) {
                                rs->ti->error = "Only one max_write_behind argument pair allowed";
                                return -EINVAL;
                        }

                        /*
                         * In device-mapper, we specify things in sectors, but
                         * MD records this value in kB
                         */
                        value /= 2;
                        if (value > COUNTER_MAX) {
                                rs->ti->error = "Max write-behind limit out of range";
                                return -EINVAL;
                        }

                        rs->md.bitmap_info.max_write_behind = value;
                } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP))) {
                        if (test_and_set_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) {
                                rs->ti->error = "Only one daemon_sleep argument pair allowed";
                                return -EINVAL;
                        }
                        if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
                                rs->ti->error = "daemon sleep period out of range";
                                return -EINVAL;
                        }
                        rs->md.bitmap_info.daemon_sleep = value;
                } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET))) {
                        /* Userspace passes new data_offset after having extended the data image LV */
                        if (test_and_set_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
                                rs->ti->error = "Only one data_offset argument pair allowed";
                                return -EINVAL;
                        }
                        /* Ensure sensible data offset */
                        if (value < 0 ||
                            (value && (value < MIN_FREE_RESHAPE_SPACE || value % to_sector(PAGE_SIZE)))) {
                                rs->ti->error = "Bogus data_offset value";
                                return -EINVAL;
                        }
                        rs->data_offset = value;
                } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS))) {
                        /* Define the +/-# of disks to add to/remove from the given raid set */
                        if (test_and_set_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
                                rs->ti->error = "Only one delta_disks argument pair allowed";
                                return -EINVAL;
                        }
                        /* Ensure MAX_RAID_DEVICES and raid type minimal_devs! */
                        if (!__within_range(abs(value), 1, MAX_RAID_DEVICES - rt->minimal_devs)) {
                                rs->ti->error = "Too many delta_disks requested";
                                return -EINVAL;
                        }

                        rs->delta_disks = value;
                } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE))) {
                        if (test_and_set_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) {
                                rs->ti->error = "Only one stripe_cache argument pair allowed";
                                return -EINVAL;
                        }

                        if (!rt_is_raid456(rt)) {
                                rs->ti->error = "Inappropriate argument: stripe_cache";
                                return -EINVAL;
                        }

                        rs->stripe_cache_entries = value;
                } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE))) {
                        if (test_and_set_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) {
                                rs->ti->error = "Only one min_recovery_rate argument pair allowed";
                                return -EINVAL;
                        }
                        if (value > INT_MAX) {
                                rs->ti->error = "min_recovery_rate out of range";
                                return -EINVAL;
                        }
                        rs->md.sync_speed_min = (int)value;
                } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE))) {
                        if (test_and_set_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags)) {
                                rs->ti->error = "Only one max_recovery_rate argument pair allowed";
                                return -EINVAL;
                        }
                        if (value > INT_MAX) {
                                rs->ti->error = "max_recovery_rate out of range";
                                return -EINVAL;
                        }
                        rs->md.sync_speed_max = (int)value;
                } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE))) {
                        if (test_and_set_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) {
                                rs->ti->error = "Only one region_size argument pair allowed";
                                return -EINVAL;
                        }

                        region_size = value;
                        rs->requested_bitmap_chunk_sectors = value;
                } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES))) {
                        if (test_and_set_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) {
                                rs->ti->error = "Only one raid10_copies argument pair allowed";
                                return -EINVAL;
                        }

                        if (!__within_range(value, 2, rs->md.raid_disks)) {
                                rs->ti->error = "Bad value for 'raid10_copies'";
                                return -EINVAL;
                        }

                        raid10_copies = value;
                } else {
                        DMERR("Unable to parse RAID parameter: %s", key);
                        rs->ti->error = "Unable to parse RAID parameter";
                        return -EINVAL;
                }
        }
        if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) &&
            test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
                rs->ti->error = "sync and nosync are mutually exclusive";
                return -EINVAL;
        }

        if (write_mostly >= rs->md.raid_disks) {
                rs->ti->error = "Can't set all raid1 devices to write_mostly";
                return -EINVAL;
        }

        if (validate_region_size(rs, region_size))
                return -EINVAL;

        if (rs->md.chunk_sectors)
                max_io_len = rs->md.chunk_sectors;
        else
                max_io_len = region_size;

        if (dm_set_target_max_io_len(rs->ti, max_io_len))
                return -EINVAL;

        if (rt_is_raid10(rt)) {
                if (raid10_copies > rs->md.raid_disks) {
                        rs->ti->error = "Not enough devices to satisfy specification";
                        return -EINVAL;
                }

                rs->md.new_layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies);
                if (rs->md.new_layout < 0) {
                        rs->ti->error = "Error getting raid10 format";
                        return rs->md.new_layout;
                }

                rt = get_raid_type_by_ll(10, rs->md.new_layout);
                if (!rt) {
                        rs->ti->error = "Failed to recognize new raid10 layout";
                        return -EINVAL;
                }

                if ((rt->algorithm == ALGORITHM_RAID10_DEFAULT ||
                     rt->algorithm == ALGORITHM_RAID10_NEAR) &&
                    test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
                        rs->ti->error = "RAID10 format 'near' and 'raid10_use_near_sets' are incompatible";
                        return -EINVAL;
                }
        }

        rs->raid10_copies = raid10_copies;

        /* Assume there are no metadata devices until the drives are parsed */
        rs->md.persistent = 0;
        rs->md.external = 1;

        /* Check, if any invalid ctr arguments have been passed in for the raid level */
        return rs_check_for_valid_flags(rs);
}
/* Set raid4/5/6 cache size */
static int rs_set_raid456_stripe_cache(struct raid_set *rs)
{
        int r;
        struct r5conf *conf;
        struct mddev *mddev = &rs->md;
        uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2;
        uint32_t nr_stripes = rs->stripe_cache_entries;

        if (!rt_is_raid456(rs->raid_type)) {
                rs->ti->error = "Inappropriate raid level; cannot change stripe_cache size";
                return -EINVAL;
        }

        if (nr_stripes < min_stripes) {
                DMINFO("Adjusting requested %u stripe cache entries to %u to suit stripe size",
                       nr_stripes, min_stripes);
                nr_stripes = min_stripes;
        }

        conf = mddev->private;
        if (!conf) {
                rs->ti->error = "Cannot change stripe_cache size on inactive RAID set";
                return -EINVAL;
        }

        /* Try setting number of stripes in raid456 stripe cache */
        if (conf->min_nr_stripes != nr_stripes) {
                r = raid5_set_cache_size(mddev, nr_stripes);
                if (r) {
                        rs->ti->error = "Failed to set raid4/5/6 stripe cache size";
                        return r;
                }

                DMINFO("%u stripe cache entries", nr_stripes);
        }

        return 0;
}
/* Return # of data stripes as kept in mddev as of @rs (i.e. as of superblock) */
static unsigned int mddev_data_stripes(struct raid_set *rs)
{
        return rs->md.raid_disks - rs->raid_type->parity_devs;
}

/* Return # of data stripes of @rs (i.e. as of ctr) */
static unsigned int rs_data_stripes(struct raid_set *rs)
{
        return rs->raid_disks - rs->raid_type->parity_devs;
}
/* Calculate the sectors per device and per array used for @rs */
static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
{
        int delta_disks;
        unsigned int data_stripes;
        struct mddev *mddev = &rs->md;
        struct md_rdev *rdev;
        sector_t array_sectors = rs->ti->len, dev_sectors = rs->ti->len;
        sector_t cur_dev_sectors = rs->dev[0].rdev.sectors;

        if (use_mddev) {
                delta_disks = mddev->delta_disks;
                data_stripes = mddev_data_stripes(rs);
        } else {
                delta_disks = rs->delta_disks;
                data_stripes = rs_data_stripes(rs);
        }

        /* Special raid1 case w/o delta_disks support (yet) */
        if (rt_is_raid1(rs->raid_type))
                ;
        else if (rt_is_raid10(rs->raid_type)) {
                if (rs->raid10_copies < 2 ||
                    delta_disks < 0) {
                        rs->ti->error = "Bogus raid10 data copies or delta disks";
                        return -EINVAL;
                }

                dev_sectors *= rs->raid10_copies;
                if (sector_div(dev_sectors, data_stripes))
                        goto bad;

                array_sectors = (data_stripes + delta_disks) * dev_sectors;
                if (sector_div(array_sectors, rs->raid10_copies))
                        goto bad;

        } else if (sector_div(dev_sectors, data_stripes))
                goto bad;

        else
                /* Striped layouts */
                array_sectors = (data_stripes + delta_disks) * dev_sectors;

        rdev_for_each(rdev, mddev)
                rdev->sectors = dev_sectors;

        mddev->array_sectors = array_sectors;
        mddev->dev_sectors = dev_sectors;

        if (!rs_is_raid0(rs) && dev_sectors > cur_dev_sectors)
                mddev->recovery_cp = dev_sectors;

        return 0;
bad:
        rs->ti->error = "Target length not divisible by number of data devices";
        return -EINVAL;
}
static void do_table_event(struct work_struct *ws)
{
        struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);

        smp_rmb(); /* Make sure we access most actual mddev properties */
        if (!rs_is_reshaping(rs))
                rs_set_capacity(rs);
        dm_table_event(rs->ti->table);
}

static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
{
        struct raid_set *rs = container_of(cb, struct raid_set, callbacks);

        return mddev_congested(&rs->md, bits);
}
/*
 * Make sure a valid takeover (level switch) is being requested on @rs
 *
 * Conversions of raid sets from one MD personality to another
 * have to conform to restrictions which are enforced here.
 */
static int rs_check_takeover(struct raid_set *rs)
{
        struct mddev *mddev = &rs->md;
        unsigned int near_copies;

        if (rs->md.degraded) {
                rs->ti->error = "Can't takeover degraded raid set";
                return -EPERM;
        }

        if (rs_is_reshaping(rs)) {
                rs->ti->error = "Can't takeover reshaping raid set";
                return -EPERM;
        }
        switch (mddev->level) {
        case 0:
                /* raid0 -> raid1/5 with one disk */
                if ((mddev->new_level == 1 || mddev->new_level == 5) &&
                    mddev->raid_disks == 1)
                        return 0;

                /* raid0 -> raid10 */
                if (mddev->new_level == 10 &&
                    !(rs->raid_disks % mddev->raid_disks))
                        return 0;

                /* raid0 with multiple disks -> raid4/5/6 */
                if (__within_range(mddev->new_level, 4, 6) &&
                    mddev->new_layout == ALGORITHM_PARITY_N &&
                    mddev->raid_disks > 1)
                        return 0;

                break;

        case 10:
                /* Can't takeover raid10_offset! */
                if (__is_raid10_offset(mddev->layout))
                        break;

                near_copies = __raid10_near_copies(mddev->layout);

                /* raid10* -> raid0 */
                if (mddev->new_level == 0) {
                        /* Can takeover raid10_near with raid disks divisible by data copies! */
                        if (near_copies > 1 &&
                            !(mddev->raid_disks % near_copies)) {
                                mddev->raid_disks /= near_copies;
                                mddev->delta_disks = mddev->raid_disks;
                                return 0;
                        }

                        /* Can takeover raid10_far */
                        if (near_copies == 1 &&
                            __raid10_far_copies(mddev->layout) > 1)
                                return 0;

                        break;
                }

                /* raid10_{near,far} -> raid1 */
                if (mddev->new_level == 1 &&
                    max(near_copies, __raid10_far_copies(mddev->layout)) == mddev->raid_disks)
                        return 0;

                /* raid10_{near,far} with 2 disks -> raid4/5 */
                if (__within_range(mddev->new_level, 4, 5) &&
                    mddev->raid_disks == 2)
                        return 0;
                break;

        case 1:
                /* raid1 with 2 disks -> raid4/5 */
                if (__within_range(mddev->new_level, 4, 5) &&
                    mddev->raid_disks == 2) {
                        mddev->degraded = 1;
                        return 0;
                }

                /* raid1 -> raid0 */
                if (mddev->new_level == 0 &&
                    mddev->raid_disks == 1)
                        return 0;

                /* raid1 -> raid10 */
                if (mddev->new_level == 10)
                        return 0;
                break;

        case 4:
                /* raid4 -> raid0 */
                if (mddev->new_level == 0)
                        return 0;

                /* raid4 -> raid1/5 with 2 disks */
                if ((mddev->new_level == 1 || mddev->new_level == 5) &&
                    mddev->raid_disks == 2)
                        return 0;

                /* raid4 -> raid5/6 with parity N */
                if (__within_range(mddev->new_level, 5, 6) &&
                    mddev->layout == ALGORITHM_PARITY_N)
                        return 0;
                break;

        case 5:
                /* raid5 with parity N -> raid0 */
                if (mddev->new_level == 0 &&
                    mddev->layout == ALGORITHM_PARITY_N)
                        return 0;

                /* raid5 with parity N -> raid4 */
                if (mddev->new_level == 4 &&
                    mddev->layout == ALGORITHM_PARITY_N)
                        return 0;

                /* raid5 with 2 disks -> raid1/4/10 */
                if ((mddev->new_level == 1 || mddev->new_level == 4 || mddev->new_level == 10) &&
                    mddev->raid_disks == 2)
                        return 0;

                /* raid5_* -> raid6_*_6 with Q-Syndrome N (e.g. raid5_ra -> raid6_ra_6) */
                if (mddev->new_level == 6 &&
                    ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
                     __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC_6, ALGORITHM_RIGHT_SYMMETRIC_6)))
                        return 0;
                break;

        case 6:
                /* raid6 with parity N -> raid0 */
                if (mddev->new_level == 0 &&
                    mddev->layout == ALGORITHM_PARITY_N)
                        return 0;

                /* raid6 with parity N -> raid4 */
                if (mddev->new_level == 4 &&
                    mddev->layout == ALGORITHM_PARITY_N)
                        return 0;

                /* raid6_*_n with Q-Syndrome N -> raid5_* */
                if (mddev->new_level == 5 &&
                    ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
                     __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC)))
                        return 0;
                break;

        default:
                break;
        }

        rs->ti->error = "takeover not possible";
        return -EINVAL;
}
/* True if @rs requested to be taken over */
static bool rs_takeover_requested(struct raid_set *rs)
{
        return rs->md.new_level != rs->md.level;
}

/* True if @rs is requested to reshape by ctr */
static bool rs_reshape_requested(struct raid_set *rs)
{
        struct mddev *mddev = &rs->md;

        return !__is_raid10_far(mddev->new_layout) &&
               mddev->new_level == mddev->level &&
               (mddev->new_layout != mddev->layout ||
                mddev->new_chunk_sectors != mddev->chunk_sectors ||
                rs->raid_disks + rs->delta_disks != mddev->raid_disks);
}
/* Features */
#define FEATURE_FLAG_SUPPORTS_V190 0x1 /* Supports extended superblock */

/* State flags for sb->flags */
#define SB_FLAG_RESHAPE_ACTIVE 0x1
#define SB_FLAG_RESHAPE_BACKWARDS 0x2

/*
 * This structure is never routinely used by userspace, unlike md superblocks.
 * Devices with this superblock should only ever be accessed via device-mapper.
 */
#define DM_RAID_MAGIC 0x64526D44
struct dm_raid_superblock {
        __le32 magic;           /* "DmRd" */
        __le32 compat_features; /* Used to indicate compatible features (like 1.9.0 ondisk metadata extension) */

        __le32 num_devices;     /* Number of devices in this raid set. (Max 64) */
        __le32 array_position;  /* The position of this drive in the raid set */

        __le64 events;          /* Incremented by md when superblock updated */
        __le64 failed_devices;  /* Pre 1.9.0 part of bit field of devices to */
                                /* indicate failures (see extension below) */

        /*
         * This offset tracks the progress of the repair or replacement of
         * an individual drive.
         */
        __le64 disk_recovery_offset;

        /*
         * This offset tracks the progress of the initial raid set
         * synchronisation/parity calculation.
         */
        __le64 array_resync_offset;

        /*
         * raid characteristics
         */
        __le32 level;
        __le32 layout;
        __le32 stripe_sectors;

        /********************************************************************
         * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
         *
         * FEATURE_FLAG_SUPPORTS_V190 in the features member indicates that those exist
         */
        __le32 flags; /* Flags defining array states for reshaping */

        /*
         * This offset tracks the progress of a raid
         * set reshape in order to be able to restart it
         */
        __le64 reshape_position;

        /*
         * These define the properties of the array in case of an interrupted reshape
         */
        __le32 new_level;
        __le32 new_layout;
        __le32 new_stripe_sectors;
        __le32 delta_disks;

        __le64 array_sectors; /* Array size in sectors */

        /*
         * Sector offsets to data on devices (reshaping).
         * Needed to support out of place reshaping, thus
         * not writing over any stripes whilst converting
         * them from old to new layout
         */
        __le64 data_offset;
        __le64 new_data_offset;

        __le64 sectors; /* Used device size in sectors */

        /*
         * Additional Bit field of devices indicating failures to support
         * up to 256 devices with the 1.9.0 on-disk metadata format
         */
        __le64 extended_failed_devices[DISKS_ARRAY_ELEMS - 1];

        __le32 incompat_features; /* Used to indicate any incompatible features */

        /* Always set rest up to logical block size to 0 when writing (see get_metadata_device() below). */
} __packed;
/*
 * Check for reshape constraints on raid set @rs:
 *
 * - reshape function non-existent
 * - degraded raid set
 * - ongoing recovery
 * - ongoing reshape
 *
 * Returns 0 if none or -EPERM if given constraint
 * and the error message set in rs->ti->error
 */
static int rs_check_reshape(struct raid_set *rs)
{
        struct mddev *mddev = &rs->md;

        if (!mddev->pers || !mddev->pers->check_reshape)
                rs->ti->error = "Reshape not supported";
        else if (mddev->degraded)
                rs->ti->error = "Can't reshape degraded raid set";
        else if (rs_is_recovering(rs))
                rs->ti->error = "Convert request on recovering raid set prohibited";
        else if (mddev->reshape_position && rs_is_reshaping(rs))
                rs->ti->error = "raid set already reshaping!";
        else if (!(rs_is_raid10(rs) || rs_is_raid456(rs)))
                rs->ti->error = "Reshaping only supported for raid4/5/6/10";
        else
                return 0;

        return -EPERM;
}
static int read_disk_sb(struct md_rdev *rdev, int size)
{
        BUG_ON(!rdev->sb_page);

        if (rdev->sb_loaded)
                return 0;

        if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true)) {
                DMERR("Failed to read superblock of device at position %d",
                      rdev->raid_disk);
                md_error(rdev->mddev, rdev);
                return -EINVAL;
        }

        rdev->sb_loaded = 1;

        return 0;
}
static void sb_retrieve_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
{
        failed_devices[0] = le64_to_cpu(sb->failed_devices);
        memset(failed_devices + 1, 0, sizeof(sb->extended_failed_devices));

        if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
                int i = ARRAY_SIZE(sb->extended_failed_devices);

                while (i--)
                        failed_devices[i+1] = le64_to_cpu(sb->extended_failed_devices[i]);
        }
}

static void sb_update_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
{
        int i = ARRAY_SIZE(sb->extended_failed_devices);

        sb->failed_devices = cpu_to_le64(failed_devices[0]);
        while (i--)
                sb->extended_failed_devices[i] = cpu_to_le64(failed_devices[i+1]);
}
/*
 * Synchronize the superblock members with the raid set properties
 *
 * All superblock data is little endian.
 */
static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
{
        bool update_failed_devices = false;
        unsigned int i;
        uint64_t failed_devices[DISKS_ARRAY_ELEMS];
        struct dm_raid_superblock *sb;
        struct raid_set *rs = container_of(mddev, struct raid_set, md);

        /* No metadata device, no superblock */
        if (!rdev->meta_bdev)
                return;

        BUG_ON(!rdev->sb_page);

        sb = page_address(rdev->sb_page);

        sb_retrieve_failed_devices(sb, failed_devices);

        for (i = 0; i < rs->raid_disks; i++)
                if (!rs->dev[i].data_dev || test_bit(Faulty, &rs->dev[i].rdev.flags)) {
                        update_failed_devices = true;
                        set_bit(i, (void *) failed_devices);
                }

        if (update_failed_devices)
                sb_update_failed_devices(sb, failed_devices);

        sb->magic = cpu_to_le32(DM_RAID_MAGIC);
        sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);

        sb->num_devices = cpu_to_le32(mddev->raid_disks);
        sb->array_position = cpu_to_le32(rdev->raid_disk);

        sb->events = cpu_to_le64(mddev->events);

        sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
        sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);

        sb->level = cpu_to_le32(mddev->level);
        sb->layout = cpu_to_le32(mddev->layout);
        sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);

        sb->new_level = cpu_to_le32(mddev->new_level);
        sb->new_layout = cpu_to_le32(mddev->new_layout);
        sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors);

        sb->delta_disks = cpu_to_le32(mddev->delta_disks);

        smp_rmb(); /* Make sure we access most recent reshape position */
        sb->reshape_position = cpu_to_le64(mddev->reshape_position);
        if (le64_to_cpu(sb->reshape_position) != MaxSector) {
                /* Flag ongoing reshape */
                sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE);

                if (mddev->delta_disks < 0 || mddev->reshape_backwards)
                        sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_BACKWARDS);
        } else {
                /* Clear reshape flags */
                sb->flags &= ~(cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE|SB_FLAG_RESHAPE_BACKWARDS));
        }

        sb->array_sectors = cpu_to_le64(mddev->array_sectors);
        sb->data_offset = cpu_to_le64(rdev->data_offset);
        sb->new_data_offset = cpu_to_le64(rdev->new_data_offset);
        sb->sectors = cpu_to_le64(rdev->sectors);

        /* Zero out the rest of the payload after the size of the superblock */
        memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
}
/*
 * super_load
 *
 * This function creates a superblock if one is not found on the device
 * and will decide which superblock to use if there's a choice.
 *
 * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
 */
static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
{
        int r;
        struct dm_raid_superblock *sb;
        struct dm_raid_superblock *refsb;
        uint64_t events_sb, events_refsb;

        rdev->sb_start = 0;
        rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
        if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
                DMERR("superblock size of a logical block is no longer valid");
                return -EINVAL;
        }

        r = read_disk_sb(rdev, rdev->sb_size);
        if (r)
                return r;

        sb = page_address(rdev->sb_page);

        /*
         * Two cases that we want to write new superblocks and rebuild:
         * 1) New device (no matching magic number)
         * 2) Device specified for rebuild (!In_sync w/ offset == 0)
         */
        if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
            (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
                super_sync(rdev->mddev, rdev);

                set_bit(FirstUse, &rdev->flags);
                sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);

                /* Force writing of superblocks to disk */
                set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);

                /* Any superblock is better than none, choose that if given */
                return refdev ? 0 : 1;
        }

        if (!refdev)
                return 1;

        events_sb = le64_to_cpu(sb->events);

        refsb = page_address(refdev->sb_page);
        events_refsb = le64_to_cpu(refsb->events);

        return (events_sb > events_refsb) ? 1 : 0;
}
static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
{
        int role;
        struct mddev *mddev = &rs->md;
        uint64_t events_sb;
        uint64_t failed_devices[DISKS_ARRAY_ELEMS];
        struct dm_raid_superblock *sb;
        uint32_t new_devs = 0, rebuild_and_new = 0, rebuilds = 0;
        struct md_rdev *r;
        struct dm_raid_superblock *sb2;

        sb = page_address(rdev->sb_page);
        events_sb = le64_to_cpu(sb->events);

        /*
         * Initialise to 1 if this is a new superblock.
         */
        mddev->events = events_sb ? : 1;

        mddev->reshape_position = MaxSector;

        /*
         * Reshaping is supported, e.g. reshape_position is valid
         * in superblock and superblock content is authoritative.
         */
        if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
                /* Superblock is authoritative wrt given raid set layout! */
                mddev->raid_disks = le32_to_cpu(sb->num_devices);
                mddev->level = le32_to_cpu(sb->level);
                mddev->layout = le32_to_cpu(sb->layout);
                mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);
                mddev->new_level = le32_to_cpu(sb->new_level);
                mddev->new_layout = le32_to_cpu(sb->new_layout);
                mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors);
                mddev->delta_disks = le32_to_cpu(sb->delta_disks);
                mddev->array_sectors = le64_to_cpu(sb->array_sectors);

                /* raid was reshaping and got interrupted */
                if (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_ACTIVE) {
                        if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
                                DMERR("Reshape requested but raid set is still reshaping");
                                return -EINVAL;
                        }

                        if (mddev->delta_disks < 0 ||
                            (!mddev->delta_disks && (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_BACKWARDS)))
                                mddev->reshape_backwards = 1;
                        else
                                mddev->reshape_backwards = 0;

                        mddev->reshape_position = le64_to_cpu(sb->reshape_position);
                        rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout);
                }
        } else {
                /*
                 * No takeover/reshaping, because we don't have the extended v1.9.0 metadata
                 */
                if (le32_to_cpu(sb->level) != mddev->level) {
                        DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)");
                        return -EINVAL;
                }
                if (le32_to_cpu(sb->layout) != mddev->layout) {
                        DMERR("Reshaping raid sets not yet supported. (raid layout change)");
                        DMERR("  0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
                        DMERR("  Old layout: %s w/ %d copies",
                              raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
                              raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
                        DMERR("  New layout: %s w/ %d copies",
                              raid10_md_layout_to_format(mddev->layout),
                              raid10_md_layout_to_copies(mddev->layout));
                        return -EINVAL;
                }
                if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
                        DMERR("Reshaping raid sets not yet supported. (stripe sectors change)");
                        return -EINVAL;
                }

                /* We can only change the number of devices in raid1 with old (i.e. pre 1.0.7) metadata */
                if (!rt_is_raid1(rs->raid_type) &&
                    (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
                        DMERR("Reshaping raid sets not yet supported. (device count change from %u to %u)",
                              sb->num_devices, mddev->raid_disks);
                        return -EINVAL;
                }

                /* Table line is checked vs. authoritative superblock */
        }
2047 if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
2048 mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
2051 * During load, we set FirstUse if a new superblock was written.
2052 * There are two reasons we might not have a superblock:
2053 * 1) The raid set is brand new - in which case, all of the
2054 * devices must have their In_sync bit set. Also,
2055 * recovery_cp must be 0, unless forced.
2056 * 2) This is a new device being added to an old raid set
2057 * and the new device needs to be rebuilt - in which
2058 * case the In_sync bit will /not/ be set and
2059 * recovery_cp must be MaxSector.
2060 * 3) One or more new devices are being added to an old
2061 * raid set during takeover to a higher raid level
2062 * to provide capacity for redundancy or during reshape
2063 * to add capacity to grow the raid set.
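*
* The loop below tallies these cases: FirstUse devices count into
* new_devs, out-of-sync devices into rebuilds, and devices matching
* both into rebuild_and_new; the combinations are validated afterwards.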
2066 rdev_for_each(r, mddev) {
2067 if (test_bit(FirstUse, &r->flags))
2070 if (!test_bit(In_sync, &r->flags)) {
2071 DMINFO("Device %d specified for rebuild; clearing superblock",
2075 if (test_bit(FirstUse, &r->flags))
2082 if (new_devs == rs->raid_disks || !rebuilds) {
2083 /* Replace a broken device */
2084 if (new_devs == 1 && !rs->delta_disks)
2086 if (new_devs == rs->raid_disks) {
2087 DMINFO("Superblocks created for new raid set");
2088 set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
2089 mddev->recovery_cp = 0;
2090 } else if (new_devs != rebuilds &&
2091 new_devs != rs->delta_disks) {
2092 DMERR("New device injected into existing raid set without "
2093 "'delta_disks' or 'rebuild' parameter specified");
2096 } else if (new_devs && new_devs != rebuilds) {
2097 DMERR("%u 'rebuild' devices cannot be injected into"
2098 " a raid set with %u other first-time devices",
2099 rebuilds, new_devs);
2101 } else if (rebuilds) {
2102 if (rebuild_and_new && rebuilds != rebuild_and_new) {
2103 DMERR("new device%s provided without 'rebuild'",
2104 new_devs > 1 ? "s" : "");
2106 } else if (rs_is_recovering(rs)) {
2107 DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)",
2108 (unsigned long long) mddev->recovery_cp);
2110 } else if (rs_is_reshaping(rs)) {
2111 DMERR("'rebuild' specified while raid set is being reshaped (reshape_position=%llu)",
2112 (unsigned long long) mddev->reshape_position);
2118 * Now we set the Faulty bit for those devices that are
2119 * recorded in the superblock as failed.
2121 sb_retrieve_failed_devices(sb, failed_devices);
2122 rdev_for_each(r, mddev) {
2125 sb2 = page_address(r->sb_page);
2126 sb2->failed_devices = 0;
2127 memset(sb2->extended_failed_devices, 0, sizeof(sb2->extended_failed_devices));
2130 * Check for any device re-ordering.
2132 if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
2133 role = le32_to_cpu(sb2->array_position);
2137 if (role != r->raid_disk) {
2138 if (__is_raid10_near(mddev->layout)) {
2139 if (mddev->raid_disks % __raid10_near_copies(mddev->layout) ||
2140 rs->raid_disks % rs->raid10_copies) {
2142 "Cannot change raid10 near set to odd # of devices!";
2146 sb2->array_position = cpu_to_le32(r->raid_disk);
2148 } else if (!(rs_is_raid10(rs) && rt_is_raid0(rs->raid_type)) &&
2149 !(rs_is_raid0(rs) && rt_is_raid10(rs->raid_type)) &&
2150 !rt_is_raid1(rs->raid_type)) {
2151 rs->ti->error = "Cannot change device positions in raid set";
2155 DMINFO("raid device #%d now at position #%d", role, r->raid_disk);
2159 * Partial recovery is performed on
2160 * failed devices that have returned.
2162 if (test_bit(role, (void *) failed_devices))
2163 set_bit(Faulty, &r->flags);
2170 static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
2172 struct mddev *mddev = &rs->md;
2173 struct dm_raid_superblock *sb;
2175 if (rs_is_raid0(rs) || !rdev->sb_page)
2178 sb = page_address(rdev->sb_page);
2181 * If mddev->events is not set, we know we have not yet initialized the array.
2184 if (!mddev->events && super_init_validation(rs, rdev))
2187 if (le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
2188 rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags";
2192 if (sb->incompat_features) {
2193 rs->ti->error = "Unable to assemble array: No incompatible feature flags supported yet";
2197 /* Enable bitmap creation for RAID levels != 0 */
2198 mddev->bitmap_info.offset = rt_is_raid0(rs->raid_type) ? 0 : to_sector(4096);
2199 rdev->mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
2201 if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
2202 /* Retrieve device size stored in superblock to be prepared for shrink */
2203 rdev->sectors = le64_to_cpu(sb->sectors);
2204 rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
2205 if (rdev->recovery_offset == MaxSector)
2206 set_bit(In_sync, &rdev->flags);
2208 * If no reshape in progress -> we're recovering single
2209 * disk(s) and have to set the device(s) to out-of-sync
2211 else if (!rs_is_reshaping(rs))
2212 clear_bit(In_sync, &rdev->flags); /* Mandatory for recovery */
2216 * If a device comes back, set it as not In_sync and no longer faulty.
2218 if (test_and_clear_bit(Faulty, &rdev->flags)) {
2219 rdev->recovery_offset = 0;
2220 clear_bit(In_sync, &rdev->flags);
2221 rdev->saved_raid_disk = rdev->raid_disk;
2224 /* Reshape support -> restore respective data offsets */
2225 rdev->data_offset = le64_to_cpu(sb->data_offset);
2226 rdev->new_data_offset = le64_to_cpu(sb->new_data_offset);
2232 * Analyse superblocks and select the freshest.
2234 static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
2237 struct raid_dev *dev;
2238 struct md_rdev *rdev, *tmp, *freshest;
2239 struct mddev *mddev = &rs->md;
2242 rdev_for_each_safe(rdev, tmp, mddev) {
2244 * Skipping super_load due to CTR_FLAG_SYNC will cause
2245 * the array to undergo initialization again as
2246 * though it were new. This is the intended effect
2247 * of the "sync" directive.
2249 * When reshaping capability is added, we must ensure
2250 * that the "sync" directive is disallowed during the reshape.
2253 if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
2256 if (!rdev->meta_bdev)
2259 r = super_load(rdev, freshest);
2268 dev = container_of(rdev, struct raid_dev, rdev);
2270 dm_put_device(ti, dev->meta_dev);
2272 dev->meta_dev = NULL;
2273 rdev->meta_bdev = NULL;
2276 put_page(rdev->sb_page);
2278 rdev->sb_page = NULL;
2280 rdev->sb_loaded = 0;
2283 * We might be able to salvage the data device
2284 * even though the meta device has failed. For
2285 * now, we behave as though '- -' had been
2286 * set for this device in the table.
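*
* (In a table line, "- -" denotes an absent metadata/data device
* pair, e.g. the hypothetical "... 3 - 8:17 - 8:18 - -" for a
* raid1 set whose third member is gone.)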
2289 dm_put_device(ti, dev->data_dev);
2291 dev->data_dev = NULL;
2294 list_del(&rdev->same_set);
2301 if (validate_raid_redundancy(rs)) {
2302 rs->ti->error = "Insufficient redundancy to activate array";
2307 * Validation of the freshest device provides the source of
2308 * validation for the remaining devices.
2310 rs->ti->error = "Unable to assemble array: Invalid superblocks";
2311 if (super_validate(rs, freshest))
2314 rdev_for_each(rdev, mddev)
2315 if ((rdev != freshest) && super_validate(rs, rdev))
2321 * Adjust data_offset and new_data_offset on all disk members of @rs
2322 * for out of place reshaping if requested by the constructor.
2324 * We need free space at the beginning of each raid disk for forward
2325 * and at the end for backward reshapes which userspace has to provide
2326 * via remapping/reordering of space.
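*
* A sketch of where that free space sits (sizes purely illustrative):
*
* forward reshape:  |- free -|--------- data ---------|
* backward reshape: |--------- data ---------|- free -|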
2328 static int rs_adjust_data_offsets(struct raid_set *rs)
2330 sector_t data_offset = 0, new_data_offset = 0;
2331 struct md_rdev *rdev;
2333 /* Constructor did not request data offset change */
2334 if (!test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
2335 if (!rs_is_reshapable(rs))
2341 /* HM FIXME: get InSync raid_dev? */
2342 rdev = &rs->dev[0].rdev;
2344 if (rs->delta_disks < 0) {
2346 * Removing disks (reshaping backwards):
2348 * - before reshape: data is at offset 0 and free space
2349 * is at end of each component LV
2351 * - after reshape: data is at offset rs->data_offset != 0 on each component LV
2354 new_data_offset = rs->data_offset;
2356 } else if (rs->delta_disks > 0) {
2358 * Adding disks (reshaping forwards):
2360 * - before reshape: data is at offset rs->data_offset != 0 and
2361 * free space is at begin of each component LV
2363 * - after reshape: data is at offset 0 on each component LV
2365 data_offset = rs->data_offset;
2366 new_data_offset = 0;
2370 * User space passes in 0 for data offset after having removed reshape space
2372 * - or - (data offset != 0)
2374 * Changing RAID layout or chunk size -> toggle offsets
2376 * - before reshape: data is at offset rs->data_offset == 0 and
2377 * free space is at end of each component LV
2378 * - or -
2379 * data is at offset rs->data_offset != 0 and
2380 * free space is at begin of each component LV
2382 * - after reshape: data is at offset 0 if it was at offset != 0
2383 * or at offset != 0 if it was at offset 0
2384 * on each component LV
2387 data_offset = rs->data_offset ? rdev->data_offset : 0;
2388 new_data_offset = data_offset ? 0 : rs->data_offset;
2389 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
2393 * Make sure we have a minimum number of free sectors per device.
2395 if (rs->data_offset &&
2396 to_sector(i_size_read(rdev->bdev->bd_inode)) - rdev->sectors < MIN_FREE_RESHAPE_SPACE) {
2397 rs->ti->error = data_offset ? "No space for forward reshape" :
2398 "No space for backward reshape";
2402 /* Adjust data offsets on all rdevs */
2403 rdev_for_each(rdev, &rs->md) {
2404 rdev->data_offset = data_offset;
2405 rdev->new_data_offset = new_data_offset;
2411 /* Userspace reordered disks -> adjust raid_disk indexes in @rs */
2412 static void __reorder_raid_disk_indexes(struct raid_set *rs)
2415 struct md_rdev *rdev;
2417 rdev_for_each(rdev, &rs->md) {
2418 rdev->raid_disk = i++;
2419 rdev->saved_raid_disk = rdev->new_raid_disk = -1;
2424 * Setup @rs for takeover by a different raid level
2426 static int rs_setup_takeover(struct raid_set *rs)
2428 struct mddev *mddev = &rs->md;
2429 struct md_rdev *rdev;
2430 unsigned int d = mddev->raid_disks = rs->raid_disks;
2431 sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset;
2433 if (rt_is_raid10(rs->raid_type)) {
2434 if (mddev->level == 0) {
2435 /* Userspace reordered disks -> adjust raid_disk indexes */
2436 __reorder_raid_disk_indexes(rs);
2438 /* raid0 -> raid10_far layout */
2439 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR,
2441 } else if (mddev->level == 1)
2442 /* raid1 -> raid10_near layout */
2443 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
2450 clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
2451 mddev->recovery_cp = MaxSector;
2454 rdev = &rs->dev[d].rdev;
2456 if (test_bit(d, (void *) rs->rebuild_disks)) {
2457 clear_bit(In_sync, &rdev->flags);
2458 clear_bit(Faulty, &rdev->flags);
2459 mddev->recovery_cp = rdev->recovery_offset = 0;
2460 /* Bitmap has to be created when we do an "up" takeover */
2461 set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
2464 rdev->new_data_offset = new_data_offset;
2472 * - change raid layout
2473 * - change chunk size
2477 static int rs_setup_reshape(struct raid_set *rs)
2480 unsigned int cur_raid_devs, d;
2481 struct mddev *mddev = &rs->md;
2482 struct md_rdev *rdev;
2484 mddev->delta_disks = rs->delta_disks;
2485 cur_raid_devs = mddev->raid_disks;
2487 /* Ignore impossible layout change whilst adding/removing disks */
2488 if (mddev->delta_disks &&
2489 mddev->layout != mddev->new_layout) {
2490 DMINFO("Ignoring invalid layout change with delta_disks=%d", rs->delta_disks);
2491 mddev->new_layout = mddev->layout;
2495 * Adjust array size:
2497 * - in case of adding disks, array size has
2498 * to grow after the disk adding reshape,
2499 * which'll happen in the event handler;
2500 * reshape will happen forward, so space has to
2501 * be available at the beginning of each disk
2503 * - in case of removing disks, array size
2504 * has to shrink before starting the reshape,
2505 * which'll happen here;
2506 * reshape will happen backward, so space has to
2507 * be available at the end of each disk
2509 * - data_offset and new_data_offset are
2510 * adjusted for aforementioned out of place
2511 * reshaping based on userspace passing in
2512 * the "data_offset <sectors>" key/value
2513 * pair via the constructor
2517 if (rs->delta_disks > 0) {
2518 /* Prepare disks for check in raid4/5/6/10 {check|start}_reshape */
2519 for (d = cur_raid_devs; d < rs->raid_disks; d++) {
2520 rdev = &rs->dev[d].rdev;
2521 clear_bit(In_sync, &rdev->flags);
2524 * saved_raid_disk needs to be -1, or recovery_offset will be set to 0
2525 * by md, which'll store that erroneously in the superblock on reshape
2527 rdev->saved_raid_disk = -1;
2528 rdev->raid_disk = d;
2530 rdev->sectors = mddev->dev_sectors;
2531 rdev->recovery_offset = MaxSector;
2534 mddev->reshape_backwards = 0; /* adding disks -> forward reshape */
2536 /* Remove disk(s) */
2537 } else if (rs->delta_disks < 0) {
2538 r = rs_set_dev_and_array_sectors(rs, true);
2539 mddev->reshape_backwards = 1; /* removing disk(s) -> backward reshape */
2541 /* Change layout and/or chunk size */
2544 * Reshape layout (e.g. raid5_ls -> raid5_n) and/or chunk size:
2546 * keeping the number of disks and doing a layout change ->
2548 * toggle reshape_backwards depending on data_offset:
2550 * - free space upfront -> reshape forward
2552 * - free space at the end -> reshape backward
2555 * This utilizes free reshape space, avoiding the need
2556 * for userspace to move (parts of) LV segments in
2557 * case of a layout/chunksize change. For disk
2558 * adding/removing, reshape space has to be at
2559 * the proper address (see above with delta_disks):
2561 * add disk(s) -> begin
2562 * remove disk(s) -> end
2564 mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1;
2571 * Enable/disable discard support on RAID set depending on
2572 * RAID level and discard properties of underlying RAID members.
2574 static void configure_discard_support(struct raid_set *rs)
2578 struct dm_target *ti = rs->ti;
2580 /* Assume discards not supported until after checks below. */
2581 ti->discards_supported = false;
2583 /* RAID level 4,5,6 require discard_zeroes_data for data integrity! */
2584 raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);
2586 for (i = 0; i < rs->md.raid_disks; i++) {
2587 struct request_queue *q;
2589 if (!rs->dev[i].rdev.bdev)
2592 q = bdev_get_queue(rs->dev[i].rdev.bdev);
2593 if (!q || !blk_queue_discard(q))
2597 if (!q->limits.discard_zeroes_data)
2599 if (!devices_handle_discard_safely) {
2600 DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty.");
2601 DMERR("Set dm-raid.devices_handle_discard_safely=Y to override.");
2607 /* All RAID members properly support discards */
2608 ti->discards_supported = true;
2611 * RAID1 and RAID10 personalities require bio splitting,
2612 * RAID0/4/5/6 don't need it and process large discard bios properly.
2614 ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10);
2615 ti->num_discard_bios = 1;
2619 * Construct a RAID0/1/10/4/5/6 mapping:
2621 * <raid_type> <#raid_params> <raid_params>{0,} \
2622 * <#raid_devs> [<meta_dev1> <dev1>]{1,}
2624 * <raid_params> varies by <raid_type>. See 'parse_raid_params' for
2625 * details on possible <raid_params>.
2627 * Userspace is free to initialize the metadata devices (hence the superblocks)
2628 * to enforce recreation based on the passed-in table parameters.
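*
* Illustrative table line (device numbers and length hypothetical; see
* Documentation/device-mapper/dm-raid.txt for worked examples):
*
* 0 1960893648 raid raid4 1 2048 \
* 5 - 8:17 - 8:18 - 8:19 - 8:20 - 8:21
*
* i.e. a 5-device raid4 set with a 1 MiB chunk size and no metadata devices.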
2631 static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
2634 struct raid_type *rt;
2635 unsigned num_raid_params, num_raid_devs;
2636 struct raid_set *rs = NULL;
2638 struct rs_layout rs_layout;
2639 struct dm_arg_set as = { argc, argv }, as_nrd;
2640 struct dm_arg _args[] = {
2641 { 0, as.argc, "Cannot understand number of raid parameters" },
2642 { 1, 254, "Cannot understand number of raid devices parameters" }
2645 /* Must have <raid_type> */
2646 arg = dm_shift_arg(&as);
2648 ti->error = "No arguments";
2652 rt = get_raid_type(arg);
2654 ti->error = "Unrecognised raid_type";
2658 /* Must have <#raid_params> */
2659 if (dm_read_arg_group(_args, &as, &num_raid_params, &ti->error))
2662 /* number of raid device tuples <meta_dev data_dev> */
2664 dm_consume_args(&as_nrd, num_raid_params);
2665 _args[1].max = (as_nrd.argc - 1) / 2;
2666 if (dm_read_arg(_args + 1, &as_nrd, &num_raid_devs, &ti->error))
2669 if (!__within_range(num_raid_devs, 1, MAX_RAID_DEVICES)) {
2670 ti->error = "Invalid number of supplied raid devices";
2674 rs = raid_set_alloc(ti, rt, num_raid_devs);
2678 r = parse_raid_params(rs, &as, num_raid_params);
2682 r = parse_dev_params(rs, &as);
2686 rs->md.sync_super = super_sync;
2688 r = rs_set_dev_and_array_sectors(rs, false);
2693 * Back up any new raid set level, layout, ...
2694 * requested to be able to compare to superblock
2695 * members for conversion decisions.
2697 rs_config_backup(rs, &rs_layout);
2699 r = analyse_superblocks(ti, rs);
2703 INIT_WORK(&rs->md.event_work, do_table_event);
2705 ti->num_flush_bios = 1;
2707 /* Restore any requested new layout for conversion decision */
2708 rs_config_restore(rs, &rs_layout);
2710 if (test_bit(MD_ARRAY_FIRST_USE, &rs->md.flags)) {
2711 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
2713 } else if (rs_is_reshaping(rs))
2714 ; /* skip rs setup */
2715 else if (rs_takeover_requested(rs)) {
2716 if (rs_is_reshaping(rs)) {
2717 ti->error = "Can't takeover a reshaping raid set";
2722 * If a takeover is needed, just set the level to
2723 * the new requested one and allow the raid set to run.
2725 r = rs_check_takeover(rs);
2729 r = rs_setup_takeover(rs);
2733 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
2734 set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags);
2736 } else if (rs_reshape_requested(rs)) {
2737 if (rs_is_reshaping(rs)) {
2738 ti->error = "raid set already reshaping!";
2742 if (rs_is_raid10(rs)) {
2743 if (rs->raid_disks != rs->md.raid_disks &&
2744 __is_raid10_near(rs->md.layout) &&
2745 rs->raid10_copies &&
2746 rs->raid10_copies != __raid10_near_copies(rs->md.layout)) {
2748 * raid disks have to be a multiple of the data copies to allow this conversion;
2750 * this is actually not a reshape, it is a
2751 * rebuild of any additional mirrors per group.
2753 if (rs->raid_disks % rs->raid10_copies) {
2754 ti->error = "Can't reshape raid10 mirror groups";
2758 /* Userspace reordered disks to add/remove mirrors -> adjust raid_disk indexes */
2759 __reorder_raid_disk_indexes(rs);
2760 rs->md.layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
2762 rs->md.new_layout = rs->md.layout;
2765 set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags);
2767 } else if (rs_is_raid456(rs))
2768 set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags);
2771 * HM FIXME: process raid1 via delta_disks as well?
2772 * Would cause allocations in raid1->check_reshape
2773 * though, thus more issues with potential failures
2775 else if (rs_is_raid1(rs)) {
2776 set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags);
2777 rs->md.raid_disks = rs->raid_disks;
2780 if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
2781 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
2782 set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags);
2785 if (rs->md.raid_disks < rs->raid_disks)
2786 set_bit(MD_ARRAY_FIRST_USE, &rs->md.flags);
2792 /* If constructor requested it, change data and new_data offsets */
2793 r = rs_adjust_data_offsets(rs);
2797 /* Start raid set read-only and assumed clean to change in raid_resume() */
2800 set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
2802 /* The mddev lock has to be held while running the array */
2803 mddev_lock_nointr(&rs->md);
2804 r = md_run(&rs->md);
2805 rs->md.in_sync = 0; /* Assume already marked dirty */
2808 ti->error = "Failed to run raid array";
2809 mddev_unlock(&rs->md);
2813 rs->callbacks.congested_fn = raid_is_congested;
2814 dm_table_add_target_callbacks(ti->table, &rs->callbacks);
2816 mddev_suspend(&rs->md);
2818 /* Try to adjust the raid4/5/6 stripe cache size to the stripe size */
2819 if (rs_is_raid456(rs)) {
2820 r = rs_set_raid456_stripe_cache(rs);
2822 goto bad_stripe_cache;
2825 /* Now do an early reshape check */
2826 if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
2827 r = rs_check_reshape(rs);
2831 /* Restore new, ctr requested layout to perform check */
2832 rs_config_restore(rs, &rs_layout);
2834 r = rs->md.pers->check_reshape(&rs->md);
2836 ti->error = "Reshape check failed";
2837 goto bad_check_reshape;
2841 mddev_unlock(&rs->md);
2853 static void raid_dtr(struct dm_target *ti)
2855 struct raid_set *rs = ti->private;
2857 list_del_init(&rs->callbacks.list);
2862 static int raid_map(struct dm_target *ti, struct bio *bio)
2864 struct raid_set *rs = ti->private;
2865 struct mddev *mddev = &rs->md;
2868 * If we're reshaping to add disk(s), ti->len and
2869 * mddev->array_sectors will differ during the process
2870 * (ti->len > mddev->array_sectors), so we have to requeue
2871 * bios with addresses > mddev->array_sectors here or
2872 * there will occur accesses past EOD of the component
2873 * data images, thus erroring the raid set.
2875 if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
2876 return DM_MAPIO_REQUEUE;
2878 mddev->pers->make_request(mddev, bio);
2880 return DM_MAPIO_SUBMITTED;
2883 /* Return string describing the current sync action of @mddev */
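/*
 * The possible results follow md's sync_action names: "frozen",
 * "reshape", "resync", "check", "repair", "recover" and, as the
 * fallback, "idle".
 */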
2884 static const char *decipher_sync_action(struct mddev *mddev)
2886 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
2889 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2890 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
2891 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2894 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2895 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2897 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2902 if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
2910 * Return status string for @rdev
2912 * Status characters:
2914 * 'D' = Dead/Failed device
2915 * 'a' = Alive but not in-sync
2916 * 'A' = Alive and in-sync
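*
* E.g. a healthy two-device raid1 reports "AA", while the same set
* still rebuilding its second device reports "Aa" (values illustrative).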
2918 static const char *__raid_dev_status(struct md_rdev *rdev, bool array_in_sync)
2920 if (test_bit(Faulty, &rdev->flags))
2922 else if (!array_in_sync || !test_bit(In_sync, &rdev->flags))
2928 /* Helper to return resync/reshape progress for @rs and @array_in_sync */
2929 static sector_t rs_get_progress(struct raid_set *rs,
2930 sector_t resync_max_sectors, bool *array_in_sync)
2932 sector_t r, recovery_cp, curr_resync_completed;
2933 struct mddev *mddev = &rs->md;
2935 curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp;
2936 recovery_cp = mddev->recovery_cp;
2937 *array_in_sync = false;
2939 if (rs_is_raid0(rs)) {
2940 r = resync_max_sectors;
2941 *array_in_sync = true;
2944 r = mddev->reshape_position;
2946 /* Reshape is relative to the array size */
2947 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
2949 if (r == MaxSector) {
2950 *array_in_sync = true;
2951 r = resync_max_sectors;
2953 /* Have to reverse on backward reshape */
2954 if (mddev->reshape_backwards)
2955 r = mddev->array_sectors - r;
2957 /* Divide by # of data stripes */
2958 sector_div(r, mddev_data_stripes(rs));
2961 /* Sync is relative to the component device size */
2962 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2963 r = curr_resync_completed;
2967 if (r == MaxSector) {
2971 *array_in_sync = true;
2972 r = resync_max_sectors;
2973 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2975 * If "check" or "repair" is occurring, the raid set has
2976 * undergone an initial sync and the health characters
2977 * should not be 'a' anymore.
2979 *array_in_sync = true;
2981 struct md_rdev *rdev;
2984 * The raid set may be doing an initial sync, or it may
2985 * be rebuilding individual components. If all the
2986 * devices are In_sync, then it is the raid set that is
2987 * being initialized.
2989 rdev_for_each(rdev, mddev)
2990 if (!test_bit(In_sync, &rdev->flags))
2991 *array_in_sync = true;
2993 r = 0; /* HM FIXME: TESTME: https://bugzilla.redhat.com/show_bug.cgi?id=1210637 ? */
3001 /* Helper to return @dev name or "-" if !@dev */
3002 static const char *__get_dev_name(struct dm_dev *dev)
3004 return dev ? dev->name : "-";
3007 static void raid_status(struct dm_target *ti, status_type_t type,
3008 unsigned int status_flags, char *result, unsigned int maxlen)
3010 struct raid_set *rs = ti->private;
3011 struct mddev *mddev = &rs->md;
3012 struct r5conf *conf = mddev->private;
3013 int max_nr_stripes = conf ? conf->max_nr_stripes : 0;
3015 unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */
3016 unsigned int sz = 0;
3017 unsigned int write_mostly_params = 0;
3018 sector_t progress, resync_max_sectors, resync_mismatches;
3019 const char *sync_action;
3020 struct raid_type *rt;
3021 struct md_rdev *rdev;
3024 case STATUSTYPE_INFO:
3025 /* *Should* always succeed */
3026 rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
3030 DMEMIT("%s %d ", rt->name, mddev->raid_disks);
3032 /* Access most recent mddev properties for status output */
3034 /* Get sensible max sectors even if raid set not yet started */
3035 resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ?
3036 mddev->resync_max_sectors : mddev->dev_sectors;
3037 progress = rs_get_progress(rs, resync_max_sectors, &array_in_sync);
3038 resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
3039 atomic64_read(&mddev->resync_mismatches) : 0;
3040 sync_action = decipher_sync_action(&rs->md);
3042 /* HM FIXME: do we want another state char for raid0? It shows 'D' or 'A' now */
3043 rdev_for_each(rdev, mddev)
3044 DMEMIT(__raid_dev_status(rdev, array_in_sync));
3047 * In-sync/Reshape ratio:
3048 * The in-sync ratio shows the progress of:
3049 * - Initializing the raid set
3050 * - Rebuilding a subset of devices of the raid set
3051 * The user can distinguish between the two by referring
3052 * to the status characters.
3054 * The reshape ratio shows the progress of
3055 * changing the raid layout or the number of
3056 * disks of a raid set
3058 DMEMIT(" %llu/%llu", (unsigned long long) progress,
3059 (unsigned long long) resync_max_sectors);
3065 * See Documentation/device-mapper/dm-raid.txt for
3066 * information on each of these states.
3068 DMEMIT(" %s", sync_action);
3073 * resync_mismatches/mismatch_cnt
3074 * This field shows the number of discrepancies found when
3075 * performing a "check" of the raid set.
3077 DMEMIT(" %llu", (unsigned long long) resync_mismatches);
3082 * data_offset (needed for out of place reshaping)
3083 * This field shows the data offset into the data
3084 * image LV where the first stripe's data starts.
3086 * We keep data_offset equal on all raid disks of the set,
3087 * so retrieving it from the first raid disk is sufficient.
3089 DMEMIT(" %llu", (unsigned long long) rs->dev[0].rdev.data_offset);
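/*
 * Assembled from the fields above, an illustrative STATUSTYPE_INFO
 * result (all values hypothetical):
 *
 * raid1 2 AA 976562176/976562176 idle 0 0
 */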
3092 case STATUSTYPE_TABLE:
3093 /* Report the table line string you would use to construct this raid set */
3095 /* Calculate raid parameter count */
3096 rdev_for_each(rdev, mddev)
3097 if (test_bit(WriteMostly, &rdev->flags))
3098 write_mostly_params += 2;
3099 raid_param_cnt += memweight(rs->rebuild_disks,
3100 DISKS_ARRAY_ELEMS * sizeof(*rs->rebuild_disks)) * 2 +
3101 write_mostly_params +
3102 hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_NO_ARGS) +
3103 hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2;
3104 /* Emit table line */
3105 DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors);
3106 if (test_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags))
3107 DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT),
3108 raid10_md_layout_to_format(mddev->layout));
3109 if (test_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags))
3110 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES),
3111 raid10_md_layout_to_copies(mddev->layout));
3112 if (test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
3113 DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC));
3114 if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
3115 DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_SYNC));
3116 if (test_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags))
3117 DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE),
3118 (unsigned long long) to_sector(mddev->bitmap_info.chunksize));
3119 if (test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags))
3120 DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET),
3121 (unsigned long long) rs->data_offset);
3122 if (test_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags))
3123 DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP),
3124 mddev->bitmap_info.daemon_sleep);
3125 if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags))
3126 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS),
3127 mddev->delta_disks);
3128 if (test_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags))
3129 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE),
3131 rdev_for_each(rdev, mddev)
3132 if (test_bit(rdev->raid_disk, (void *) rs->rebuild_disks))
3133 DMEMIT(" %s %u", dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD),
3135 rdev_for_each(rdev, mddev)
3136 if (test_bit(WriteMostly, &rdev->flags))
3137 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY),
3139 if (test_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags))
3140 DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND),
3141 mddev->bitmap_info.max_write_behind);
3142 if (test_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags))
3143 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE),
3144 mddev->sync_speed_max);
3145 if (test_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags))
3146 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE),
3147 mddev->sync_speed_min);
3148 DMEMIT(" %d", rs->raid_disks);
3149 rdev_for_each(rdev, mddev) {
3150 struct raid_dev *rd = container_of(rdev, struct raid_dev, rdev);
3152 DMEMIT(" %s %s", __get_dev_name(rd->meta_dev),
3153 __get_dev_name(rd->data_dev));
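/*
 * Handle messages directing md recovery, sent e.g. via the
 * (illustrative) "dmsetup message <mapped_device> 0 check".
 * Accepted actions: "frozen", "idle", "resync", "recover",
 * "check" and "repair".
 */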
3158 static int raid_message(struct dm_target *ti, unsigned argc, char **argv)
3160 struct raid_set *rs = ti->private;
3161 struct mddev *mddev = &rs->md;
3163 if (!mddev->pers || !mddev->pers->sync_request)
3166 if (!strcasecmp(argv[0], "frozen"))
3167 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3169 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3171 if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
3172 if (mddev->sync_thread) {
3173 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3174 md_reap_sync_thread(mddev);
3176 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3177 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
3179 else if (!strcasecmp(argv[0], "resync"))
3180 ; /* MD_RECOVERY_NEEDED set below */
3181 else if (!strcasecmp(argv[0], "recover"))
3182 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3184 if (!strcasecmp(argv[0], "check"))
3185 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3186 else if (strcasecmp(argv[0], "repair"))
3188 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3189 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3191 if (mddev->ro == 2) {
3192 /* A write to sync_action is enough to justify
3193 * canceling read-auto mode
3196 if (!mddev->suspended && mddev->sync_thread)
3197 md_wakeup_thread(mddev->sync_thread);
3199 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3200 if (!mddev->suspended && mddev->thread)
3201 md_wakeup_thread(mddev->thread);
3206 static int raid_iterate_devices(struct dm_target *ti,
3207 iterate_devices_callout_fn fn, void *data)
3209 struct raid_set *rs = ti->private;
3213 for (i = 0; !r && i < rs->md.raid_disks; i++)
3214 if (rs->dev[i].data_dev)
3216 rs->dev[i].data_dev,
3217 0, /* No offset on data devs */
3224 static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
3226 struct raid_set *rs = ti->private;
3227 unsigned chunk_size = rs->md.chunk_sectors << 9;
3228 struct r5conf *conf = rs->md.private;
3230 blk_limits_io_min(limits, chunk_size);
3231 blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
3234 static void raid_presuspend(struct dm_target *ti)
3236 struct raid_set *rs = ti->private;
3238 md_stop_writes(&rs->md);
3241 static void raid_postsuspend(struct dm_target *ti)
3243 struct raid_set *rs = ti->private;
3245 if (test_and_clear_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) {
3246 if (!rs->md.suspended)
3247 mddev_suspend(&rs->md);
3252 static void attempt_restore_of_faulty_devices(struct raid_set *rs)
3255 uint64_t failed_devices, cleared_failed_devices = 0;
3256 unsigned long flags;
3257 struct dm_raid_superblock *sb;
3260 for (i = 0; i < rs->md.raid_disks; i++) {
3261 r = &rs->dev[i].rdev;
3262 if (test_bit(Faulty, &r->flags) && r->sb_page &&
3263 sync_page_io(r, 0, r->sb_size, r->sb_page,
3264 REQ_OP_READ, 0, true)) {
3265 DMINFO("Faulty %s device #%d has readable super block."
3266 " Attempting to revive it.",
3267 rs->raid_type->name, i);
3270 * Faulty bit may be set, but sometimes the array can
3271 * be suspended before the personalities can respond
3272 * by removing the device from the array (i.e. calling
3273 * 'hot_remove_disk'). If they haven't yet removed
3274 * the failed device, its 'raid_disk' number will be
3275 * '>= 0' - meaning we must call this function ourselves.
3278 if ((r->raid_disk >= 0) &&
3279 (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0))
3280 /* Failed to revive this device, try next */
3284 r->saved_raid_disk = i;
3286 clear_bit(Faulty, &r->flags);
3287 clear_bit(WriteErrorSeen, &r->flags);
3288 clear_bit(In_sync, &r->flags);
3289 if (r->mddev->pers->hot_add_disk(r->mddev, r)) {
3291 r->saved_raid_disk = -1;
3294 r->recovery_offset = 0;
3295 cleared_failed_devices |= 1ULL << i;
3299 if (cleared_failed_devices) {
3300 rdev_for_each(r, &rs->md) {
3301 sb = page_address(r->sb_page);
3302 failed_devices = le64_to_cpu(sb->failed_devices);
3303 failed_devices &= ~cleared_failed_devices;
3304 sb->failed_devices = cpu_to_le64(failed_devices);
3309 static int __load_dirty_region_bitmap(struct raid_set *rs)
3313 /* Try loading the bitmap unless "raid0", which does not have one */
3314 if (!rs_is_raid0(rs) &&
3315 !test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
3316 r = bitmap_load(&rs->md);
3318 DMERR("Failed to load bitmap");
3324 /* Enforce updating all superblocks */
3325 static void rs_update_sbs(struct raid_set *rs)
3327 struct mddev *mddev = &rs->md;
3330 set_bit(MD_CHANGE_DEVS, &mddev->flags);
3332 md_update_sb(mddev, 1);
3337 * Reshape changes the raid algorithm of @rs to a new one within the
3338 * personality (e.g. raid6_zr -> raid6_nc), changes the stripe size, or
3339 * adds/removes disks from the raid set, thus growing/shrinking or resizing it.
3341 * Call mddev_lock_nointr() before!
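*
* Expected call pattern (as in raid_preresume()):
*
* mddev_lock_nointr(mddev);
* r = rs_start_reshape(rs);
* mddev_unlock(mddev);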
3343 static int rs_start_reshape(struct raid_set *rs)
3346 struct mddev *mddev = &rs->md;
3347 struct md_personality *pers = mddev->pers;
3349 r = rs_setup_reshape(rs);
3353 /* Needs to be resumed to be able to start reshape; recovery is frozen until raid_resume() though */
3354 if (mddev->suspended)
3355 mddev_resume(mddev);
3358 * Check any reshape constraints enforced by the personality
3360 * May as well already kick the reshape off so that pers->start_reshape() becomes optional.
3362 r = pers->check_reshape(mddev);
3364 rs->ti->error = "pers->check_reshape() failed";
3369 * Personality may not provide start reshape method in which
3370 * case check_reshape above has already covered everything
3372 if (pers->start_reshape) {
3373 r = pers->start_reshape(mddev);
3375 rs->ti->error = "pers->start_reshape() failed";
3380 /* Suspend because a resume will happen in raid_resume() */
3381 if (!mddev->suspended)
3382 mddev_suspend(mddev);
3385 * Now that the reshape is set up, update the superblocks to
3386 * reflect the fact so that a table reload will
3387 * access proper superblock content in the ctr.
3394 static int raid_preresume(struct dm_target *ti)
3397 struct raid_set *rs = ti->private;
3398 struct mddev *mddev = &rs->md;
3400 /* This is a resume after a suspend of the set -> it's already started */
3401 if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
3405 * The superblocks need to be updated on disk if the
3406 * array is new or new devices got added (and thus zeroed
3407 * out by userspace); otherwise __load_dirty_region_bitmap
3408 * will overwrite them in core with old data or fail.
3410 if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags))
3414 * Disable/enable discard support on raid set after any
3415 * conversion, because devices can have been added
3417 configure_discard_support(rs);
3419 /* Load the bitmap from disk unless raid0 */
3420 r = __load_dirty_region_bitmap(rs);
3424 /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
3425 if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) &&
3426 mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
3427 r = bitmap_resize(mddev->bitmap, mddev->dev_sectors,
3428 to_bytes(rs->requested_bitmap_chunk_sectors), 0);
3430 DMERR("Failed to resize bitmap");
3433 /* Check for any resize/reshape on @rs and adjust/initiate */
3434 /* Be prepared for mddev_resume() in raid_resume() */
3435 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3436 if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
3437 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3438 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3439 mddev->resync_min = mddev->recovery_cp;
3442 rs_set_capacity(rs);
3444 /* Check for any reshape request and region size change unless new raid set */
3445 if (test_and_clear_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
3446 /* Initiate a reshape. */
3447 mddev_lock_nointr(mddev);
3448 r = rs_start_reshape(rs);
3449 mddev_unlock(mddev);
3451 DMWARN("Failed to check/start reshape, continuing without change");
3458 static void raid_resume(struct dm_target *ti)
3460 struct raid_set *rs = ti->private;
3461 struct mddev *mddev = &rs->md;
3463 if (test_and_set_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) {
3465 * A secondary resume while the device is active.
3466 * Take this opportunity to check whether any failed
3467 * devices are reachable again.
3469 attempt_restore_of_faulty_devices(rs);
3475 * When passing in flags to the ctr, we expect userspace
3476 * to reset them because they made it to the superblocks
3477 * and reload the mapping anyway.
3479 * -> only unfreeze recovery in case of a table reload or
3480 * we'll have a bogus recovery/reshape position
3481 * retrieved from the superblock by the ctr because
3482 * the ongoing recovery/reshape will change it after read.
3484 if (!test_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags))
3485 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3487 if (mddev->suspended)
3488 mddev_resume(mddev);
3492 static struct target_type raid_target = {
3494 .version = {1, 9, 0},
3495 .module = THIS_MODULE,
3499 .status = raid_status,
3500 .message = raid_message,
3501 .iterate_devices = raid_iterate_devices,
3502 .io_hints = raid_io_hints,
3503 .presuspend = raid_presuspend,
3504 .postsuspend = raid_postsuspend,
3505 .preresume = raid_preresume,
3506 .resume = raid_resume,
3509 static int __init dm_raid_init(void)
3511 DMINFO("Loading target version %u.%u.%u",
3512 raid_target.version[0],
3513 raid_target.version[1],
3514 raid_target.version[2]);
3515 return dm_register_target(&raid_target);
3518 static void __exit dm_raid_exit(void)
3520 dm_unregister_target(&raid_target);
3523 module_init(dm_raid_init);
3524 module_exit(dm_raid_exit);
3526 module_param(devices_handle_discard_safely, bool, 0644);
3527 MODULE_PARM_DESC(devices_handle_discard_safely,
3528 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
3530 MODULE_DESCRIPTION(DM_NAME " raid0/1/10/4/5/6 target");
3531 MODULE_ALIAS("dm-raid0");
3532 MODULE_ALIAS("dm-raid1");
3533 MODULE_ALIAS("dm-raid10");
3534 MODULE_ALIAS("dm-raid4");
3535 MODULE_ALIAS("dm-raid5");
3536 MODULE_ALIAS("dm-raid6");
3537 MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
3538 MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
3539 MODULE_LICENSE("GPL");