@@ -3846,11 +3846,20 @@ static int is_mddev_idle(mddev_t *mddev)
 		curr_events = disk_stat_read(disk, sectors[0]) +
 				disk_stat_read(disk, sectors[1]) -
 				atomic_read(&disk->sync_io);
-		/* Allow some slack between valud of curr_events and last_events,
-		 * as there are some uninteresting races.
+		/* The difference between curr_events and last_events
+		 * will be affected by any new non-sync IO (making
+		 * curr_events bigger) and any difference in the amount of
+		 * in-flight sync IO (making curr_events bigger or smaller).
+		 * The amount in-flight is currently limited to
+		 * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6,
+		 * which is at most 4096 sectors.
+		 * These numbers are fairly fragile and should be made
+		 * more robust, probably by enforcing the
+		 * 'window size' that md_do_sync sort-of uses.
+		 *
 		 * Note: the following is an unsigned comparison.
 		 */
-		if ((curr_events - rdev->last_events + 32) > 64) {
+		if ((curr_events - rdev->last_events + 4096) > 8192) {
 			rdev->last_events = curr_events;
 			idle = 0;
 		}
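
A note on the unsigned comparison above: biasing the difference by the slack (4096) and testing against twice the slack (8192) folds "is |curr_events - last_events| larger than 4096?" into a single unsigned compare, because a curr_events that drifted below last_events wraps around to a huge value. A minimal standalone sketch of the idiom; the helper name events_changed and the sample numbers are illustrative, not part of the patch:

#include <stdio.h>

/* Returns 1 when |curr - last| exceeds `slack`.  Because the
 * subtraction is unsigned, curr < last wraps to a huge value,
 * so drift beyond the window in either direction pushes the
 * biased sum past 2*slack, while drift inside it never does. */
static int events_changed(unsigned long curr, unsigned long last,
                          unsigned long slack)
{
	return (curr - last + slack) > 2 * slack;
}

int main(void)
{
	/* within the 4096-sector window either way: treated as idle */
	printf("%d\n", events_changed(14000, 10000, 4096));	/* 0 */
	printf("%d\n", events_changed(10000, 14000, 4096));	/* 0 */
	/* outside the window: real non-sync IO, not idle */
	printf("%d\n", events_changed(15000, 10000, 4096));	/* 1 */
	printf("%d\n", events_changed(10000, 15000, 4096));	/* 1 */
	return 0;
}
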
@@ -4109,7 +4118,7 @@ static void md_do_sync(mddev_t *mddev)
 		if (currspeed > sysctl_speed_limit_min) {
 			if ((currspeed > sysctl_speed_limit_max) ||
 			    !is_mddev_idle(mddev)) {
-				msleep(250);
+				msleep(500);
 				goto repeat;
 			}
 		}
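
For context on the second hunk: after each chunk of resync work, md_do_sync re-checks the current speed and idleness, and backs off when the speed exceeds the sysctl maximum or other IO is active. A rough userspace sketch of that throttle loop; every name and value below is a stand-in rather than a kernel API, and the only detail taken from the patch is the 500 ms back-off:

#include <stdio.h>
#include <unistd.h>

/* Simulated state, chosen so the demo terminates; illustrative only. */
static long speed = 3000;		/* pretend resync KB/sec */
static const long limit_min = 1000;	/* stand-in for sysctl_speed_limit_min */
static const long limit_max = 2000;	/* stand-in for sysctl_speed_limit_max */

static int array_is_idle(void)		/* stand-in for is_mddev_idle() */
{
	return speed <= limit_max;
}

int main(void)
{
	int chunks = 3;			/* pretend three resync chunks remain */

	while (chunks-- > 0) {
		/* ... one resync chunk would be submitted here ... */
repeat:
		if (speed > limit_min) {
			if (speed > limit_max || !array_is_idle()) {
				printf("throttling at %ld KB/s\n", speed);
				usleep(500 * 1000);	/* 500 ms, as in the patch */
				speed -= 600;		/* sleeping drags the average down */
				goto repeat;
			}
		}
	}
	return 0;
}
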