@@ -56,8 +56,6 @@ static DECLARE_MUTEX(swapon_sem);
  */
 static DECLARE_RWSEM(swap_unplug_sem);
 
-#define SWAPFILE_CLUSTER 256
-
 void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
 {
 	swp_entry_t entry;
@@ -84,9 +82,13 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
 	up_read(&swap_unplug_sem);
 }
 
+#define SWAPFILE_CLUSTER 256
+#define LATENCY_LIMIT 256
+
 static inline unsigned long scan_swap_map(struct swap_info_struct *si)
 {
 	unsigned long offset, last_in_cluster;
+	int latency_ration = LATENCY_LIMIT;
 
 	/*
 	 * We try to cluster swap pages by allocating them sequentially
@@ -117,6 +119,10 @@ static inline unsigned long scan_swap_map(struct swap_info_struct *si)
 			si->cluster_next = offset-SWAPFILE_CLUSTER-1;
 			goto cluster;
 		}
+		if (unlikely(--latency_ration < 0)) {
+			cond_resched();
+			latency_ration = LATENCY_LIMIT;
+		}
 	}
 	swap_device_lock(si);
 	goto lowest;
@@ -153,6 +159,10 @@ checks:	if (!(si->flags & SWP_WRITEOK))
 			swap_device_lock(si);
 			goto checks;
 		}
+		if (unlikely(--latency_ration < 0)) {
+			cond_resched();
+			latency_ration = LATENCY_LIMIT;
+		}
 	}
 	swap_device_lock(si);
 	goto lowest;