@@ -345,14 +345,14 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
  * Also when FAIL is set do a force kill because something went
  * wrong earlier.
  */
-static void kill_procs(struct list_head *to_kill, int doit, int trapno,
+static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
 		       int fail, struct page *page, unsigned long pfn,
 		       int flags)
 {
 	struct to_kill *tk, *next;
 
 	list_for_each_entry_safe (tk, next, to_kill, nd) {
-		if (doit) {
+		if (forcekill) {
 			/*
 			 * In case something went wrong with munmapping
 			 * make sure the process doesn't catch the
@@ -858,7 +858,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	struct address_space *mapping;
 	LIST_HEAD(tokill);
 	int ret;
-	int kill = 1;
+	int kill = 1, forcekill;
 	struct page *hpage = compound_head(p);
 	struct page *ppage;
 
@@ -888,7 +888,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * be called inside page lock (it's recommended but not enforced).
 	 */
 	mapping = page_mapping(hpage);
-	if (!PageDirty(hpage) && mapping &&
+	if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
 	    mapping_cap_writeback_dirty(mapping)) {
 		if (page_mkclean(hpage)) {
 			SetPageDirty(hpage);
@@ -965,12 +965,14 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * Now that the dirty bit has been propagated to the
 	 * struct page and all unmaps done we can decide if
 	 * killing is needed or not. Only kill when the page
-	 * was dirty, otherwise the tokill list is merely
+	 * was dirty or the process is not restartable,
+	 * otherwise the tokill list is merely
 	 * freed. When there was a problem unmapping earlier
 	 * use a more force-full uncatchable kill to prevent
 	 * any accesses to the poisoned memory.
 	 */
-	kill_procs(&tokill, !!PageDirty(ppage), trapno,
+	forcekill = PageDirty(ppage) || (flags & MF_MUST_KILL);
+	kill_procs(&tokill, forcekill, trapno,
 		   ret != SWAP_SUCCESS, p, pfn, flags);
 
 	return ret;
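
For context, a minimal sketch of the caller side, not part of the patch above: with MF_MUST_KILL in the flags passed to memory_failure(), hwpoison_user_mappings() now skips the page_mkclean() shortcut and forces kill_procs() to deliver SIGBUS even when the poisoned page was clean. The sketch assumes the memory_failure(pfn, trapno, flags) signature of this kernel series and the MF_ACTION_REQUIRED/MF_MUST_KILL flag values from include/linux/mm.h; the wrapper function name is hypothetical.

/*
 * Illustrative only: a hypothetical handler for an uncorrected,
 * non-restartable memory error.  Passing MF_MUST_KILL makes the
 * hwpoison code kill every process mapping the page, whether or
 * not the page was dirty.
 */
static void handle_uncorrected_error(unsigned long pfn, int trapno)
{
	int flags = MF_ACTION_REQUIRED | MF_MUST_KILL;

	memory_failure(pfn, trapno, flags);
}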