memory-failure.c

/*
 * Copyright (C) 2008, 2009 Intel Corporation
 * Authors: Andi Kleen, Fengguang Wu
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 only as published by the
 * Free Software Foundation.
 *
 * High level machine check handler. Handles pages reported by the
 * hardware as being corrupted, usually due to a multi-bit ECC memory or
 * cache failure.
 *
 * In addition there is a "soft offline" entry point that allows stopping the
 * use of not-yet-corrupted (but suspicious) pages without killing anything.
 *
 * Handles page cache pages in various states. The tricky part
 * here is that we can access any page asynchronously with respect to
 * other VM users, because memory failures could happen anytime and
 * anywhere. This could violate some of their assumptions. This is why
 * this code has to be extremely careful. Generally it tries to use
 * normal locking rules, as in get the standard locks, even if that means
 * the error handling takes potentially a long time.
 *
 * There are several operations here with exponential complexity because
 * of unsuitable VM data structures. For example the operation to map back
 * from RMAP chains to processes has to walk the complete process list and
 * has non-linear complexity in the number of processes. But since memory
 * corruptions are rare we hope to get away with this. This avoids impacting
 * the core VM.
 */
/*
 * Notebook:
 * - hugetlb needs more code
 * - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages
 * - pass bad pages to kdump next kernel
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/kernel-page-flags.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/suspend.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memory_hotplug.h>
#include "internal.h"
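
/*
 * Tunables and statistics:
 * - sysctl_memory_failure_early_kill: kill processes that map a poisoned
 *   page as soon as the error is detected, instead of waiting until they
 *   actually touch it (vm.memory_failure_early_kill).
 * - sysctl_memory_failure_recovery: if 0, panic on any memory failure
 *   instead of attempting recovery (vm.memory_failure_recovery).
 * - mce_bad_pages: number of pages currently marked hardware poisoned.
 */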
int sysctl_memory_failure_early_kill __read_mostly = 0;

int sysctl_memory_failure_recovery __read_mostly = 1;

atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);

#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)

u32 hwpoison_filter_enable = 0;
u32 hwpoison_filter_dev_major = ~0U;
u32 hwpoison_filter_dev_minor = ~0U;
u64 hwpoison_filter_flags_mask;
u64 hwpoison_filter_flags_value;
EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
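
/*
 * Only poison pages whose backing file lives on the configured block
 * device: compare the page's inode superblock device number against
 * hwpoison_filter_dev_major/minor (both ~0U means "don't filter").
 */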
static int hwpoison_filter_dev(struct page *p)
{
	struct address_space *mapping;
	dev_t dev;

	if (hwpoison_filter_dev_major == ~0U &&
	    hwpoison_filter_dev_minor == ~0U)
		return 0;

	/*
	 * page_mapping() does not accept slab pages.
	 */
	if (PageSlab(p))
		return -EINVAL;

	mapping = page_mapping(p);
	if (mapping == NULL || mapping->host == NULL)
		return -EINVAL;

	dev = mapping->host->i_sb->s_dev;
	if (hwpoison_filter_dev_major != ~0U &&
	    hwpoison_filter_dev_major != MAJOR(dev))
		return -EINVAL;
	if (hwpoison_filter_dev_minor != ~0U &&
	    hwpoison_filter_dev_minor != MINOR(dev))
		return -EINVAL;

	return 0;
}
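
/*
 * Only poison pages whose stable page flags, masked with
 * hwpoison_filter_flags_mask, match hwpoison_filter_flags_value.
 */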
static int hwpoison_filter_flags(struct page *p)
{
	if (!hwpoison_filter_flags_mask)
		return 0;

	if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
				    hwpoison_filter_flags_value)
		return 0;
	else
		return -EINVAL;
}

/*
 * This allows stress tests to limit test scope to a collection of tasks
 * by putting them under some memcg. This prevents killing unrelated/important
 * processes such as /sbin/init. Note that the target task may share clean
 * pages with init (eg. libc text), which is harmless. If the target task
 * shares _dirty_ pages with another task B, the test scheme must make sure B
 * is also included in the memcg. Finally, due to race conditions this filter
 * can only guarantee that the page either belongs to the memcg tasks, or is
 * a freed page.
 */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
u64 hwpoison_filter_memcg;
EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
static int hwpoison_filter_task(struct page *p)
{
	struct mem_cgroup *mem;
	struct cgroup_subsys_state *css;
	unsigned long ino;

	if (!hwpoison_filter_memcg)
		return 0;

	mem = try_get_mem_cgroup_from_page(p);
	if (!mem)
		return -EINVAL;

	css = mem_cgroup_css(mem);
	/* root_mem_cgroup has NULL dentries */
	if (!css->cgroup->dentry)
		return -EINVAL;

	ino = css->cgroup->dentry->d_inode->i_ino;
	css_put(css);

	if (ino != hwpoison_filter_memcg)
		return -EINVAL;

	return 0;
}
#else
static int hwpoison_filter_task(struct page *p) { return 0; }
#endif

int hwpoison_filter(struct page *p)
{
	if (!hwpoison_filter_enable)
		return 0;

	if (hwpoison_filter_dev(p))
		return -EINVAL;

	if (hwpoison_filter_flags(p))
		return -EINVAL;

	if (hwpoison_filter_task(p))
		return -EINVAL;

	return 0;
}
#else
int hwpoison_filter(struct page *p)
{
	return 0;
}
#endif

EXPORT_SYMBOL_GPL(hwpoison_filter);

/*
 * Send all the processes who have the page mapped an ``action optional''
 * signal.
 */
static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
			unsigned long pfn, struct page *page)
{
	struct siginfo si;
	int ret;

	printk(KERN_ERR
		"MCE %#lx: Killing %s:%d early due to hardware memory corruption\n",
		pfn, t->comm, t->pid);
	si.si_signo = SIGBUS;
	si.si_errno = 0;
	si.si_code = BUS_MCEERR_AO;
	si.si_addr = (void *)addr;
#ifdef __ARCH_SI_TRAPNO
	si.si_trapno = trapno;
#endif
	si.si_addr_lsb = compound_trans_order(compound_head(page)) + PAGE_SHIFT;
	/*
	 * Don't use force here, it's convenient if the signal
	 * can be temporarily blocked.
	 * This could cause a loop when the user sets SIGBUS
	 * to SIG_IGN, but hopefully no one will do that?
	 */
	ret = send_sig_info(SIGBUS, &si, t);	/* synchronous? */
	if (ret < 0)
		printk(KERN_INFO "MCE: Error sending signal to %s:%d: %d\n",
		       t->comm, t->pid, ret);
	return ret;
}

/*
 * When an unknown page type is encountered drain as many buffers as possible
 * in the hope to turn the page into a LRU or free page, which we can handle.
 */
void shake_page(struct page *p, int access)
{
	if (!PageSlab(p)) {
		lru_add_drain_all();
		if (PageLRU(p))
			return;
		drain_all_pages();
		if (PageLRU(p) || is_free_buddy_page(p))
			return;
	}

	/*
	 * Only call shrink_slab here (which would also
	 * shrink other caches) if access is not potentially fatal.
	 */
	if (access) {
		int nr;
		do {
			nr = shrink_slab(1000, GFP_KERNEL, 1000);
			if (page_count(p) == 1)
				break;
		} while (nr > 10);
	}
}
EXPORT_SYMBOL_GPL(shake_page);

/*
 * Kill all processes that have a poisoned page mapped and then isolate
 * the page.
 *
 * General strategy:
 * Find all processes having the page mapped and kill them.
 * But we keep a page reference around so that the page is not
 * actually freed yet.
 * Then stash the page away.
 *
 * There's no convenient way to get back to mapped processes
 * from the VMAs. So do a brute-force search over all
 * running processes.
 *
 * Remember that machine checks are not common (or rather
 * if they are common you have other problems), so this shouldn't
 * be a performance issue.
 *
 * Also there are some races possible while we get from the
 * error detection to actually handle it.
 */

struct to_kill {
	struct list_head nd;
	struct task_struct *tsk;
	unsigned long addr;
	char addr_valid;
};

/*
 * Failure handling: if we can't find or can't kill a process there's
 * not much we can do. We just print a message and ignore otherwise.
 */

/*
 * Schedule a process for later kill.
 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
 * TBD would GFP_NOIO be enough?
 */
static void add_to_kill(struct task_struct *tsk, struct page *p,
			struct vm_area_struct *vma,
			struct list_head *to_kill,
			struct to_kill **tkc)
{
	struct to_kill *tk;

	if (*tkc) {
		tk = *tkc;
		*tkc = NULL;
	} else {
		tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
		if (!tk) {
			printk(KERN_ERR
		"MCE: Out of memory while machine check handling\n");
			return;
		}
	}
	tk->addr = page_address_in_vma(p, vma);
	tk->addr_valid = 1;

	/*
	 * In theory we don't have to kill when the page was
	 * munmapped. But it could be also a mremap. Since that's
	 * likely very rare kill anyways just out of paranoia, but use
	 * a SIGKILL because the error is not contained anymore.
	 */
	if (tk->addr == -EFAULT) {
		pr_info("MCE: Unable to find user space address %lx in %s\n",
			page_to_pfn(p), tsk->comm);
		tk->addr_valid = 0;
	}
	get_task_struct(tsk);
	tk->tsk = tsk;
	list_add_tail(&tk->nd, to_kill);
}

/*
 * Kill the processes that have been collected earlier.
 *
 * Only do anything when DOIT is set, otherwise just free the list
 * (this is used for clean pages which do not need killing)
 * Also when FAIL is set do a force kill because something went
 * wrong earlier.
 */
static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno,
			  int fail, struct page *page, unsigned long pfn)
{
	struct to_kill *tk, *next;

	list_for_each_entry_safe (tk, next, to_kill, nd) {
		if (doit) {
			/*
			 * In case something went wrong with munmapping
			 * make sure the process doesn't catch the
			 * signal and then access the memory. Just kill it.
			 */
			if (fail || tk->addr_valid == 0) {
				printk(KERN_ERR
		"MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
					pfn, tk->tsk->comm, tk->tsk->pid);
				force_sig(SIGKILL, tk->tsk);
			}

			/*
			 * In theory the process could have mapped
			 * something else on the address in-between. We could
			 * check for that, but we need to tell the
			 * process anyways.
			 */
			else if (kill_proc_ao(tk->tsk, tk->addr, trapno,
					      pfn, page) < 0)
				printk(KERN_ERR
		"MCE %#lx: Cannot send advisory machine check signal to %s:%d\n",
					pfn, tk->tsk->comm, tk->tsk->pid);
		}
		put_task_struct(tk->tsk);
		kfree(tk);
	}
}
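
/*
 * Decide whether a task should be killed as soon as the error is found
 * ("early kill") rather than only when it touches the bad page: a
 * per-process PF_MCE_PROCESS/PF_MCE_EARLY setting overrides the global
 * sysctl_memory_failure_early_kill default. Kernel threads (no mm) are
 * never early-killed.
 */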
static int task_early_kill(struct task_struct *tsk)
{
	if (!tsk->mm)
		return 0;
	if (tsk->flags & PF_MCE_PROCESS)
		return !!(tsk->flags & PF_MCE_EARLY);
	return sysctl_memory_failure_early_kill;
}

/*
 * Collect processes when the error hit an anonymous page.
 */
static void collect_procs_anon(struct page *page, struct list_head *to_kill,
			       struct to_kill **tkc)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct anon_vma *av;

	if (!PageHuge(page) && unlikely(split_huge_page(page)))
		return;
	read_lock(&tasklist_lock);
	av = page_lock_anon_vma(page);
	if (av == NULL)	/* Not actually mapped anymore */
		goto out;
	for_each_process (tsk) {
		struct anon_vma_chain *vmac;

		if (!task_early_kill(tsk))
			continue;
		list_for_each_entry(vmac, &av->head, same_anon_vma) {
			vma = vmac->vma;
			if (!page_mapped_in_vma(page, vma))
				continue;
			if (vma->vm_mm == tsk->mm)
				add_to_kill(tsk, page, vma, to_kill, tkc);
		}
	}
	page_unlock_anon_vma(av);
out:
	read_unlock(&tasklist_lock);
}

/*
 * Collect processes when the error hit a file mapped page.
 */
static void collect_procs_file(struct page *page, struct list_head *to_kill,
			       struct to_kill **tkc)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct prio_tree_iter iter;
	struct address_space *mapping = page->mapping;

	/*
	 * A note on the locking order between the two locks.
	 * We don't rely on this particular order.
	 * If you have some other code that needs a different order
	 * feel free to switch them around. Or add a reverse link
	 * from mm_struct to task_struct, then this could be all
	 * done without taking tasklist_lock and looping over all tasks.
	 */
	read_lock(&tasklist_lock);
	spin_lock(&mapping->i_mmap_lock);
	for_each_process(tsk) {
		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

		if (!task_early_kill(tsk))
			continue;

		vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff,
				      pgoff) {
			/*
			 * Send early kill signal to tasks where a vma covers
			 * the page but the corrupted page is not necessarily
			 * mapped in its pte.
			 * Assume applications who requested early kill want
			 * to be informed of all such data corruptions.
			 */
			if (vma->vm_mm == tsk->mm)
				add_to_kill(tsk, page, vma, to_kill, tkc);
		}
	}
	spin_unlock(&mapping->i_mmap_lock);
	read_unlock(&tasklist_lock);
}

/*
 * Collect the processes who have the corrupted page mapped to kill.
 * This is done in two steps for locking reasons.
 * First preallocate one tokill structure outside the spin locks,
 * so that we can kill at least one process reasonably reliably.
 */
static void collect_procs(struct page *page, struct list_head *tokill)
{
	struct to_kill *tk;

	if (!page->mapping)
		return;

	tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
	if (!tk)
		return;
	if (PageAnon(page))
		collect_procs_anon(page, tokill, &tk);
	else
		collect_procs_file(page, tokill, &tk);
	kfree(tk);
}

/*
 * Error handlers for various types of pages.
 */

enum outcome {
	IGNORED,	/* Error: cannot be handled */
	FAILED,		/* Error: handling failed */
	DELAYED,	/* Will be handled later */
	RECOVERED,	/* Successfully recovered */
};

static const char *action_name[] = {
	[IGNORED] = "Ignored",
	[FAILED] = "Failed",
	[DELAYED] = "Delayed",
	[RECOVERED] = "Recovered",
};

/*
 * XXX: It is possible that a page is isolated from LRU cache,
 * and then kept in swap cache or failed to remove from page cache.
 * The page count will stop it from being freed by unpoison.
 * Stress tests should be aware of this memory leak problem.
 */
static int delete_from_lru_cache(struct page *p)
{
	if (!isolate_lru_page(p)) {
		/*
		 * Clear sensible page flags, so that the buddy system won't
		 * complain when the page is unpoison-and-freed.
		 */
		ClearPageActive(p);
		ClearPageUnevictable(p);
		/*
		 * drop the page count elevated by isolate_lru_page()
		 */
		page_cache_release(p);
		return 0;
	}
	return -EIO;
}

/*
 * Error hit kernel page.
 * Do nothing, try to be lucky and not touch this instead. For a few cases we
 * could be more sophisticated.
 */
static int me_kernel(struct page *p, unsigned long pfn)
{
	return IGNORED;
}

/*
 * Page in unknown state. Do nothing.
 */
static int me_unknown(struct page *p, unsigned long pfn)
{
	printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn);
	return FAILED;
}

/*
 * Clean (or cleaned) page cache page.
 */
static int me_pagecache_clean(struct page *p, unsigned long pfn)
{
	int err;
	int ret = FAILED;
	struct address_space *mapping;

	delete_from_lru_cache(p);

	/*
	 * For anonymous pages we're done; the only reference left
	 * should be the one m_f() holds.
	 */
	if (PageAnon(p))
		return RECOVERED;

	/*
	 * Now truncate the page in the page cache. This is really
	 * more like a "temporary hole punch"
	 * Don't do this for block devices when someone else
	 * has a reference, because it could be file system metadata
	 * and that's not safe to truncate.
	 */
	mapping = page_mapping(p);
	if (!mapping) {
		/*
		 * Page has been torn down in the meantime
		 */
		return FAILED;
	}

	/*
	 * Truncation is a bit tricky. Enable it per file system for now.
	 *
	 * Open: to take i_mutex or not for this? Right now we don't.
	 */
	if (mapping->a_ops->error_remove_page) {
		err = mapping->a_ops->error_remove_page(mapping, p);
		if (err != 0) {
			printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n",
					pfn, err);
		} else if (page_has_private(p) &&
				!try_to_release_page(p, GFP_NOIO)) {
			pr_info("MCE %#lx: failed to release buffers\n", pfn);
		} else {
			ret = RECOVERED;
		}
	} else {
		/*
		 * If the file system doesn't support it just invalidate
		 * This fails on dirty or anything with private pages
		 */
		if (invalidate_inode_page(p))
			ret = RECOVERED;
		else
			printk(KERN_INFO "MCE %#lx: Failed to invalidate\n",
				pfn);
	}
	return ret;
}

/*
 * Dirty pagecache page.
 * Issues: when the error hit a hole page the error is not properly
 * propagated.
 */
static int me_pagecache_dirty(struct page *p, unsigned long pfn)
{
	struct address_space *mapping = page_mapping(p);

	SetPageError(p);
	/* TBD: print more information about the file. */
	if (mapping) {
		/*
		 * IO error will be reported by write(), fsync(), etc.
		 * who check the mapping.
		 * This way the application knows that something went
		 * wrong with its dirty file data.
		 *
		 * There's one open issue:
		 *
		 * The EIO will be only reported on the next IO
		 * operation and then cleared through the IO map.
		 * Normally Linux has two mechanisms to pass IO error
		 * first through the AS_EIO flag in the address space
		 * and then through the PageError flag in the page.
		 * Since we drop pages on memory failure handling the
		 * only mechanism open to use is through AS_EIO.
		 *
		 * This has the disadvantage that it gets cleared on
		 * the first operation that returns an error, while
		 * the PageError bit is more sticky and only cleared
		 * when the page is reread or dropped. If an
		 * application assumes it will always get error on
		 * fsync, but does other operations on the fd before
		 * and the page is dropped in between then the error
		 * will not be properly reported.
		 *
		 * This can already happen even without hwpoisoned
		 * pages: first on metadata IO errors (which only
		 * report through AS_EIO) or when the page is dropped
		 * at the wrong time.
		 *
		 * So right now we assume that the application DTRT on
		 * the first EIO, but we're not worse than other parts
		 * of the kernel.
		 */
		mapping_set_error(mapping, EIO);
	}

	return me_pagecache_clean(p, pfn);
}

/*
 * Clean and dirty swap cache.
 *
 * Dirty swap cache page is tricky to handle. The page could live both in page
 * cache and swap cache (ie. page is freshly swapped in). So it could be
 * referenced concurrently by 2 types of PTEs:
 * normal PTEs and swap PTEs. We try to handle them consistently by calling
 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
 * and then
 *	- clear dirty bit to prevent IO
 *	- remove from LRU
 *	- but keep in the swap cache, so that when we return to it on
 *	  a later page fault, we know the application is accessing
 *	  corrupted data and shall be killed (we installed simple
 *	  interception code in do_swap_page to catch it).
 *
 * Clean swap cache pages can be directly isolated. A later page fault will
 * bring in the known good data from disk.
 */
static int me_swapcache_dirty(struct page *p, unsigned long pfn)
{
	ClearPageDirty(p);
	/* Trigger EIO in shmem: */
	ClearPageUptodate(p);

	if (!delete_from_lru_cache(p))
		return DELAYED;
	else
		return FAILED;
}

static int me_swapcache_clean(struct page *p, unsigned long pfn)
{
	delete_from_swap_cache(p);

	if (!delete_from_lru_cache(p))
		return RECOVERED;
	else
		return FAILED;
}

/*
 * Huge pages. Needs work.
 * Issues:
 * - Error on hugepage is contained in hugepage unit (not in raw page unit.)
 *   To narrow down kill region to one page, we need to break up pmd.
 */
static int me_huge_page(struct page *p, unsigned long pfn)
{
	int res = 0;
	struct page *hpage = compound_head(p);
	/*
	 * We can safely recover from error on free or reserved (i.e.
	 * not in-use) hugepage by dequeuing it from freelist.
	 * To check whether a hugepage is in-use or not, we can't use
	 * page->lru because it can be used in other hugepage operations,
	 * such as __unmap_hugepage_range() and gather_surplus_pages().
	 * So instead we use page_mapping() and PageAnon().
	 * We assume that this function is called with page lock held,
	 * so there is no race between isolation and mapping/unmapping.
	 */
	if (!(page_mapping(hpage) || PageAnon(hpage))) {
		res = dequeue_hwpoisoned_huge_page(hpage);
		if (!res)
			return RECOVERED;
	}
	return DELAYED;
}

/*
 * Various page states we can handle.
 *
 * A page state is defined by its current page->flags bits.
 * The table matches them in order and calls the right handler.
 *
 * This is quite tricky because we can access page at any time
 * in its life cycle, so all accesses have to be extremely careful.
 *
 * This is not complete. More states could be added.
 * For any missing state don't attempt recovery.
 */

#define dirty		(1UL << PG_dirty)
#define sc		(1UL << PG_swapcache)
#define unevict		(1UL << PG_unevictable)
#define mlock		(1UL << PG_mlocked)
#define writeback	(1UL << PG_writeback)
#define lru		(1UL << PG_lru)
#define swapbacked	(1UL << PG_swapbacked)
#define head		(1UL << PG_head)
#define tail		(1UL << PG_tail)
#define compound	(1UL << PG_compound)
#define slab		(1UL << PG_slab)
#define reserved	(1UL << PG_reserved)

static struct page_state {
	unsigned long mask;
	unsigned long res;
	char *msg;
	int (*action)(struct page *p, unsigned long pfn);
} error_states[] = {
	{ reserved,	reserved,	"reserved kernel",	me_kernel },
	/*
	 * free pages are specially detected outside this table:
	 * PG_buddy pages only make a small fraction of all free pages.
	 */

	/*
	 * Could in theory check if slab page is free or if we can drop
	 * currently unused objects without touching them. But just
	 * treat it as standard kernel for now.
	 */
	{ slab,		slab,		"kernel slab",	me_kernel },

#ifdef CONFIG_PAGEFLAGS_EXTENDED
	{ head,		head,		"huge",		me_huge_page },
	{ tail,		tail,		"huge",		me_huge_page },
#else
	{ compound,	compound,	"huge",		me_huge_page },
#endif

	{ sc|dirty,	sc|dirty,	"swapcache",	me_swapcache_dirty },
	{ sc|dirty,	sc,		"swapcache",	me_swapcache_clean },

	{ unevict|dirty, unevict|dirty,	"unevictable LRU", me_pagecache_dirty},
	{ unevict,	unevict,	"unevictable LRU", me_pagecache_clean},

	{ mlock|dirty,	mlock|dirty,	"mlocked LRU",	me_pagecache_dirty },
	{ mlock,	mlock,		"mlocked LRU",	me_pagecache_clean },

	{ lru|dirty,	lru|dirty,	"LRU",		me_pagecache_dirty },
	{ lru|dirty,	lru,		"clean LRU",	me_pagecache_clean },

	/*
	 * Catchall entry: must be at end.
	 */
	{ 0,		0,		"unknown page state",	me_unknown },
};

#undef dirty
#undef sc
#undef unevict
#undef mlock
#undef writeback
#undef lru
#undef swapbacked
#undef head
#undef tail
#undef compound
#undef slab
#undef reserved
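
/*
 * action_result() reports the recovery outcome for one page; the "dirty "
 * prefix reflects PageDirty() at report time. page_action() runs the
 * handler for the matched page state and then checks that no unexpected
 * references remain (one reference is held by the caller, and a dirty
 * swap cache page that is only delayed legitimately keeps one more).
 */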
static void action_result(unsigned long pfn, char *msg, int result)
{
	struct page *page = pfn_to_page(pfn);

	printk(KERN_ERR "MCE %#lx: %s%s page recovery: %s\n",
		pfn,
		PageDirty(page) ? "dirty " : "",
		msg, action_name[result]);
}

static int page_action(struct page_state *ps, struct page *p,
			unsigned long pfn)
{
	int result;
	int count;

	result = ps->action(p, pfn);
	action_result(pfn, ps->msg, result);

	count = page_count(p) - 1;
	if (ps->action == me_swapcache_dirty && result == DELAYED)
		count--;
	if (count != 0) {
		printk(KERN_ERR
		       "MCE %#lx: %s page still referenced by %d users\n",
		       pfn, ps->msg, count);
		result = FAILED;
	}

	/* Could do more checks here if page looks ok */

	/*
	 * Could adjust zone counters here to correct for the missing page.
	 */

	return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY;
}

/*
 * Do all that is necessary to remove user space mappings. Unmap
 * the pages and send SIGBUS to the processes if the data was dirty.
 */
static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
				  int trapno)
{
	enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
	struct address_space *mapping;
	LIST_HEAD(tokill);
	int ret;
	int kill = 1;
	struct page *hpage = compound_head(p);

	if (PageReserved(p) || PageSlab(p))
		return SWAP_SUCCESS;

	/*
	 * This check implies we don't kill processes if their pages
	 * are in the swap cache early. Those are always late kills.
	 */
	if (!page_mapped(hpage))
		return SWAP_SUCCESS;

	if (PageKsm(p))
		return SWAP_FAIL;

	if (PageSwapCache(p)) {
		printk(KERN_ERR
		       "MCE %#lx: keeping poisoned page in swap cache\n", pfn);
		ttu |= TTU_IGNORE_HWPOISON;
	}

	/*
	 * Propagate the dirty bit from PTEs to struct page first, because we
	 * need this to decide if we should kill or just drop the page.
	 * XXX: the dirty test could be racy: set_page_dirty() may not always
	 * be called inside page lock (it's recommended but not enforced).
	 */
	mapping = page_mapping(hpage);
	if (!PageDirty(hpage) && mapping &&
	    mapping_cap_writeback_dirty(mapping)) {
		if (page_mkclean(hpage)) {
			SetPageDirty(hpage);
		} else {
			kill = 0;
			ttu |= TTU_IGNORE_HWPOISON;
			printk(KERN_INFO
	"MCE %#lx: corrupted page was clean: dropped without side effects\n",
				pfn);
		}
	}

	/*
	 * First collect all the processes that have the page
	 * mapped in dirty form. This has to be done before try_to_unmap,
	 * because ttu takes the rmap data structures down.
	 *
	 * Error handling: We ignore errors here because
	 * there's nothing that can be done.
	 */
	if (kill)
		collect_procs(hpage, &tokill);

	ret = try_to_unmap(hpage, ttu);
	if (ret != SWAP_SUCCESS)
		printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
				pfn, page_mapcount(hpage));

	/*
	 * Now that the dirty bit has been propagated to the
	 * struct page and all unmaps done we can decide if
	 * killing is needed or not. Only kill when the page
	 * was dirty, otherwise the tokill list is merely
	 * freed. When there was a problem unmapping earlier
	 * use a more forceful uncatchable kill to prevent
	 * any accesses to the poisoned memory.
	 */
	kill_procs_ao(&tokill, !!PageDirty(hpage), trapno,
		      ret != SWAP_SUCCESS, p, pfn);

	return ret;
}
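
/*
 * Hugepages are poisoned and unpoisoned as a unit: set or clear
 * PG_hwpoison on every subpage of the compound page.
 */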
static void set_page_hwpoison_huge_page(struct page *hpage)
{
	int i;
	int nr_pages = 1 << compound_trans_order(hpage);
	for (i = 0; i < nr_pages; i++)
		SetPageHWPoison(hpage + i);
}

static void clear_page_hwpoison_huge_page(struct page *hpage)
{
	int i;
	int nr_pages = 1 << compound_trans_order(hpage);
	for (i = 0; i < nr_pages; i++)
		ClearPageHWPoison(hpage + i);
}
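
/*
 * __memory_failure() is the worker behind memory_failure(): mark the page
 * hardware poisoned, take a reference so it cannot be reused, unmap it
 * from user space (killing users of dirty data), and then run the handler
 * matching the page's state from error_states[]. @flags may contain
 * MF_COUNT_INCREASED when the caller already holds a page reference.
 */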
int __memory_failure(unsigned long pfn, int trapno, int flags)
{
	struct page_state *ps;
	struct page *p;
	struct page *hpage;
	int res;
	unsigned int nr_pages;

	if (!sysctl_memory_failure_recovery)
		panic("Memory failure from trap %d on page %lx", trapno, pfn);

	if (!pfn_valid(pfn)) {
		printk(KERN_ERR
		       "MCE %#lx: memory outside kernel control\n",
		       pfn);
		return -ENXIO;
	}

	p = pfn_to_page(pfn);
	hpage = compound_head(p);
	if (TestSetPageHWPoison(p)) {
		printk(KERN_ERR "MCE %#lx: already hardware poisoned\n", pfn);
		return 0;
	}

	nr_pages = 1 << compound_trans_order(hpage);
	atomic_long_add(nr_pages, &mce_bad_pages);

	/*
	 * We need/can do nothing about count=0 pages.
	 * 1) it's a free page, and therefore in safe hands:
	 *    prep_new_page() will be the gate keeper.
	 * 2) it's a free hugepage, which is also safe:
	 *    an affected hugepage will be dequeued from hugepage freelist,
	 *    so there's no concern about reusing it ever after.
	 * 3) it's part of a non-compound high order page.
	 *    Implies some kernel user: cannot stop them from
	 *    R/W the page; let's pray that the page has been
	 *    used and will be freed some time later.
	 * In fact it's dangerous to directly bump up page count from 0,
	 * that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
	 */
	if (!(flags & MF_COUNT_INCREASED) &&
		!get_page_unless_zero(hpage)) {
		if (is_free_buddy_page(p)) {
			action_result(pfn, "free buddy", DELAYED);
			return 0;
		} else if (PageHuge(hpage)) {
			/*
			 * Check "just unpoisoned", "filter hit", and
			 * "race with other subpage."
			 */
			lock_page_nosync(hpage);
			if (!PageHWPoison(hpage)
			    || (hwpoison_filter(p) && TestClearPageHWPoison(p))
			    || (p != hpage && TestSetPageHWPoison(hpage))) {
				atomic_long_sub(nr_pages, &mce_bad_pages);
				return 0;
			}
			set_page_hwpoison_huge_page(hpage);
			res = dequeue_hwpoisoned_huge_page(hpage);
			action_result(pfn, "free huge",
				      res ? IGNORED : DELAYED);
			unlock_page(hpage);
			return res;
		} else {
			action_result(pfn, "high order kernel", IGNORED);
			return -EBUSY;
		}
	}

	/*
	 * We ignore non-LRU pages for good reasons.
	 * - PG_locked is only well defined for LRU pages and a few others
	 * - to avoid races with __set_page_locked()
	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
	 * The check (unnecessarily) ignores LRU pages being isolated and
	 * walked by the page reclaim code, however that's not a big loss.
	 */
	if (!PageLRU(p) && !PageHuge(p))
		shake_page(p, 0);
	if (!PageLRU(p) && !PageHuge(p)) {
		/*
		 * shake_page could have turned it free.
		 */
		if (is_free_buddy_page(p)) {
			action_result(pfn, "free buddy, 2nd try", DELAYED);
			return 0;
		}
		action_result(pfn, "non LRU", IGNORED);
		put_page(p);
		return -EBUSY;
	}

	/*
	 * Lock the page and wait for writeback to finish.
	 * It's very difficult to mess with pages currently under IO
	 * and in many cases impossible, so we just avoid it here.
	 */
	lock_page_nosync(hpage);

	/*
	 * unpoison always clears PG_hwpoison inside page lock
	 */
	if (!PageHWPoison(p)) {
		printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
		res = 0;
		goto out;
	}
	if (hwpoison_filter(p)) {
		if (TestClearPageHWPoison(p))
			atomic_long_sub(nr_pages, &mce_bad_pages);
		unlock_page(hpage);
		put_page(hpage);
		return 0;
	}

	/*
	 * For error on the tail page, we should set PG_hwpoison
	 * on the head page to show that the hugepage is hwpoisoned
	 */
	if (PageTail(p) && TestSetPageHWPoison(hpage)) {
		action_result(pfn, "hugepage already hardware poisoned",
			      IGNORED);
		unlock_page(hpage);
		put_page(hpage);
		return 0;
	}

	/*
	 * Set PG_hwpoison on all pages in an error hugepage,
	 * because containment is done in hugepage unit for now.
	 * Since we have done TestSetPageHWPoison() for the head page with
	 * page lock held, we can safely set PG_hwpoison bits on tail pages.
	 */
	if (PageHuge(p))
		set_page_hwpoison_huge_page(hpage);

	wait_on_page_writeback(p);

	/*
	 * Now take care of user space mappings.
	 * Abort on fail: __remove_from_page_cache() assumes unmapped page.
	 */
	if (hwpoison_user_mappings(p, pfn, trapno) != SWAP_SUCCESS) {
		printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
		res = -EBUSY;
		goto out;
	}

	/*
	 * Torn down by someone else?
	 */
	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
		action_result(pfn, "already truncated LRU", IGNORED);
		res = -EBUSY;
		goto out;
	}

	res = -EBUSY;
	for (ps = error_states;; ps++) {
		if ((p->flags & ps->mask) == ps->res) {
			res = page_action(ps, p, pfn);
			break;
		}
	}
out:
	unlock_page(hpage);
	return res;
}
EXPORT_SYMBOL_GPL(__memory_failure);

/**
 * memory_failure - Handle memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @trapno: Trap number reported in the signal to user space.
 *
 * This function is called by the low level machine check code
 * of an architecture when it detects hardware memory corruption
 * of a page. It tries its best to recover, which includes
 * dropping pages, killing processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber)
 *
 * Must run in process context (e.g. a work queue) with interrupts
 * enabled and no spinlocks held.
 */
void memory_failure(unsigned long pfn, int trapno)
{
	__memory_failure(pfn, trapno, 0);
}

/**
 * unpoison_memory - Unpoison a previously poisoned page
 * @pfn: Page number of the to be unpoisoned page
 *
 * Software-unpoison a page that has been poisoned by
 * memory_failure() earlier.
 *
 * This is only done on the software level, so it only works
 * for Linux injected failures, not real hardware failures
 *
 * Returns 0 for success, otherwise -errno.
 */
int unpoison_memory(unsigned long pfn)
{
	struct page *page;
	struct page *p;
	int freeit = 0;
	unsigned int nr_pages;

	if (!pfn_valid(pfn))
		return -ENXIO;

	p = pfn_to_page(pfn);
	page = compound_head(p);

	if (!PageHWPoison(p)) {
		pr_info("MCE: Page was already unpoisoned %#lx\n", pfn);
		return 0;
	}

	nr_pages = 1 << compound_trans_order(page);

	if (!get_page_unless_zero(page)) {
		/*
		 * Since HWPoisoned hugepage should have non-zero refcount,
		 * race between memory failure and unpoison seems to happen.
		 * In such case unpoison fails and memory failure runs
		 * to the end.
		 */
		if (PageHuge(page)) {
			pr_debug("MCE: Memory failure is now running on free hugepage %#lx\n", pfn);
			return 0;
		}
		if (TestClearPageHWPoison(p))
			atomic_long_sub(nr_pages, &mce_bad_pages);
		pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
		return 0;
	}

	lock_page_nosync(page);
	/*
	 * This test is racy because PG_hwpoison is set outside of page lock.
	 * That's acceptable because that won't trigger kernel panic. Instead,
	 * the PG_hwpoison page will be caught and isolated on the entrance to
	 * the free buddy page pool.
	 */
	if (TestClearPageHWPoison(page)) {
		pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
		atomic_long_sub(nr_pages, &mce_bad_pages);
		freeit = 1;
		if (PageHuge(page))
			clear_page_hwpoison_huge_page(page);
	}
	unlock_page(page);

	put_page(page);
	if (freeit)
		put_page(page);

	return 0;
}
EXPORT_SYMBOL(unpoison_memory);
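
/*
 * Allocation callback used by the migration done for soft offlining:
 * allocate the replacement page on the same node as the bad page,
 * using the hugepage allocator when the source is a hugepage.
 */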
static struct page *new_page(struct page *p, unsigned long private, int **x)
{
	int nid = page_to_nid(p);
	if (PageHuge(p))
		return alloc_huge_page_node(page_hstate(compound_head(p)),
						   nid);
	else
		return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Safely get reference count of an arbitrary page.
 * Returns 0 for a free page, -EIO for a zero refcount page
 * that is not free, and 1 for any other page type.
 * For 1 the page is returned with increased page count, otherwise not.
 */
static int get_any_page(struct page *p, unsigned long pfn, int flags)
{
	int ret;

	if (flags & MF_COUNT_INCREASED)
		return 1;

	/*
	 * The lock_memory_hotplug prevents a race with memory hotplug.
	 * This is a big hammer, a more targeted fix would be nicer.
	 */
	lock_memory_hotplug();

	/*
	 * Isolate the page, so that it doesn't get reallocated if it
	 * was free.
	 */
	set_migratetype_isolate(p);
	/*
	 * When the target page is a free hugepage, just remove it
	 * from free hugepage list.
	 */
	if (!get_page_unless_zero(compound_head(p))) {
		if (PageHuge(p)) {
			pr_info("get_any_page: %#lx free huge page\n", pfn);
			ret = dequeue_hwpoisoned_huge_page(compound_head(p));
		} else if (is_free_buddy_page(p)) {
			pr_info("get_any_page: %#lx free buddy page\n", pfn);
			/* Set hwpoison bit while page is still isolated */
			SetPageHWPoison(p);
			ret = 0;
		} else {
			pr_info("get_any_page: %#lx: unknown zero refcount page type %lx\n",
				pfn, p->flags);
			ret = -EIO;
		}
	} else {
		/* Not a free page */
		ret = 1;
	}
	unset_migratetype_isolate(p);
	unlock_memory_hotplug();
	return ret;
}
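
/*
 * Soft offline a hugepage: migrate its contents to a freshly allocated
 * hugepage on the same node, then mark every subpage of the old hugepage
 * hardware poisoned and remove it from the free list so it is never
 * reused. The elevated page count is kept on purpose for the bad page.
 */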
static int soft_offline_huge_page(struct page *page, int flags)
{
	int ret;
	unsigned long pfn = page_to_pfn(page);
	struct page *hpage = compound_head(page);
	LIST_HEAD(pagelist);

	ret = get_any_page(page, pfn, flags);
	if (ret < 0)
		return ret;
	if (ret == 0)
		goto done;

	if (PageHWPoison(hpage)) {
		put_page(hpage);
		pr_debug("soft offline: %#lx hugepage already poisoned\n", pfn);
		return -EBUSY;
	}

	/* Keep page count to indicate a given hugepage is isolated. */

	list_add(&hpage->lru, &pagelist);
	ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0,
				true);
	if (ret) {
		putback_lru_pages(&pagelist);
		pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
			 pfn, ret, page->flags);
		if (ret > 0)
			ret = -EIO;
		return ret;
	}
done:
	if (!PageHWPoison(hpage))
		atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
	set_page_hwpoison_huge_page(hpage);
	dequeue_hwpoisoned_huge_page(hpage);
	/* keep elevated page count for bad page */
	return ret;
}

/**
 * soft_offline_page - Soft offline a page.
 * @page: page to offline
 * @flags: flags. Same as memory_failure().
 *
 * Returns 0 on success, otherwise negated errno.
 *
 * Soft offline a page, by migration or invalidation,
 * without killing anything. This is for the case when
 * a page is not corrupted yet (so it's still valid to access),
 * but has had a number of corrected errors and is better taken
 * out.
 *
 * The actual policy on when to do that is maintained by
 * user space.
 *
 * This should never impact any application or cause data loss,
 * however it might take some time.
 *
 * This is not a 100% solution for all memory, but tries to be
 * ``good enough'' for the majority of memory.
 */
int soft_offline_page(struct page *page, int flags)
{
	int ret;
	unsigned long pfn = page_to_pfn(page);

	if (PageHuge(page))
		return soft_offline_huge_page(page, flags);

	ret = get_any_page(page, pfn, flags);
	if (ret < 0)
		return ret;
	if (ret == 0)
		goto done;

	/*
	 * Page cache page we can handle?
	 */
	if (!PageLRU(page)) {
		/*
		 * Try to free it.
		 */
		put_page(page);
		shake_page(page, 1);

		/*
		 * Did it turn free?
		 */
		ret = get_any_page(page, pfn, 0);
		if (ret < 0)
			return ret;
		if (ret == 0)
			goto done;
	}
	if (!PageLRU(page)) {
		pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
				pfn, page->flags);
		return -EIO;
	}

	lock_page(page);
	wait_on_page_writeback(page);

	/*
	 * Synchronized using the page lock with memory_failure()
	 */
	if (PageHWPoison(page)) {
		unlock_page(page);
		put_page(page);
		pr_info("soft offline: %#lx page already poisoned\n", pfn);
		return -EBUSY;
	}

	/*
	 * Try to invalidate first. This should work for
	 * non dirty unmapped page cache pages.
	 */
	ret = invalidate_inode_page(page);
	unlock_page(page);

	/*
	 * Drop count because page migration doesn't like raised
	 * counts. The page could get re-allocated, but if it becomes
	 * LRU the isolation will just fail.
	 * RED-PEN would be better to keep it isolated here, but we
	 * would need to fix isolation locking first.
	 */
	put_page(page);
	if (ret == 1) {
		ret = 0;
		pr_info("soft_offline: %#lx: invalidated\n", pfn);
		goto done;
	}

	/*
	 * Simple invalidation didn't work.
	 * Try to migrate to a new page instead. migrate.c
	 * handles a large number of cases for us.
	 */
	ret = isolate_lru_page(page);
	if (!ret) {
		LIST_HEAD(pagelist);

		list_add(&page->lru, &pagelist);
		ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
								0, true);
		if (ret) {
			pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
				pfn, ret, page->flags);
			if (ret > 0)
				ret = -EIO;
		}
	} else {
		pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
			pfn, ret, page_count(page), page->flags);
	}

done:
	atomic_long_add(1, &mce_bad_pages);
	SetPageHWPoison(page);
	/* keep elevated page count for bad page */
	return ret;
}

/*
 * Walk the current process's page tables and report whether @addr is
 * currently mapped by a hwpoison swap entry, i.e. the backing page was
 * poisoned and unmapped earlier.
 *
 * The caller must hold current->mm->mmap_sem in read mode.
 */
int is_hwpoison_address(unsigned long addr)
{
	pgd_t *pgdp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t pte, *ptep;
	swp_entry_t entry;

	pgdp = pgd_offset(current->mm, addr);
	if (!pgd_present(*pgdp))
		return 0;
	pudp = pud_offset(pgdp, addr);
	pud = *pudp;
	if (!pud_present(pud) || pud_large(pud))
		return 0;
	pmdp = pmd_offset(pudp, addr);
	pmd = *pmdp;
	if (!pmd_present(pmd) || pmd_large(pmd))
		return 0;
	ptep = pte_offset_map(pmdp, addr);
	pte = *ptep;
	pte_unmap(ptep);
	if (!is_swap_pte(pte))
		return 0;
	entry = pte_to_swp_entry(pte);
	return is_hwpoison_entry(entry);
}
EXPORT_SYMBOL_GPL(is_hwpoison_address);