memory-failure.c

/*
 * Copyright (C) 2008, 2009 Intel Corporation
 * Authors: Andi Kleen, Fengguang Wu
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 only as published by the
 * Free Software Foundation.
 *
 * High level machine check handler. Handles pages reported by the
 * hardware as being corrupted, usually due to a 2-bit ECC failure in
 * memory or cache.
 *
 * Handles page cache pages in various states. The tricky part
 * here is that we can access any page asynchronously to other VM
 * users, because memory failures could happen anytime and anywhere,
 * possibly violating some of their assumptions. This is why this code
 * has to be extremely careful. Generally it tries to use normal locking
 * rules, i.e. take the standard locks, even if that means the
 * error handling potentially takes a long time.
 *
 * The operation to map back from RMAP chains to processes has to walk
 * the complete process list and has non-linear complexity in the number
 * of mappings. In short it can be quite slow. But since memory corruptions
 * are rare we hope to get away with this.
 */

/*
 * Notebook:
 * - hugetlb needs more code
 * - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages
 * - pass bad pages to kdump next kernel
 */
#define DEBUG 1		/* remove me in 2.6.34 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/kernel-page-flags.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
#include "internal.h"
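
/*
 * Tunables, exposed via sysctl (vm.memory_failure_early_kill and
 * vm.memory_failure_recovery):
 *
 * memory_failure_early_kill: when set, processes that map a corrupted
 * page get signalled as soon as the corruption is detected, instead of
 * only when they actually touch the bad data (a task can override this
 * for itself, see task_early_kill() below).
 *
 * memory_failure_recovery: when clear, any reported memory failure
 * panics the machine instead of attempting recovery.
 *
 * mce_bad_pages counts pages that have been taken out of service; it is
 * reported to user space as the HardwareCorrupted line in /proc/meminfo.
 */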
int sysctl_memory_failure_early_kill __read_mostly = 0;

int sysctl_memory_failure_recovery __read_mostly = 1;

atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
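
/*
 * Filter knobs used by hwpoison_filter() below to narrow the set of
 * pages the handler will act on (mainly useful for fault injection
 * testing):
 *
 * hwpoison_filter_enable: master switch; when 0 nothing is filtered.
 * hwpoison_filter_dev_major/minor: only handle pages whose backing
 *	device matches; ~0U means "don't care".
 * hwpoison_filter_flags_mask/value: only handle pages whose
 *	stable_page_flags(), masked with _mask, equal _value.
 */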
u32 hwpoison_filter_enable = 0;
u32 hwpoison_filter_dev_major = ~0U;
u32 hwpoison_filter_dev_minor = ~0U;
u64 hwpoison_filter_flags_mask;
u64 hwpoison_filter_flags_value;
EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);

static int hwpoison_filter_dev(struct page *p)
{
	struct address_space *mapping;
	dev_t dev;

	if (hwpoison_filter_dev_major == ~0U &&
	    hwpoison_filter_dev_minor == ~0U)
		return 0;

	/*
	 * page_mapping() does not accept slab pages
	 */
	if (PageSlab(p))
		return -EINVAL;

	mapping = page_mapping(p);
	if (mapping == NULL || mapping->host == NULL)
		return -EINVAL;

	dev = mapping->host->i_sb->s_dev;
	if (hwpoison_filter_dev_major != ~0U &&
	    hwpoison_filter_dev_major != MAJOR(dev))
		return -EINVAL;
	if (hwpoison_filter_dev_minor != ~0U &&
	    hwpoison_filter_dev_minor != MINOR(dev))
		return -EINVAL;

	return 0;
}

static int hwpoison_filter_flags(struct page *p)
{
	if (!hwpoison_filter_flags_mask)
		return 0;

	if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
				    hwpoison_filter_flags_value)
		return 0;
	else
		return -EINVAL;
}

/*
 * This allows stress tests to limit test scope to a collection of tasks
 * by putting them under some memcg. This prevents killing unrelated/important
 * processes such as /sbin/init. Note that the target task may share clean
 * pages with init (eg. libc text), which is harmless. If the target task
 * shares _dirty_ pages with another task B, the test scheme must make sure B
 * is also included in the memcg. Finally, due to race conditions this filter
 * can only guarantee that the page either belongs to the memcg tasks, or is
 * a freed page.
 */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
u64 hwpoison_filter_memcg;
EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
static int hwpoison_filter_task(struct page *p)
{
	struct mem_cgroup *mem;
	struct cgroup_subsys_state *css;
	unsigned long ino;

	if (!hwpoison_filter_memcg)
		return 0;

	mem = try_get_mem_cgroup_from_page(p);
	if (!mem)
		return -EINVAL;

	css = mem_cgroup_css(mem);
	/* root_mem_cgroup has NULL dentries */
	if (!css->cgroup->dentry)
		return -EINVAL;

	ino = css->cgroup->dentry->d_inode->i_ino;
	css_put(css);

	if (ino != hwpoison_filter_memcg)
		return -EINVAL;

	return 0;
}
#else
static int hwpoison_filter_task(struct page *p) { return 0; }
#endif
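
/*
 * hwpoison_filter() combines the checks above. It is consulted by
 * __memory_failure() after the page has been marked PG_hwpoison but
 * before anything irreversible is done; a non-zero return makes the
 * handler clear the poison bit again and back out, so filtered pages
 * are left untouched.
 */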
int hwpoison_filter(struct page *p)
{
	if (!hwpoison_filter_enable)
		return 0;

	if (hwpoison_filter_dev(p))
		return -EINVAL;

	if (hwpoison_filter_flags(p))
		return -EINVAL;

	if (hwpoison_filter_task(p))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(hwpoison_filter);

/*
 * Send all the processes who have the page mapped an ``action optional''
 * signal.
 */
static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
			unsigned long pfn)
{
	struct siginfo si;
	int ret;

	printk(KERN_ERR
		"MCE %#lx: Killing %s:%d early due to hardware memory corruption\n",
		pfn, t->comm, t->pid);
	si.si_signo = SIGBUS;
	si.si_errno = 0;
	si.si_code = BUS_MCEERR_AO;
	si.si_addr = (void *)addr;
#ifdef __ARCH_SI_TRAPNO
	si.si_trapno = trapno;
#endif
	si.si_addr_lsb = PAGE_SHIFT;
	/*
	 * Don't use force here, it's convenient if the signal
	 * can be temporarily blocked.
	 * This could cause a loop when the user sets SIGBUS
	 * to SIG_IGN, but hopefully no one will do that?
	 */
	ret = send_sig_info(SIGBUS, &si, t);	/* synchronous? */
	if (ret < 0)
		printk(KERN_INFO "MCE: Error sending signal to %s:%d: %d\n",
		       t->comm, t->pid, ret);
	return ret;
}

/*
 * When an unknown page type is encountered drain as many buffers as possible
 * in the hope of turning the page into an LRU or free page, which we can
 * handle.
 */
void shake_page(struct page *p)
{
	if (!PageSlab(p)) {
		lru_add_drain_all();
		if (PageLRU(p))
			return;
		drain_all_pages();
		if (PageLRU(p) || is_free_buddy_page(p))
			return;
	}
	/*
	 * Could call shrink_slab here (which would also
	 * shrink other caches). Unfortunately that might
	 * also access the corrupted page, which could be fatal.
	 */
}
EXPORT_SYMBOL_GPL(shake_page);

/*
 * Kill all processes that have a poisoned page mapped and then isolate
 * the page.
 *
 * General strategy:
 * Find all processes having the page mapped and kill them.
 * But we keep a page reference around so that the page is not
 * actually freed yet.
 * Then stash the page away.
 *
 * There's no convenient way to get back to mapped processes
 * from the VMAs. So do a brute-force search over all
 * running processes.
 *
 * Remember that machine checks are not common (or rather
 * if they are common you have other problems), so this shouldn't
 * be a performance issue.
 *
 * Also there are some races possible while we get from the
 * error detection to actually handling it.
 */
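
/*
 * One entry per task that should receive a signal: tsk holds a task
 * reference taken in add_to_kill(), addr is the user space address the
 * poisoned page is mapped at, and addr_valid is cleared when that
 * address could not be determined (in which case the task is killed
 * outright rather than signalled).
 */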
struct to_kill {
	struct list_head nd;
	struct task_struct *tsk;
	unsigned long addr;
	unsigned addr_valid:1;
};

/*
 * Failure handling: if we can't find or can't kill a process there's
 * not much we can do. We just print a message and ignore otherwise.
 */

/*
 * Schedule a process for later kill.
 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
 * TBD would GFP_NOIO be enough?
 */
static void add_to_kill(struct task_struct *tsk, struct page *p,
			struct vm_area_struct *vma,
			struct list_head *to_kill,
			struct to_kill **tkc)
{
	struct to_kill *tk;

	if (*tkc) {
		tk = *tkc;
		*tkc = NULL;
	} else {
		tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
		if (!tk) {
			printk(KERN_ERR
		"MCE: Out of memory while machine check handling\n");
			return;
		}
	}
	tk->addr = page_address_in_vma(p, vma);
	tk->addr_valid = 1;

	/*
	 * In theory we don't have to kill when the page was
	 * munmapped. But it could also be an mremap. Since that's
	 * likely very rare kill anyway just out of paranoia, but use
	 * a SIGKILL because the error is not contained anymore.
	 */
	if (tk->addr == -EFAULT) {
		pr_debug("MCE: Unable to find user space address %lx in %s\n",
			page_to_pfn(p), tsk->comm);
		tk->addr_valid = 0;
	}
	get_task_struct(tsk);
	tk->tsk = tsk;
	list_add_tail(&tk->nd, to_kill);
}

/*
 * Kill the processes that have been collected earlier.
 *
 * Only do anything when DOIT is set, otherwise just free the list
 * (this is used for clean pages which do not need killing).
 * Also when FAIL is set do a force kill because something went
 * wrong earlier.
 */
static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno,
			  int fail, unsigned long pfn)
{
	struct to_kill *tk, *next;

	list_for_each_entry_safe (tk, next, to_kill, nd) {
		if (doit) {
			/*
			 * In case something went wrong with munmapping
			 * make sure the process doesn't catch the
			 * signal and then access the memory. Just kill it.
			 */
			if (fail || tk->addr_valid == 0) {
				printk(KERN_ERR
		"MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
					pfn, tk->tsk->comm, tk->tsk->pid);
				force_sig(SIGKILL, tk->tsk);
			}

			/*
			 * In theory the process could have mapped
			 * something else on the address in-between. We could
			 * check for that, but we need to tell the
			 * process anyway.
			 */
			else if (kill_proc_ao(tk->tsk, tk->addr, trapno,
					      pfn) < 0)
				printk(KERN_ERR
		"MCE %#lx: Cannot send advisory machine check signal to %s:%d\n",
					pfn, tk->tsk->comm, tk->tsk->pid);
		}
		put_task_struct(tk->tsk);
		kfree(tk);
	}
}
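
/*
 * Decide whether this particular task should be signalled right away
 * ("early kill") or only when it actually touches the corrupted data.
 * The system wide sysctl_memory_failure_early_kill default can be
 * overridden per process with prctl(PR_MCE_KILL), which sets
 * PF_MCE_PROCESS and PF_MCE_EARLY; kernel threads (no mm) are never
 * signalled.
 */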
static int task_early_kill(struct task_struct *tsk)
{
	if (!tsk->mm)
		return 0;
	if (tsk->flags & PF_MCE_PROCESS)
		return !!(tsk->flags & PF_MCE_EARLY);
	return sysctl_memory_failure_early_kill;
}

/*
 * Collect processes when the error hit an anonymous page.
 */
static void collect_procs_anon(struct page *page, struct list_head *to_kill,
			       struct to_kill **tkc)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct anon_vma *av;

	read_lock(&tasklist_lock);
	av = page_lock_anon_vma(page);
	if (av == NULL)	/* Not actually mapped anymore */
		goto out;
	for_each_process (tsk) {
		if (!task_early_kill(tsk))
			continue;
		list_for_each_entry (vma, &av->head, anon_vma_node) {
			if (!page_mapped_in_vma(page, vma))
				continue;
			if (vma->vm_mm == tsk->mm)
				add_to_kill(tsk, page, vma, to_kill, tkc);
		}
	}
	page_unlock_anon_vma(av);
out:
	read_unlock(&tasklist_lock);
}

/*
 * Collect processes when the error hit a file mapped page.
 */
static void collect_procs_file(struct page *page, struct list_head *to_kill,
			       struct to_kill **tkc)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct prio_tree_iter iter;
	struct address_space *mapping = page->mapping;

	/*
	 * A note on the locking order between the two locks.
	 * We don't rely on this particular order.
	 * If you have some other code that needs a different order
	 * feel free to switch them around. Or add a reverse link
	 * from mm_struct to task_struct, then this could be all
	 * done without taking tasklist_lock and looping over all tasks.
	 */
	read_lock(&tasklist_lock);
	spin_lock(&mapping->i_mmap_lock);
	for_each_process(tsk) {
		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

		if (!task_early_kill(tsk))
			continue;

		vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff,
				      pgoff) {
			/*
			 * Send early kill signal to tasks where a vma covers
			 * the page but the corrupted page is not necessarily
			 * mapped in its pte.
			 * Assume applications who requested early kill want
			 * to be informed of all such data corruptions.
			 */
			if (vma->vm_mm == tsk->mm)
				add_to_kill(tsk, page, vma, to_kill, tkc);
		}
	}
	spin_unlock(&mapping->i_mmap_lock);
	read_unlock(&tasklist_lock);
}

/*
 * Collect the processes who have the corrupted page mapped to kill.
 * This is done in two steps for locking reasons.
 * First preallocate one tokill structure outside the spin locks,
 * so that we can kill at least one process reasonably reliably.
 */
static void collect_procs(struct page *page, struct list_head *tokill)
{
	struct to_kill *tk;

	if (!page->mapping)
		return;

	tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
	if (!tk)
		return;
	if (PageAnon(page))
		collect_procs_anon(page, tokill, &tk);
	else
		collect_procs_file(page, tokill, &tk);
	kfree(tk);
}

/*
 * Error handlers for various types of pages.
 */

enum outcome {
	IGNORED,	/* Error: cannot be handled */
	FAILED,		/* Error: handling failed */
	DELAYED,	/* Will be handled later */
	RECOVERED,	/* Successfully recovered */
};

static const char *action_name[] = {
	[IGNORED] = "Ignored",
	[FAILED] = "Failed",
	[DELAYED] = "Delayed",
	[RECOVERED] = "Recovered",
};

/*
 * XXX: It is possible that a page is isolated from LRU cache,
 * and then kept in swap cache or failed to remove from page cache.
 * The page count will stop it from being freed by unpoison.
 * Stress tests should be aware of this memory leak problem.
 */
static int delete_from_lru_cache(struct page *p)
{
	if (!isolate_lru_page(p)) {
		/*
		 * Clear sensible page flags, so that the buddy system won't
		 * complain when the page is unpoison-and-freed.
		 */
		ClearPageActive(p);
		ClearPageUnevictable(p);
		/*
		 * drop the page count elevated by isolate_lru_page()
		 */
		page_cache_release(p);
		return 0;
	}
	return -EIO;
}

/*
 * Error hit kernel page.
 * Do nothing, try to be lucky and not touch this instead. For a few cases we
 * could be more sophisticated.
 */
static int me_kernel(struct page *p, unsigned long pfn)
{
	return IGNORED;
}

/*
 * Page in unknown state. Do nothing.
 */
static int me_unknown(struct page *p, unsigned long pfn)
{
	printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn);
	return FAILED;
}

/*
 * Clean (or cleaned) page cache page.
 */
static int me_pagecache_clean(struct page *p, unsigned long pfn)
{
	int err;
	int ret = FAILED;
	struct address_space *mapping;

	delete_from_lru_cache(p);

	/*
	 * For anonymous pages we're done; the only reference left
	 * should be the one m_f() holds.
	 */
	if (PageAnon(p))
		return RECOVERED;

	/*
	 * Now truncate the page in the page cache. This is really
	 * more like a "temporary hole punch".
	 * Don't do this for block devices when someone else
	 * has a reference, because it could be file system metadata
	 * and that's not safe to truncate.
	 */
	mapping = page_mapping(p);
	if (!mapping) {
		/*
		 * Page has been torn down in the meanwhile
		 */
		return FAILED;
	}

	/*
	 * Truncation is a bit tricky. Enable it per file system for now.
	 *
	 * Open: to take i_mutex or not for this? Right now we don't.
	 */
	if (mapping->a_ops->error_remove_page) {
		err = mapping->a_ops->error_remove_page(mapping, p);
		if (err != 0) {
			printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n",
					pfn, err);
		} else if (page_has_private(p) &&
				!try_to_release_page(p, GFP_NOIO)) {
			pr_debug("MCE %#lx: failed to release buffers\n", pfn);
		} else {
			ret = RECOVERED;
		}
	} else {
		/*
		 * If the file system doesn't support it just invalidate.
		 * This fails on dirty pages or anything with private pages.
		 */
		if (invalidate_inode_page(p))
			ret = RECOVERED;
		else
			printk(KERN_INFO "MCE %#lx: Failed to invalidate\n",
				pfn);
	}
	return ret;
}

/*
 * Dirty pagecache page.
 * Issues: when the error hit a hole page the error is not properly
 * propagated.
 */
static int me_pagecache_dirty(struct page *p, unsigned long pfn)
{
	struct address_space *mapping = page_mapping(p);

	SetPageError(p);
	/* TBD: print more information about the file. */
	if (mapping) {
		/*
		 * IO error will be reported by write(), fsync(), etc.
		 * who check the mapping.
		 * This way the application knows that something went
		 * wrong with its dirty file data.
		 *
		 * There's one open issue:
		 *
		 * The EIO will be only reported on the next IO
		 * operation and then cleared through the IO map.
		 * Normally Linux has two mechanisms to pass IO errors:
		 * first through the AS_EIO flag in the address space
		 * and then through the PageError flag in the page.
		 * Since we drop pages on memory failure handling the
		 * only mechanism open to use is through AS_EIO.
		 *
		 * This has the disadvantage that it gets cleared on
		 * the first operation that returns an error, while
		 * the PageError bit is more sticky and only cleared
		 * when the page is reread or dropped. If an
		 * application assumes it will always get an error on
		 * fsync, but does other operations on the fd before
		 * and the page is dropped in between, then the error
		 * will not be properly reported.
		 *
		 * This can already happen even without hwpoisoned
		 * pages: first on metadata IO errors (which only
		 * report through AS_EIO) or when the page is dropped
		 * at the wrong time.
		 *
		 * So right now we assume that the application DTRT on
		 * the first EIO, but we're not worse than other parts
		 * of the kernel.
		 */
		mapping_set_error(mapping, EIO);
	}

	return me_pagecache_clean(p, pfn);
}

/*
 * Clean and dirty swap cache.
 *
 * Dirty swap cache page is tricky to handle. The page could live both in page
 * cache and swap cache (ie. page is freshly swapped in). So it could be
 * referenced concurrently by 2 types of PTEs:
 * normal PTEs and swap PTEs. We try to handle them consistently by calling
 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
 * and then
 *	- clear dirty bit to prevent IO
 *	- remove from LRU
 *	- but keep in the swap cache, so that when we return to it on
 *	  a later page fault, we know the application is accessing
 *	  corrupted data and shall be killed (we installed simple
 *	  interception code in do_swap_page to catch it).
 *
 * Clean swap cache pages can be directly isolated. A later page fault will
 * bring in the known good data from disk.
 */
static int me_swapcache_dirty(struct page *p, unsigned long pfn)
{
	ClearPageDirty(p);
	/* Trigger EIO in shmem: */
	ClearPageUptodate(p);

	if (!delete_from_lru_cache(p))
		return DELAYED;
	else
		return FAILED;
}

static int me_swapcache_clean(struct page *p, unsigned long pfn)
{
	delete_from_swap_cache(p);

	if (!delete_from_lru_cache(p))
		return RECOVERED;
	else
		return FAILED;
}

/*
 * Huge pages. Needs work.
 * Issues:
 * No rmap support so we cannot find the original mapper. In theory could walk
 * all MMs and look for the mappings, but that would be non atomic and racy.
 * Need rmap for hugepages for this. Alternatively we could employ a heuristic,
 * like just walking the current process and hoping it has it mapped (that
 * should usually be true for the common "shared database cache" case).
 * Should handle free huge pages and dequeue them too, but this needs to
 * handle huge page accounting correctly.
 */
static int me_huge_page(struct page *p, unsigned long pfn)
{
	return FAILED;
}

/*
 * Various page states we can handle.
 *
 * A page state is defined by its current page->flags bits.
 * The table matches them in order and calls the right handler.
 *
 * This is quite tricky because we can access the page at any time
 * in its life cycle, so all accesses have to be extremely careful.
 *
 * This is not complete. More states could be added.
 * For any missing state don't attempt recovery.
 */

#define dirty		(1UL << PG_dirty)
#define sc		(1UL << PG_swapcache)
#define unevict		(1UL << PG_unevictable)
#define mlock		(1UL << PG_mlocked)
#define writeback	(1UL << PG_writeback)
#define lru		(1UL << PG_lru)
#define swapbacked	(1UL << PG_swapbacked)
#define head		(1UL << PG_head)
#define tail		(1UL << PG_tail)
#define compound	(1UL << PG_compound)
#define slab		(1UL << PG_slab)
#define reserved	(1UL << PG_reserved)

static struct page_state {
	unsigned long mask;
	unsigned long res;
	char *msg;
	int (*action)(struct page *p, unsigned long pfn);
} error_states[] = {
	{ reserved,	reserved,	"reserved kernel",	me_kernel },
	/*
	 * free pages are specially detected outside this table:
	 * PG_buddy pages only make a small fraction of all free pages.
	 */

	/*
	 * Could in theory check if slab page is free or if we can drop
	 * currently unused objects without touching them. But just
	 * treat it as standard kernel for now.
	 */
	{ slab,		slab,		"kernel slab",	me_kernel },

#ifdef CONFIG_PAGEFLAGS_EXTENDED
	{ head,		head,		"huge",		me_huge_page },
	{ tail,		tail,		"huge",		me_huge_page },
#else
	{ compound,	compound,	"huge",		me_huge_page },
#endif

	{ sc|dirty,	sc|dirty,	"swapcache",	me_swapcache_dirty },
	{ sc|dirty,	sc,		"swapcache",	me_swapcache_clean },

	{ unevict|dirty, unevict|dirty,	"unevictable LRU", me_pagecache_dirty },
	{ unevict,	unevict,	"unevictable LRU", me_pagecache_clean },

	{ mlock|dirty,	mlock|dirty,	"mlocked LRU",	me_pagecache_dirty },
	{ mlock,	mlock,		"mlocked LRU",	me_pagecache_clean },

	{ lru|dirty,	lru|dirty,	"LRU",		me_pagecache_dirty },
	{ lru|dirty,	lru,		"clean LRU",	me_pagecache_clean },

	/*
	 * Catchall entry: must be at end.
	 */
	{ 0,		0,		"unknown page state",	me_unknown },
};
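
/*
 * An error_states[] entry applies when (page->flags & mask) == res and the
 * first matching entry wins. That is why e.g. { sc|dirty, sc } (swap cache
 * with the dirty bit clear) has to come after { sc|dirty, sc|dirty }.
 */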

static void action_result(unsigned long pfn, char *msg, int result)
{
	struct page *page = pfn_to_page(pfn);

	printk(KERN_ERR "MCE %#lx: %s%s page recovery: %s\n",
		pfn,
		PageDirty(page) ? "dirty " : "",
		msg, action_name[result]);
}

static int page_action(struct page_state *ps, struct page *p,
			unsigned long pfn)
{
	int result;
	int count;

	result = ps->action(p, pfn);
	action_result(pfn, ps->msg, result);

	count = page_count(p) - 1;
	if (ps->action == me_swapcache_dirty && result == DELAYED)
		count--;
	if (count != 0) {
		printk(KERN_ERR
		       "MCE %#lx: %s page still referenced by %d users\n",
		       pfn, ps->msg, count);
		result = FAILED;
	}

	/* Could do more checks here if page looks ok */

	/*
	 * Could adjust zone counters here to correct for the missing page.
	 */

	return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY;
}

#define N_UNMAP_TRIES 5

/*
 * Do all that is necessary to remove user space mappings. Unmap
 * the pages and send SIGBUS to the processes if the data was dirty.
 */
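/*
 * Returns one of the SWAP_* codes from try_to_unmap(); the caller only
 * proceeds with recovery when the result is SWAP_SUCCESS, i.e. the page
 * is no longer mapped into any user address space.
 */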
static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
				  int trapno)
{
	enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
	struct address_space *mapping;
	LIST_HEAD(tokill);
	int ret;
	int i;
	int kill = 1;

	if (PageReserved(p) || PageSlab(p))
		return SWAP_SUCCESS;

	/*
	 * This check implies we don't kill processes if their pages
	 * are in the swap cache early. Those are always late kills.
	 */
	if (!page_mapped(p))
		return SWAP_SUCCESS;

	if (PageCompound(p) || PageKsm(p))
		return SWAP_FAIL;

	if (PageSwapCache(p)) {
		printk(KERN_ERR
		       "MCE %#lx: keeping poisoned page in swap cache\n", pfn);
		ttu |= TTU_IGNORE_HWPOISON;
	}

	/*
	 * Propagate the dirty bit from PTEs to struct page first, because we
	 * need this to decide if we should kill or just drop the page.
	 * XXX: the dirty test could be racy: set_page_dirty() may not always
	 * be called inside page lock (it's recommended but not enforced).
	 */
	mapping = page_mapping(p);
	if (!PageDirty(p) && mapping && mapping_cap_writeback_dirty(mapping)) {
		if (page_mkclean(p)) {
			SetPageDirty(p);
		} else {
			kill = 0;
			ttu |= TTU_IGNORE_HWPOISON;
			printk(KERN_INFO
	"MCE %#lx: corrupted page was clean: dropped without side effects\n",
				pfn);
		}
	}

	/*
	 * First collect all the processes that have the page
	 * mapped in dirty form. This has to be done before try_to_unmap,
	 * because ttu takes the rmap data structures down.
	 *
	 * Error handling: We ignore errors here because
	 * there's nothing that can be done.
	 */
	if (kill)
		collect_procs(p, &tokill);

	/*
	 * try_to_unmap can fail temporarily due to races.
	 * Try a few times (RED-PEN better strategy?)
	 */
	for (i = 0; i < N_UNMAP_TRIES; i++) {
		ret = try_to_unmap(p, ttu);
		if (ret == SWAP_SUCCESS)
			break;
		pr_debug("MCE %#lx: try_to_unmap retry needed %d\n", pfn, ret);
	}

	if (ret != SWAP_SUCCESS)
		printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
				pfn, page_mapcount(p));

	/*
	 * Now that the dirty bit has been propagated to the
	 * struct page and all unmaps are done we can decide if
	 * killing is needed or not. Only kill when the page
	 * was dirty, otherwise the tokill list is merely
	 * freed. When there was a problem unmapping earlier
	 * use a more forceful uncatchable kill to prevent
	 * any accesses to the poisoned memory.
	 */
	kill_procs_ao(&tokill, !!PageDirty(p), trapno,
		      ret != SWAP_SUCCESS, pfn);

	return ret;
}

int __memory_failure(unsigned long pfn, int trapno, int flags)
{
	struct page_state *ps;
	struct page *p;
	int res;

	if (!sysctl_memory_failure_recovery)
		panic("Memory failure from trap %d on page %lx", trapno, pfn);

	if (!pfn_valid(pfn)) {
		printk(KERN_ERR
		       "MCE %#lx: memory outside kernel control\n",
		       pfn);
		return -ENXIO;
	}

	p = pfn_to_page(pfn);
	if (TestSetPageHWPoison(p)) {
		printk(KERN_ERR "MCE %#lx: already hardware poisoned\n", pfn);
		return 0;
	}

	atomic_long_add(1, &mce_bad_pages);

	/*
	 * We need/can do nothing about count=0 pages.
	 * 1) it's a free page, and therefore in safe hands:
	 *    prep_new_page() will be the gate keeper.
	 * 2) it's part of a non-compound high order page.
	 *    Implies some kernel user: cannot stop them from
	 *    R/W the page; let's pray that the page has been
	 *    used and will be freed some time later.
	 * In fact it's dangerous to directly bump up the page count from 0,
	 * that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
	 */
	if (!(flags & MF_COUNT_INCREASED) &&
	    !get_page_unless_zero(compound_head(p))) {
		if (is_free_buddy_page(p)) {
			action_result(pfn, "free buddy", DELAYED);
			return 0;
		} else {
			action_result(pfn, "high order kernel", IGNORED);
			return -EBUSY;
		}
	}

	/*
	 * We ignore non-LRU pages for good reasons.
	 * - PG_locked is only well defined for LRU pages and a few others
	 * - to avoid races with __set_page_locked()
	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
	 * The check (unnecessarily) ignores LRU pages being isolated and
	 * walked by the page reclaim code, however that's not a big loss.
	 */
	if (!PageLRU(p))
		shake_page(p);
	if (!PageLRU(p)) {
		/*
		 * shake_page could have turned it free.
		 */
		if (is_free_buddy_page(p)) {
			action_result(pfn, "free buddy, 2nd try", DELAYED);
			return 0;
		}
		action_result(pfn, "non LRU", IGNORED);
		put_page(p);
		return -EBUSY;
	}

	/*
	 * Lock the page and wait for writeback to finish.
	 * It's very difficult to mess with pages currently under IO
	 * and in many cases impossible, so we just avoid it here.
	 */
	lock_page_nosync(p);

	/*
	 * unpoison always clears PG_hwpoison inside the page lock
	 */
	if (!PageHWPoison(p)) {
		printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
		res = 0;
		goto out;
	}
	if (hwpoison_filter(p)) {
		if (TestClearPageHWPoison(p))
			atomic_long_dec(&mce_bad_pages);
		unlock_page(p);
		put_page(p);
		return 0;
	}

	wait_on_page_writeback(p);

	/*
	 * Now take care of user space mappings.
	 * Abort on fail: __remove_from_page_cache() assumes unmapped page.
	 */
	if (hwpoison_user_mappings(p, pfn, trapno) != SWAP_SUCCESS) {
		printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
		res = -EBUSY;
		goto out;
	}

	/*
	 * Torn down by someone else?
	 */
	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
		action_result(pfn, "already truncated LRU", IGNORED);
		res = -EBUSY;
		goto out;
	}

	res = -EBUSY;
	for (ps = error_states;; ps++) {
		if ((p->flags & ps->mask) == ps->res) {
			res = page_action(ps, p, pfn);
			break;
		}
	}
out:
	unlock_page(p);
	return res;
}
EXPORT_SYMBOL_GPL(__memory_failure);

/**
 * memory_failure - Handle memory failure of a page.
 * @pfn: Page number of the corrupted page
 * @trapno: Trap number reported in the signal to user space.
 *
 * This function is called by the low level machine check code
 * of an architecture when it detects hardware memory corruption
 * of a page. It tries its best to recover, which includes
 * dropping pages, killing processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber).
 *
 * Must run in process context (e.g. a work queue) with interrupts
 * enabled and no spinlocks held.
 */
void memory_failure(unsigned long pfn, int trapno)
{
	__memory_failure(pfn, trapno, 0);
}

/**
 * unpoison_memory - Unpoison a previously poisoned page
 * @pfn: Page number of the to be unpoisoned page
 *
 * Software-unpoison a page that has been poisoned by
 * memory_failure() earlier.
 *
 * This is only done on the software level, so it only works
 * for Linux-injected failures, not real hardware failures.
 *
 * Returns 0 for success, otherwise -errno.
 */
int unpoison_memory(unsigned long pfn)
{
	struct page *page;
	struct page *p;
	int freeit = 0;

	if (!pfn_valid(pfn))
		return -ENXIO;

	p = pfn_to_page(pfn);
	page = compound_head(p);

	if (!PageHWPoison(p)) {
		pr_debug("MCE: Page was already unpoisoned %#lx\n", pfn);
		return 0;
	}

	if (!get_page_unless_zero(page)) {
		if (TestClearPageHWPoison(p))
			atomic_long_dec(&mce_bad_pages);
		pr_debug("MCE: Software-unpoisoned free page %#lx\n", pfn);
		return 0;
	}

	lock_page_nosync(page);
	/*
	 * This test is racy because PG_hwpoison is set outside of page lock.
	 * That's acceptable because that won't trigger kernel panic. Instead,
	 * the PG_hwpoison page will be caught and isolated on the entrance to
	 * the free buddy page pool.
	 */
	if (TestClearPageHWPoison(p)) {
		pr_debug("MCE: Software-unpoisoned page %#lx\n", pfn);
		atomic_long_dec(&mce_bad_pages);
		freeit = 1;
	}
	unlock_page(page);

	put_page(page);
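	/*
	 * Drop the extra refcount that memory_failure() took when it
	 * poisoned the page; only then can the page really be freed.
	 */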
	if (freeit)
		put_page(page);

	return 0;
}
EXPORT_SYMBOL(unpoison_memory);