ksm.c 55 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045
  1. /*
  2. * Memory merging support.
  3. *
  4. * This code enables dynamic sharing of identical pages found in different
  5. * memory areas, even if they are not shared by fork()
  6. *
  7. * Copyright (C) 2008-2009 Red Hat, Inc.
  8. * Authors:
  9. * Izik Eidus
  10. * Andrea Arcangeli
  11. * Chris Wright
  12. * Hugh Dickins
  13. *
  14. * This work is licensed under the terms of the GNU GPL, version 2.
  15. */
  16. #include <linux/errno.h>
  17. #include <linux/mm.h>
  18. #include <linux/fs.h>
  19. #include <linux/mman.h>
  20. #include <linux/sched.h>
  21. #include <linux/rwsem.h>
  22. #include <linux/pagemap.h>
  23. #include <linux/rmap.h>
  24. #include <linux/spinlock.h>
  25. #include <linux/jhash.h>
  26. #include <linux/delay.h>
  27. #include <linux/kthread.h>
  28. #include <linux/wait.h>
  29. #include <linux/slab.h>
  30. #include <linux/rbtree.h>
  31. #include <linux/memory.h>
  32. #include <linux/mmu_notifier.h>
  33. #include <linux/swap.h>
  34. #include <linux/ksm.h>
  35. #include <linux/hash.h>
  36. #include <linux/freezer.h>
  37. #include <linux/oom.h>
  38. #include <asm/tlbflush.h>
  39. #include "internal.h"
  40. /*
  41. * A few notes about the KSM scanning process,
  42. * to make it easier to understand the data structures below:
  43. *
  44. * In order to reduce excessive scanning, KSM sorts the memory pages by their
  45. * contents into a data structure that holds pointers to the pages' locations.
  46. *
  47. * Since the contents of the pages may change at any moment, KSM cannot just
  48. * insert the pages into a normal sorted tree and expect it to find anything.
  49. * Therefore KSM uses two data structures - the stable and the unstable tree.
  50. *
  51. * The stable tree holds pointers to all the merged pages (ksm pages), sorted
  52. * by their contents. Because each such page is write-protected, searching on
  53. * this tree is fully assured to be working (except when pages are unmapped),
  54. * and therefore this tree is called the stable tree.
  55. *
  56. * In addition to the stable tree, KSM uses a second data structure called the
  57. * unstable tree: this tree holds pointers to pages which have been found to
  58. * be "unchanged for a period of time". The unstable tree sorts these pages
  59. * by their contents, but since they are not write-protected, KSM cannot rely
  60. * upon the unstable tree to work correctly - the unstable tree is liable to
  61. * be corrupted as its contents are modified, and so it is called unstable.
  62. *
  63. * KSM solves this problem by several techniques:
  64. *
  65. * 1) The unstable tree is flushed every time KSM completes scanning all
  66. * memory areas, and then the tree is rebuilt again from the beginning.
  67. * 2) KSM will only insert into the unstable tree, pages whose hash value
  68. * has not changed since the previous scan of all memory areas.
  69. * 3) The unstable tree is a RedBlack Tree - so its balancing is based on the
  70. * colors of the nodes and not on their contents, assuring that even when
  71. * the tree gets "corrupted" it won't get out of balance, so scanning time
  72. * remains the same (also, searching and inserting nodes in an rbtree uses
  73. * the same algorithm, so we have no overhead when we flush and rebuild).
  74. * 4) KSM never flushes the stable tree, which means that even if it were to
  75. * take 10 attempts to find a page in the unstable tree, once it is found,
  76. * it is secured in the stable tree. (When we scan a new page, we first
  77. * compare it against the stable tree, and then against the unstable tree.)
  78. */
  79. /**
  80. * struct mm_slot - ksm information per mm that is being scanned
  81. * @link: link to the mm_slots hash list
  82. * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
  83. * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
  84. * @mm: the mm that this information is valid for
  85. */
  86. struct mm_slot {
  87. struct hlist_node link;
  88. struct list_head mm_list;
  89. struct rmap_item *rmap_list;
  90. struct mm_struct *mm;
  91. };
  92. /**
  93. * struct ksm_scan - cursor for scanning
  94. * @mm_slot: the current mm_slot we are scanning
  95. * @address: the next address inside that to be scanned
  96. * @rmap_list: link to the next rmap to be scanned in the rmap_list
  97. * @seqnr: count of completed full scans (needed when removing unstable node)
  98. *
  99. * There is only the one ksm_scan instance of this cursor structure.
  100. */
  101. struct ksm_scan {
  102. struct mm_slot *mm_slot;
  103. unsigned long address;
  104. struct rmap_item **rmap_list;
  105. unsigned long seqnr;
  106. };
  107. /**
  108. * struct stable_node - node of the stable rbtree
  109. * @node: rb node of this ksm page in the stable tree
  110. * @hlist: hlist head of rmap_items using this ksm page
  111. * @kpfn: page frame number of this ksm page
  112. */
  113. struct stable_node {
  114. struct rb_node node;
  115. struct hlist_head hlist;
  116. unsigned long kpfn;
  117. };
  118. /**
  119. * struct rmap_item - reverse mapping item for virtual addresses
  120. * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
  121. * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
  122. * @mm: the memory structure this rmap_item is pointing into
  123. * @address: the virtual address this rmap_item tracks (+ flags in low bits)
  124. * @oldchecksum: previous checksum of the page at that virtual address
  125. * @node: rb node of this rmap_item in the unstable tree
  126. * @head: pointer to stable_node heading this list in the stable tree
  127. * @hlist: link into hlist of rmap_items hanging off that stable_node
  128. */
  129. struct rmap_item {
  130. struct rmap_item *rmap_list;
  131. struct anon_vma *anon_vma; /* when stable */
  132. struct mm_struct *mm;
  133. unsigned long address; /* + low bits used for flags below */
  134. unsigned int oldchecksum; /* when unstable */
  135. union {
  136. struct rb_node node; /* when node of unstable tree */
  137. struct { /* when listed from stable tree */
  138. struct stable_node *head;
  139. struct hlist_node hlist;
  140. };
  141. };
  142. };
  143. #define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */
  144. #define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */
  145. #define STABLE_FLAG 0x200 /* is listed from the stable tree */
  146. /* The stable and unstable tree heads */
  147. static struct rb_root root_stable_tree = RB_ROOT;
  148. static struct rb_root root_unstable_tree = RB_ROOT;
  149. #define MM_SLOTS_HASH_SHIFT 10
  150. #define MM_SLOTS_HASH_HEADS (1 << MM_SLOTS_HASH_SHIFT)
  151. static struct hlist_head mm_slots_hash[MM_SLOTS_HASH_HEADS];
  152. static struct mm_slot ksm_mm_head = {
  153. .mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
  154. };
  155. static struct ksm_scan ksm_scan = {
  156. .mm_slot = &ksm_mm_head,
  157. };
  158. static struct kmem_cache *rmap_item_cache;
  159. static struct kmem_cache *stable_node_cache;
  160. static struct kmem_cache *mm_slot_cache;
  161. /* The number of nodes in the stable tree */
  162. static unsigned long ksm_pages_shared;
  163. /* The number of page slots additionally sharing those nodes */
  164. static unsigned long ksm_pages_sharing;
  165. /* The number of nodes in the unstable tree */
  166. static unsigned long ksm_pages_unshared;
  167. /* The number of rmap_items in use: to calculate pages_volatile */
  168. static unsigned long ksm_rmap_items;
  169. /* Number of pages ksmd should scan in one batch */
  170. static unsigned int ksm_thread_pages_to_scan = 100;
  171. /* Milliseconds ksmd should sleep between batches */
  172. static unsigned int ksm_thread_sleep_millisecs = 20;
  173. #define KSM_RUN_STOP 0
  174. #define KSM_RUN_MERGE 1
  175. #define KSM_RUN_UNMERGE 2
  176. static unsigned int ksm_run = KSM_RUN_STOP;
  177. static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
  178. static DEFINE_MUTEX(ksm_thread_mutex);
  179. static DEFINE_SPINLOCK(ksm_mmlist_lock);
  180. #define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
  181. sizeof(struct __struct), __alignof__(struct __struct),\
  182. (__flags), NULL)
  183. static int __init ksm_slab_init(void)
  184. {
  185. rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
  186. if (!rmap_item_cache)
  187. goto out;
  188. stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
  189. if (!stable_node_cache)
  190. goto out_free1;
  191. mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
  192. if (!mm_slot_cache)
  193. goto out_free2;
  194. return 0;
  195. out_free2:
  196. kmem_cache_destroy(stable_node_cache);
  197. out_free1:
  198. kmem_cache_destroy(rmap_item_cache);
  199. out:
  200. return -ENOMEM;
  201. }
  202. static void __init ksm_slab_free(void)
  203. {
  204. kmem_cache_destroy(mm_slot_cache);
  205. kmem_cache_destroy(stable_node_cache);
  206. kmem_cache_destroy(rmap_item_cache);
  207. mm_slot_cache = NULL;
  208. }
  209. static inline struct rmap_item *alloc_rmap_item(void)
  210. {
  211. struct rmap_item *rmap_item;
  212. rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
  213. if (rmap_item)
  214. ksm_rmap_items++;
  215. return rmap_item;
  216. }
  217. static inline void free_rmap_item(struct rmap_item *rmap_item)
  218. {
  219. ksm_rmap_items--;
  220. rmap_item->mm = NULL; /* debug safety */
  221. kmem_cache_free(rmap_item_cache, rmap_item);
  222. }
  223. static inline struct stable_node *alloc_stable_node(void)
  224. {
  225. return kmem_cache_alloc(stable_node_cache, GFP_KERNEL);
  226. }
  227. static inline void free_stable_node(struct stable_node *stable_node)
  228. {
  229. kmem_cache_free(stable_node_cache, stable_node);
  230. }
  231. static inline struct mm_slot *alloc_mm_slot(void)
  232. {
  233. if (!mm_slot_cache) /* initialization failed */
  234. return NULL;
  235. return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
  236. }
  237. static inline void free_mm_slot(struct mm_slot *mm_slot)
  238. {
  239. kmem_cache_free(mm_slot_cache, mm_slot);
  240. }
  241. static struct mm_slot *get_mm_slot(struct mm_struct *mm)
  242. {
  243. struct mm_slot *mm_slot;
  244. struct hlist_head *bucket;
  245. struct hlist_node *node;
  246. bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)];
  247. hlist_for_each_entry(mm_slot, node, bucket, link) {
  248. if (mm == mm_slot->mm)
  249. return mm_slot;
  250. }
  251. return NULL;
  252. }
  253. static void insert_to_mm_slots_hash(struct mm_struct *mm,
  254. struct mm_slot *mm_slot)
  255. {
  256. struct hlist_head *bucket;
  257. bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)];
  258. mm_slot->mm = mm;
  259. hlist_add_head(&mm_slot->link, bucket);
  260. }
  261. static inline int in_stable_tree(struct rmap_item *rmap_item)
  262. {
  263. return rmap_item->address & STABLE_FLAG;
  264. }
  265. /*
  266. * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
  267. * page tables after it has passed through ksm_exit() - which, if necessary,
  268. * takes mmap_sem briefly to serialize against them. ksm_exit() does not set
  269. * a special flag: they can just back out as soon as mm_users goes to zero.
  270. * ksm_test_exit() is used throughout to make this test for exit: in some
  271. * places for correctness, in some places just to avoid unnecessary work.
  272. */
  273. static inline bool ksm_test_exit(struct mm_struct *mm)
  274. {
  275. return atomic_read(&mm->mm_users) == 0;
  276. }
  277. /*
  278. * We use break_ksm to break COW on a ksm page: it's a stripped down
  279. *
  280. * if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1)
  281. * put_page(page);
  282. *
  283. * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
  284. * in case the application has unmapped and remapped mm,addr meanwhile.
  285. * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP
  286. * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
  287. */
  288. static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
  289. {
  290. struct page *page;
  291. int ret = 0;
  292. do {
  293. cond_resched();
  294. page = follow_page(vma, addr, FOLL_GET);
  295. if (IS_ERR_OR_NULL(page))
  296. break;
  297. if (PageKsm(page))
  298. ret = handle_mm_fault(vma->vm_mm, vma, addr,
  299. FAULT_FLAG_WRITE);
  300. else
  301. ret = VM_FAULT_WRITE;
  302. put_page(page);
  303. } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
  304. /*
  305. * We must loop because handle_mm_fault() may back out if there's
  306. * any difficulty e.g. if pte accessed bit gets updated concurrently.
  307. *
  308. * VM_FAULT_WRITE is what we have been hoping for: it indicates that
  309. * COW has been broken, even if the vma does not permit VM_WRITE;
  310. * but note that a concurrent fault might break PageKsm for us.
  311. *
  312. * VM_FAULT_SIGBUS could occur if we race with truncation of the
  313. * backing file, which also invalidates anonymous pages: that's
  314. * okay, that truncation will have unmapped the PageKsm for us.
  315. *
  316. * VM_FAULT_OOM: at the time of writing (late July 2009), setting
  317. * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
  318. * current task has TIF_MEMDIE set, and will be OOM killed on return
  319. * to user; and ksmd, having no mm, would never be chosen for that.
  320. *
  321. * But if the mm is in a limited mem_cgroup, then the fault may fail
  322. * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
  323. * even ksmd can fail in this way - though it's usually breaking ksm
  324. * just to undo a merge it made a moment before, so unlikely to oom.
  325. *
  326. * That's a pity: we might therefore have more kernel pages allocated
  327. * than we're counting as nodes in the stable tree; but ksm_do_scan
  328. * will retry to break_cow on each pass, so should recover the page
  329. * in due course. The important thing is to not let VM_MERGEABLE
  330. * be cleared while any such pages might remain in the area.
  331. */
  332. return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
  333. }
  334. static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
  335. unsigned long addr)
  336. {
  337. struct vm_area_struct *vma;
  338. if (ksm_test_exit(mm))
  339. return NULL;
  340. vma = find_vma(mm, addr);
  341. if (!vma || vma->vm_start > addr)
  342. return NULL;
  343. if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
  344. return NULL;
  345. return vma;
  346. }
  347. static void break_cow(struct rmap_item *rmap_item)
  348. {
  349. struct mm_struct *mm = rmap_item->mm;
  350. unsigned long addr = rmap_item->address;
  351. struct vm_area_struct *vma;
  352. /*
  353. * It is not an accident that whenever we want to break COW
  354. * to undo, we also need to drop a reference to the anon_vma.
  355. */
  356. put_anon_vma(rmap_item->anon_vma);
  357. down_read(&mm->mmap_sem);
  358. vma = find_mergeable_vma(mm, addr);
  359. if (vma)
  360. break_ksm(vma, addr);
  361. up_read(&mm->mmap_sem);
  362. }
  363. static struct page *page_trans_compound_anon(struct page *page)
  364. {
  365. if (PageTransCompound(page)) {
  366. struct page *head = compound_trans_head(page);
  367. /*
  368. * head may actually be splitted and freed from under
  369. * us but it's ok here.
  370. */
  371. if (PageAnon(head))
  372. return head;
  373. }
  374. return NULL;
  375. }
  376. static struct page *get_mergeable_page(struct rmap_item *rmap_item)
  377. {
  378. struct mm_struct *mm = rmap_item->mm;
  379. unsigned long addr = rmap_item->address;
  380. struct vm_area_struct *vma;
  381. struct page *page;
  382. down_read(&mm->mmap_sem);
  383. vma = find_mergeable_vma(mm, addr);
  384. if (!vma)
  385. goto out;
  386. page = follow_page(vma, addr, FOLL_GET);
  387. if (IS_ERR_OR_NULL(page))
  388. goto out;
  389. if (PageAnon(page) || page_trans_compound_anon(page)) {
  390. flush_anon_page(vma, page, addr);
  391. flush_dcache_page(page);
  392. } else {
  393. put_page(page);
  394. out: page = NULL;
  395. }
  396. up_read(&mm->mmap_sem);
  397. return page;
  398. }
  399. static void remove_node_from_stable_tree(struct stable_node *stable_node)
  400. {
  401. struct rmap_item *rmap_item;
  402. struct hlist_node *hlist;
  403. hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
  404. if (rmap_item->hlist.next)
  405. ksm_pages_sharing--;
  406. else
  407. ksm_pages_shared--;
  408. put_anon_vma(rmap_item->anon_vma);
  409. rmap_item->address &= PAGE_MASK;
  410. cond_resched();
  411. }
  412. rb_erase(&stable_node->node, &root_stable_tree);
  413. free_stable_node(stable_node);
  414. }
  415. /*
  416. * get_ksm_page: checks if the page indicated by the stable node
  417. * is still its ksm page, despite having held no reference to it.
  418. * In which case we can trust the content of the page, and it
  419. * returns the gotten page; but if the page has now been zapped,
  420. * remove the stale node from the stable tree and return NULL.
  421. *
  422. * You would expect the stable_node to hold a reference to the ksm page.
  423. * But if it increments the page's count, swapping out has to wait for
  424. * ksmd to come around again before it can free the page, which may take
  425. * seconds or even minutes: much too unresponsive. So instead we use a
  426. * "keyhole reference": access to the ksm page from the stable node peeps
  427. * out through its keyhole to see if that page still holds the right key,
  428. * pointing back to this stable node. This relies on freeing a PageAnon
  429. * page to reset its page->mapping to NULL, and relies on no other use of
  430. * a page to put something that might look like our key in page->mapping.
  431. *
  432. * include/linux/pagemap.h page_cache_get_speculative() is a good reference,
  433. * but this is different - made simpler by ksm_thread_mutex being held, but
  434. * interesting for assuming that no other use of the struct page could ever
  435. * put our expected_mapping into page->mapping (or a field of the union which
  436. * coincides with page->mapping). The RCU calls are not for KSM at all, but
  437. * to keep the page_count protocol described with page_cache_get_speculative.
  438. *
  439. * Note: it is possible that get_ksm_page() will return NULL one moment,
  440. * then page the next, if the page is in between page_freeze_refs() and
  441. * page_unfreeze_refs(): this shouldn't be a problem anywhere, the page
  442. * is on its way to being freed; but it is an anomaly to bear in mind.
  443. */
  444. static struct page *get_ksm_page(struct stable_node *stable_node)
  445. {
  446. struct page *page;
  447. void *expected_mapping;
  448. page = pfn_to_page(stable_node->kpfn);
  449. expected_mapping = (void *)stable_node +
  450. (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
  451. rcu_read_lock();
  452. if (page->mapping != expected_mapping)
  453. goto stale;
  454. if (!get_page_unless_zero(page))
  455. goto stale;
  456. if (page->mapping != expected_mapping) {
  457. put_page(page);
  458. goto stale;
  459. }
  460. rcu_read_unlock();
  461. return page;
  462. stale:
  463. rcu_read_unlock();
  464. remove_node_from_stable_tree(stable_node);
  465. return NULL;
  466. }
  467. /*
  468. * Removing rmap_item from stable or unstable tree.
  469. * This function will clean the information from the stable/unstable tree.
  470. */
  471. static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
  472. {
  473. if (rmap_item->address & STABLE_FLAG) {
  474. struct stable_node *stable_node;
  475. struct page *page;
  476. stable_node = rmap_item->head;
  477. page = get_ksm_page(stable_node);
  478. if (!page)
  479. goto out;
  480. lock_page(page);
  481. hlist_del(&rmap_item->hlist);
  482. unlock_page(page);
  483. put_page(page);
  484. if (stable_node->hlist.first)
  485. ksm_pages_sharing--;
  486. else
  487. ksm_pages_shared--;
  488. put_anon_vma(rmap_item->anon_vma);
  489. rmap_item->address &= PAGE_MASK;
  490. } else if (rmap_item->address & UNSTABLE_FLAG) {
  491. unsigned char age;
  492. /*
  493. * Usually ksmd can and must skip the rb_erase, because
  494. * root_unstable_tree was already reset to RB_ROOT.
  495. * But be careful when an mm is exiting: do the rb_erase
  496. * if this rmap_item was inserted by this scan, rather
  497. * than left over from before.
  498. */
  499. age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
  500. BUG_ON(age > 1);
  501. if (!age)
  502. rb_erase(&rmap_item->node, &root_unstable_tree);
  503. ksm_pages_unshared--;
  504. rmap_item->address &= PAGE_MASK;
  505. }
  506. out:
  507. cond_resched(); /* we're called from many long loops */
  508. }
  509. static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
  510. struct rmap_item **rmap_list)
  511. {
  512. while (*rmap_list) {
  513. struct rmap_item *rmap_item = *rmap_list;
  514. *rmap_list = rmap_item->rmap_list;
  515. remove_rmap_item_from_tree(rmap_item);
  516. free_rmap_item(rmap_item);
  517. }
  518. }
  519. /*
  520. * Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather
  521. * than check every pte of a given vma, the locking doesn't quite work for
  522. * that - an rmap_item is assigned to the stable tree after inserting ksm
  523. * page and upping mmap_sem. Nor does it fit with the way we skip dup'ing
  524. * rmap_items from parent to child at fork time (so as not to waste time
  525. * if exit comes before the next scan reaches it).
  526. *
  527. * Similarly, although we'd like to remove rmap_items (so updating counts
  528. * and freeing memory) when unmerging an area, it's easier to leave that
  529. * to the next pass of ksmd - consider, for example, how ksmd might be
  530. * in cmp_and_merge_page on one of the rmap_items we would be removing.
  531. */
  532. static int unmerge_ksm_pages(struct vm_area_struct *vma,
  533. unsigned long start, unsigned long end)
  534. {
  535. unsigned long addr;
  536. int err = 0;
  537. for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
  538. if (ksm_test_exit(vma->vm_mm))
  539. break;
  540. if (signal_pending(current))
  541. err = -ERESTARTSYS;
  542. else
  543. err = break_ksm(vma, addr);
  544. }
  545. return err;
  546. }
  547. #ifdef CONFIG_SYSFS
  548. /*
  549. * Only called through the sysfs control interface:
  550. */
  551. static int unmerge_and_remove_all_rmap_items(void)
  552. {
  553. struct mm_slot *mm_slot;
  554. struct mm_struct *mm;
  555. struct vm_area_struct *vma;
  556. int err = 0;
  557. spin_lock(&ksm_mmlist_lock);
  558. ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
  559. struct mm_slot, mm_list);
  560. spin_unlock(&ksm_mmlist_lock);
  561. for (mm_slot = ksm_scan.mm_slot;
  562. mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
  563. mm = mm_slot->mm;
  564. down_read(&mm->mmap_sem);
  565. for (vma = mm->mmap; vma; vma = vma->vm_next) {
  566. if (ksm_test_exit(mm))
  567. break;
  568. if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
  569. continue;
  570. err = unmerge_ksm_pages(vma,
  571. vma->vm_start, vma->vm_end);
  572. if (err)
  573. goto error;
  574. }
  575. remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
  576. spin_lock(&ksm_mmlist_lock);
  577. ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
  578. struct mm_slot, mm_list);
  579. if (ksm_test_exit(mm)) {
  580. hlist_del(&mm_slot->link);
  581. list_del(&mm_slot->mm_list);
  582. spin_unlock(&ksm_mmlist_lock);
  583. free_mm_slot(mm_slot);
  584. clear_bit(MMF_VM_MERGEABLE, &mm->flags);
  585. up_read(&mm->mmap_sem);
  586. mmdrop(mm);
  587. } else {
  588. spin_unlock(&ksm_mmlist_lock);
  589. up_read(&mm->mmap_sem);
  590. }
  591. }
  592. ksm_scan.seqnr = 0;
  593. return 0;
  594. error:
  595. up_read(&mm->mmap_sem);
  596. spin_lock(&ksm_mmlist_lock);
  597. ksm_scan.mm_slot = &ksm_mm_head;
  598. spin_unlock(&ksm_mmlist_lock);
  599. return err;
  600. }
  601. #endif /* CONFIG_SYSFS */
  602. static u32 calc_checksum(struct page *page)
  603. {
  604. u32 checksum;
  605. void *addr = kmap_atomic(page);
  606. checksum = jhash2(addr, PAGE_SIZE / 4, 17);
  607. kunmap_atomic(addr);
  608. return checksum;
  609. }
  610. static int memcmp_pages(struct page *page1, struct page *page2)
  611. {
  612. char *addr1, *addr2;
  613. int ret;
  614. addr1 = kmap_atomic(page1);
  615. addr2 = kmap_atomic(page2);
  616. ret = memcmp(addr1, addr2, PAGE_SIZE);
  617. kunmap_atomic(addr2);
  618. kunmap_atomic(addr1);
  619. return ret;
  620. }
  621. static inline int pages_identical(struct page *page1, struct page *page2)
  622. {
  623. return !memcmp_pages(page1, page2);
  624. }
  625. static int write_protect_page(struct vm_area_struct *vma, struct page *page,
  626. pte_t *orig_pte)
  627. {
  628. struct mm_struct *mm = vma->vm_mm;
  629. unsigned long addr;
  630. pte_t *ptep;
  631. spinlock_t *ptl;
  632. int swapped;
  633. int err = -EFAULT;
  634. unsigned long mmun_start; /* For mmu_notifiers */
  635. unsigned long mmun_end; /* For mmu_notifiers */
  636. addr = page_address_in_vma(page, vma);
  637. if (addr == -EFAULT)
  638. goto out;
  639. BUG_ON(PageTransCompound(page));
  640. mmun_start = addr;
  641. mmun_end = addr + PAGE_SIZE;
  642. mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
  643. ptep = page_check_address(page, mm, addr, &ptl, 0);
  644. if (!ptep)
  645. goto out_mn;
  646. if (pte_write(*ptep) || pte_dirty(*ptep)) {
  647. pte_t entry;
  648. swapped = PageSwapCache(page);
  649. flush_cache_page(vma, addr, page_to_pfn(page));
  650. /*
  651. * Ok this is tricky, when get_user_pages_fast() run it doesn't
  652. * take any lock, therefore the check that we are going to make
  653. * with the pagecount against the mapcount is racey and
  654. * O_DIRECT can happen right after the check.
  655. * So we clear the pte and flush the tlb before the check
  656. * this assure us that no O_DIRECT can happen after the check
  657. * or in the middle of the check.
  658. */
  659. entry = ptep_clear_flush(vma, addr, ptep);
  660. /*
  661. * Check that no O_DIRECT or similar I/O is in progress on the
  662. * page
  663. */
  664. if (page_mapcount(page) + 1 + swapped != page_count(page)) {
  665. set_pte_at(mm, addr, ptep, entry);
  666. goto out_unlock;
  667. }
  668. if (pte_dirty(entry))
  669. set_page_dirty(page);
  670. entry = pte_mkclean(pte_wrprotect(entry));
  671. set_pte_at_notify(mm, addr, ptep, entry);
  672. }
  673. *orig_pte = *ptep;
  674. err = 0;
  675. out_unlock:
  676. pte_unmap_unlock(ptep, ptl);
  677. out_mn:
  678. mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
  679. out:
  680. return err;
  681. }
  682. /**
  683. * replace_page - replace page in vma by new ksm page
  684. * @vma: vma that holds the pte pointing to page
  685. * @page: the page we are replacing by kpage
  686. * @kpage: the ksm page we replace page by
  687. * @orig_pte: the original value of the pte
  688. *
  689. * Returns 0 on success, -EFAULT on failure.
  690. */
  691. static int replace_page(struct vm_area_struct *vma, struct page *page,
  692. struct page *kpage, pte_t orig_pte)
  693. {
  694. struct mm_struct *mm = vma->vm_mm;
  695. pmd_t *pmd;
  696. pte_t *ptep;
  697. spinlock_t *ptl;
  698. unsigned long addr;
  699. int err = -EFAULT;
  700. unsigned long mmun_start; /* For mmu_notifiers */
  701. unsigned long mmun_end; /* For mmu_notifiers */
  702. addr = page_address_in_vma(page, vma);
  703. if (addr == -EFAULT)
  704. goto out;
  705. pmd = mm_find_pmd(mm, addr);
  706. if (!pmd)
  707. goto out;
  708. BUG_ON(pmd_trans_huge(*pmd));
  709. mmun_start = addr;
  710. mmun_end = addr + PAGE_SIZE;
  711. mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
  712. ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
  713. if (!pte_same(*ptep, orig_pte)) {
  714. pte_unmap_unlock(ptep, ptl);
  715. goto out_mn;
  716. }
  717. get_page(kpage);
  718. page_add_anon_rmap(kpage, vma, addr);
  719. flush_cache_page(vma, addr, pte_pfn(*ptep));
  720. ptep_clear_flush(vma, addr, ptep);
  721. set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
  722. page_remove_rmap(page);
  723. if (!page_mapped(page))
  724. try_to_free_swap(page);
  725. put_page(page);
  726. pte_unmap_unlock(ptep, ptl);
  727. err = 0;
  728. out_mn:
  729. mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
  730. out:
  731. return err;
  732. }
  733. static int page_trans_compound_anon_split(struct page *page)
  734. {
  735. int ret = 0;
  736. struct page *transhuge_head = page_trans_compound_anon(page);
  737. if (transhuge_head) {
  738. /* Get the reference on the head to split it. */
  739. if (get_page_unless_zero(transhuge_head)) {
  740. /*
  741. * Recheck we got the reference while the head
  742. * was still anonymous.
  743. */
  744. if (PageAnon(transhuge_head))
  745. ret = split_huge_page(transhuge_head);
  746. else
  747. /*
  748. * Retry later if split_huge_page run
  749. * from under us.
  750. */
  751. ret = 1;
  752. put_page(transhuge_head);
  753. } else
  754. /* Retry later if split_huge_page run from under us. */
  755. ret = 1;
  756. }
  757. return ret;
  758. }
  759. /*
  760. * try_to_merge_one_page - take two pages and merge them into one
  761. * @vma: the vma that holds the pte pointing to page
  762. * @page: the PageAnon page that we want to replace with kpage
  763. * @kpage: the PageKsm page that we want to map instead of page,
  764. * or NULL the first time when we want to use page as kpage.
  765. *
  766. * This function returns 0 if the pages were merged, -EFAULT otherwise.
  767. */
  768. static int try_to_merge_one_page(struct vm_area_struct *vma,
  769. struct page *page, struct page *kpage)
  770. {
  771. pte_t orig_pte = __pte(0);
  772. int err = -EFAULT;
  773. if (page == kpage) /* ksm page forked */
  774. return 0;
  775. if (!(vma->vm_flags & VM_MERGEABLE))
  776. goto out;
  777. if (PageTransCompound(page) && page_trans_compound_anon_split(page))
  778. goto out;
  779. BUG_ON(PageTransCompound(page));
  780. if (!PageAnon(page))
  781. goto out;
  782. /*
  783. * We need the page lock to read a stable PageSwapCache in
  784. * write_protect_page(). We use trylock_page() instead of
  785. * lock_page() because we don't want to wait here - we
  786. * prefer to continue scanning and merging different pages,
  787. * then come back to this page when it is unlocked.
  788. */
  789. if (!trylock_page(page))
  790. goto out;
  791. /*
  792. * If this anonymous page is mapped only here, its pte may need
  793. * to be write-protected. If it's mapped elsewhere, all of its
  794. * ptes are necessarily already write-protected. But in either
  795. * case, we need to lock and check page_count is not raised.
  796. */
  797. if (write_protect_page(vma, page, &orig_pte) == 0) {
  798. if (!kpage) {
  799. /*
  800. * While we hold page lock, upgrade page from
  801. * PageAnon+anon_vma to PageKsm+NULL stable_node:
  802. * stable_tree_insert() will update stable_node.
  803. */
  804. set_page_stable_node(page, NULL);
  805. mark_page_accessed(page);
  806. err = 0;
  807. } else if (pages_identical(page, kpage))
  808. err = replace_page(vma, page, kpage, orig_pte);
  809. }
  810. if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
  811. munlock_vma_page(page);
  812. if (!PageMlocked(kpage)) {
  813. unlock_page(page);
  814. lock_page(kpage);
  815. mlock_vma_page(kpage);
  816. page = kpage; /* for final unlock */
  817. }
  818. }
  819. unlock_page(page);
  820. out:
  821. return err;
  822. }
  823. /*
  824. * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
  825. * but no new kernel page is allocated: kpage must already be a ksm page.
  826. *
  827. * This function returns 0 if the pages were merged, -EFAULT otherwise.
  828. */
  829. static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
  830. struct page *page, struct page *kpage)
  831. {
  832. struct mm_struct *mm = rmap_item->mm;
  833. struct vm_area_struct *vma;
  834. int err = -EFAULT;
  835. down_read(&mm->mmap_sem);
  836. if (ksm_test_exit(mm))
  837. goto out;
  838. vma = find_vma(mm, rmap_item->address);
  839. if (!vma || vma->vm_start > rmap_item->address)
  840. goto out;
  841. err = try_to_merge_one_page(vma, page, kpage);
  842. if (err)
  843. goto out;
  844. /* Must get reference to anon_vma while still holding mmap_sem */
  845. rmap_item->anon_vma = vma->anon_vma;
  846. get_anon_vma(vma->anon_vma);
  847. out:
  848. up_read(&mm->mmap_sem);
  849. return err;
  850. }
  851. /*
  852. * try_to_merge_two_pages - take two identical pages and prepare them
  853. * to be merged into one page.
  854. *
  855. * This function returns the kpage if we successfully merged two identical
  856. * pages into one ksm page, NULL otherwise.
  857. *
  858. * Note that this function upgrades page to ksm page: if one of the pages
  859. * is already a ksm page, try_to_merge_with_ksm_page should be used.
  860. */
  861. static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
  862. struct page *page,
  863. struct rmap_item *tree_rmap_item,
  864. struct page *tree_page)
  865. {
  866. int err;
  867. err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
  868. if (!err) {
  869. err = try_to_merge_with_ksm_page(tree_rmap_item,
  870. tree_page, page);
  871. /*
  872. * If that fails, we have a ksm page with only one pte
  873. * pointing to it: so break it.
  874. */
  875. if (err)
  876. break_cow(rmap_item);
  877. }
  878. return err ? NULL : page;
  879. }
  880. /*
  881. * stable_tree_search - search for page inside the stable tree
  882. *
  883. * This function checks if there is a page inside the stable tree
  884. * with identical content to the page that we are scanning right now.
  885. *
  886. * This function returns the stable tree node of identical content if found,
  887. * NULL otherwise.
  888. */
  889. static struct page *stable_tree_search(struct page *page)
  890. {
  891. struct rb_node *node = root_stable_tree.rb_node;
  892. struct stable_node *stable_node;
  893. stable_node = page_stable_node(page);
  894. if (stable_node) { /* ksm page forked */
  895. get_page(page);
  896. return page;
  897. }
  898. while (node) {
  899. struct page *tree_page;
  900. int ret;
  901. cond_resched();
  902. stable_node = rb_entry(node, struct stable_node, node);
  903. tree_page = get_ksm_page(stable_node);
  904. if (!tree_page)
  905. return NULL;
  906. ret = memcmp_pages(page, tree_page);
  907. if (ret < 0) {
  908. put_page(tree_page);
  909. node = node->rb_left;
  910. } else if (ret > 0) {
  911. put_page(tree_page);
  912. node = node->rb_right;
  913. } else
  914. return tree_page;
  915. }
  916. return NULL;
  917. }
  918. /*
  919. * stable_tree_insert - insert rmap_item pointing to new ksm page
  920. * into the stable tree.
  921. *
  922. * This function returns the stable tree node just allocated on success,
  923. * NULL otherwise.
  924. */
  925. static struct stable_node *stable_tree_insert(struct page *kpage)
  926. {
  927. struct rb_node **new = &root_stable_tree.rb_node;
  928. struct rb_node *parent = NULL;
  929. struct stable_node *stable_node;
  930. while (*new) {
  931. struct page *tree_page;
  932. int ret;
  933. cond_resched();
  934. stable_node = rb_entry(*new, struct stable_node, node);
  935. tree_page = get_ksm_page(stable_node);
  936. if (!tree_page)
  937. return NULL;
  938. ret = memcmp_pages(kpage, tree_page);
  939. put_page(tree_page);
  940. parent = *new;
  941. if (ret < 0)
  942. new = &parent->rb_left;
  943. else if (ret > 0)
  944. new = &parent->rb_right;
  945. else {
  946. /*
  947. * It is not a bug that stable_tree_search() didn't
  948. * find this node: because at that time our page was
  949. * not yet write-protected, so may have changed since.
  950. */
  951. return NULL;
  952. }
  953. }
  954. stable_node = alloc_stable_node();
  955. if (!stable_node)
  956. return NULL;
  957. rb_link_node(&stable_node->node, parent, new);
  958. rb_insert_color(&stable_node->node, &root_stable_tree);
  959. INIT_HLIST_HEAD(&stable_node->hlist);
  960. stable_node->kpfn = page_to_pfn(kpage);
  961. set_page_stable_node(kpage, stable_node);
  962. return stable_node;
  963. }
  964. /*
  965. * unstable_tree_search_insert - search for identical page,
  966. * else insert rmap_item into the unstable tree.
  967. *
  968. * This function searches for a page in the unstable tree identical to the
  969. * page currently being scanned; and if no identical page is found in the
  970. * tree, we insert rmap_item as a new object into the unstable tree.
  971. *
  972. * This function returns pointer to rmap_item found to be identical
  973. * to the currently scanned page, NULL otherwise.
  974. *
  975. * This function does both searching and inserting, because they share
  976. * the same walking algorithm in an rbtree.
  977. */
  978. static
  979. struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
  980. struct page *page,
  981. struct page **tree_pagep)
  982. {
  983. struct rb_node **new = &root_unstable_tree.rb_node;
  984. struct rb_node *parent = NULL;
  985. while (*new) {
  986. struct rmap_item *tree_rmap_item;
  987. struct page *tree_page;
  988. int ret;
  989. cond_resched();
  990. tree_rmap_item = rb_entry(*new, struct rmap_item, node);
  991. tree_page = get_mergeable_page(tree_rmap_item);
  992. if (IS_ERR_OR_NULL(tree_page))
  993. return NULL;
  994. /*
  995. * Don't substitute a ksm page for a forked page.
  996. */
  997. if (page == tree_page) {
  998. put_page(tree_page);
  999. return NULL;
  1000. }
  1001. ret = memcmp_pages(page, tree_page);
  1002. parent = *new;
  1003. if (ret < 0) {
  1004. put_page(tree_page);
  1005. new = &parent->rb_left;
  1006. } else if (ret > 0) {
  1007. put_page(tree_page);
  1008. new = &parent->rb_right;
  1009. } else {
  1010. *tree_pagep = tree_page;
  1011. return tree_rmap_item;
  1012. }
  1013. }
  1014. rmap_item->address |= UNSTABLE_FLAG;
  1015. rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
  1016. rb_link_node(&rmap_item->node, parent, new);
  1017. rb_insert_color(&rmap_item->node, &root_unstable_tree);
  1018. ksm_pages_unshared++;
  1019. return NULL;
  1020. }
  1021. /*
  1022. * stable_tree_append - add another rmap_item to the linked list of
  1023. * rmap_items hanging off a given node of the stable tree, all sharing
  1024. * the same ksm page.
  1025. */
  1026. static void stable_tree_append(struct rmap_item *rmap_item,
  1027. struct stable_node *stable_node)
  1028. {
  1029. rmap_item->head = stable_node;
  1030. rmap_item->address |= STABLE_FLAG;
  1031. hlist_add_head(&rmap_item->hlist, &stable_node->hlist);
  1032. if (rmap_item->hlist.next)
  1033. ksm_pages_sharing++;
  1034. else
  1035. ksm_pages_shared++;
  1036. }
  1037. /*
  1038. * cmp_and_merge_page - first see if page can be merged into the stable tree;
  1039. * if not, compare checksum to previous and if it's the same, see if page can
  1040. * be inserted into the unstable tree, or merged with a page already there and
  1041. * both transferred to the stable tree.
  1042. *
  1043. * @page: the page that we are searching identical page to.
  1044. * @rmap_item: the reverse mapping into the virtual address of this page
  1045. */
  1046. static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
  1047. {
  1048. struct rmap_item *tree_rmap_item;
  1049. struct page *tree_page = NULL;
  1050. struct stable_node *stable_node;
  1051. struct page *kpage;
  1052. unsigned int checksum;
  1053. int err;
  1054. remove_rmap_item_from_tree(rmap_item);
  1055. /* We first start with searching the page inside the stable tree */
  1056. kpage = stable_tree_search(page);
  1057. if (kpage) {
  1058. err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
  1059. if (!err) {
  1060. /*
  1061. * The page was successfully merged:
  1062. * add its rmap_item to the stable tree.
  1063. */
  1064. lock_page(kpage);
  1065. stable_tree_append(rmap_item, page_stable_node(kpage));
  1066. unlock_page(kpage);
  1067. }
  1068. put_page(kpage);
  1069. return;
  1070. }
  1071. /*
  1072. * If the hash value of the page has changed from the last time
  1073. * we calculated it, this page is changing frequently: therefore we
  1074. * don't want to insert it in the unstable tree, and we don't want
  1075. * to waste our time searching for something identical to it there.
  1076. */
  1077. checksum = calc_checksum(page);
  1078. if (rmap_item->oldchecksum != checksum) {
  1079. rmap_item->oldchecksum = checksum;
  1080. return;
  1081. }
  1082. tree_rmap_item =
  1083. unstable_tree_search_insert(rmap_item, page, &tree_page);
  1084. if (tree_rmap_item) {
  1085. kpage = try_to_merge_two_pages(rmap_item, page,
  1086. tree_rmap_item, tree_page);
  1087. put_page(tree_page);
  1088. /*
  1089. * As soon as we merge this page, we want to remove the
  1090. * rmap_item of the page we have merged with from the unstable
  1091. * tree, and insert it instead as new node in the stable tree.
  1092. */
  1093. if (kpage) {
  1094. remove_rmap_item_from_tree(tree_rmap_item);
  1095. lock_page(kpage);
  1096. stable_node = stable_tree_insert(kpage);
  1097. if (stable_node) {
  1098. stable_tree_append(tree_rmap_item, stable_node);
  1099. stable_tree_append(rmap_item, stable_node);
  1100. }
  1101. unlock_page(kpage);
  1102. /*
  1103. * If we fail to insert the page into the stable tree,
  1104. * we will have 2 virtual addresses that are pointing
  1105. * to a ksm page left outside the stable tree,
  1106. * in which case we need to break_cow on both.
  1107. */
  1108. if (!stable_node) {
  1109. break_cow(tree_rmap_item);
  1110. break_cow(rmap_item);
  1111. }
  1112. }
  1113. }
  1114. }
  1115. static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
  1116. struct rmap_item **rmap_list,
  1117. unsigned long addr)
  1118. {
  1119. struct rmap_item *rmap_item;
  1120. while (*rmap_list) {
  1121. rmap_item = *rmap_list;
  1122. if ((rmap_item->address & PAGE_MASK) == addr)
  1123. return rmap_item;
  1124. if (rmap_item->address > addr)
  1125. break;
  1126. *rmap_list = rmap_item->rmap_list;
  1127. remove_rmap_item_from_tree(rmap_item);
  1128. free_rmap_item(rmap_item);
  1129. }
  1130. rmap_item = alloc_rmap_item();
  1131. if (rmap_item) {
  1132. /* It has already been zeroed */
  1133. rmap_item->mm = mm_slot->mm;
  1134. rmap_item->address = addr;
  1135. rmap_item->rmap_list = *rmap_list;
  1136. *rmap_list = rmap_item;
  1137. }
  1138. return rmap_item;
  1139. }
  1140. static struct rmap_item *scan_get_next_rmap_item(struct page **page)
  1141. {
  1142. struct mm_struct *mm;
  1143. struct mm_slot *slot;
  1144. struct vm_area_struct *vma;
  1145. struct rmap_item *rmap_item;
  1146. if (list_empty(&ksm_mm_head.mm_list))
  1147. return NULL;
  1148. slot = ksm_scan.mm_slot;
  1149. if (slot == &ksm_mm_head) {
  1150. /*
  1151. * A number of pages can hang around indefinitely on per-cpu
  1152. * pagevecs, raised page count preventing write_protect_page
  1153. * from merging them. Though it doesn't really matter much,
  1154. * it is puzzling to see some stuck in pages_volatile until
  1155. * other activity jostles them out, and they also prevented
  1156. * LTP's KSM test from succeeding deterministically; so drain
  1157. * them here (here rather than on entry to ksm_do_scan(),
  1158. * so we don't IPI too often when pages_to_scan is set low).
  1159. */
  1160. lru_add_drain_all();
  1161. root_unstable_tree = RB_ROOT;
  1162. spin_lock(&ksm_mmlist_lock);
  1163. slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
  1164. ksm_scan.mm_slot = slot;
  1165. spin_unlock(&ksm_mmlist_lock);
  1166. /*
  1167. * Although we tested list_empty() above, a racing __ksm_exit
  1168. * of the last mm on the list may have removed it since then.
  1169. */
  1170. if (slot == &ksm_mm_head)
  1171. return NULL;
  1172. next_mm:
  1173. ksm_scan.address = 0;
  1174. ksm_scan.rmap_list = &slot->rmap_list;
  1175. }
  1176. mm = slot->mm;
  1177. down_read(&mm->mmap_sem);
  1178. if (ksm_test_exit(mm))
  1179. vma = NULL;
  1180. else
  1181. vma = find_vma(mm, ksm_scan.address);
  1182. for (; vma; vma = vma->vm_next) {
  1183. if (!(vma->vm_flags & VM_MERGEABLE))
  1184. continue;
  1185. if (ksm_scan.address < vma->vm_start)
  1186. ksm_scan.address = vma->vm_start;
  1187. if (!vma->anon_vma)
  1188. ksm_scan.address = vma->vm_end;
  1189. while (ksm_scan.address < vma->vm_end) {
  1190. if (ksm_test_exit(mm))
  1191. break;
  1192. *page = follow_page(vma, ksm_scan.address, FOLL_GET);
  1193. if (IS_ERR_OR_NULL(*page)) {
  1194. ksm_scan.address += PAGE_SIZE;
  1195. cond_resched();
  1196. continue;
  1197. }
  1198. if (PageAnon(*page) ||
  1199. page_trans_compound_anon(*page)) {
  1200. flush_anon_page(vma, *page, ksm_scan.address);
  1201. flush_dcache_page(*page);
  1202. rmap_item = get_next_rmap_item(slot,
  1203. ksm_scan.rmap_list, ksm_scan.address);
  1204. if (rmap_item) {
  1205. ksm_scan.rmap_list =
  1206. &rmap_item->rmap_list;
  1207. ksm_scan.address += PAGE_SIZE;
  1208. } else
  1209. put_page(*page);
  1210. up_read(&mm->mmap_sem);
  1211. return rmap_item;
  1212. }
  1213. put_page(*page);
  1214. ksm_scan.address += PAGE_SIZE;
  1215. cond_resched();
  1216. }
  1217. }
  1218. if (ksm_test_exit(mm)) {
  1219. ksm_scan.address = 0;
  1220. ksm_scan.rmap_list = &slot->rmap_list;
  1221. }
  1222. /*
  1223. * Nuke all the rmap_items that are above this current rmap:
  1224. * because there were no VM_MERGEABLE vmas with such addresses.
  1225. */
  1226. remove_trailing_rmap_items(slot, ksm_scan.rmap_list);
  1227. spin_lock(&ksm_mmlist_lock);
  1228. ksm_scan.mm_slot = list_entry(slot->mm_list.next,
  1229. struct mm_slot, mm_list);
  1230. if (ksm_scan.address == 0) {
  1231. /*
  1232. * We've completed a full scan of all vmas, holding mmap_sem
  1233. * throughout, and found no VM_MERGEABLE: so do the same as
  1234. * __ksm_exit does to remove this mm from all our lists now.
  1235. * This applies either when cleaning up after __ksm_exit
  1236. * (but beware: we can reach here even before __ksm_exit),
  1237. * or when all VM_MERGEABLE areas have been unmapped (and
  1238. * mmap_sem then protects against race with MADV_MERGEABLE).
  1239. */
  1240. hlist_del(&slot->link);
  1241. list_del(&slot->mm_list);
  1242. spin_unlock(&ksm_mmlist_lock);
  1243. free_mm_slot(slot);
  1244. clear_bit(MMF_VM_MERGEABLE, &mm->flags);
  1245. up_read(&mm->mmap_sem);
  1246. mmdrop(mm);
  1247. } else {
  1248. spin_unlock(&ksm_mmlist_lock);
  1249. up_read(&mm->mmap_sem);
  1250. }
  1251. /* Repeat until we've completed scanning the whole list */
  1252. slot = ksm_scan.mm_slot;
  1253. if (slot != &ksm_mm_head)
  1254. goto next_mm;
  1255. ksm_scan.seqnr++;
  1256. return NULL;
  1257. }
  1258. /**
  1259. * ksm_do_scan - the ksm scanner main worker function.
  1260. * @scan_npages - number of pages we want to scan before we return.
  1261. */
  1262. static void ksm_do_scan(unsigned int scan_npages)
  1263. {
  1264. struct rmap_item *rmap_item;
  1265. struct page *uninitialized_var(page);
  1266. while (scan_npages-- && likely(!freezing(current))) {
  1267. cond_resched();
  1268. rmap_item = scan_get_next_rmap_item(&page);
  1269. if (!rmap_item)
  1270. return;
  1271. if (!PageKsm(page) || !in_stable_tree(rmap_item))
  1272. cmp_and_merge_page(page, rmap_item);
  1273. put_page(page);
  1274. }
  1275. }
  1276. static int ksmd_should_run(void)
  1277. {
  1278. return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
  1279. }
  1280. static int ksm_scan_thread(void *nothing)
  1281. {
  1282. set_freezable();
  1283. set_user_nice(current, 5);
  1284. while (!kthread_should_stop()) {
  1285. mutex_lock(&ksm_thread_mutex);
  1286. if (ksmd_should_run())
  1287. ksm_do_scan(ksm_thread_pages_to_scan);
  1288. mutex_unlock(&ksm_thread_mutex);
  1289. try_to_freeze();
  1290. if (ksmd_should_run()) {
  1291. schedule_timeout_interruptible(
  1292. msecs_to_jiffies(ksm_thread_sleep_millisecs));
  1293. } else {
  1294. wait_event_freezable(ksm_thread_wait,
  1295. ksmd_should_run() || kthread_should_stop());
  1296. }
  1297. }
  1298. return 0;
  1299. }
  1300. int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
  1301. unsigned long end, int advice, unsigned long *vm_flags)
  1302. {
  1303. struct mm_struct *mm = vma->vm_mm;
  1304. int err;
  1305. switch (advice) {
  1306. case MADV_MERGEABLE:
  1307. /*
  1308. * Be somewhat over-protective for now!
  1309. */
  1310. if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
  1311. VM_PFNMAP | VM_IO | VM_DONTEXPAND |
  1312. VM_HUGETLB | VM_NONLINEAR | VM_MIXEDMAP))
  1313. return 0; /* just ignore the advice */
  1314. #ifdef VM_SAO
  1315. if (*vm_flags & VM_SAO)
  1316. return 0;
  1317. #endif
  1318. if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
  1319. err = __ksm_enter(mm);
  1320. if (err)
  1321. return err;
  1322. }
  1323. *vm_flags |= VM_MERGEABLE;
  1324. break;
  1325. case MADV_UNMERGEABLE:
  1326. if (!(*vm_flags & VM_MERGEABLE))
  1327. return 0; /* just ignore the advice */
  1328. if (vma->anon_vma) {
  1329. err = unmerge_ksm_pages(vma, start, end);
  1330. if (err)
  1331. return err;
  1332. }
  1333. *vm_flags &= ~VM_MERGEABLE;
  1334. break;
  1335. }
  1336. return 0;
  1337. }
  1338. int __ksm_enter(struct mm_struct *mm)
  1339. {
  1340. struct mm_slot *mm_slot;
  1341. int needs_wakeup;
  1342. mm_slot = alloc_mm_slot();
  1343. if (!mm_slot)
  1344. return -ENOMEM;
  1345. /* Check ksm_run too? Would need tighter locking */
  1346. needs_wakeup = list_empty(&ksm_mm_head.mm_list);
  1347. spin_lock(&ksm_mmlist_lock);
  1348. insert_to_mm_slots_hash(mm, mm_slot);
  1349. /*
  1350. * Insert just behind the scanning cursor, to let the area settle
  1351. * down a little; when fork is followed by immediate exec, we don't
  1352. * want ksmd to waste time setting up and tearing down an rmap_list.
  1353. */
  1354. list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list);
  1355. spin_unlock(&ksm_mmlist_lock);
  1356. set_bit(MMF_VM_MERGEABLE, &mm->flags);
  1357. atomic_inc(&mm->mm_count);
  1358. if (needs_wakeup)
  1359. wake_up_interruptible(&ksm_thread_wait);
  1360. return 0;
  1361. }
  1362. void __ksm_exit(struct mm_struct *mm)
  1363. {
  1364. struct mm_slot *mm_slot;
  1365. int easy_to_free = 0;
  1366. /*
  1367. * This process is exiting: if it's straightforward (as is the
  1368. * case when ksmd was never running), free mm_slot immediately.
  1369. * But if it's at the cursor or has rmap_items linked to it, use
  1370. * mmap_sem to synchronize with any break_cows before pagetables
  1371. * are freed, and leave the mm_slot on the list for ksmd to free.
  1372. * Beware: ksm may already have noticed it exiting and freed the slot.
  1373. */
  1374. spin_lock(&ksm_mmlist_lock);
  1375. mm_slot = get_mm_slot(mm);
  1376. if (mm_slot && ksm_scan.mm_slot != mm_slot) {
  1377. if (!mm_slot->rmap_list) {
  1378. hlist_del(&mm_slot->link);
  1379. list_del(&mm_slot->mm_list);
  1380. easy_to_free = 1;
  1381. } else {
  1382. list_move(&mm_slot->mm_list,
  1383. &ksm_scan.mm_slot->mm_list);
  1384. }
  1385. }
  1386. spin_unlock(&ksm_mmlist_lock);
  1387. if (easy_to_free) {
  1388. free_mm_slot(mm_slot);
  1389. clear_bit(MMF_VM_MERGEABLE, &mm->flags);
  1390. mmdrop(mm);
  1391. } else if (mm_slot) {
  1392. down_write(&mm->mmap_sem);
  1393. up_write(&mm->mmap_sem);
  1394. }
  1395. }
  1396. struct page *ksm_does_need_to_copy(struct page *page,
  1397. struct vm_area_struct *vma, unsigned long address)
  1398. {
  1399. struct page *new_page;
  1400. new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
  1401. if (new_page) {
  1402. copy_user_highpage(new_page, page, address, vma);
  1403. SetPageDirty(new_page);
  1404. __SetPageUptodate(new_page);
  1405. SetPageSwapBacked(new_page);
  1406. __set_page_locked(new_page);
  1407. if (!mlocked_vma_newpage(vma, new_page))
  1408. lru_cache_add_lru(new_page, LRU_ACTIVE_ANON);
  1409. else
  1410. add_page_to_unevictable_list(new_page);
  1411. }
  1412. return new_page;
  1413. }
  1414. int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
  1415. unsigned long *vm_flags)
  1416. {
  1417. struct stable_node *stable_node;
  1418. struct rmap_item *rmap_item;
  1419. struct hlist_node *hlist;
  1420. unsigned int mapcount = page_mapcount(page);
  1421. int referenced = 0;
  1422. int search_new_forks = 0;
  1423. VM_BUG_ON(!PageKsm(page));
  1424. VM_BUG_ON(!PageLocked(page));
  1425. stable_node = page_stable_node(page);
  1426. if (!stable_node)
  1427. return 0;
  1428. again:
  1429. hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
  1430. struct anon_vma *anon_vma = rmap_item->anon_vma;
  1431. struct anon_vma_chain *vmac;
  1432. struct vm_area_struct *vma;
  1433. anon_vma_lock_write(anon_vma);
  1434. anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
  1435. 0, ULONG_MAX) {
  1436. vma = vmac->vma;
  1437. if (rmap_item->address < vma->vm_start ||
  1438. rmap_item->address >= vma->vm_end)
  1439. continue;
  1440. /*
  1441. * Initially we examine only the vma which covers this
  1442. * rmap_item; but later, if there is still work to do,
  1443. * we examine covering vmas in other mms: in case they
  1444. * were forked from the original since ksmd passed.
  1445. */
  1446. if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
  1447. continue;
  1448. if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
  1449. continue;
  1450. referenced += page_referenced_one(page, vma,
  1451. rmap_item->address, &mapcount, vm_flags);
  1452. if (!search_new_forks || !mapcount)
  1453. break;
  1454. }
  1455. anon_vma_unlock(anon_vma);
  1456. if (!mapcount)
  1457. goto out;
  1458. }
  1459. if (!search_new_forks++)
  1460. goto again;
  1461. out:
  1462. return referenced;
  1463. }
  1464. int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
  1465. {
  1466. struct stable_node *stable_node;
  1467. struct hlist_node *hlist;
  1468. struct rmap_item *rmap_item;
  1469. int ret = SWAP_AGAIN;
  1470. int search_new_forks = 0;
  1471. VM_BUG_ON(!PageKsm(page));
  1472. VM_BUG_ON(!PageLocked(page));
  1473. stable_node = page_stable_node(page);
  1474. if (!stable_node)
  1475. return SWAP_FAIL;
  1476. again:
  1477. hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
  1478. struct anon_vma *anon_vma = rmap_item->anon_vma;
  1479. struct anon_vma_chain *vmac;
  1480. struct vm_area_struct *vma;
  1481. anon_vma_lock_write(anon_vma);
  1482. anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
  1483. 0, ULONG_MAX) {
  1484. vma = vmac->vma;
  1485. if (rmap_item->address < vma->vm_start ||
  1486. rmap_item->address >= vma->vm_end)
  1487. continue;
  1488. /*
  1489. * Initially we examine only the vma which covers this
  1490. * rmap_item; but later, if there is still work to do,
  1491. * we examine covering vmas in other mms: in case they
  1492. * were forked from the original since ksmd passed.
  1493. */
  1494. if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
  1495. continue;
  1496. ret = try_to_unmap_one(page, vma,
  1497. rmap_item->address, flags);
  1498. if (ret != SWAP_AGAIN || !page_mapped(page)) {
  1499. anon_vma_unlock(anon_vma);
  1500. goto out;
  1501. }
  1502. }
  1503. anon_vma_unlock(anon_vma);
  1504. }
  1505. if (!search_new_forks++)
  1506. goto again;
  1507. out:
  1508. return ret;
  1509. }
  1510. #ifdef CONFIG_MIGRATION
  1511. int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
  1512. struct vm_area_struct *, unsigned long, void *), void *arg)
  1513. {
  1514. struct stable_node *stable_node;
  1515. struct hlist_node *hlist;
  1516. struct rmap_item *rmap_item;
  1517. int ret = SWAP_AGAIN;
  1518. int search_new_forks = 0;
  1519. VM_BUG_ON(!PageKsm(page));
  1520. VM_BUG_ON(!PageLocked(page));
  1521. stable_node = page_stable_node(page);
  1522. if (!stable_node)
  1523. return ret;
  1524. again:
  1525. hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
  1526. struct anon_vma *anon_vma = rmap_item->anon_vma;
  1527. struct anon_vma_chain *vmac;
  1528. struct vm_area_struct *vma;
  1529. anon_vma_lock_write(anon_vma);
  1530. anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
  1531. 0, ULONG_MAX) {
  1532. vma = vmac->vma;
  1533. if (rmap_item->address < vma->vm_start ||
  1534. rmap_item->address >= vma->vm_end)
  1535. continue;
  1536. /*
  1537. * Initially we examine only the vma which covers this
  1538. * rmap_item; but later, if there is still work to do,
  1539. * we examine covering vmas in other mms: in case they
  1540. * were forked from the original since ksmd passed.
  1541. */
  1542. if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
  1543. continue;
  1544. ret = rmap_one(page, vma, rmap_item->address, arg);
  1545. if (ret != SWAP_AGAIN) {
  1546. anon_vma_unlock(anon_vma);
  1547. goto out;
  1548. }
  1549. }
  1550. anon_vma_unlock(anon_vma);
  1551. }
  1552. if (!search_new_forks++)
  1553. goto again;
  1554. out:
  1555. return ret;
  1556. }
  1557. void ksm_migrate_page(struct page *newpage, struct page *oldpage)
  1558. {
  1559. struct stable_node *stable_node;
  1560. VM_BUG_ON(!PageLocked(oldpage));
  1561. VM_BUG_ON(!PageLocked(newpage));
  1562. VM_BUG_ON(newpage->mapping != oldpage->mapping);
  1563. stable_node = page_stable_node(newpage);
  1564. if (stable_node) {
  1565. VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage));
  1566. stable_node->kpfn = page_to_pfn(newpage);
  1567. }
  1568. }
  1569. #endif /* CONFIG_MIGRATION */
  1570. #ifdef CONFIG_MEMORY_HOTREMOVE
  1571. static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn,
  1572. unsigned long end_pfn)
  1573. {
  1574. struct rb_node *node;
  1575. for (node = rb_first(&root_stable_tree); node; node = rb_next(node)) {
  1576. struct stable_node *stable_node;
  1577. stable_node = rb_entry(node, struct stable_node, node);
  1578. if (stable_node->kpfn >= start_pfn &&
  1579. stable_node->kpfn < end_pfn)
  1580. return stable_node;
  1581. }
  1582. return NULL;
  1583. }
  1584. static int ksm_memory_callback(struct notifier_block *self,
  1585. unsigned long action, void *arg)
  1586. {
  1587. struct memory_notify *mn = arg;
  1588. struct stable_node *stable_node;
  1589. switch (action) {
  1590. case MEM_GOING_OFFLINE:
  1591. /*
  1592. * Keep it very simple for now: just lock out ksmd and
  1593. * MADV_UNMERGEABLE while any memory is going offline.
  1594. * mutex_lock_nested() is necessary because lockdep was alarmed
  1595. * that here we take ksm_thread_mutex inside notifier chain
  1596. * mutex, and later take notifier chain mutex inside
  1597. * ksm_thread_mutex to unlock it. But that's safe because both
  1598. * are inside mem_hotplug_mutex.
  1599. */
  1600. mutex_lock_nested(&ksm_thread_mutex, SINGLE_DEPTH_NESTING);
  1601. break;
  1602. case MEM_OFFLINE:
  1603. /*
  1604. * Most of the work is done by page migration; but there might
  1605. * be a few stable_nodes left over, still pointing to struct
  1606. * pages which have been offlined: prune those from the tree.
  1607. */
  1608. while ((stable_node = ksm_check_stable_tree(mn->start_pfn,
  1609. mn->start_pfn + mn->nr_pages)) != NULL)
  1610. remove_node_from_stable_tree(stable_node);
  1611. /* fallthrough */
  1612. case MEM_CANCEL_OFFLINE:
  1613. mutex_unlock(&ksm_thread_mutex);
  1614. break;
  1615. }
  1616. return NOTIFY_OK;
  1617. }
  1618. #endif /* CONFIG_MEMORY_HOTREMOVE */
  1619. #ifdef CONFIG_SYSFS
  1620. /*
  1621. * This all compiles without CONFIG_SYSFS, but is a waste of space.
  1622. */
  1623. #define KSM_ATTR_RO(_name) \
  1624. static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
  1625. #define KSM_ATTR(_name) \
  1626. static struct kobj_attribute _name##_attr = \
  1627. __ATTR(_name, 0644, _name##_show, _name##_store)
  1628. static ssize_t sleep_millisecs_show(struct kobject *kobj,
  1629. struct kobj_attribute *attr, char *buf)
  1630. {
  1631. return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs);
  1632. }
  1633. static ssize_t sleep_millisecs_store(struct kobject *kobj,
  1634. struct kobj_attribute *attr,
  1635. const char *buf, size_t count)
  1636. {
  1637. unsigned long msecs;
  1638. int err;
  1639. err = strict_strtoul(buf, 10, &msecs);
  1640. if (err || msecs > UINT_MAX)
  1641. return -EINVAL;
  1642. ksm_thread_sleep_millisecs = msecs;
  1643. return count;
  1644. }
  1645. KSM_ATTR(sleep_millisecs);
  1646. static ssize_t pages_to_scan_show(struct kobject *kobj,
  1647. struct kobj_attribute *attr, char *buf)
  1648. {
  1649. return sprintf(buf, "%u\n", ksm_thread_pages_to_scan);
  1650. }
  1651. static ssize_t pages_to_scan_store(struct kobject *kobj,
  1652. struct kobj_attribute *attr,
  1653. const char *buf, size_t count)
  1654. {
  1655. int err;
  1656. unsigned long nr_pages;
  1657. err = strict_strtoul(buf, 10, &nr_pages);
  1658. if (err || nr_pages > UINT_MAX)
  1659. return -EINVAL;
  1660. ksm_thread_pages_to_scan = nr_pages;
  1661. return count;
  1662. }
  1663. KSM_ATTR(pages_to_scan);
  1664. static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
  1665. char *buf)
  1666. {
  1667. return sprintf(buf, "%u\n", ksm_run);
  1668. }
  1669. static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
  1670. const char *buf, size_t count)
  1671. {
  1672. int err;
  1673. unsigned long flags;
  1674. err = strict_strtoul(buf, 10, &flags);
  1675. if (err || flags > UINT_MAX)
  1676. return -EINVAL;
  1677. if (flags > KSM_RUN_UNMERGE)
  1678. return -EINVAL;
  1679. /*
  1680. * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
  1681. * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
  1682. * breaking COW to free the pages_shared (but leaves mm_slots
  1683. * on the list for when ksmd may be set running again).
  1684. */
  1685. mutex_lock(&ksm_thread_mutex);
  1686. if (ksm_run != flags) {
  1687. ksm_run = flags;
  1688. if (flags & KSM_RUN_UNMERGE) {
  1689. set_current_oom_origin();
  1690. err = unmerge_and_remove_all_rmap_items();
  1691. clear_current_oom_origin();
  1692. if (err) {
  1693. ksm_run = KSM_RUN_STOP;
  1694. count = err;
  1695. }
  1696. }
  1697. }
  1698. mutex_unlock(&ksm_thread_mutex);
  1699. if (flags & KSM_RUN_MERGE)
  1700. wake_up_interruptible(&ksm_thread_wait);
  1701. return count;
  1702. }
  1703. KSM_ATTR(run);
  1704. static ssize_t pages_shared_show(struct kobject *kobj,
  1705. struct kobj_attribute *attr, char *buf)
  1706. {
  1707. return sprintf(buf, "%lu\n", ksm_pages_shared);
  1708. }
  1709. KSM_ATTR_RO(pages_shared);
  1710. static ssize_t pages_sharing_show(struct kobject *kobj,
  1711. struct kobj_attribute *attr, char *buf)
  1712. {
  1713. return sprintf(buf, "%lu\n", ksm_pages_sharing);
  1714. }
  1715. KSM_ATTR_RO(pages_sharing);
  1716. static ssize_t pages_unshared_show(struct kobject *kobj,
  1717. struct kobj_attribute *attr, char *buf)
  1718. {
  1719. return sprintf(buf, "%lu\n", ksm_pages_unshared);
  1720. }
  1721. KSM_ATTR_RO(pages_unshared);
  1722. static ssize_t pages_volatile_show(struct kobject *kobj,
  1723. struct kobj_attribute *attr, char *buf)
  1724. {
  1725. long ksm_pages_volatile;
  1726. ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
  1727. - ksm_pages_sharing - ksm_pages_unshared;
  1728. /*
  1729. * It was not worth any locking to calculate that statistic,
  1730. * but it might therefore sometimes be negative: conceal that.
  1731. */
  1732. if (ksm_pages_volatile < 0)
  1733. ksm_pages_volatile = 0;
  1734. return sprintf(buf, "%ld\n", ksm_pages_volatile);
  1735. }
  1736. KSM_ATTR_RO(pages_volatile);
  1737. static ssize_t full_scans_show(struct kobject *kobj,
  1738. struct kobj_attribute *attr, char *buf)
  1739. {
  1740. return sprintf(buf, "%lu\n", ksm_scan.seqnr);
  1741. }
  1742. KSM_ATTR_RO(full_scans);
  1743. static struct attribute *ksm_attrs[] = {
  1744. &sleep_millisecs_attr.attr,
  1745. &pages_to_scan_attr.attr,
  1746. &run_attr.attr,
  1747. &pages_shared_attr.attr,
  1748. &pages_sharing_attr.attr,
  1749. &pages_unshared_attr.attr,
  1750. &pages_volatile_attr.attr,
  1751. &full_scans_attr.attr,
  1752. NULL,
  1753. };
  1754. static struct attribute_group ksm_attr_group = {
  1755. .attrs = ksm_attrs,
  1756. .name = "ksm",
  1757. };
  1758. #endif /* CONFIG_SYSFS */
  1759. static int __init ksm_init(void)
  1760. {
  1761. struct task_struct *ksm_thread;
  1762. int err;
  1763. err = ksm_slab_init();
  1764. if (err)
  1765. goto out;
  1766. ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
  1767. if (IS_ERR(ksm_thread)) {
  1768. printk(KERN_ERR "ksm: creating kthread failed\n");
  1769. err = PTR_ERR(ksm_thread);
  1770. goto out_free;
  1771. }
  1772. #ifdef CONFIG_SYSFS
  1773. err = sysfs_create_group(mm_kobj, &ksm_attr_group);
  1774. if (err) {
  1775. printk(KERN_ERR "ksm: register sysfs failed\n");
  1776. kthread_stop(ksm_thread);
  1777. goto out_free;
  1778. }
  1779. #else
  1780. ksm_run = KSM_RUN_MERGE; /* no way for user to start it */
  1781. #endif /* CONFIG_SYSFS */
  1782. #ifdef CONFIG_MEMORY_HOTREMOVE
  1783. /*
  1784. * Choose a high priority since the callback takes ksm_thread_mutex:
  1785. * later callbacks could only be taking locks which nest within that.
  1786. */
  1787. hotplug_memory_notifier(ksm_memory_callback, 100);
  1788. #endif
  1789. return 0;
  1790. out_free:
  1791. ksm_slab_free();
  1792. out:
  1793. return err;
  1794. }
  1795. module_init(ksm_init)