/*
 *  Copyright (C) 2009  Red Hat, Inc.
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
/*
 * By default transparent hugepage support is enabled for all mappings
 * and khugepaged scans all mappings. Defrag is only invoked by
 * khugepaged hugepage allocations and by page faults inside
 * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived
 * allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
        (1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
        (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
        (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
        (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
        (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * default collapse hugepages if there is at least one pte mapped like
 * it would have happened if the vma was large enough during page
 * fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;

static int khugepaged(void *none);
static int khugepaged_slab_init(void);

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
        struct hlist_node hash;
        struct list_head mm_node;
        struct mm_struct *mm;
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
        struct list_head mm_head;
        struct mm_slot *mm_slot;
        unsigned long address;
};
static struct khugepaged_scan khugepaged_scan = {
        .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};
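
/*
 * Raise min_free_kbytes so that, on average, at least two pageblocks
 * per populated zone stay (almost) free of each migratetype we care
 * about, keeping anti-fragmentation effective enough that hugepage
 * allocations remain likely to succeed, while never reserving more
 * than 5% of lowmem. Runs at late_initcall time and again whenever
 * khugepaged is (re)enabled.
 */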
static int set_recommended_min_free_kbytes(void)
{
        struct zone *zone;
        int nr_zones = 0;
        unsigned long recommended_min;
        extern int min_free_kbytes;

        if (!khugepaged_enabled())
                return 0;

        for_each_populated_zone(zone)
                nr_zones++;

        /* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
        recommended_min = pageblock_nr_pages * nr_zones * 2;

        /*
         * Make sure that on average at least two pageblocks are almost free
         * of another type, one for a migratetype to fall back to and a
         * second to avoid subsequent fallbacks of other types. There are 3
         * MIGRATE_TYPES we care about.
         */
        recommended_min += pageblock_nr_pages * nr_zones *
                           MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

        /* don't ever allow to reserve more than 5% of the lowmem */
        recommended_min = min(recommended_min,
                              (unsigned long) nr_free_buffer_pages() / 20);
        recommended_min <<= (PAGE_SHIFT-10);

        if (recommended_min > min_free_kbytes)
                min_free_kbytes = recommended_min;
        setup_per_zone_wmarks();
        return 0;
}
late_initcall(set_recommended_min_free_kbytes);
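
/*
 * Start or stop the khugepaged kernel thread to match the current
 * "enabled" setting. Normally called under khugepaged_mutex (see
 * enabled_store()); returns the kthread_run() error if the thread
 * could not be started.
 */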
static int start_khugepaged(void)
{
        int err = 0;
        if (khugepaged_enabled()) {
                if (!khugepaged_thread)
                        khugepaged_thread = kthread_run(khugepaged, NULL,
                                                        "khugepaged");
                if (unlikely(IS_ERR(khugepaged_thread))) {
                        printk(KERN_ERR
                               "khugepaged: kthread_run(khugepaged) failed\n");
                        err = PTR_ERR(khugepaged_thread);
                        khugepaged_thread = NULL;
                }

                if (!list_empty(&khugepaged_scan.mm_head))
                        wake_up_interruptible(&khugepaged_wait);

                set_recommended_min_free_kbytes();
        } else if (khugepaged_thread) {
                kthread_stop(khugepaged_thread);
                khugepaged_thread = NULL;
        }

        return err;
}
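
/*
 * The huge zero page is a single, lazily allocated PMD-sized page of
 * zeroes used to back read faults in anonymous mappings when
 * use_zero_page is enabled. It is refcounted: huge_zero_refcount holds
 * one extra reference on behalf of the shrinker, which frees the page
 * once that is the only reference left.
 */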
static atomic_t huge_zero_refcount;
static unsigned long huge_zero_pfn __read_mostly;

static inline bool is_huge_zero_pfn(unsigned long pfn)
{
        unsigned long zero_pfn = ACCESS_ONCE(huge_zero_pfn);
        return zero_pfn && pfn == zero_pfn;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
        return is_huge_zero_pfn(pmd_pfn(pmd));
}

static unsigned long get_huge_zero_page(void)
{
        struct page *zero_page;
retry:
        if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
                return ACCESS_ONCE(huge_zero_pfn);

        zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
                        HPAGE_PMD_ORDER);
        if (!zero_page) {
                count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
                return 0;
        }
        count_vm_event(THP_ZERO_PAGE_ALLOC);
        preempt_disable();
        if (cmpxchg(&huge_zero_pfn, 0, page_to_pfn(zero_page))) {
                preempt_enable();
                __free_page(zero_page);
                goto retry;
        }

        /* We take additional reference here. It will be put back by shrinker */
        atomic_set(&huge_zero_refcount, 2);
        preempt_enable();
        return ACCESS_ONCE(huge_zero_pfn);
}

static void put_huge_zero_page(void)
{
        /*
         * Counter should never go to zero here. Only shrinker can put
         * last reference.
         */
        BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}
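
/*
 * Shrinker callback: on the counting pass (!nr_to_scan) report
 * HPAGE_PMD_NR reclaimable pages while only the shrinker's own
 * reference remains; on the scan pass, drop that reference and free
 * the zero page.
 */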
static int shrink_huge_zero_page(struct shrinker *shrink,
                struct shrink_control *sc)
{
        if (!sc->nr_to_scan)
                /* we can free zero page only if last reference remains */
                return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;

        if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
                unsigned long zero_pfn = xchg(&huge_zero_pfn, 0);
                BUG_ON(zero_pfn == 0);
                __free_page(__pfn_to_page(zero_pfn));
        }

        return 0;
}

static struct shrinker huge_zero_page_shrinker = {
        .shrink = shrink_huge_zero_page,
        .seeks = DEFAULT_SEEKS,
};
#ifdef CONFIG_SYSFS
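
/*
 * sysfs interface under /sys/kernel/mm/transparent_hugepage/, e.g.:
 *
 *      echo always > /sys/kernel/mm/transparent_hugepage/enabled
 *
 * The "double flag" helpers below implement the three-way
 * always/madvise/never knobs by pairing an "enabled" bit with a
 * "require madvise" bit; the "single flag" helpers back plain boolean
 * knobs.
 */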
static ssize_t double_flag_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf,
                                enum transparent_hugepage_flag enabled,
                                enum transparent_hugepage_flag req_madv)
{
        if (test_bit(enabled, &transparent_hugepage_flags)) {
                VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
                return sprintf(buf, "[always] madvise never\n");
        } else if (test_bit(req_madv, &transparent_hugepage_flags))
                return sprintf(buf, "always [madvise] never\n");
        else
                return sprintf(buf, "always madvise [never]\n");
}
static ssize_t double_flag_store(struct kobject *kobj,
                                 struct kobj_attribute *attr,
                                 const char *buf, size_t count,
                                 enum transparent_hugepage_flag enabled,
                                 enum transparent_hugepage_flag req_madv)
{
        if (!memcmp("always", buf,
                    min(sizeof("always")-1, count))) {
                set_bit(enabled, &transparent_hugepage_flags);
                clear_bit(req_madv, &transparent_hugepage_flags);
        } else if (!memcmp("madvise", buf,
                           min(sizeof("madvise")-1, count))) {
                clear_bit(enabled, &transparent_hugepage_flags);
                set_bit(req_madv, &transparent_hugepage_flags);
        } else if (!memcmp("never", buf,
                           min(sizeof("never")-1, count))) {
                clear_bit(enabled, &transparent_hugepage_flags);
                clear_bit(req_madv, &transparent_hugepage_flags);
        } else
                return -EINVAL;

        return count;
}

static ssize_t enabled_show(struct kobject *kobj,
                            struct kobj_attribute *attr, char *buf)
{
        return double_flag_show(kobj, attr, buf,
                                TRANSPARENT_HUGEPAGE_FLAG,
                                TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
}
static ssize_t enabled_store(struct kobject *kobj,
                             struct kobj_attribute *attr,
                             const char *buf, size_t count)
{
        ssize_t ret;

        ret = double_flag_store(kobj, attr, buf, count,
                                TRANSPARENT_HUGEPAGE_FLAG,
                                TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);

        if (ret > 0) {
                int err;

                mutex_lock(&khugepaged_mutex);
                err = start_khugepaged();
                mutex_unlock(&khugepaged_mutex);

                if (err)
                        ret = err;
        }

        return ret;
}
static struct kobj_attribute enabled_attr =
        __ATTR(enabled, 0644, enabled_show, enabled_store);

static ssize_t single_flag_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf,
                                enum transparent_hugepage_flag flag)
{
        return sprintf(buf, "%d\n",
                       !!test_bit(flag, &transparent_hugepage_flags));
}

static ssize_t single_flag_store(struct kobject *kobj,
                                 struct kobj_attribute *attr,
                                 const char *buf, size_t count,
                                 enum transparent_hugepage_flag flag)
{
        unsigned long value;
        int ret;

        ret = kstrtoul(buf, 10, &value);
        if (ret < 0)
                return ret;
        if (value > 1)
                return -EINVAL;

        if (value)
                set_bit(flag, &transparent_hugepage_flags);
        else
                clear_bit(flag, &transparent_hugepage_flags);

        return count;
}
/*
 * Currently defrag only disables __GFP_NOWAIT for allocation. A blind
 * __GFP_REPEAT is too aggressive, it's never worth swapping tons of
 * memory just to allocate one more hugepage.
 */
static ssize_t defrag_show(struct kobject *kobj,
                           struct kobj_attribute *attr, char *buf)
{
        return double_flag_show(kobj, attr, buf,
                                TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
                                TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
                            struct kobj_attribute *attr,
                            const char *buf, size_t count)
{
        return double_flag_store(kobj, attr, buf, count,
                                 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
                                 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static struct kobj_attribute defrag_attr =
        __ATTR(defrag, 0644, defrag_show, defrag_store);

static ssize_t use_zero_page_show(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        return single_flag_show(kobj, attr, buf,
                                TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
                struct kobj_attribute *attr, const char *buf, size_t count)
{
        return single_flag_store(kobj, attr, buf, count,
                                 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr =
        __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf)
{
        return single_flag_show(kobj, attr, buf,
                                TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
                               struct kobj_attribute *attr,
                               const char *buf, size_t count)
{
        return single_flag_store(kobj, attr, buf, count,
                                 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
        __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */

static struct attribute *hugepage_attr[] = {
        &enabled_attr.attr,
        &defrag_attr.attr,
        &use_zero_page_attr.attr,
#ifdef CONFIG_DEBUG_VM
        &debug_cow_attr.attr,
#endif
        NULL,
};

static struct attribute_group hugepage_attr_group = {
        .attrs = hugepage_attr,
};
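
/*
 * khugepaged tunables, exposed via the "khugepaged" attribute group
 * under /sys/kernel/mm/transparent_hugepage/khugepaged/.
 */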
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
                                         struct kobj_attribute *attr,
                                         char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}
static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
                                          struct kobj_attribute *attr,
                                          const char *buf, size_t count)
{
        unsigned long msecs;
        int err;

        err = strict_strtoul(buf, 10, &msecs);
        if (err || msecs > UINT_MAX)
                return -EINVAL;

        khugepaged_scan_sleep_millisecs = msecs;
        wake_up_interruptible(&khugepaged_wait);

        return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
        __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
               scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
                                          struct kobj_attribute *attr,
                                          char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}
static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
                                           struct kobj_attribute *attr,
                                           const char *buf, size_t count)
{
        unsigned long msecs;
        int err;

        err = strict_strtoul(buf, 10, &msecs);
        if (err || msecs > UINT_MAX)
                return -EINVAL;

        khugepaged_alloc_sleep_millisecs = msecs;
        wake_up_interruptible(&khugepaged_wait);

        return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
        __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
               alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
                                  struct kobj_attribute *attr,
                                  char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   const char *buf, size_t count)
{
        int err;
        unsigned long pages;

        err = strict_strtoul(buf, 10, &pages);
        if (err || !pages || pages > UINT_MAX)
                return -EINVAL;

        khugepaged_pages_to_scan = pages;

        return count;
}
static struct kobj_attribute pages_to_scan_attr =
        __ATTR(pages_to_scan, 0644, pages_to_scan_show,
               pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
                                    struct kobj_attribute *attr,
                                    char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
        __ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
                               struct kobj_attribute *attr,
                               char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
        __ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
                                      struct kobj_attribute *attr, char *buf)
{
        return single_flag_show(kobj, attr, buf,
                                TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
                                       struct kobj_attribute *attr,
                                       const char *buf, size_t count)
{
        return single_flag_store(kobj, attr, buf, count,
                                 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
        __ATTR(defrag, 0644, khugepaged_defrag_show,
               khugepaged_defrag_store);
/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over unmapped ptes, potentially increasing the memory footprint of
 * the vmas. When max_ptes_none is 0, khugepaged will not reduce the
 * available free memory in the system as it runs. Increasing
 * max_ptes_none will instead potentially reduce the free memory in
 * the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
                                             struct kobj_attribute *attr,
                                             char *buf)
{
        return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
                                              struct kobj_attribute *attr,
                                              const char *buf, size_t count)
{
        int err;
        unsigned long max_ptes_none;

        err = strict_strtoul(buf, 10, &max_ptes_none);
        if (err || max_ptes_none > HPAGE_PMD_NR-1)
                return -EINVAL;

        khugepaged_max_ptes_none = max_ptes_none;

        return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
        __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
               khugepaged_max_ptes_none_store);

static struct attribute *khugepaged_attr[] = {
        &khugepaged_defrag_attr.attr,
        &khugepaged_max_ptes_none_attr.attr,
        &pages_to_scan_attr.attr,
        &pages_collapsed_attr.attr,
        &full_scans_attr.attr,
        &scan_sleep_millisecs_attr.attr,
        &alloc_sleep_millisecs_attr.attr,
        NULL,
};

static struct attribute_group khugepaged_attr_group = {
        .attrs = khugepaged_attr,
        .name = "khugepaged",
};
static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
        int err;

        *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
        if (unlikely(!*hugepage_kobj)) {
                printk(KERN_ERR "hugepage: failed to create transparent hugepage kobject\n");
                return -ENOMEM;
        }

        err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
        if (err) {
                printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n");
                goto delete_obj;
        }

        err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
        if (err) {
                printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n");
                goto remove_hp_group;
        }

        return 0;

remove_hp_group:
        sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
        kobject_put(*hugepage_kobj);
        return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
        sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
        sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
        kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
        return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */
static int __init hugepage_init(void)
{
        int err;
        struct kobject *hugepage_kobj;

        if (!has_transparent_hugepage()) {
                transparent_hugepage_flags = 0;
                return -EINVAL;
        }

        err = hugepage_init_sysfs(&hugepage_kobj);
        if (err)
                return err;

        err = khugepaged_slab_init();
        if (err)
                goto out;

        register_shrinker(&huge_zero_page_shrinker);

        /*
         * By default disable transparent hugepages on smaller systems,
         * where the extra memory used could hurt more than TLB overhead
         * is likely to save. The admin can still enable it through /sys.
         */
        if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
                transparent_hugepage_flags = 0;

        start_khugepaged();

        return 0;
out:
        hugepage_exit_sysfs(hugepage_kobj);
        return err;
}
module_init(hugepage_init)
static int __init setup_transparent_hugepage(char *str)
{
        int ret = 0;
        if (!str)
                goto out;
        if (!strcmp(str, "always")) {
                set_bit(TRANSPARENT_HUGEPAGE_FLAG,
                        &transparent_hugepage_flags);
                clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
                          &transparent_hugepage_flags);
                ret = 1;
        } else if (!strcmp(str, "madvise")) {
                clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
                          &transparent_hugepage_flags);
                set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
                        &transparent_hugepage_flags);
                ret = 1;
        } else if (!strcmp(str, "never")) {
                clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
                          &transparent_hugepage_flags);
                clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
                          &transparent_hugepage_flags);
                ret = 1;
        }
out:
        if (!ret)
                printk(KERN_WARNING
                       "transparent_hugepage= cannot parse, ignored\n");
        return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);
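
/*
 * Mark a huge pmd writable only when the vma actually allows writes;
 * otherwise the pmd stays write-protected so a later write triggers
 * the COW path.
 */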
pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
        if (likely(vma->vm_flags & VM_WRITE))
                pmd = pmd_mkwrite(pmd);
        return pmd;
}

static inline pmd_t mk_huge_pmd(struct page *page, struct vm_area_struct *vma)
{
        pmd_t entry;
        entry = mk_pmd(page, vma->vm_page_prot);
        entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
        entry = pmd_mkhuge(entry);
        return entry;
}
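
/*
 * Install a freshly allocated huge page at @haddr. The caller has
 * already charged @page to the memcg; if the pmd is no longer none by
 * the time we take the lock, everything is rolled back and 0 is still
 * returned, so the fault is simply retried.
 */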
static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long haddr, pmd_t *pmd,
                                        struct page *page)
{
        pgtable_t pgtable;

        VM_BUG_ON(!PageCompound(page));
        pgtable = pte_alloc_one(mm, haddr);
        if (unlikely(!pgtable))
                return VM_FAULT_OOM;

        clear_huge_page(page, haddr, HPAGE_PMD_NR);
        __SetPageUptodate(page);

        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_none(*pmd))) {
                spin_unlock(&mm->page_table_lock);
                mem_cgroup_uncharge_page(page);
                put_page(page);
                pte_free(mm, pgtable);
        } else {
                pmd_t entry;
                entry = mk_huge_pmd(page, vma);
                /*
                 * The spinlocking to take the lru_lock inside
                 * page_add_new_anon_rmap() acts as a full memory
                 * barrier to be sure clear_huge_page writes become
                 * visible after the set_pmd_at() write.
                 */
                page_add_new_anon_rmap(page, vma, haddr);
                set_pmd_at(mm, haddr, pmd, entry);
                pgtable_trans_huge_deposit(mm, pgtable);
                add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
                mm->nr_ptes++;
                spin_unlock(&mm->page_table_lock);
        }

        return 0;
}
static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
{
        return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
}

static inline struct page *alloc_hugepage_vma(int defrag,
                                              struct vm_area_struct *vma,
                                              unsigned long haddr, int nd,
                                              gfp_t extra_gfp)
{
        return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
                               HPAGE_PMD_ORDER, vma, haddr, nd);
}

#ifndef CONFIG_NUMA
static inline struct page *alloc_hugepage(int defrag)
{
        return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
                           HPAGE_PMD_ORDER);
}
#endif
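
/*
 * Map the huge zero page read-only at @haddr. Returns false without
 * touching anything if the pmd is already populated; the caller is
 * then responsible for dropping its pgtable and zero page references.
 */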
static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
                struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
                unsigned long zero_pfn)
{
        pmd_t entry;
        if (!pmd_none(*pmd))
                return false;
        entry = pfn_pmd(zero_pfn, vma->vm_page_prot);
        entry = pmd_wrprotect(entry);
        entry = pmd_mkhuge(entry);
        set_pmd_at(mm, haddr, pmd, entry);
        pgtable_trans_huge_deposit(mm, pgtable);
        mm->nr_ptes++;
        return true;
}
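
/*
 * Anonymous fault on a pmd-sized hole: back read faults with the huge
 * zero page when use_zero_page allows it, otherwise try to allocate
 * and map a real huge page, and fall back to handle_pte_fault() with
 * ordinary ptes when neither applies.
 */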
int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                               unsigned long address, pmd_t *pmd,
                               unsigned int flags)
{
        struct page *page;
        unsigned long haddr = address & HPAGE_PMD_MASK;
        pte_t *pte;

        if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
                if (unlikely(anon_vma_prepare(vma)))
                        return VM_FAULT_OOM;
                if (unlikely(khugepaged_enter(vma)))
                        return VM_FAULT_OOM;
                if (!(flags & FAULT_FLAG_WRITE) &&
                                transparent_hugepage_use_zero_page()) {
                        pgtable_t pgtable;
                        unsigned long zero_pfn;
                        bool set;
                        pgtable = pte_alloc_one(mm, haddr);
                        if (unlikely(!pgtable))
                                return VM_FAULT_OOM;
                        zero_pfn = get_huge_zero_page();
                        if (unlikely(!zero_pfn)) {
                                pte_free(mm, pgtable);
                                count_vm_event(THP_FAULT_FALLBACK);
                                goto out;
                        }
                        spin_lock(&mm->page_table_lock);
                        set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
                                        zero_pfn);
                        spin_unlock(&mm->page_table_lock);
                        if (!set) {
                                pte_free(mm, pgtable);
                                put_huge_zero_page();
                        }
                        return 0;
                }
                page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
                                          vma, haddr, numa_node_id(), 0);
                if (unlikely(!page)) {
                        count_vm_event(THP_FAULT_FALLBACK);
                        goto out;
                }
                count_vm_event(THP_FAULT_ALLOC);
                if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
                        put_page(page);
                        goto out;
                }
                if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd,
                                                          page))) {
                        mem_cgroup_uncharge_page(page);
                        put_page(page);
                        goto out;
                }

                return 0;
        }
out:
        /*
         * Use __pte_alloc instead of pte_alloc_map, because we can't
         * run pte_offset_map on the pmd, if a huge pmd could
         * materialize from under us from a different thread.
         */
        if (unlikely(pmd_none(*pmd)) &&
            unlikely(__pte_alloc(mm, vma, pmd, address)))
                return VM_FAULT_OOM;
        /* if a huge pmd materialized from under us just retry later */
        if (unlikely(pmd_trans_huge(*pmd)))
                return 0;
        /*
         * A regular pmd is established and it can't morph into a huge pmd
         * from under us anymore at this point because we hold the mmap_sem
         * read mode and khugepaged takes it in write mode. So now it's
         * safe to run pte_offset_map().
         */
        pte = pte_offset_map(pmd, address);
        return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}
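
/*
 * fork() path: duplicate one huge pmd from @src_mm into @dst_mm,
 * write-protecting both copies so that later writes COW. Returns
 * -EAGAIN when the source pmd stopped being trans-huge under us; the
 * caller is then expected to fall back to copying at pte granularity.
 */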
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                  struct vm_area_struct *vma)
{
        struct page *src_page;
        pmd_t pmd;
        pgtable_t pgtable;
        int ret;

        ret = -ENOMEM;
        pgtable = pte_alloc_one(dst_mm, addr);
        if (unlikely(!pgtable))
                goto out;

        spin_lock(&dst_mm->page_table_lock);
        spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);

        ret = -EAGAIN;
        pmd = *src_pmd;
        if (unlikely(!pmd_trans_huge(pmd))) {
                pte_free(dst_mm, pgtable);
                goto out_unlock;
        }
        /*
         * mm->page_table_lock is enough to be sure that huge zero pmd is not
         * under splitting since we don't split the page itself, only pmd to
         * a page table.
         */
        if (is_huge_zero_pmd(pmd)) {
                unsigned long zero_pfn;
                bool set;
                /*
                 * get_huge_zero_page() will never allocate a new page here,
                 * since we already have a zero page to copy. It just takes a
                 * reference.
                 */
                zero_pfn = get_huge_zero_page();
                set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
                                zero_pfn);
                BUG_ON(!set); /* unexpected !pmd_none(dst_pmd) */
                ret = 0;
                goto out_unlock;
        }
        if (unlikely(pmd_trans_splitting(pmd))) {
                /* split huge page running from under us */
                spin_unlock(&src_mm->page_table_lock);
                spin_unlock(&dst_mm->page_table_lock);
                pte_free(dst_mm, pgtable);

                wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
                goto out;
        }
        src_page = pmd_page(pmd);
        VM_BUG_ON(!PageHead(src_page));
        get_page(src_page);
        page_dup_rmap(src_page);
        add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);

        pmdp_set_wrprotect(src_mm, addr, src_pmd);
        pmd = pmd_mkold(pmd_wrprotect(pmd));
        set_pmd_at(dst_mm, addr, dst_pmd, pmd);
        pgtable_trans_huge_deposit(dst_mm, pgtable);
        dst_mm->nr_ptes++;

        ret = 0;
out_unlock:
        spin_unlock(&src_mm->page_table_lock);
        spin_unlock(&dst_mm->page_table_lock);
out:
        return ret;
}
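
/*
 * Soft fault on a present huge pmd: just set the accessed (and, when
 * @dirty, the dirty) bit, provided the pmd has not changed under us.
 */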
void huge_pmd_set_accessed(struct mm_struct *mm,
                           struct vm_area_struct *vma,
                           unsigned long address,
                           pmd_t *pmd, pmd_t orig_pmd,
                           int dirty)
{
        pmd_t entry;
        unsigned long haddr;

        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_same(*pmd, orig_pmd)))
                goto unlock;

        entry = pmd_mkyoung(orig_pmd);
        haddr = address & HPAGE_PMD_MASK;
        if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
                update_mmu_cache_pmd(vma, address, pmd);

unlock:
        spin_unlock(&mm->page_table_lock);
}
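
/*
 * COW fault on the huge zero page when no huge page could be
 * allocated: split the pmd into a page table where the faulting
 * address gets a freshly zeroed small page and every other slot maps
 * the (small) zero page.
 */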
static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
                struct vm_area_struct *vma, unsigned long address,
                pmd_t *pmd, pmd_t orig_pmd, unsigned long haddr)
{
        pgtable_t pgtable;
        pmd_t _pmd;
        struct page *page;
        int i, ret = 0;
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */

        page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
        if (!page) {
                ret |= VM_FAULT_OOM;
                goto out;
        }

        if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
                put_page(page);
                ret |= VM_FAULT_OOM;
                goto out;
        }

        clear_user_highpage(page, address);
        __SetPageUptodate(page);

        mmun_start = haddr;
        mmun_end   = haddr + HPAGE_PMD_SIZE;
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_same(*pmd, orig_pmd)))
                goto out_free_page;

        pmdp_clear_flush(vma, haddr, pmd);
        /* leave pmd empty until pte is filled */

        pgtable = pgtable_trans_huge_withdraw(mm);
        pmd_populate(mm, &_pmd, pgtable);

        for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
                pte_t *pte, entry;
                if (haddr == (address & PAGE_MASK)) {
                        entry = mk_pte(page, vma->vm_page_prot);
                        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
                        page_add_new_anon_rmap(page, vma, haddr);
                } else {
                        entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
                        entry = pte_mkspecial(entry);
                }
                pte = pte_offset_map(&_pmd, haddr);
                VM_BUG_ON(!pte_none(*pte));
                set_pte_at(mm, haddr, pte, entry);
                pte_unmap(pte);
        }
        smp_wmb(); /* make pte visible before pmd */
        pmd_populate(mm, pmd, pgtable);
        spin_unlock(&mm->page_table_lock);
        put_huge_zero_page();
        inc_mm_counter(mm, MM_ANONPAGES);

        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

        ret |= VM_FAULT_WRITE;
out:
        return ret;
out_free_page:
        spin_unlock(&mm->page_table_lock);
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
        mem_cgroup_uncharge_page(page);
        put_page(page);
        goto out;
}
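
/*
 * COW fault on a real huge page when no huge page could be allocated:
 * copy the data into HPAGE_PMD_NR small pages and remap the range with
 * a regular page table, effectively splitting the mapping.
 */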
static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long address,
                                        pmd_t *pmd, pmd_t orig_pmd,
                                        struct page *page,
                                        unsigned long haddr)
{
        pgtable_t pgtable;
        pmd_t _pmd;
        int ret = 0, i;
        struct page **pages;
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */

        pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
                        GFP_KERNEL);
        if (unlikely(!pages)) {
                ret |= VM_FAULT_OOM;
                goto out;
        }

        for (i = 0; i < HPAGE_PMD_NR; i++) {
                pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
                                               __GFP_OTHER_NODE,
                                               vma, address, page_to_nid(page));
                if (unlikely(!pages[i] ||
                             mem_cgroup_newpage_charge(pages[i], mm,
                                                       GFP_KERNEL))) {
                        if (pages[i])
                                put_page(pages[i]);
                        mem_cgroup_uncharge_start();
                        while (--i >= 0) {
                                mem_cgroup_uncharge_page(pages[i]);
                                put_page(pages[i]);
                        }
                        mem_cgroup_uncharge_end();
                        kfree(pages);
                        ret |= VM_FAULT_OOM;
                        goto out;
                }
        }

        for (i = 0; i < HPAGE_PMD_NR; i++) {
                copy_user_highpage(pages[i], page + i,
                                   haddr + PAGE_SIZE * i, vma);
                __SetPageUptodate(pages[i]);
                cond_resched();
        }

        mmun_start = haddr;
        mmun_end   = haddr + HPAGE_PMD_SIZE;
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_same(*pmd, orig_pmd)))
                goto out_free_pages;
        VM_BUG_ON(!PageHead(page));

        pmdp_clear_flush(vma, haddr, pmd);
        /* leave pmd empty until pte is filled */

        pgtable = pgtable_trans_huge_withdraw(mm);
        pmd_populate(mm, &_pmd, pgtable);

        for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
                pte_t *pte, entry;
                entry = mk_pte(pages[i], vma->vm_page_prot);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
                page_add_new_anon_rmap(pages[i], vma, haddr);
                pte = pte_offset_map(&_pmd, haddr);
                VM_BUG_ON(!pte_none(*pte));
                set_pte_at(mm, haddr, pte, entry);
                pte_unmap(pte);
        }
        kfree(pages);

        smp_wmb(); /* make pte visible before pmd */
        pmd_populate(mm, pmd, pgtable);
        page_remove_rmap(page);
        spin_unlock(&mm->page_table_lock);

        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

        ret |= VM_FAULT_WRITE;
        put_page(page);

out:
        return ret;

out_free_pages:
        spin_unlock(&mm->page_table_lock);
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
        mem_cgroup_uncharge_start();
        for (i = 0; i < HPAGE_PMD_NR; i++) {
                mem_cgroup_uncharge_page(pages[i]);
                put_page(pages[i]);
        }
        mem_cgroup_uncharge_end();
        kfree(pages);
        goto out;
}
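
/*
 * Write-protect fault on a huge pmd. Reuse the page in place when we
 * are the only mapper; otherwise allocate a new huge page, copy (or
 * zero-fill, for the huge zero page) and switch the pmd over, falling
 * back to the pte-granularity paths above when allocation fails.
 */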
int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
{
        int ret = 0;
        struct page *page = NULL, *new_page;
        unsigned long haddr;
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */

        VM_BUG_ON(!vma->anon_vma);
        haddr = address & HPAGE_PMD_MASK;
        if (is_huge_zero_pmd(orig_pmd))
                goto alloc;
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_same(*pmd, orig_pmd)))
                goto out_unlock;

        page = pmd_page(orig_pmd);
        VM_BUG_ON(!PageCompound(page) || !PageHead(page));
        if (page_mapcount(page) == 1) {
                pmd_t entry;
                entry = pmd_mkyoung(orig_pmd);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
                        update_mmu_cache_pmd(vma, address, pmd);
                ret |= VM_FAULT_WRITE;
                goto out_unlock;
        }
        get_page(page);
        spin_unlock(&mm->page_table_lock);
alloc:
        if (transparent_hugepage_enabled(vma) &&
            !transparent_hugepage_debug_cow())
                new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
                                              vma, haddr, numa_node_id(), 0);
        else
                new_page = NULL;

        if (unlikely(!new_page)) {
                count_vm_event(THP_FAULT_FALLBACK);
                if (is_huge_zero_pmd(orig_pmd)) {
                        ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
                                        address, pmd, orig_pmd, haddr);
                } else {
                        ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
                                        pmd, orig_pmd, page, haddr);
                        if (ret & VM_FAULT_OOM)
                                split_huge_page(page);
                        put_page(page);
                }
                goto out;
        }
        count_vm_event(THP_FAULT_ALLOC);

        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
                put_page(new_page);
                if (page) {
                        split_huge_page(page);
                        put_page(page);
                }
                ret |= VM_FAULT_OOM;
                goto out;
        }

        if (is_huge_zero_pmd(orig_pmd))
                clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
        else
                copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
        __SetPageUptodate(new_page);

        mmun_start = haddr;
        mmun_end   = haddr + HPAGE_PMD_SIZE;
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

        spin_lock(&mm->page_table_lock);
        if (page)
                put_page(page);
        if (unlikely(!pmd_same(*pmd, orig_pmd))) {
                spin_unlock(&mm->page_table_lock);
                mem_cgroup_uncharge_page(new_page);
                put_page(new_page);
                goto out_mn;
        } else {
                pmd_t entry;
                entry = mk_huge_pmd(new_page, vma);
                pmdp_clear_flush(vma, haddr, pmd);
                page_add_new_anon_rmap(new_page, vma, haddr);
                set_pmd_at(mm, haddr, pmd, entry);
                update_mmu_cache_pmd(vma, address, pmd);
                if (is_huge_zero_pmd(orig_pmd)) {
                        add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
                        put_huge_zero_page();
                } else {
                        VM_BUG_ON(!PageHead(page));
                        page_remove_rmap(page);
                        put_page(page);
                }
                ret |= VM_FAULT_WRITE;
        }
        spin_unlock(&mm->page_table_lock);
out_mn:
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out:
        return ret;
out_unlock:
        spin_unlock(&mm->page_table_lock);
        return ret;
}
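
/*
 * get_user_pages() helper for a trans-huge pmd: return the subpage of
 * the THP corresponding to @addr, honouring FOLL_WRITE, FOLL_DUMP,
 * FOLL_TOUCH, FOLL_MLOCK and FOLL_GET. Called with
 * mm->page_table_lock held.
 */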
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                   unsigned long addr,
                                   pmd_t *pmd,
                                   unsigned int flags)
{
        struct mm_struct *mm = vma->vm_mm;
        struct page *page = NULL;

        assert_spin_locked(&mm->page_table_lock);

        if (flags & FOLL_WRITE && !pmd_write(*pmd))
                goto out;

        /* Avoid dumping huge zero page */
        if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
                return ERR_PTR(-EFAULT);

        page = pmd_page(*pmd);
        VM_BUG_ON(!PageHead(page));
        if (flags & FOLL_TOUCH) {
                pmd_t _pmd;
                /*
                 * We should set the dirty bit only for FOLL_WRITE but
                 * for now the dirty bit in the pmd is meaningless.
                 * And if the dirty bit will become meaningful and
                 * we'll only set it with FOLL_WRITE, an atomic
                 * set_bit will be required on the pmd to set the
                 * young bit, instead of the current set_pmd_at.
                 */
                _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
                set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
        }
        if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
                if (page->mapping && trylock_page(page)) {
                        lru_add_drain();
                        if (page->mapping)
                                mlock_vma_page(page);
                        unlock_page(page);
                }
        }
        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
        VM_BUG_ON(!PageCompound(page));
        if (flags & FOLL_GET)
                get_page_foll(page);

out:
        return page;
}
/* NUMA hinting page fault entry point for trans huge pmds */
int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
        struct page *page;
        unsigned long haddr = addr & HPAGE_PMD_MASK;
        int target_nid;
        int current_nid = -1;
        bool migrated;

        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_same(pmd, *pmdp)))
                goto out_unlock;

        page = pmd_page(pmd);
        get_page(page);
        current_nid = page_to_nid(page);
        count_vm_numa_event(NUMA_HINT_FAULTS);
        if (current_nid == numa_node_id())
                count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);

        target_nid = mpol_misplaced(page, vma, haddr);
        if (target_nid == -1) {
                put_page(page);
                goto clear_pmdnuma;
        }

        /* Acquire the page lock to serialise THP migrations */
        spin_unlock(&mm->page_table_lock);
        lock_page(page);

        /* Confirm the PMD did not change while the lock was dropped */
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_same(pmd, *pmdp))) {
                unlock_page(page);
                put_page(page);
                goto out_unlock;
        }
        spin_unlock(&mm->page_table_lock);

        /* Migrate the THP to the requested node */
        migrated = migrate_misplaced_transhuge_page(mm, vma,
                                pmdp, pmd, addr, page, target_nid);
        if (!migrated)
                goto check_same;

        task_numa_fault(target_nid, HPAGE_PMD_NR, true);
        return 0;

check_same:
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_same(pmd, *pmdp)))
                goto out_unlock;
clear_pmdnuma:
        pmd = pmd_mknonnuma(pmd);
        set_pmd_at(mm, haddr, pmdp, pmd);
        VM_BUG_ON(pmd_numa(*pmdp));
        update_mmu_cache_pmd(vma, addr, pmdp);
out_unlock:
        spin_unlock(&mm->page_table_lock);
        if (current_nid != -1)
                task_numa_fault(current_nid, HPAGE_PMD_NR, false);
        return 0;
}
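
/*
 * munmap()/exit() teardown of one huge pmd. Returns 1 if a huge pmd
 * was zapped, 0 if the pmd turned out not to be trans-huge; the caller
 * is then expected to zap the range pte by pte.
 */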
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                 pmd_t *pmd, unsigned long addr)
{
        int ret = 0;

        if (__pmd_trans_huge_lock(pmd, vma) == 1) {
                struct page *page;
                pgtable_t pgtable;
                pmd_t orig_pmd;
                pgtable = pgtable_trans_huge_withdraw(tlb->mm);
                orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd);
                tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
                if (is_huge_zero_pmd(orig_pmd)) {
                        tlb->mm->nr_ptes--;
                        spin_unlock(&tlb->mm->page_table_lock);
                        put_huge_zero_page();
                } else {
                        page = pmd_page(orig_pmd);
                        page_remove_rmap(page);
                        VM_BUG_ON(page_mapcount(page) < 0);
                        add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
                        VM_BUG_ON(!PageHead(page));
                        tlb->mm->nr_ptes--;
                        spin_unlock(&tlb->mm->page_table_lock);
                        tlb_remove_page(tlb, page);
                }
                pte_free(tlb->mm, pgtable);
                ret = 1;
        }
        return ret;
}

int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, unsigned long end,
                unsigned char *vec)
{
        int ret = 0;

        if (__pmd_trans_huge_lock(pmd, vma) == 1) {
                /*
                 * All logical pages in the range are present
                 * if backed by a huge page.
                 */
                spin_unlock(&vma->vm_mm->page_table_lock);
                memset(vec, 1, (end - addr) >> PAGE_SHIFT);
                ret = 1;
        }

        return ret;
}
int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
                  unsigned long old_addr,
                  unsigned long new_addr, unsigned long old_end,
                  pmd_t *old_pmd, pmd_t *new_pmd)
{
        int ret = 0;
        pmd_t pmd;

        struct mm_struct *mm = vma->vm_mm;

        if ((old_addr & ~HPAGE_PMD_MASK) ||
            (new_addr & ~HPAGE_PMD_MASK) ||
            old_end - old_addr < HPAGE_PMD_SIZE ||
            (new_vma->vm_flags & VM_NOHUGEPAGE))
                goto out;

        /*
         * The destination pmd shouldn't be established, free_pgtables()
         * should have released it.
         */
        if (WARN_ON(!pmd_none(*new_pmd))) {
                VM_BUG_ON(pmd_trans_huge(*new_pmd));
                goto out;
        }

        ret = __pmd_trans_huge_lock(old_pmd, vma);
        if (ret == 1) {
                pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
                VM_BUG_ON(!pmd_none(*new_pmd));
                set_pmd_at(mm, new_addr, new_pmd, pmd);
                spin_unlock(&mm->page_table_lock);
        }
out:
        return ret;
}
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, pgprot_t newprot, int prot_numa)
{
        struct mm_struct *mm = vma->vm_mm;
        int ret = 0;

        if (__pmd_trans_huge_lock(pmd, vma) == 1) {
                pmd_t entry;
                entry = pmdp_get_and_clear(mm, addr, pmd);
                if (!prot_numa) {
                        entry = pmd_modify(entry, newprot);
                        BUG_ON(pmd_write(entry));
                } else {
                        struct page *page = pmd_page(*pmd);

                        /* only check non-shared pages */
                        if (page_mapcount(page) == 1 &&
                            !pmd_numa(*pmd)) {
                                entry = pmd_mknuma(entry);
                        }
                }
                set_pmd_at(mm, addr, pmd, entry);
                spin_unlock(&vma->vm_mm->page_table_lock);
                ret = 1;
        }

        return ret;
}
/*
 * Returns 1 if a given pmd maps a stable (not under splitting) thp.
 * Returns -1 if it maps a thp under splitting. Returns 0 otherwise.
 *
 * Note that if it returns 1, this routine returns without unlocking the
 * page table lock, so the caller must unlock it.
 */
int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
{
	spin_lock(&vma->vm_mm->page_table_lock);
	if (likely(pmd_trans_huge(*pmd))) {
		if (unlikely(pmd_trans_splitting(*pmd))) {
			spin_unlock(&vma->vm_mm->page_table_lock);
			wait_split_huge_page(vma->anon_vma, pmd);
			return -1;
		} else {
			/* Thp mapped by 'pmd' is stable, so we can
			 * handle it as it is. */
			return 1;
		}
	}
	spin_unlock(&vma->vm_mm->page_table_lock);
	return 0;
}

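/*
 * Returns the pmd that maps @page at @address in @mm, or NULL if the
 * page is not mapped there by a huge pmd. @flag lets the caller insist
 * that the pmd is (or is not) currently under splitting.
 */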
pmd_t *page_check_address_pmd(struct page *page,
			      struct mm_struct *mm,
			      unsigned long address,
			      enum page_check_address_pmd_flag flag)
{
	pmd_t *pmd, *ret = NULL;

	if (address & ~HPAGE_PMD_MASK)
		goto out;

	pmd = mm_find_pmd(mm, address);
	if (!pmd)
		goto out;
	if (pmd_none(*pmd))
		goto out;
	if (pmd_page(*pmd) != page)
		goto out;
	/*
	 * split_vma() may create temporary aliased mappings. There is
	 * no risk as long as all huge pmd are found and have their
	 * splitting bit set before __split_huge_page_refcount
	 * runs. Finding the same huge pmd more than once during the
	 * same rmap walk is not a problem.
	 */
	if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
	    pmd_trans_splitting(*pmd))
		goto out;
	if (pmd_trans_huge(*pmd)) {
		VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
			  !pmd_trans_splitting(*pmd));
		ret = pmd;
	}
out:
	return ret;
}

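/*
 * First phase of split_huge_page(): mark the pmd that maps the huge
 * page in this vma as "splitting", so that pmd-level accessors back
 * off until the split is complete. Returns 1 if a pmd was marked.
 */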
static int __split_huge_page_splitting(struct page *page,
				       struct vm_area_struct *vma,
				       unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd;
	int ret = 0;
	/* For mmu_notifiers */
	const unsigned long mmun_start = address;
	const unsigned long mmun_end   = address + HPAGE_PMD_SIZE;

	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	spin_lock(&mm->page_table_lock);
	pmd = page_check_address_pmd(page, mm, address,
				     PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
	if (pmd) {
		/*
		 * We can't temporarily set the pmd to null in order
		 * to split it, the pmd must remain marked huge at all
		 * times or the VM won't take the pmd_trans_huge paths
		 * and it won't wait on the anon_vma->root->rwsem to
		 * serialize against split_huge_page*.
		 */
		pmdp_splitting_flush(vma, address, pmd);
		ret = 1;
	}
	spin_unlock(&mm->page_table_lock);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	return ret;
}

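/*
 * Second phase of split_huge_page(): distribute the head page's
 * refcounts, mapcount and flags across the tail pages, then clear
 * PageCompound so the HPAGE_PMD_NR former compound pages become
 * independent order-0 pages.
 */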
static void __split_huge_page_refcount(struct page *page)
{
	int i;
	struct zone *zone = page_zone(page);
	struct lruvec *lruvec;
	int tail_count = 0;

	/* prevent PageLRU from going away from under us, and freeze lru stats */
	spin_lock_irq(&zone->lru_lock);
	lruvec = mem_cgroup_page_lruvec(page, zone);

	compound_lock(page);
	/* complete memcg works before add pages to LRU */
	mem_cgroup_split_huge_fixup(page);

	for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
		struct page *page_tail = page + i;

		/* tail_page->_mapcount cannot change */
		BUG_ON(page_mapcount(page_tail) < 0);
		tail_count += page_mapcount(page_tail);
		/* check for overflow */
		BUG_ON(tail_count < 0);
		BUG_ON(atomic_read(&page_tail->_count) != 0);
		/*
		 * tail_page->_count is zero and not changing from
		 * under us. But get_page_unless_zero() may be running
		 * from under us on the tail_page. If we used
		 * atomic_set() below instead of atomic_add(), we
		 * would then run atomic_set() concurrently with
		 * get_page_unless_zero(), and atomic_set() is
		 * implemented in C not using locked ops. spin_unlock
		 * on x86 sometimes uses locked ops because of PPro
		 * errata 66, 92, so unless somebody can guarantee
		 * atomic_set() here would be safe on all archs (and
		 * not only on x86), it's safer to use atomic_add().
		 */
		atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
			   &page_tail->_count);

		/* after clearing PageTail the gup refcount can be released */
		smp_mb();

		/*
		 * retain the hwpoison flag of the poisoned tail page:
		 * fixes the wrong process being killed on a KVM guest
		 * by memory-failure.
		 */
		page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
		page_tail->flags |= (page->flags &
				     ((1L << PG_referenced) |
				      (1L << PG_swapbacked) |
				      (1L << PG_mlocked) |
				      (1L << PG_uptodate)));
		page_tail->flags |= (1L << PG_dirty);

		/* clear PageTail before overwriting first_page */
		smp_wmb();

		/*
		 * __split_huge_page_splitting() already set the
		 * splitting bit in all pmd that could map this
		 * hugepage, that will ensure no CPU can alter the
		 * mapcount on the head page. The mapcount is only
		 * accounted in the head page and it has to be
		 * transferred to all tail pages in the below code. So
		 * for this code to be safe, the mapcount can't change
		 * during the split. But that doesn't mean userland
		 * can't keep changing and reading the page contents
		 * while we transfer the mapcount, so the pmd splitting
		 * status is achieved by setting a reserved bit in the
		 * pmd, not by clearing the present bit.
		 */
		page_tail->_mapcount = page->_mapcount;

		BUG_ON(page_tail->mapping);
		page_tail->mapping = page->mapping;

		page_tail->index = page->index + i;
		page_xchg_last_nid(page_tail, page_last_nid(page));

		BUG_ON(!PageAnon(page_tail));
		BUG_ON(!PageUptodate(page_tail));
		BUG_ON(!PageDirty(page_tail));
		BUG_ON(!PageSwapBacked(page_tail));

		lru_add_page_tail(page, page_tail, lruvec);
	}
	atomic_sub(tail_count, &page->_count);
	BUG_ON(atomic_read(&page->_count) <= 0);

	__mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
	__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);

	ClearPageCompound(page);
	compound_unlock(page);
	spin_unlock_irq(&zone->lru_lock);

	for (i = 1; i < HPAGE_PMD_NR; i++) {
		struct page *page_tail = page + i;
		BUG_ON(page_count(page_tail) <= 0);
		/*
		 * Tail pages may be freed if there wasn't any mapping
		 * like if add_to_swap() is running on a lru page that
		 * had its mapping zapped. And freeing these pages
		 * requires taking the lru_lock so we do the put_page
		 * of the tail pages after the split is complete.
		 */
		put_page(page_tail);
	}

	/*
	 * Only the head page (now become a regular page) is required
	 * to be pinned by the caller.
	 */
	BUG_ON(page_count(page) <= 0);
}

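/*
 * Final phase of the split for one vma: replace the huge pmd with a
 * page table mapping the HPAGE_PMD_NR small pages, preserving the
 * write, young and numa bits of the original pmd.
 */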
static int __split_huge_page_map(struct page *page,
				 struct vm_area_struct *vma,
				 unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd, _pmd;
	int ret = 0, i;
	pgtable_t pgtable;
	unsigned long haddr;

	spin_lock(&mm->page_table_lock);
	pmd = page_check_address_pmd(page, mm, address,
				     PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
	if (pmd) {
		pgtable = pgtable_trans_huge_withdraw(mm);
		pmd_populate(mm, &_pmd, pgtable);

		haddr = address;
		for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
			pte_t *pte, entry;
			BUG_ON(PageCompound(page+i));
			entry = mk_pte(page + i, vma->vm_page_prot);
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
			if (!pmd_write(*pmd))
				entry = pte_wrprotect(entry);
			else
				BUG_ON(page_mapcount(page) != 1);
			if (!pmd_young(*pmd))
				entry = pte_mkold(entry);
			if (pmd_numa(*pmd))
				entry = pte_mknuma(entry);
			pte = pte_offset_map(&_pmd, haddr);
			BUG_ON(!pte_none(*pte));
			set_pte_at(mm, haddr, pte, entry);
			pte_unmap(pte);
		}

		smp_wmb(); /* make pte visible before pmd */
		/*
		 * Up to this point the pmd is present and huge and
		 * userland has full access to the hugepage during the
		 * split (which happens in place). If we overwrite the
		 * pmd with the not-huge version pointing to the pte
		 * here (which of course we could if all CPUs were bug
		 * free), userland could trigger a small page size TLB
		 * miss on the small sized TLB while the hugepage TLB
		 * entry is still established in the huge TLB. Some CPUs
		 * don't like that. See
		 * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
		 * Erratum 383 on page 93. Intel should be safe but also
		 * warns that it's only safe if the permission and cache
		 * attributes of the two entries loaded in the two TLBs
		 * are identical (which should be the case here). But it
		 * is generally safer to never allow small and huge TLB
		 * entries for the same virtual address to be loaded
		 * simultaneously. So instead of doing "pmd_populate();
		 * flush_tlb_range();" we first mark the current pmd
		 * notpresent (atomically because here the
		 * pmd_trans_huge and pmd_trans_splitting bits must
		 * remain set on the pmd at all times until the split is
		 * complete), then we flush the SMP TLB and finally we
		 * write the non-huge version of the pmd entry with
		 * pmd_populate.
		 */
		pmdp_invalidate(vma, address, pmd);
		pmd_populate(mm, pmd, pgtable);
		ret = 1;
	}
	spin_unlock(&mm->page_table_lock);

	return ret;
}

/* must be called with anon_vma->root->rwsem held */
static void __split_huge_page(struct page *page,
			      struct anon_vma *anon_vma)
{
	int mapcount, mapcount2;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct anon_vma_chain *avc;

	BUG_ON(!PageHead(page));
	BUG_ON(PageTail(page));

	mapcount = 0;
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long addr = vma_address(page, vma);
		BUG_ON(is_vma_temporary_stack(vma));
		mapcount += __split_huge_page_splitting(page, vma, addr);
	}
	/*
	 * It is critical that new vmas are added to the tail of the
	 * anon_vma list. This guarantees that if copy_huge_pmd() runs
	 * and establishes a child pmd before
	 * __split_huge_page_splitting() freezes the parent pmd (so if
	 * we fail to prevent copy_huge_pmd() from running until the
	 * whole __split_huge_page() is complete), we will still see
	 * the newly established pmd of the child later during the
	 * walk, to be able to set it as pmd_trans_splitting too.
	 */
	if (mapcount != page_mapcount(page))
		printk(KERN_ERR "mapcount %d page_mapcount %d\n",
		       mapcount, page_mapcount(page));
	BUG_ON(mapcount != page_mapcount(page));

	__split_huge_page_refcount(page);

	mapcount2 = 0;
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long addr = vma_address(page, vma);
		BUG_ON(is_vma_temporary_stack(vma));
		mapcount2 += __split_huge_page_map(page, vma, addr);
	}
	if (mapcount != mapcount2)
		printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
		       mapcount, mapcount2, page_mapcount(page));
	BUG_ON(mapcount != mapcount2);
}

int split_huge_page(struct page *page)
{
	struct anon_vma *anon_vma;
	int ret = 1;

	BUG_ON(is_huge_zero_pfn(page_to_pfn(page)));
	BUG_ON(!PageAnon(page));

	/*
	 * The caller does not necessarily hold an mmap_sem that would
	 * prevent the anon_vma disappearing, so we first take a
	 * reference to it and then lock the anon_vma for write. This is
	 * similar to page_lock_anon_vma_read except the write lock is
	 * taken to serialise against parallel split or collapse
	 * operations.
	 */
	anon_vma = page_get_anon_vma(page);
	if (!anon_vma)
		goto out;
	anon_vma_lock_write(anon_vma);

	ret = 0;
	if (!PageCompound(page))
		goto out_unlock;

	BUG_ON(!PageSwapBacked(page));
	__split_huge_page(page, anon_vma);
	count_vm_event(THP_SPLIT);

	BUG_ON(PageCompound(page));
out_unlock:
	anon_vma_unlock_write(anon_vma);
	put_anon_vma(anon_vma);
out:
	return ret;
}

#define VM_NO_THP (VM_SPECIAL|VM_MIXEDMAP|VM_HUGETLB|VM_SHARED|VM_MAYSHARE)

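/*
 * madvise(MADV_HUGEPAGE/MADV_NOHUGEPAGE) handler: toggles the
 * VM_HUGEPAGE/VM_NOHUGEPAGE vma flags, rejecting vma types that can
 * never be backed by transparent huge pages.
 */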
int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	struct mm_struct *mm = vma->vm_mm;

	switch (advice) {
	case MADV_HUGEPAGE:
		/*
		 * Be somewhat over-protective like KSM for now!
		 */
		if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
			return -EINVAL;
		if (mm->def_flags & VM_NOHUGEPAGE)
			return -EINVAL;
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes suitable for khugepaged to scan,
		 * register it here without waiting for a page fault
		 * that may not happen any time soon.
		 */
		if (unlikely(khugepaged_enter_vma_merge(vma)))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		/*
		 * Be somewhat over-protective like KSM for now!
		 */
		if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
			return -EINVAL;
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

static int __init khugepaged_slab_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	return 0;
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	struct hlist_node *node;

	hash_for_each_possible(mm_slots_hash, mm_slot, node, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

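/*
 * Register @mm with khugepaged: allocate an mm_slot, hash it, queue it
 * behind the scan cursor and wake the daemon if its list was empty.
 * Pins the mm via mm_count so the mm_struct can't be freed while
 * queued.
 */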
int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON(khugepaged_test_exit(mm));
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	atomic_inc(&mm->mm_count);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}

int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
{
	unsigned long hstart, hend;
	if (!vma->anon_vma)
		/*
		 * Not yet faulted in so we will register later in the
		 * page fault if needed.
		 */
		return 0;
	if (vma->vm_ops)
		/* khugepaged not yet working on file or special mappings */
		return 0;
	VM_BUG_ON(vma->vm_flags & VM_NO_THP);
	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma);
	return 0;
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (once we return,
		 * all pagetables will be destroyed) until khugepaged
		 * has finished working on the pagetables under the
		 * mmap_sem.
		 */
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}
}

static void release_pte_page(struct page *page)
{
	/* 0 stands for page_is_file_cache(page) == false */
	dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte)
{
	while (--_pte >= pte) {
		pte_t pteval = *_pte;
		if (!pte_none(pteval))
			release_pte_page(pte_page(pteval));
	}
}

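/*
 * Check and isolate the small pages that are about to be collapsed:
 * they must be present, anonymous, writable, unpinned and lockable,
 * and at most khugepaged_max_ptes_none ptes may be empty. On success
 * the pages are left locked and isolated from the LRU; on failure
 * everything isolated so far is released.
 */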
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte)
{
	struct page *page;
	pte_t *_pte;
	int referenced = 0, none = 0;
	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval)) {
			if (++none <= khugepaged_max_ptes_none)
				continue;
			else
				goto out;
		}
		if (!pte_present(pteval) || !pte_write(pteval))
			goto out;
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page))
			goto out;
		VM_BUG_ON(PageCompound(page));
		BUG_ON(!PageAnon(page));
		VM_BUG_ON(!PageSwapBacked(page));

		/* cannot use mapcount: can't collapse if there's a gup pin */
		if (page_count(page) != 1)
			goto out;
		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page))
			goto out;
		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			goto out;
		}
		/* 0 stands for page_is_file_cache(page) == false */
		inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
		VM_BUG_ON(!PageLocked(page));
		VM_BUG_ON(PageLRU(page));

		/* If no mapped pte is young, don't collapse the page */
		if (pte_young(pteval) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced = 1;
	}
	if (likely(referenced))
		return 1;
out:
	release_pte_pages(pte, _pte);
	return 0;
}

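/*
 * Copy the isolated small pages into the new huge page, clearing any
 * pte_none holes, and tear down the old mappings as we go.
 */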
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl)
{
	pte_t *_pte;
	for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
		pte_t pteval = *_pte;
		struct page *src_page;

		if (pte_none(pteval)) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			VM_BUG_ON(page_mapcount(src_page) != 1);
			release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}

		address += PAGE_SIZE;
		page++;
	}
}

static void khugepaged_alloc_sleep(void)
{
	wait_event_freezable_timeout(khugepaged_wait, false,
			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
}

#ifdef CONFIG_NUMA
static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page
*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
		       struct vm_area_struct *vma, unsigned long address,
		       int node)
{
	VM_BUG_ON(*hpage);
	/*
	 * Allocate the page while the vma is still valid and under
	 * the mmap_sem read mode so there is no memory allocation
	 * later when we take the mmap_sem in write mode. This is more
	 * friendly behavior (OTOH it may actually hide bugs) to
	 * filesystems in userland with daemons allocating memory in
	 * the userland I/O paths. Allocating memory with the
	 * mmap_sem in read mode is also a good idea to allow greater
	 * scalability.
	 */
	*hpage = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
				    node, __GFP_OTHER_NODE);

	/*
	 * After allocating the hugepage, release the mmap_sem read lock in
	 * preparation for taking it in write mode.
	 */
	up_read(&mm->mmap_sem);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_hugepage(khugepaged_defrag());
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page
*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
		       struct vm_area_struct *vma, unsigned long address,
		       int node)
{
	up_read(&mm->mmap_sem);
	VM_BUG_ON(!*hpage);
	return *hpage;
}
#endif

static bool hugepage_vma_check(struct vm_area_struct *vma)
{
	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
	    (vma->vm_flags & VM_NOHUGEPAGE))
		return false;

	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (is_vma_temporary_stack(vma))
		return false;
	VM_BUG_ON(vma->vm_flags & VM_NO_THP);
	return true;
}

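/*
 * Collapse the small pages mapped by the pmd at @address into a single
 * huge page: allocate the huge page, revalidate the vma with mmap_sem
 * held for write, clear and flush the pmd so gup_fast stops, isolate
 * and copy the pages, then install the new huge pmd.
 */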
static void collapse_huge_page(struct mm_struct *mm,
			       unsigned long address,
			       struct page **hpage,
			       struct vm_area_struct *vma,
			       int node)
{
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *ptl;
	int isolated;
	unsigned long hstart, hend;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* release the mmap_sem read lock. */
	new_page = khugepaged_alloc_page(hpage, mm, vma, address, node);
	if (!new_page)
		return;

	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
		return;

	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast, later handled by the ptep_clear_flush, and the VM,
	 * handled by the anon_vma lock + PG_lock.
	 */
	down_write(&mm->mmap_sem);
	if (unlikely(khugepaged_test_exit(mm)))
		goto out;

	vma = find_vma(mm, address);
	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		goto out;
	if (!hugepage_vma_check(vma))
		goto out;
	pmd = mm_find_pmd(mm, address);
	if (!pmd)
		goto out;
	if (pmd_trans_huge(*pmd))
		goto out;

	anon_vma_lock_write(vma->anon_vma);

	pte = pte_offset_map(pmd, address);
	ptl = pte_lockptr(mm, pmd);

	mmun_start = address;
	mmun_end   = address + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	spin_lock(&mm->page_table_lock); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_clear_flush(vma, address, pmd);
	spin_unlock(&mm->page_table_lock);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	spin_lock(ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte);
	spin_unlock(ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(&mm->page_table_lock);
		BUG_ON(!pmd_none(*pmd));
		set_pmd_at(mm, address, pmd, _pmd);
		spin_unlock(&mm->page_table_lock);
		anon_vma_unlock_write(vma->anon_vma);
		goto out;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, ptl);
	pte_unmap(pte);
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma);

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to avoid the copy_huge_page writes to become
	 * visible after the set_pmd_at() write.
	 */
	smp_wmb();

	spin_lock(&mm->page_table_lock);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	pgtable_trans_huge_deposit(mm, pgtable);
	spin_unlock(&mm->page_table_lock);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
out_up_write:
	up_write(&mm->mmap_sem);
	return;

out:
	mem_cgroup_uncharge_page(new_page);
	goto out_up_write;
}

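/*
 * Scan one pmd range to decide whether it is worth collapsing: all
 * candidate pages must be anonymous, on the LRU, unlocked and
 * unpinned, and at least one pte must be young. Returns 1 (with
 * mmap_sem released) after handing the range to collapse_huge_page().
 */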
static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, referenced = 0, none = 0;
	struct page *page;
	unsigned long _address;
	spinlock_t *ptl;
	int node = -1;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd)
		goto out;
	if (pmd_trans_huge(*pmd))
		goto out;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval)) {
			if (++none <= khugepaged_max_ptes_none)
				continue;
			else
				goto out_unmap;
		}
		if (!pte_present(pteval) || !pte_write(pteval))
			goto out_unmap;
		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page))
			goto out_unmap;
		/*
		 * Choose the node of the first page. This could
		 * be more sophisticated and look at more pages,
		 * but isn't for now.
		 */
		if (node == -1)
			node = page_to_nid(page);
		VM_BUG_ON(PageCompound(page));
		if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
			goto out_unmap;
		/* cannot use mapcount: can't collapse if there's a gup pin */
		if (page_count(page) != 1)
			goto out_unmap;
		if (pte_young(pteval) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced = 1;
	}
	if (referenced)
		ret = 1;
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret)
		/* collapse_huge_page will return with the mmap_sem released */
		collapse_huge_page(mm, address, hpage, vma, node);
out:
	return ret;
}

static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}

static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
					    struct page **hpage)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);

	mm = mm_slot->mm;
	down_read(&mm->mmap_sem);
	if (unlikely(khugepaged_test_exit(mm)))
		vma = NULL;
	else
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(khugepaged_test_exit(mm))) {
			progress++;
			break;
		}
		if (!hugepage_vma_check(vma)) {
skip:
			progress++;
			continue;
		}
		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		hend = vma->vm_end & HPAGE_PMD_MASK;
		if (hstart >= hend)
			goto skip;
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);

		while (khugepaged_scan.address < hend) {
			int ret;
			cond_resched();
			if (unlikely(khugepaged_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			ret = khugepaged_scan_pmd(mm, vma,
						  khugepaged_scan.address,
						  hpage);
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (ret)
				/* we released mmap_sem so break loop */
				goto breakouterloop_mmap_sem;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_sem:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (khugepaged_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}

static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}

static void khugepaged_do_scan(void)
{
	struct page *hpage = NULL;
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = khugepaged_pages_to_scan;
	bool wait = true;

	barrier(); /* write khugepaged_pages_to_scan to local stack */

	while (progress < pages) {
		if (!khugepaged_prealloc_page(&hpage, &wait))
			break;

		cond_resched();

		if (unlikely(kthread_should_stop() || freezing(current)))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}

	if (!IS_ERR_OR_NULL(hpage))
		put_page(hpage);
}

static void khugepaged_wait_work(void)
{
	try_to_freeze();

	if (khugepaged_has_work()) {
		if (!khugepaged_scan_sleep_millisecs)
			return;

		wait_event_freezable_timeout(khugepaged_wait,
					     kthread_should_stop(),
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
		return;
	}

	if (khugepaged_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}

static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, 19);

	while (!kthread_should_stop()) {
		khugepaged_do_scan();
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}

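/*
 * Splitting a pmd that maps the huge zero page needs no refcount work:
 * just replace it with a page table full of pte_special mappings of
 * the small zero page and drop the huge zero page reference.
 */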
static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
		unsigned long haddr, pmd_t *pmd)
{
	struct mm_struct *mm = vma->vm_mm;
	pgtable_t pgtable;
	pmd_t _pmd;
	int i;

	pmdp_clear_flush(vma, haddr, pmd);
	/* leave pmd empty until pte is filled */

	pgtable = pgtable_trans_huge_withdraw(mm);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;
		entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
		entry = pte_mkspecial(entry);
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
	put_huge_zero_page();
}

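/*
 * Split the huge page mapped by @pmd in place: handle the huge zero
 * page specially, otherwise pin the page and go through the full
 * split_huge_page() machinery.
 */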
void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd)
{
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long haddr = address & HPAGE_PMD_MASK;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE);

	mmun_start = haddr;
	mmun_end   = haddr + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(&mm->page_table_lock);
		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
		return;
	}
	if (is_huge_zero_pmd(*pmd)) {
		__split_huge_zero_page_pmd(vma, haddr, pmd);
		spin_unlock(&mm->page_table_lock);
		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
		return;
	}
	page = pmd_page(*pmd);
	VM_BUG_ON(!page_count(page));
	get_page(page);
	spin_unlock(&mm->page_table_lock);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	split_huge_page(page);

	put_page(page);
	BUG_ON(pmd_trans_huge(*pmd));
}

void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd)
{
	struct vm_area_struct *vma;

	vma = find_vma(mm, address);
	BUG_ON(vma == NULL);
	split_huge_page_pmd(vma, address, pmd);
}

static void split_huge_page_address(struct mm_struct *mm,
				    unsigned long address)
{
	pmd_t *pmd;

	VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));

	pmd = mm_find_pmd(mm, address);
	if (!pmd)
		return;
	/*
	 * Caller holds the mmap_sem write mode, so a huge pmd cannot
	 * materialize from under us.
	 */
	split_huge_page_pmd_mm(mm, address, pmd);
}

void __vma_adjust_trans_huge(struct vm_area_struct *vma,
			     unsigned long start,
			     unsigned long end,
			     long adjust_next)
{
	/*
	 * If the new start address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (start & ~HPAGE_PMD_MASK &&
	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_page_address(vma->vm_mm, start);

	/*
	 * If the new end address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (end & ~HPAGE_PMD_MASK &&
	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_page_address(vma->vm_mm, end);

	/*
	 * If we're also updating the vma->vm_next->vm_start, if the new
	 * vm_next->vm_start isn't page aligned and it could previously
	 * contain a hugepage: check if we need to split a huge pmd.
	 */
	if (adjust_next > 0) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long nstart = next->vm_start;
		nstart += adjust_next << PAGE_SHIFT;
		if (nstart & ~HPAGE_PMD_MASK &&
		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
			split_huge_page_address(next->vm_mm, nstart);
	}
}