huge_memory.c

  1. /*
  2. * Copyright (C) 2009 Red Hat, Inc.
  3. *
  4. * This work is licensed under the terms of the GNU GPL, version 2. See
  5. * the COPYING file in the top-level directory.
  6. */
  7. #include <linux/mm.h>
  8. #include <linux/sched.h>
  9. #include <linux/highmem.h>
  10. #include <linux/hugetlb.h>
  11. #include <linux/mmu_notifier.h>
  12. #include <linux/rmap.h>
  13. #include <linux/swap.h>
  14. #include <linux/mm_inline.h>
  15. #include <linux/kthread.h>
  16. #include <linux/khugepaged.h>
  17. #include <linux/freezer.h>
  18. #include <linux/mman.h>
  19. #include <asm/tlb.h>
  20. #include <asm/pgalloc.h>
  21. #include "internal.h"
  22. /*
  23. * By default transparent hugepage support is enabled for all mappings
  24. * and khugepaged scans all mappings. Defrag is only invoked by
  25. * khugepaged hugepage allocations and by page faults inside
  26. * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived
  27. * allocations.
  28. */
  29. unsigned long transparent_hugepage_flags __read_mostly =
  30. #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
  31. (1<<TRANSPARENT_HUGEPAGE_FLAG)|
  32. #endif
  33. #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
  34. (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
  35. #endif
  36. (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
  37. (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
  38. /* default scan 8*512 ptes (or vmas) every 10 seconds */
  39. static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
  40. static unsigned int khugepaged_pages_collapsed;
  41. static unsigned int khugepaged_full_scans;
  42. static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
  43. /* during fragmentation poll the hugepage allocator once every minute */
  44. static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
  45. static struct task_struct *khugepaged_thread __read_mostly;
  46. static DEFINE_MUTEX(khugepaged_mutex);
  47. static DEFINE_SPINLOCK(khugepaged_mm_lock);
  48. static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
  49. /*
  50. * By default, collapse into a hugepage if there is at least one pte mapped,
  51. * as would have happened had the vma been large enough at page-fault
  52. * time.
  53. */
  54. static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
  55. static int khugepaged(void *none);
  56. static int mm_slots_hash_init(void);
  57. static int khugepaged_slab_init(void);
  58. static void khugepaged_slab_free(void);
  59. #define MM_SLOTS_HASH_HEADS 1024
  60. static struct hlist_head *mm_slots_hash __read_mostly;
  61. static struct kmem_cache *mm_slot_cache __read_mostly;
  62. /**
  63. * struct mm_slot - hash lookup from mm to mm_slot
  64. * @hash: hash collision list
  65. * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
  66. * @mm: the mm that this information is valid for
  67. */
  68. struct mm_slot {
  69. struct hlist_node hash;
  70. struct list_head mm_node;
  71. struct mm_struct *mm;
  72. };
  73. /**
  74. * struct khugepaged_scan - cursor for scanning
  75. * @mm_head: the head of the mm list to scan
  76. * @mm_slot: the current mm_slot we are scanning
  77. * @address: the next address inside that to be scanned
  78. *
  79. * There is only the one khugepaged_scan instance of this cursor structure.
  80. */
  81. struct khugepaged_scan {
  82. struct list_head mm_head;
  83. struct mm_slot *mm_slot;
  84. unsigned long address;
  85. } khugepaged_scan = {
  86. .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
  87. };
  88. static int set_recommended_min_free_kbytes(void)
  89. {
  90. struct zone *zone;
  91. int nr_zones = 0;
  92. unsigned long recommended_min;
  93. extern int min_free_kbytes;
  94. if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG,
  95. &transparent_hugepage_flags) &&
  96. !test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
  97. &transparent_hugepage_flags))
  98. return 0;
  99. for_each_populated_zone(zone)
  100. nr_zones++;
  101. /* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
  102. recommended_min = pageblock_nr_pages * nr_zones * 2;
  103. /*
  104. * Make sure that on average at least two pageblocks are almost free
  105. * of another type, one for a migratetype to fall back to and a
  106. * second to avoid subsequent fallbacks of other types. There are 3
  107. * MIGRATE_TYPES we care about.
  108. */
  109. recommended_min += pageblock_nr_pages * nr_zones *
  110. MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
  111. /* never allow reserving more than 5% of lowmem */
  112. recommended_min = min(recommended_min,
  113. (unsigned long) nr_free_buffer_pages() / 20);
  114. recommended_min <<= (PAGE_SHIFT-10);
  115. if (recommended_min > min_free_kbytes)
  116. min_free_kbytes = recommended_min;
  117. setup_per_zone_wmarks();
  118. return 0;
  119. }
  120. late_initcall(set_recommended_min_free_kbytes);
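/*
 * Illustrative sketch, not part of the kernel source: a worked instance of
 * the set_recommended_min_free_kbytes() computation above, assuming one
 * populated zone, pageblock_nr_pages == 512 (2MB pageblocks with 4KB base
 * pages), MIGRATE_PCPTYPES == 3 and PAGE_SHIFT == 12. The 5% lowmem cap is
 * skipped here. All names and values below are illustrative assumptions.
 */
#if 0
static unsigned long example_recommended_min_kbytes(void)
{
	unsigned long pageblock_nr_pages = 512;	/* assumed */
	unsigned long nr_zones = 1;		/* assumed */
	unsigned long recommended_min;

	/* at least 2 free pageblocks per zone for MIGRATE_RESERVE */
	recommended_min = pageblock_nr_pages * nr_zones * 2;		/* 1024 pages */
	/* plus MIGRATE_PCPTYPES^2 pageblocks per zone */
	recommended_min += pageblock_nr_pages * nr_zones * 3 * 3;	/* 5632 pages */
	/* convert pages to kilobytes: 5632 << (12 - 10) == 22528 kB */
	return recommended_min << (12 - 10);
}
#endif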
  121. static int start_khugepaged(void)
  122. {
  123. int err = 0;
  124. if (khugepaged_enabled()) {
  125. int wakeup;
  126. if (unlikely(!mm_slot_cache || !mm_slots_hash)) {
  127. err = -ENOMEM;
  128. goto out;
  129. }
  130. mutex_lock(&khugepaged_mutex);
  131. if (!khugepaged_thread)
  132. khugepaged_thread = kthread_run(khugepaged, NULL,
  133. "khugepaged");
  134. if (unlikely(IS_ERR(khugepaged_thread))) {
  135. printk(KERN_ERR
  136. "khugepaged: kthread_run(khugepaged) failed\n");
  137. err = PTR_ERR(khugepaged_thread);
  138. khugepaged_thread = NULL;
  139. }
  140. wakeup = !list_empty(&khugepaged_scan.mm_head);
  141. mutex_unlock(&khugepaged_mutex);
  142. if (wakeup)
  143. wake_up_interruptible(&khugepaged_wait);
  144. set_recommended_min_free_kbytes();
  145. } else
  146. /* wakeup to exit */
  147. wake_up_interruptible(&khugepaged_wait);
  148. out:
  149. return err;
  150. }
  151. #ifdef CONFIG_SYSFS
  152. static ssize_t double_flag_show(struct kobject *kobj,
  153. struct kobj_attribute *attr, char *buf,
  154. enum transparent_hugepage_flag enabled,
  155. enum transparent_hugepage_flag req_madv)
  156. {
  157. if (test_bit(enabled, &transparent_hugepage_flags)) {
  158. VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
  159. return sprintf(buf, "[always] madvise never\n");
  160. } else if (test_bit(req_madv, &transparent_hugepage_flags))
  161. return sprintf(buf, "always [madvise] never\n");
  162. else
  163. return sprintf(buf, "always madvise [never]\n");
  164. }
  165. static ssize_t double_flag_store(struct kobject *kobj,
  166. struct kobj_attribute *attr,
  167. const char *buf, size_t count,
  168. enum transparent_hugepage_flag enabled,
  169. enum transparent_hugepage_flag req_madv)
  170. {
  171. if (!memcmp("always", buf,
  172. min(sizeof("always")-1, count))) {
  173. set_bit(enabled, &transparent_hugepage_flags);
  174. clear_bit(req_madv, &transparent_hugepage_flags);
  175. } else if (!memcmp("madvise", buf,
  176. min(sizeof("madvise")-1, count))) {
  177. clear_bit(enabled, &transparent_hugepage_flags);
  178. set_bit(req_madv, &transparent_hugepage_flags);
  179. } else if (!memcmp("never", buf,
  180. min(sizeof("never")-1, count))) {
  181. clear_bit(enabled, &transparent_hugepage_flags);
  182. clear_bit(req_madv, &transparent_hugepage_flags);
  183. } else
  184. return -EINVAL;
  185. return count;
  186. }
  187. static ssize_t enabled_show(struct kobject *kobj,
  188. struct kobj_attribute *attr, char *buf)
  189. {
  190. return double_flag_show(kobj, attr, buf,
  191. TRANSPARENT_HUGEPAGE_FLAG,
  192. TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
  193. }
  194. static ssize_t enabled_store(struct kobject *kobj,
  195. struct kobj_attribute *attr,
  196. const char *buf, size_t count)
  197. {
  198. ssize_t ret;
  199. ret = double_flag_store(kobj, attr, buf, count,
  200. TRANSPARENT_HUGEPAGE_FLAG,
  201. TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
  202. if (ret > 0) {
  203. int err = start_khugepaged();
  204. if (err)
  205. ret = err;
  206. }
  207. if (ret > 0 &&
  208. (test_bit(TRANSPARENT_HUGEPAGE_FLAG,
  209. &transparent_hugepage_flags) ||
  210. test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
  211. &transparent_hugepage_flags)))
  212. set_recommended_min_free_kbytes();
  213. return ret;
  214. }
  215. static struct kobj_attribute enabled_attr =
  216. __ATTR(enabled, 0644, enabled_show, enabled_store);
  217. static ssize_t single_flag_show(struct kobject *kobj,
  218. struct kobj_attribute *attr, char *buf,
  219. enum transparent_hugepage_flag flag)
  220. {
  221. if (test_bit(flag, &transparent_hugepage_flags))
  222. return sprintf(buf, "[yes] no\n");
  223. else
  224. return sprintf(buf, "yes [no]\n");
  225. }
  226. static ssize_t single_flag_store(struct kobject *kobj,
  227. struct kobj_attribute *attr,
  228. const char *buf, size_t count,
  229. enum transparent_hugepage_flag flag)
  230. {
  231. if (!memcmp("yes", buf,
  232. min(sizeof("yes")-1, count))) {
  233. set_bit(flag, &transparent_hugepage_flags);
  234. } else if (!memcmp("no", buf,
  235. min(sizeof("no")-1, count))) {
  236. clear_bit(flag, &transparent_hugepage_flags);
  237. } else
  238. return -EINVAL;
  239. return count;
  240. }
  241. /*
  242. * Currently defrag only controls whether the allocation may wait
  243. * (__GFP_WAIT). A blind __GFP_REPEAT would be too aggressive: it is never
  244. * worth swapping tons of memory just to allocate one more hugepage.
  245. */
  246. static ssize_t defrag_show(struct kobject *kobj,
  247. struct kobj_attribute *attr, char *buf)
  248. {
  249. return double_flag_show(kobj, attr, buf,
  250. TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
  251. TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
  252. }
  253. static ssize_t defrag_store(struct kobject *kobj,
  254. struct kobj_attribute *attr,
  255. const char *buf, size_t count)
  256. {
  257. return double_flag_store(kobj, attr, buf, count,
  258. TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
  259. TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
  260. }
  261. static struct kobj_attribute defrag_attr =
  262. __ATTR(defrag, 0644, defrag_show, defrag_store);
  263. #ifdef CONFIG_DEBUG_VM
  264. static ssize_t debug_cow_show(struct kobject *kobj,
  265. struct kobj_attribute *attr, char *buf)
  266. {
  267. return single_flag_show(kobj, attr, buf,
  268. TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
  269. }
  270. static ssize_t debug_cow_store(struct kobject *kobj,
  271. struct kobj_attribute *attr,
  272. const char *buf, size_t count)
  273. {
  274. return single_flag_store(kobj, attr, buf, count,
  275. TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
  276. }
  277. static struct kobj_attribute debug_cow_attr =
  278. __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
  279. #endif /* CONFIG_DEBUG_VM */
  280. static struct attribute *hugepage_attr[] = {
  281. &enabled_attr.attr,
  282. &defrag_attr.attr,
  283. #ifdef CONFIG_DEBUG_VM
  284. &debug_cow_attr.attr,
  285. #endif
  286. NULL,
  287. };
  288. static struct attribute_group hugepage_attr_group = {
  289. .attrs = hugepage_attr,
  290. };
  291. static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
  292. struct kobj_attribute *attr,
  293. char *buf)
  294. {
  295. return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
  296. }
  297. static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
  298. struct kobj_attribute *attr,
  299. const char *buf, size_t count)
  300. {
  301. unsigned long msecs;
  302. int err;
  303. err = strict_strtoul(buf, 10, &msecs);
  304. if (err || msecs > UINT_MAX)
  305. return -EINVAL;
  306. khugepaged_scan_sleep_millisecs = msecs;
  307. wake_up_interruptible(&khugepaged_wait);
  308. return count;
  309. }
  310. static struct kobj_attribute scan_sleep_millisecs_attr =
  311. __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
  312. scan_sleep_millisecs_store);
  313. static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
  314. struct kobj_attribute *attr,
  315. char *buf)
  316. {
  317. return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
  318. }
  319. static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
  320. struct kobj_attribute *attr,
  321. const char *buf, size_t count)
  322. {
  323. unsigned long msecs;
  324. int err;
  325. err = strict_strtoul(buf, 10, &msecs);
  326. if (err || msecs > UINT_MAX)
  327. return -EINVAL;
  328. khugepaged_alloc_sleep_millisecs = msecs;
  329. wake_up_interruptible(&khugepaged_wait);
  330. return count;
  331. }
  332. static struct kobj_attribute alloc_sleep_millisecs_attr =
  333. __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
  334. alloc_sleep_millisecs_store);
  335. static ssize_t pages_to_scan_show(struct kobject *kobj,
  336. struct kobj_attribute *attr,
  337. char *buf)
  338. {
  339. return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
  340. }
  341. static ssize_t pages_to_scan_store(struct kobject *kobj,
  342. struct kobj_attribute *attr,
  343. const char *buf, size_t count)
  344. {
  345. int err;
  346. unsigned long pages;
  347. err = strict_strtoul(buf, 10, &pages);
  348. if (err || !pages || pages > UINT_MAX)
  349. return -EINVAL;
  350. khugepaged_pages_to_scan = pages;
  351. return count;
  352. }
  353. static struct kobj_attribute pages_to_scan_attr =
  354. __ATTR(pages_to_scan, 0644, pages_to_scan_show,
  355. pages_to_scan_store);
  356. static ssize_t pages_collapsed_show(struct kobject *kobj,
  357. struct kobj_attribute *attr,
  358. char *buf)
  359. {
  360. return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
  361. }
  362. static struct kobj_attribute pages_collapsed_attr =
  363. __ATTR_RO(pages_collapsed);
  364. static ssize_t full_scans_show(struct kobject *kobj,
  365. struct kobj_attribute *attr,
  366. char *buf)
  367. {
  368. return sprintf(buf, "%u\n", khugepaged_full_scans);
  369. }
  370. static struct kobj_attribute full_scans_attr =
  371. __ATTR_RO(full_scans);
  372. static ssize_t khugepaged_defrag_show(struct kobject *kobj,
  373. struct kobj_attribute *attr, char *buf)
  374. {
  375. return single_flag_show(kobj, attr, buf,
  376. TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
  377. }
  378. static ssize_t khugepaged_defrag_store(struct kobject *kobj,
  379. struct kobj_attribute *attr,
  380. const char *buf, size_t count)
  381. {
  382. return single_flag_store(kobj, attr, buf, count,
  383. TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
  384. }
  385. static struct kobj_attribute khugepaged_defrag_attr =
  386. __ATTR(defrag, 0644, khugepaged_defrag_show,
  387. khugepaged_defrag_store);
  388. /*
  389. * max_ptes_none controls whether khugepaged may collapse hugepages over
  390. * unmapped ptes, which can increase the memory footprint of the
  391. * vmas. When max_ptes_none is 0, khugepaged will not reduce the
  392. * available free memory in the system as it runs. Increasing
  393. * max_ptes_none will instead potentially reduce the free memory in
  394. * the system during the khugepaged scan.
  395. */
  396. static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
  397. struct kobj_attribute *attr,
  398. char *buf)
  399. {
  400. return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
  401. }
  402. static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
  403. struct kobj_attribute *attr,
  404. const char *buf, size_t count)
  405. {
  406. int err;
  407. unsigned long max_ptes_none;
  408. err = strict_strtoul(buf, 10, &max_ptes_none);
  409. if (err || max_ptes_none > HPAGE_PMD_NR-1)
  410. return -EINVAL;
  411. khugepaged_max_ptes_none = max_ptes_none;
  412. return count;
  413. }
  414. static struct kobj_attribute khugepaged_max_ptes_none_attr =
  415. __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
  416. khugepaged_max_ptes_none_store);
  417. static struct attribute *khugepaged_attr[] = {
  418. &khugepaged_defrag_attr.attr,
  419. &khugepaged_max_ptes_none_attr.attr,
  420. &pages_to_scan_attr.attr,
  421. &pages_collapsed_attr.attr,
  422. &full_scans_attr.attr,
  423. &scan_sleep_millisecs_attr.attr,
  424. &alloc_sleep_millisecs_attr.attr,
  425. NULL,
  426. };
  427. static struct attribute_group khugepaged_attr_group = {
  428. .attrs = khugepaged_attr,
  429. .name = "khugepaged",
  430. };
  431. #endif /* CONFIG_SYSFS */
  432. static int __init hugepage_init(void)
  433. {
  434. int err;
  435. #ifdef CONFIG_SYSFS
  436. static struct kobject *hugepage_kobj;
  437. #endif
  438. err = -EINVAL;
  439. if (!has_transparent_hugepage()) {
  440. transparent_hugepage_flags = 0;
  441. goto out;
  442. }
  443. #ifdef CONFIG_SYSFS
  444. err = -ENOMEM;
  445. hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
  446. if (unlikely(!hugepage_kobj)) {
  447. printk(KERN_ERR "hugepage: failed kobject create\n");
  448. goto out;
  449. }
  450. err = sysfs_create_group(hugepage_kobj, &hugepage_attr_group);
  451. if (err) {
  452. printk(KERN_ERR "hugepage: failed to register hugepage group\n");
  453. goto out;
  454. }
  455. err = sysfs_create_group(hugepage_kobj, &khugepaged_attr_group);
  456. if (err) {
  457. printk(KERN_ERR "hugepage: failed to register khugepaged group\n");
  458. goto out;
  459. }
  460. #endif
  461. err = khugepaged_slab_init();
  462. if (err)
  463. goto out;
  464. err = mm_slots_hash_init();
  465. if (err) {
  466. khugepaged_slab_free();
  467. goto out;
  468. }
  469. /*
  470. * By default disable transparent hugepages on smaller systems,
  471. * where the extra memory used could hurt more than TLB overhead
  472. * is likely to save. The admin can still enable it through /sys.
  473. */
  474. if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
  475. transparent_hugepage_flags = 0;
  476. start_khugepaged();
  477. set_recommended_min_free_kbytes();
  478. out:
  479. return err;
  480. }
  481. module_init(hugepage_init)
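/*
 * Illustrative sketch, not part of the kernel source: hugepage_init() above
 * registers the sysfs groups under /sys/kernel/mm/transparent_hugepage (and
 * .../khugepaged for the khugepaged group). The userspace snippet below only
 * shows how those files might be driven; it assumes a kernel built with
 * CONFIG_SYSFS and CONFIG_TRANSPARENT_HUGEPAGE.
 */
#if 0
#include <stdio.h>

static int write_sysfs(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* select madvise-only THP and a 10s khugepaged scan period */
	write_sysfs("/sys/kernel/mm/transparent_hugepage/enabled", "madvise");
	write_sysfs("/sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs",
		    "10000");
	return 0;
}
#endif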
  482. static int __init setup_transparent_hugepage(char *str)
  483. {
  484. int ret = 0;
  485. if (!str)
  486. goto out;
  487. if (!strcmp(str, "always")) {
  488. set_bit(TRANSPARENT_HUGEPAGE_FLAG,
  489. &transparent_hugepage_flags);
  490. clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
  491. &transparent_hugepage_flags);
  492. ret = 1;
  493. } else if (!strcmp(str, "madvise")) {
  494. clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
  495. &transparent_hugepage_flags);
  496. set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
  497. &transparent_hugepage_flags);
  498. ret = 1;
  499. } else if (!strcmp(str, "never")) {
  500. clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
  501. &transparent_hugepage_flags);
  502. clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
  503. &transparent_hugepage_flags);
  504. ret = 1;
  505. }
  506. out:
  507. if (!ret)
  508. printk(KERN_WARNING
  509. "transparent_hugepage= cannot parse, ignored\n");
  510. return ret;
  511. }
  512. __setup("transparent_hugepage=", setup_transparent_hugepage);
  513. static void prepare_pmd_huge_pte(pgtable_t pgtable,
  514. struct mm_struct *mm)
  515. {
  516. assert_spin_locked(&mm->page_table_lock);
  517. /* FIFO */
  518. if (!mm->pmd_huge_pte)
  519. INIT_LIST_HEAD(&pgtable->lru);
  520. else
  521. list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
  522. mm->pmd_huge_pte = pgtable;
  523. }
  524. static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
  525. {
  526. if (likely(vma->vm_flags & VM_WRITE))
  527. pmd = pmd_mkwrite(pmd);
  528. return pmd;
  529. }
  530. static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
  531. struct vm_area_struct *vma,
  532. unsigned long haddr, pmd_t *pmd,
  533. struct page *page)
  534. {
  535. int ret = 0;
  536. pgtable_t pgtable;
  537. VM_BUG_ON(!PageCompound(page));
  538. pgtable = pte_alloc_one(mm, haddr);
  539. if (unlikely(!pgtable)) {
  540. mem_cgroup_uncharge_page(page);
  541. put_page(page);
  542. return VM_FAULT_OOM;
  543. }
  544. clear_huge_page(page, haddr, HPAGE_PMD_NR);
  545. __SetPageUptodate(page);
  546. spin_lock(&mm->page_table_lock);
  547. if (unlikely(!pmd_none(*pmd))) {
  548. spin_unlock(&mm->page_table_lock);
  549. mem_cgroup_uncharge_page(page);
  550. put_page(page);
  551. pte_free(mm, pgtable);
  552. } else {
  553. pmd_t entry;
  554. entry = mk_pmd(page, vma->vm_page_prot);
  555. entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
  556. entry = pmd_mkhuge(entry);
  557. /*
  558. * The spinlocking to take the lru_lock inside
  559. * page_add_new_anon_rmap() acts as a full memory
  560. * barrier to be sure clear_huge_page writes become
  561. * visible after the set_pmd_at() write.
  562. */
  563. page_add_new_anon_rmap(page, vma, haddr);
  564. set_pmd_at(mm, haddr, pmd, entry);
  565. prepare_pmd_huge_pte(pgtable, mm);
  566. add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
  567. spin_unlock(&mm->page_table_lock);
  568. }
  569. return ret;
  570. }
  571. static inline gfp_t alloc_hugepage_gfpmask(int defrag)
  572. {
  573. return GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT);
  574. }
  575. static inline struct page *alloc_hugepage_vma(int defrag,
  576. struct vm_area_struct *vma,
  577. unsigned long haddr)
  578. {
  579. return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
  580. HPAGE_PMD_ORDER, vma, haddr);
  581. }
  582. #ifndef CONFIG_NUMA
  583. static inline struct page *alloc_hugepage(int defrag)
  584. {
  585. return alloc_pages(alloc_hugepage_gfpmask(defrag),
  586. HPAGE_PMD_ORDER);
  587. }
  588. #endif
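/*
 * Illustrative note, not part of the kernel source: with the helpers above,
 * alloc_hugepage_gfpmask(1) evaluates to GFP_TRANSHUGE (the allocation may
 * sleep and reclaim/compact), while alloc_hugepage_gfpmask(0) evaluates to
 * GFP_TRANSHUGE & ~__GFP_WAIT, i.e. an attempt that fails fast instead of
 * reclaiming when no hugepage is readily available.
 */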
  589. int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
  590. unsigned long address, pmd_t *pmd,
  591. unsigned int flags)
  592. {
  593. struct page *page;
  594. unsigned long haddr = address & HPAGE_PMD_MASK;
  595. pte_t *pte;
  596. if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
  597. if (unlikely(anon_vma_prepare(vma)))
  598. return VM_FAULT_OOM;
  599. if (unlikely(khugepaged_enter(vma)))
  600. return VM_FAULT_OOM;
  601. page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
  602. vma, haddr);
  603. if (unlikely(!page))
  604. goto out;
  605. if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
  606. put_page(page);
  607. goto out;
  608. }
  609. return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
  610. }
  611. out:
  612. /*
  613. * Use __pte_alloc instead of pte_alloc_map, because we can't
  614. * run pte_offset_map on the pmd, if a huge pmd could
  615. * materialize from under us from a different thread.
  616. */
  617. if (unlikely(__pte_alloc(mm, vma, pmd, address)))
  618. return VM_FAULT_OOM;
  619. /* if a huge pmd materialized from under us, just retry later */
  620. if (unlikely(pmd_trans_huge(*pmd)))
  621. return 0;
  622. /*
  623. * A regular pmd is established and it can't morph into a huge pmd
  624. * from under us anymore at this point because we hold the mmap_sem
  625. * read mode and khugepaged takes it in write mode. So now it's
  626. * safe to run pte_offset_map().
  627. */
  628. pte = pte_offset_map(pmd, address);
  629. return handle_pte_fault(mm, vma, address, pte, pmd, flags);
  630. }
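/*
 * Illustrative sketch, not part of the kernel source: the alignment test at
 * the top of do_huge_pmd_anonymous_page() rounds the faulting address down
 * to a hugepage boundary and only attempts a huge fault if the whole frame
 * fits inside the vma. The names below are illustrative, assuming a 2MB
 * HPAGE_PMD_SIZE (x86-64 with 4KB base pages); they are not kernel API.
 */
#if 0
#include <assert.h>

#define EX_HPAGE_SIZE	(2UL << 20)		/* assumed 2MB */
#define EX_HPAGE_MASK	(~(EX_HPAGE_SIZE - 1))

static int ex_fault_fits_hugepage(unsigned long address,
				  unsigned long vm_start, unsigned long vm_end)
{
	unsigned long haddr = address & EX_HPAGE_MASK;	/* round down to 2MB */

	/* the whole 2MB frame must lie inside the vma */
	return haddr >= vm_start && haddr + EX_HPAGE_SIZE <= vm_end;
}

static void ex_demo(void)
{
	/* a fault at 0x7f0000201000 in a [0x7f0000000000, 0x7f0000400000) vma */
	assert(ex_fault_fits_hugepage(0x7f0000201000UL,
				      0x7f0000000000UL, 0x7f0000400000UL));
	/* the same fault in a vma ending at 0x7f0000300000 cannot use a hugepage */
	assert(!ex_fault_fits_hugepage(0x7f0000201000UL,
				       0x7f0000000000UL, 0x7f0000300000UL));
}
#endif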
  631. int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  632. pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
  633. struct vm_area_struct *vma)
  634. {
  635. struct page *src_page;
  636. pmd_t pmd;
  637. pgtable_t pgtable;
  638. int ret;
  639. ret = -ENOMEM;
  640. pgtable = pte_alloc_one(dst_mm, addr);
  641. if (unlikely(!pgtable))
  642. goto out;
  643. spin_lock(&dst_mm->page_table_lock);
  644. spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);
  645. ret = -EAGAIN;
  646. pmd = *src_pmd;
  647. if (unlikely(!pmd_trans_huge(pmd))) {
  648. pte_free(dst_mm, pgtable);
  649. goto out_unlock;
  650. }
  651. if (unlikely(pmd_trans_splitting(pmd))) {
  652. /* split huge page running from under us */
  653. spin_unlock(&src_mm->page_table_lock);
  654. spin_unlock(&dst_mm->page_table_lock);
  655. pte_free(dst_mm, pgtable);
  656. wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
  657. goto out;
  658. }
  659. src_page = pmd_page(pmd);
  660. VM_BUG_ON(!PageHead(src_page));
  661. get_page(src_page);
  662. page_dup_rmap(src_page);
  663. add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
  664. pmdp_set_wrprotect(src_mm, addr, src_pmd);
  665. pmd = pmd_mkold(pmd_wrprotect(pmd));
  666. set_pmd_at(dst_mm, addr, dst_pmd, pmd);
  667. prepare_pmd_huge_pte(pgtable, dst_mm);
  668. ret = 0;
  669. out_unlock:
  670. spin_unlock(&src_mm->page_table_lock);
  671. spin_unlock(&dst_mm->page_table_lock);
  672. out:
  673. return ret;
  674. }
  675. /* no "address" argument so destroys page coloring of some arch */
  676. pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
  677. {
  678. pgtable_t pgtable;
  679. assert_spin_locked(&mm->page_table_lock);
  680. /* FIFO */
  681. pgtable = mm->pmd_huge_pte;
  682. if (list_empty(&pgtable->lru))
  683. mm->pmd_huge_pte = NULL;
  684. else {
  685. mm->pmd_huge_pte = list_entry(pgtable->lru.next,
  686. struct page, lru);
  687. list_del(&pgtable->lru);
  688. }
  689. return pgtable;
  690. }
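/*
 * Illustrative note, not part of the kernel source: prepare_pmd_huge_pte()
 * and get_pmd_huge_pte() above maintain a small per-mm stash of preallocated
 * pte page tables (the code calls it a FIFO), anchored at mm->pmd_huge_pte
 * and chained through page->lru. Depositing a first table initialises its
 * lru list; each later deposit is list_add()ed onto that list and the newly
 * deposited table becomes the anchor. Withdrawal hands back the current
 * anchor, promotes the next list entry to anchor (or NULL when the list is
 * empty) and unlinks the returned table. These tables are consumed when a
 * huge pmd has to be split back into regular ptes.
 */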
  691. static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
  692. struct vm_area_struct *vma,
  693. unsigned long address,
  694. pmd_t *pmd, pmd_t orig_pmd,
  695. struct page *page,
  696. unsigned long haddr)
  697. {
  698. pgtable_t pgtable;
  699. pmd_t _pmd;
  700. int ret = 0, i;
  701. struct page **pages;
  702. pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
  703. GFP_KERNEL);
  704. if (unlikely(!pages)) {
  705. ret |= VM_FAULT_OOM;
  706. goto out;
  707. }
  708. for (i = 0; i < HPAGE_PMD_NR; i++) {
  709. pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
  710. vma, address);
  711. if (unlikely(!pages[i] ||
  712. mem_cgroup_newpage_charge(pages[i], mm,
  713. GFP_KERNEL))) {
  714. if (pages[i])
  715. put_page(pages[i]);
  716. mem_cgroup_uncharge_start();
  717. while (--i >= 0) {
  718. mem_cgroup_uncharge_page(pages[i]);
  719. put_page(pages[i]);
  720. }
  721. mem_cgroup_uncharge_end();
  722. kfree(pages);
  723. ret |= VM_FAULT_OOM;
  724. goto out;
  725. }
  726. }
  727. for (i = 0; i < HPAGE_PMD_NR; i++) {
  728. copy_user_highpage(pages[i], page + i,
  729. haddr + PAGE_SHIFT*i, vma);
  730. __SetPageUptodate(pages[i]);
  731. cond_resched();
  732. }
  733. spin_lock(&mm->page_table_lock);
  734. if (unlikely(!pmd_same(*pmd, orig_pmd)))
  735. goto out_free_pages;
  736. VM_BUG_ON(!PageHead(page));
  737. pmdp_clear_flush_notify(vma, haddr, pmd);
  738. /* leave pmd empty until pte is filled */
  739. pgtable = get_pmd_huge_pte(mm);
  740. pmd_populate(mm, &_pmd, pgtable);
  741. for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
  742. pte_t *pte, entry;
  743. entry = mk_pte(pages[i], vma->vm_page_prot);
  744. entry = maybe_mkwrite(pte_mkdirty(entry), vma);
  745. page_add_new_anon_rmap(pages[i], vma, haddr);
  746. pte = pte_offset_map(&_pmd, haddr);
  747. VM_BUG_ON(!pte_none(*pte));
  748. set_pte_at(mm, haddr, pte, entry);
  749. pte_unmap(pte);
  750. }
  751. kfree(pages);
  752. mm->nr_ptes++;
  753. smp_wmb(); /* make pte visible before pmd */
  754. pmd_populate(mm, pmd, pgtable);
  755. page_remove_rmap(page);
  756. spin_unlock(&mm->page_table_lock);
  757. ret |= VM_FAULT_WRITE;
  758. put_page(page);
  759. out:
  760. return ret;
  761. out_free_pages:
  762. spin_unlock(&mm->page_table_lock);
  763. mem_cgroup_uncharge_start();
  764. for (i = 0; i < HPAGE_PMD_NR; i++) {
  765. mem_cgroup_uncharge_page(pages[i]);
  766. put_page(pages[i]);
  767. }
  768. mem_cgroup_uncharge_end();
  769. kfree(pages);
  770. goto out;
  771. }
  772. int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
  773. unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
  774. {
  775. int ret = 0;
  776. struct page *page, *new_page;
  777. unsigned long haddr;
  778. VM_BUG_ON(!vma->anon_vma);
  779. spin_lock(&mm->page_table_lock);
  780. if (unlikely(!pmd_same(*pmd, orig_pmd)))
  781. goto out_unlock;
  782. page = pmd_page(orig_pmd);
  783. VM_BUG_ON(!PageCompound(page) || !PageHead(page));
  784. haddr = address & HPAGE_PMD_MASK;
  785. if (page_mapcount(page) == 1) {
  786. pmd_t entry;
  787. entry = pmd_mkyoung(orig_pmd);
  788. entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
  789. if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
  790. update_mmu_cache(vma, address, entry);
  791. ret |= VM_FAULT_WRITE;
  792. goto out_unlock;
  793. }
  794. get_page(page);
  795. spin_unlock(&mm->page_table_lock);
  796. if (transparent_hugepage_enabled(vma) &&
  797. !transparent_hugepage_debug_cow())
  798. new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
  799. vma, haddr);
  800. else
  801. new_page = NULL;
  802. if (unlikely(!new_page)) {
  803. ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
  804. pmd, orig_pmd, page, haddr);
  805. put_page(page);
  806. goto out;
  807. }
  808. if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
  809. put_page(new_page);
  810. put_page(page);
  811. ret |= VM_FAULT_OOM;
  812. goto out;
  813. }
  814. copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
  815. __SetPageUptodate(new_page);
  816. spin_lock(&mm->page_table_lock);
  817. put_page(page);
  818. if (unlikely(!pmd_same(*pmd, orig_pmd))) {
  819. mem_cgroup_uncharge_page(new_page);
  820. put_page(new_page);
  821. } else {
  822. pmd_t entry;
  823. VM_BUG_ON(!PageHead(page));
  824. entry = mk_pmd(new_page, vma->vm_page_prot);
  825. entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
  826. entry = pmd_mkhuge(entry);
  827. pmdp_clear_flush_notify(vma, haddr, pmd);
  828. page_add_new_anon_rmap(new_page, vma, haddr);
  829. set_pmd_at(mm, haddr, pmd, entry);
  830. update_mmu_cache(vma, address, entry);
  831. page_remove_rmap(page);
  832. put_page(page);
  833. ret |= VM_FAULT_WRITE;
  834. }
  835. out_unlock:
  836. spin_unlock(&mm->page_table_lock);
  837. out:
  838. return ret;
  839. }
  840. struct page *follow_trans_huge_pmd(struct mm_struct *mm,
  841. unsigned long addr,
  842. pmd_t *pmd,
  843. unsigned int flags)
  844. {
  845. struct page *page = NULL;
  846. assert_spin_locked(&mm->page_table_lock);
  847. if (flags & FOLL_WRITE && !pmd_write(*pmd))
  848. goto out;
  849. page = pmd_page(*pmd);
  850. VM_BUG_ON(!PageHead(page));
  851. if (flags & FOLL_TOUCH) {
  852. pmd_t _pmd;
  853. /*
  854. * We should set the dirty bit only for FOLL_WRITE but
  855. * for now the dirty bit in the pmd is meaningless.
  856. * And if the dirty bit ever becomes meaningful and
  857. * we only set it with FOLL_WRITE, an atomic
  858. * set_bit will be required on the pmd to set the
  859. * young bit, instead of the current set_pmd_at.
  860. */
  861. _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
  862. set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
  863. }
  864. page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
  865. VM_BUG_ON(!PageCompound(page));
  866. if (flags & FOLL_GET)
  867. get_page(page);
  868. out:
  869. return page;
  870. }
  871. int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
  872. pmd_t *pmd)
  873. {
  874. int ret = 0;
  875. spin_lock(&tlb->mm->page_table_lock);
  876. if (likely(pmd_trans_huge(*pmd))) {
  877. if (unlikely(pmd_trans_splitting(*pmd))) {
  878. spin_unlock(&tlb->mm->page_table_lock);
  879. wait_split_huge_page(vma->anon_vma,
  880. pmd);
  881. } else {
  882. struct page *page;
  883. pgtable_t pgtable;
  884. pgtable = get_pmd_huge_pte(tlb->mm);
  885. page = pmd_page(*pmd);
  886. pmd_clear(pmd);
  887. page_remove_rmap(page);
  888. VM_BUG_ON(page_mapcount(page) < 0);
  889. add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
  890. VM_BUG_ON(!PageHead(page));
  891. spin_unlock(&tlb->mm->page_table_lock);
  892. tlb_remove_page(tlb, page);
  893. pte_free(tlb->mm, pgtable);
  894. ret = 1;
  895. }
  896. } else
  897. spin_unlock(&tlb->mm->page_table_lock);
  898. return ret;
  899. }
  900. int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
  901. unsigned long addr, unsigned long end,
  902. unsigned char *vec)
  903. {
  904. int ret = 0;
  905. spin_lock(&vma->vm_mm->page_table_lock);
  906. if (likely(pmd_trans_huge(*pmd))) {
  907. ret = !pmd_trans_splitting(*pmd);
  908. spin_unlock(&vma->vm_mm->page_table_lock);
  909. if (unlikely(!ret))
  910. wait_split_huge_page(vma->anon_vma, pmd);
  911. else {
  912. /*
  913. * All logical pages in the range are present
  914. * if backed by a huge page.
  915. */
  916. memset(vec, 1, (end - addr) >> PAGE_SHIFT);
  917. }
  918. } else
  919. spin_unlock(&vma->vm_mm->page_table_lock);
  920. return ret;
  921. }
  922. int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
  923. unsigned long addr, pgprot_t newprot)
  924. {
  925. struct mm_struct *mm = vma->vm_mm;
  926. int ret = 0;
  927. spin_lock(&mm->page_table_lock);
  928. if (likely(pmd_trans_huge(*pmd))) {
  929. if (unlikely(pmd_trans_splitting(*pmd))) {
  930. spin_unlock(&mm->page_table_lock);
  931. wait_split_huge_page(vma->anon_vma, pmd);
  932. } else {
  933. pmd_t entry;
  934. entry = pmdp_get_and_clear(mm, addr, pmd);
  935. entry = pmd_modify(entry, newprot);
  936. set_pmd_at(mm, addr, pmd, entry);
  937. spin_unlock(&vma->vm_mm->page_table_lock);
  938. flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
  939. ret = 1;
  940. }
  941. } else
  942. spin_unlock(&vma->vm_mm->page_table_lock);
  943. return ret;
  944. }
  945. pmd_t *page_check_address_pmd(struct page *page,
  946. struct mm_struct *mm,
  947. unsigned long address,
  948. enum page_check_address_pmd_flag flag)
  949. {
  950. pgd_t *pgd;
  951. pud_t *pud;
  952. pmd_t *pmd, *ret = NULL;
  953. if (address & ~HPAGE_PMD_MASK)
  954. goto out;
  955. pgd = pgd_offset(mm, address);
  956. if (!pgd_present(*pgd))
  957. goto out;
  958. pud = pud_offset(pgd, address);
  959. if (!pud_present(*pud))
  960. goto out;
  961. pmd = pmd_offset(pud, address);
  962. if (pmd_none(*pmd))
  963. goto out;
  964. if (pmd_page(*pmd) != page)
  965. goto out;
  966. /*
  967. * split_vma() may create temporary aliased mappings. There is
  968. * no risk as long as all huge pmd are found and have their
  969. * splitting bit set before __split_huge_page_refcount
  970. * runs. Finding the same huge pmd more than once during the
  971. * same rmap walk is not a problem.
  972. */
  973. if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
  974. pmd_trans_splitting(*pmd))
  975. goto out;
  976. if (pmd_trans_huge(*pmd)) {
  977. VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
  978. !pmd_trans_splitting(*pmd));
  979. ret = pmd;
  980. }
  981. out:
  982. return ret;
  983. }
  984. static int __split_huge_page_splitting(struct page *page,
  985. struct vm_area_struct *vma,
  986. unsigned long address)
  987. {
  988. struct mm_struct *mm = vma->vm_mm;
  989. pmd_t *pmd;
  990. int ret = 0;
  991. spin_lock(&mm->page_table_lock);
  992. pmd = page_check_address_pmd(page, mm, address,
  993. PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
  994. if (pmd) {
  995. /*
  996. * We can't temporarily set the pmd to null in order
  997. * to split it, the pmd must remain marked huge at all
  998. * times or the VM won't take the pmd_trans_huge paths
  999. * and it won't wait on the anon_vma->root->lock to
  1000. * serialize against split_huge_page*.
  1001. */
  1002. pmdp_splitting_flush_notify(vma, address, pmd);
  1003. ret = 1;
  1004. }
  1005. spin_unlock(&mm->page_table_lock);
  1006. return ret;
  1007. }
  1008. static void __split_huge_page_refcount(struct page *page)
  1009. {
  1010. int i;
  1011. unsigned long head_index = page->index;
  1012. struct zone *zone = page_zone(page);
  1013. int zonestat;
  1014. /* prevent PageLRU from going away from under us, and freeze lru stats */
  1015. spin_lock_irq(&zone->lru_lock);
  1016. compound_lock(page);
  1017. for (i = 1; i < HPAGE_PMD_NR; i++) {
  1018. struct page *page_tail = page + i;
  1019. /* tail_page->_count cannot change */
  1020. atomic_sub(atomic_read(&page_tail->_count), &page->_count);
  1021. BUG_ON(page_count(page) <= 0);
  1022. atomic_add(page_mapcount(page) + 1, &page_tail->_count);
  1023. BUG_ON(atomic_read(&page_tail->_count) <= 0);
  1024. /* after clearing PageTail the gup refcount can be released */
  1025. smp_mb();
  1026. page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
  1027. page_tail->flags |= (page->flags &
  1028. ((1L << PG_referenced) |
  1029. (1L << PG_swapbacked) |
  1030. (1L << PG_mlocked) |
  1031. (1L << PG_uptodate)));
  1032. page_tail->flags |= (1L << PG_dirty);
  1033. /*
  1034. * 1) clear PageTail before overwriting first_page
  1035. * 2) clear PageTail before clearing PageHead for VM_BUG_ON
  1036. */
  1037. smp_wmb();
  1038. /*
  1039. * __split_huge_page_splitting() already set the
  1040. * splitting bit in all pmd that could map this
  1041. * hugepage, that will ensure no CPU can alter the
  1042. * mapcount on the head page. The mapcount is only
  1043. * accounted in the head page and it has to be
  1044. * transferred to all tail pages in the below code. So
  1045. * for this code to be safe, the split the mapcount
  1046. * can't change. But that doesn't mean userland can't
  1047. * keep changing and reading the page contents while
  1048. * we transfer the mapcount, so the pmd splitting
  1049. * status is achieved setting a reserved bit in the
  1050. * pmd, not by clearing the present bit.
  1051. */
  1052. BUG_ON(page_mapcount(page_tail));
  1053. page_tail->_mapcount = page->_mapcount;
  1054. BUG_ON(page_tail->mapping);
  1055. page_tail->mapping = page->mapping;
  1056. page_tail->index = ++head_index;
  1057. BUG_ON(!PageAnon(page_tail));
  1058. BUG_ON(!PageUptodate(page_tail));
  1059. BUG_ON(!PageDirty(page_tail));
  1060. BUG_ON(!PageSwapBacked(page_tail));
  1061. mem_cgroup_split_huge_fixup(page, page_tail);
  1062. lru_add_page_tail(zone, page, page_tail);
  1063. }
  1064. __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
  1065. __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
  1066. /*
  1067. * A hugepage counts for HPAGE_PMD_NR pages on the LRU statistics,
  1068. * so adjust those appropriately if this page is on the LRU.
  1069. */
  1070. if (PageLRU(page)) {
  1071. zonestat = NR_LRU_BASE + page_lru(page);
  1072. __mod_zone_page_state(zone, zonestat, -(HPAGE_PMD_NR-1));
  1073. }
  1074. ClearPageCompound(page);
  1075. compound_unlock(page);
  1076. spin_unlock_irq(&zone->lru_lock);
  1077. for (i = 1; i < HPAGE_PMD_NR; i++) {
  1078. struct page *page_tail = page + i;
  1079. BUG_ON(page_count(page_tail) <= 0);
  1080. /*
  1081. * Tail pages may be freed if there wasn't any mapping
  1082. * like when add_to_swap() is running on an lru page that
  1083. * had its mapping zapped. And freeing these pages
  1084. * requires taking the lru_lock so we do the put_page
  1085. * of the tail pages after the split is complete.
  1086. */
  1087. put_page(page_tail);
  1088. }
  1089. /*
  1090. * Only the head page (now become a regular page) is required
  1091. * to be pinned by the caller.
  1092. */
  1093. BUG_ON(page_count(page) <= 0);
  1094. }
  1095. static int __split_huge_page_map(struct page *page,
  1096. struct vm_area_struct *vma,
  1097. unsigned long address)
  1098. {
  1099. struct mm_struct *mm = vma->vm_mm;
  1100. pmd_t *pmd, _pmd;
  1101. int ret = 0, i;
  1102. pgtable_t pgtable;
  1103. unsigned long haddr;
  1104. spin_lock(&mm->page_table_lock);
  1105. pmd = page_check_address_pmd(page, mm, address,
  1106. PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
  1107. if (pmd) {
  1108. pgtable = get_pmd_huge_pte(mm);
  1109. pmd_populate(mm, &_pmd, pgtable);
  1110. for (i = 0, haddr = address; i < HPAGE_PMD_NR;
  1111. i++, haddr += PAGE_SIZE) {
  1112. pte_t *pte, entry;
  1113. BUG_ON(PageCompound(page+i));
  1114. entry = mk_pte(page + i, vma->vm_page_prot);
  1115. entry = maybe_mkwrite(pte_mkdirty(entry), vma);
  1116. if (!pmd_write(*pmd))
  1117. entry = pte_wrprotect(entry);
  1118. else
  1119. BUG_ON(page_mapcount(page) != 1);
  1120. if (!pmd_young(*pmd))
  1121. entry = pte_mkold(entry);
  1122. pte = pte_offset_map(&_pmd, haddr);
  1123. BUG_ON(!pte_none(*pte));
  1124. set_pte_at(mm, haddr, pte, entry);
  1125. pte_unmap(pte);
  1126. }
  1127. mm->nr_ptes++;
  1128. smp_wmb(); /* make pte visible before pmd */
  1129. /*
  1130. * Up to this point the pmd is present and huge and
  1131. * userland has the whole access to the hugepage
  1132. * during the split (which happens in place). If we
  1133. * overwrite the pmd with the not-huge version
  1134. * pointing to the pte here (which of course we could
  1135. * if all CPUs were bug free), userland could trigger
  1136. * a small page size TLB miss on the small sized TLB
  1137. * while the hugepage TLB entry is still established
  1138. * in the huge TLB. Some CPUs don't like that. See
  1139. * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
  1140. * Erratum 383 on page 93. Intel should be safe but
  1141. * also warns that it's only safe if the permission
  1142. * and cache attributes of the two entries loaded in
  1143. * the two TLBs are identical (which should be the case
  1144. * here). But it is generally safer to never allow
  1145. * small and huge TLB entries for the same virtual
  1146. * address to be loaded simultaneously. So instead of
  1147. * doing "pmd_populate(); flush_tlb_range();" we first
  1148. * mark the current pmd notpresent (atomically because
  1149. * here the pmd_trans_huge and pmd_trans_splitting
  1150. * must remain set at all times on the pmd until the
  1151. * split is complete for this pmd), then we flush the
  1152. * SMP TLB and finally we write the non-huge version
  1153. * of the pmd entry with pmd_populate.
  1154. */
  1155. set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd));
  1156. flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
  1157. pmd_populate(mm, pmd, pgtable);
  1158. ret = 1;
  1159. }
  1160. spin_unlock(&mm->page_table_lock);
  1161. return ret;
  1162. }
  1163. /* must be called with anon_vma->root->lock held */
  1164. static void __split_huge_page(struct page *page,
  1165. struct anon_vma *anon_vma)
  1166. {
  1167. int mapcount, mapcount2;
  1168. struct anon_vma_chain *avc;
  1169. BUG_ON(!PageHead(page));
  1170. BUG_ON(PageTail(page));
  1171. mapcount = 0;
  1172. list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
  1173. struct vm_area_struct *vma = avc->vma;
  1174. unsigned long addr = vma_address(page, vma);
  1175. BUG_ON(is_vma_temporary_stack(vma));
  1176. if (addr == -EFAULT)
  1177. continue;
  1178. mapcount += __split_huge_page_splitting(page, vma, addr);
  1179. }
  1180. /*
  1181. * It is critical that new vmas are added to the tail of the
  1182. * anon_vma list. This guarantees that if copy_huge_pmd() runs
  1183. * and establishes a child pmd before
  1184. * __split_huge_page_splitting() freezes the parent pmd (so if
  1185. * we fail to prevent copy_huge_pmd() from running until the
  1186. * whole __split_huge_page() is complete), we will still see
  1187. * the newly established pmd of the child later during the
  1188. * walk, to be able to set it as pmd_trans_splitting too.
  1189. */
  1190. if (mapcount != page_mapcount(page))
  1191. printk(KERN_ERR "mapcount %d page_mapcount %d\n",
  1192. mapcount, page_mapcount(page));
  1193. BUG_ON(mapcount != page_mapcount(page));
  1194. __split_huge_page_refcount(page);
  1195. mapcount2 = 0;
  1196. list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
  1197. struct vm_area_struct *vma = avc->vma;
  1198. unsigned long addr = vma_address(page, vma);
  1199. BUG_ON(is_vma_temporary_stack(vma));
  1200. if (addr == -EFAULT)
  1201. continue;
  1202. mapcount2 += __split_huge_page_map(page, vma, addr);
  1203. }
  1204. if (mapcount != mapcount2)
  1205. printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
  1206. mapcount, mapcount2, page_mapcount(page));
  1207. BUG_ON(mapcount != mapcount2);
  1208. }
  1209. int split_huge_page(struct page *page)
  1210. {
  1211. struct anon_vma *anon_vma;
  1212. int ret = 1;
  1213. BUG_ON(!PageAnon(page));
  1214. anon_vma = page_lock_anon_vma(page);
  1215. if (!anon_vma)
  1216. goto out;
  1217. ret = 0;
  1218. if (!PageCompound(page))
  1219. goto out_unlock;
  1220. BUG_ON(!PageSwapBacked(page));
  1221. __split_huge_page(page, anon_vma);
  1222. BUG_ON(PageCompound(page));
  1223. out_unlock:
  1224. page_unlock_anon_vma(anon_vma);
  1225. out:
  1226. return ret;
  1227. }
  1228. int hugepage_madvise(struct vm_area_struct *vma,
  1229. unsigned long *vm_flags, int advice)
  1230. {
  1231. switch (advice) {
  1232. case MADV_HUGEPAGE:
  1233. /*
  1234. * Be somewhat over-protective like KSM for now!
  1235. */
  1236. if (*vm_flags & (VM_HUGEPAGE |
  1237. VM_SHARED | VM_MAYSHARE |
  1238. VM_PFNMAP | VM_IO | VM_DONTEXPAND |
  1239. VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
  1240. VM_MIXEDMAP | VM_SAO))
  1241. return -EINVAL;
  1242. *vm_flags &= ~VM_NOHUGEPAGE;
  1243. *vm_flags |= VM_HUGEPAGE;
  1244. /*
  1245. * If the vma has become suitable for khugepaged to scan,
  1246. * register it here without waiting for a page fault that
  1247. * may not happen any time soon.
  1248. */
  1249. if (unlikely(khugepaged_enter_vma_merge(vma)))
  1250. return -ENOMEM;
  1251. break;
  1252. case MADV_NOHUGEPAGE:
  1253. /*
  1254. * Be somewhat over-protective like KSM for now!
  1255. */
  1256. if (*vm_flags & (VM_NOHUGEPAGE |
  1257. VM_SHARED | VM_MAYSHARE |
  1258. VM_PFNMAP | VM_IO | VM_DONTEXPAND |
  1259. VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
  1260. VM_MIXEDMAP | VM_SAO))
  1261. return -EINVAL;
  1262. *vm_flags &= ~VM_HUGEPAGE;
  1263. *vm_flags |= VM_NOHUGEPAGE;
  1264. /*
  1265. * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
  1266. * this vma even if we leave the mm registered in khugepaged if
  1267. * it got registered before VM_NOHUGEPAGE was set.
  1268. */
  1269. break;
  1270. }
  1271. return 0;
  1272. }
  1273. static int __init khugepaged_slab_init(void)
  1274. {
  1275. mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
  1276. sizeof(struct mm_slot),
  1277. __alignof__(struct mm_slot), 0, NULL);
  1278. if (!mm_slot_cache)
  1279. return -ENOMEM;
  1280. return 0;
  1281. }
  1282. static void __init khugepaged_slab_free(void)
  1283. {
  1284. kmem_cache_destroy(mm_slot_cache);
  1285. mm_slot_cache = NULL;
  1286. }
  1287. static inline struct mm_slot *alloc_mm_slot(void)
  1288. {
  1289. if (!mm_slot_cache) /* initialization failed */
  1290. return NULL;
  1291. return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
  1292. }
  1293. static inline void free_mm_slot(struct mm_slot *mm_slot)
  1294. {
  1295. kmem_cache_free(mm_slot_cache, mm_slot);
  1296. }
  1297. static int __init mm_slots_hash_init(void)
  1298. {
  1299. mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
  1300. GFP_KERNEL);
  1301. if (!mm_slots_hash)
  1302. return -ENOMEM;
  1303. return 0;
  1304. }
  1305. #if 0
  1306. static void __init mm_slots_hash_free(void)
  1307. {
  1308. kfree(mm_slots_hash);
  1309. mm_slots_hash = NULL;
  1310. }
  1311. #endif
  1312. static struct mm_slot *get_mm_slot(struct mm_struct *mm)
  1313. {
  1314. struct mm_slot *mm_slot;
  1315. struct hlist_head *bucket;
  1316. struct hlist_node *node;
  1317. bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
  1318. % MM_SLOTS_HASH_HEADS];
  1319. hlist_for_each_entry(mm_slot, node, bucket, hash) {
  1320. if (mm == mm_slot->mm)
  1321. return mm_slot;
  1322. }
  1323. return NULL;
  1324. }
  1325. static void insert_to_mm_slots_hash(struct mm_struct *mm,
  1326. struct mm_slot *mm_slot)
  1327. {
  1328. struct hlist_head *bucket;
  1329. bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
  1330. % MM_SLOTS_HASH_HEADS];
  1331. mm_slot->mm = mm;
  1332. hlist_add_head(&mm_slot->hash, bucket);
  1333. }
  1334. static inline int khugepaged_test_exit(struct mm_struct *mm)
  1335. {
  1336. return atomic_read(&mm->mm_users) == 0;
  1337. }
int __khugepaged_enter(struct mm_struct *mm)
{
        struct mm_slot *mm_slot;
        int wakeup;

        mm_slot = alloc_mm_slot();
        if (!mm_slot)
                return -ENOMEM;

        /* __khugepaged_exit() must not run from under us */
        VM_BUG_ON(khugepaged_test_exit(mm));
        if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
                free_mm_slot(mm_slot);
                return 0;
        }

        spin_lock(&khugepaged_mm_lock);
        insert_to_mm_slots_hash(mm, mm_slot);
        /*
         * Insert just behind the scanning cursor, to let the area settle
         * down a little.
         */
        wakeup = list_empty(&khugepaged_scan.mm_head);
        list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
        spin_unlock(&khugepaged_mm_lock);

        atomic_inc(&mm->mm_count);
        if (wakeup)
                wake_up_interruptible(&khugepaged_wait);

        return 0;
}

int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
{
        unsigned long hstart, hend;

        if (!vma->anon_vma)
                /*
                 * Not yet faulted in so we will register later in the
                 * page fault if needed.
                 */
                return 0;
        if (vma->vm_file || vma->vm_ops)
                /* khugepaged not yet working on file or special mappings */
                return 0;
        VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
        hend = vma->vm_end & HPAGE_PMD_MASK;
        if (hstart < hend)
                return khugepaged_enter(vma);
        return 0;
}

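/*
 * Unregister an mm that is exiting.  If khugepaged is not currently
 * scanning this mm the slot can be freed right away; otherwise take and
 * release mmap_sem for writing to wait until khugepaged stops touching
 * the pagetables.
 */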
void __khugepaged_exit(struct mm_struct *mm)
{
        struct mm_slot *mm_slot;
        int free = 0;

        spin_lock(&khugepaged_mm_lock);
        mm_slot = get_mm_slot(mm);
        if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
                hlist_del(&mm_slot->hash);
                list_del(&mm_slot->mm_node);
                free = 1;
        }

        if (free) {
                spin_unlock(&khugepaged_mm_lock);
                clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
                free_mm_slot(mm_slot);
                mmdrop(mm);
        } else if (mm_slot) {
                spin_unlock(&khugepaged_mm_lock);
                /*
                 * This is required to serialize against
                 * khugepaged_test_exit() (which is guaranteed to run
                 * under mmap sem read mode). Stop here (after we
                 * return all pagetables will be destroyed) until
                 * khugepaged has finished working on the pagetables
                 * under the mmap_sem.
                 */
                down_write(&mm->mmap_sem);
                up_write(&mm->mmap_sem);
        } else
                spin_unlock(&khugepaged_mm_lock);
}

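/*
 * Undo the isolation of one small page: drop the NR_ISOLATED_ANON
 * accounting, unlock the page and put it back on the LRU.
 */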
static void release_pte_page(struct page *page)
{
        /* 0 stands for page_is_file_cache(page) == false */
        dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
        unlock_page(page);
        putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte)
{
        while (--_pte >= pte) {
                pte_t pteval = *_pte;
                if (!pte_none(pteval))
                        release_pte_page(pte_page(pteval));
        }
}

static void release_all_pte_pages(pte_t *pte)
{
        release_pte_pages(pte, pte + HPAGE_PMD_NR);
}

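/*
 * Lock and isolate from the LRU each small page mapped by the pte range
 * about to be collapsed.  Up to khugepaged_max_ptes_none empty ptes are
 * tolerated; every mapped pte must be present, writable, anonymous and
 * not pinned by gup, and at least one page must be young/referenced.
 * Returns 1 on success; on failure all pages isolated so far are
 * released again.
 */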
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                                        unsigned long address,
                                        pte_t *pte)
{
        struct page *page;
        pte_t *_pte;
        int referenced = 0, isolated = 0, none = 0;

        for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
             _pte++, address += PAGE_SIZE) {
                pte_t pteval = *_pte;
                if (pte_none(pteval)) {
                        if (++none <= khugepaged_max_ptes_none)
                                continue;
                        else {
                                release_pte_pages(pte, _pte);
                                goto out;
                        }
                }
                if (!pte_present(pteval) || !pte_write(pteval)) {
                        release_pte_pages(pte, _pte);
                        goto out;
                }
                page = vm_normal_page(vma, address, pteval);
                if (unlikely(!page)) {
                        release_pte_pages(pte, _pte);
                        goto out;
                }
                VM_BUG_ON(PageCompound(page));
                BUG_ON(!PageAnon(page));
                VM_BUG_ON(!PageSwapBacked(page));

                /* cannot use mapcount: can't collapse if there's a gup pin */
                if (page_count(page) != 1) {
                        release_pte_pages(pte, _pte);
                        goto out;
                }
                /*
                 * We can do it before isolate_lru_page because the
                 * page can't be freed from under us. NOTE: PG_lock
                 * is needed to serialize against split_huge_page
                 * when invoked from the VM.
                 */
                if (!trylock_page(page)) {
                        release_pte_pages(pte, _pte);
                        goto out;
                }
                /*
                 * Isolate the page to avoid collapsing a hugepage
                 * currently in use by the VM.
                 */
                if (isolate_lru_page(page)) {
                        unlock_page(page);
                        release_pte_pages(pte, _pte);
                        goto out;
                }
                /* 0 stands for page_is_file_cache(page) == false */
                inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
                VM_BUG_ON(!PageLocked(page));
                VM_BUG_ON(PageLRU(page));

                /* If there is no young mapped pte, don't collapse the page */
                if (pte_young(pteval) || PageReferenced(page) ||
                    mmu_notifier_test_young(vma->vm_mm, address))
                        referenced = 1;
        }
        if (unlikely(!referenced))
                release_all_pte_pages(pte);
        else
                isolated = 1;
out:
        return isolated;
}

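/*
 * Copy the contents of the isolated small pages into the new hugepage,
 * then unmap and free each source page.  Empty ptes become zero-filled
 * subpages and are accounted as new anonymous memory.
 */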
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
                                      struct vm_area_struct *vma,
                                      unsigned long address,
                                      spinlock_t *ptl)
{
        pte_t *_pte;
        for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
                pte_t pteval = *_pte;
                struct page *src_page;

                if (pte_none(pteval)) {
                        clear_user_highpage(page, address);
                        add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
                } else {
                        src_page = pte_page(pteval);
                        copy_user_highpage(page, src_page, address, vma);
                        VM_BUG_ON(page_mapcount(src_page) != 1);
                        VM_BUG_ON(page_count(src_page) != 2);
                        release_pte_page(src_page);
                        /*
                         * ptl mostly unnecessary, but preempt has to
                         * be disabled to update the per-cpu stats
                         * inside page_remove_rmap().
                         */
                        spin_lock(ptl);
                        /*
                         * paravirt calls inside pte_clear here are
                         * superfluous.
                         */
                        pte_clear(vma->vm_mm, address, _pte);
                        page_remove_rmap(src_page);
                        spin_unlock(ptl);
                        free_page_and_swap_cache(src_page);
                }

                address += PAGE_SIZE;
                page++;
        }
}

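/*
 * Collapse one properly aligned range into a hugepage: allocate the new
 * page (or take the preallocated one on !NUMA), revalidate the vma under
 * mmap_sem held for writing, clear and flush the pmd so gup_fast can't
 * race, isolate and copy the small pages, and finally map the hugepage
 * with a single huge pmd.  Called with mmap_sem held for reading; the
 * semaphore is always released before returning.
 */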
static void collapse_huge_page(struct mm_struct *mm,
                               unsigned long address,
                               struct page **hpage,
                               struct vm_area_struct *vma)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd, _pmd;
        pte_t *pte;
        pgtable_t pgtable;
        struct page *new_page;
        spinlock_t *ptl;
        int isolated;
        unsigned long hstart, hend;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
#ifndef CONFIG_NUMA
        VM_BUG_ON(!*hpage);
        new_page = *hpage;
#else
        VM_BUG_ON(*hpage);
        /*
         * Allocate the page while the vma is still valid and under
         * the mmap_sem read mode so there is no memory allocation
         * later when we take the mmap_sem in write mode. This is more
         * friendly behavior (OTOH it may actually hide bugs) to
         * filesystems in userland with daemons allocating memory in
         * the userland I/O paths. Allocating memory with the
         * mmap_sem in read mode is a good idea also to allow greater
         * scalability.
         */
        new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address);
        if (unlikely(!new_page)) {
                up_read(&mm->mmap_sem);
                *hpage = ERR_PTR(-ENOMEM);
                return;
        }
#endif
        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
                up_read(&mm->mmap_sem);
                put_page(new_page);
                return;
        }

        /* after allocating the hugepage upgrade to mmap_sem write mode */
        up_read(&mm->mmap_sem);

        /*
         * Prevent all access to pagetables with the exception of
         * gup_fast later handled by the ptep_clear_flush and the VM
         * handled by the anon_vma lock + PG_lock.
         */
        down_write(&mm->mmap_sem);
        if (unlikely(khugepaged_test_exit(mm)))
                goto out;

        vma = find_vma(mm, address);
        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
        hend = vma->vm_end & HPAGE_PMD_MASK;
        if (address < hstart || address + HPAGE_PMD_SIZE > hend)
                goto out;

        if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
            (vma->vm_flags & VM_NOHUGEPAGE))
                goto out;

        /* VM_PFNMAP vmas may have vm_ops null but vm_file set */
        if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
                goto out;
        VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                goto out;

        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                goto out;

        pmd = pmd_offset(pud, address);
        /* pmd can't go away or become huge under us */
        if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
                goto out;

        anon_vma_lock(vma->anon_vma);

        pte = pte_offset_map(pmd, address);
        ptl = pte_lockptr(mm, pmd);

        spin_lock(&mm->page_table_lock); /* probably unnecessary */
        /*
         * After this gup_fast can't run anymore. This also removes
         * any huge TLB entry from the CPU so we won't allow
         * huge and small TLB entries for the same virtual address
         * to avoid the risk of CPU bugs in that area.
         */
        _pmd = pmdp_clear_flush_notify(vma, address, pmd);
        spin_unlock(&mm->page_table_lock);

        spin_lock(ptl);
        isolated = __collapse_huge_page_isolate(vma, address, pte);
        spin_unlock(ptl);

        if (unlikely(!isolated)) {
                pte_unmap(pte);
                spin_lock(&mm->page_table_lock);
                BUG_ON(!pmd_none(*pmd));
                set_pmd_at(mm, address, pmd, _pmd);
                spin_unlock(&mm->page_table_lock);
                anon_vma_unlock(vma->anon_vma);
                mem_cgroup_uncharge_page(new_page);
                goto out;
        }

        /*
         * All pages are isolated and locked so anon_vma rmap
         * can't run anymore.
         */
        anon_vma_unlock(vma->anon_vma);

        __collapse_huge_page_copy(pte, new_page, vma, address, ptl);
        pte_unmap(pte);
        __SetPageUptodate(new_page);
        pgtable = pmd_pgtable(_pmd);
        VM_BUG_ON(page_count(pgtable) != 1);
        VM_BUG_ON(page_mapcount(pgtable) != 0);

        _pmd = mk_pmd(new_page, vma->vm_page_prot);
        _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
        _pmd = pmd_mkhuge(_pmd);

        /*
         * spin_lock() below is not the equivalent of smp_wmb(), so
         * this is needed to prevent the copy_huge_page writes from
         * becoming visible after the set_pmd_at() write.
         */
        smp_wmb();

        spin_lock(&mm->page_table_lock);
        BUG_ON(!pmd_none(*pmd));
        page_add_new_anon_rmap(new_page, vma, address);
        set_pmd_at(mm, address, pmd, _pmd);
        update_mmu_cache(vma, address, _pmd);
        prepare_pmd_huge_pte(pgtable, mm);
        mm->nr_ptes--;
        spin_unlock(&mm->page_table_lock);

#ifndef CONFIG_NUMA
        *hpage = NULL;
#endif
        khugepaged_pages_collapsed++;
out_up_write:
        up_write(&mm->mmap_sem);
        return;

out:
#ifdef CONFIG_NUMA
        put_page(new_page);
#endif
        goto out_up_write;
}

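/*
 * Scan one pmd-sized range and decide whether it is worth collapsing:
 * the same eligibility checks as __collapse_huge_page_isolate, but
 * without taking any page lock or isolating anything.  Returns 1 and
 * calls collapse_huge_page() (which drops mmap_sem) when the range
 * qualifies.
 */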
static int khugepaged_scan_pmd(struct mm_struct *mm,
                               struct vm_area_struct *vma,
                               unsigned long address,
                               struct page **hpage)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, *_pte;
        int ret = 0, referenced = 0, none = 0;
        struct page *page;
        unsigned long _address;
        spinlock_t *ptl;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                goto out;

        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                goto out;

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
                goto out;

        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
        for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
             _pte++, _address += PAGE_SIZE) {
                pte_t pteval = *_pte;
                if (pte_none(pteval)) {
                        if (++none <= khugepaged_max_ptes_none)
                                continue;
                        else
                                goto out_unmap;
                }
                if (!pte_present(pteval) || !pte_write(pteval))
                        goto out_unmap;
                page = vm_normal_page(vma, _address, pteval);
                if (unlikely(!page))
                        goto out_unmap;
                VM_BUG_ON(PageCompound(page));
                if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
                        goto out_unmap;
                /* cannot use mapcount: can't collapse if there's a gup pin */
                if (page_count(page) != 1)
                        goto out_unmap;
                if (pte_young(pteval) || PageReferenced(page) ||
                    mmu_notifier_test_young(vma->vm_mm, _address))
                        referenced = 1;
        }
        if (referenced)
                ret = 1;
out_unmap:
        pte_unmap_unlock(pte, ptl);
        if (ret)
                /* collapse_huge_page will return with the mmap_sem released */
                collapse_huge_page(mm, address, hpage, vma);
out:
        return ret;
}

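/*
 * Drop the mm_slot of an mm whose last user has gone away: unhash it,
 * unlink it from the scan list, free it and release the mm_count pin
 * taken in __khugepaged_enter().  Caller holds khugepaged_mm_lock.
 */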
static void collect_mm_slot(struct mm_slot *mm_slot)
{
        struct mm_struct *mm = mm_slot->mm;

        VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));

        if (khugepaged_test_exit(mm)) {
                /* free mm_slot */
                hlist_del(&mm_slot->hash);
                list_del(&mm_slot->mm_node);

                /*
                 * Not strictly needed because the mm exited already.
                 *
                 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
                 */

                /* khugepaged_mm_lock actually not necessary for the below */
                free_mm_slot(mm_slot);
                mmdrop(mm);
        }
}

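/*
 * Walk the vmas of the mm under the scan cursor, scanning up to "pages"
 * small pages worth of address space.  Returns the progress made
 * (roughly the number of small pages examined).  Called and returns with
 * khugepaged_mm_lock held; the lock is dropped while the mm is scanned.
 */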
static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
                                            struct page **hpage)
{
        struct mm_slot *mm_slot;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        int progress = 0;

        VM_BUG_ON(!pages);
        VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));

        if (khugepaged_scan.mm_slot)
                mm_slot = khugepaged_scan.mm_slot;
        else {
                mm_slot = list_entry(khugepaged_scan.mm_head.next,
                                     struct mm_slot, mm_node);
                khugepaged_scan.address = 0;
                khugepaged_scan.mm_slot = mm_slot;
        }
        spin_unlock(&khugepaged_mm_lock);

        mm = mm_slot->mm;
        down_read(&mm->mmap_sem);
        if (unlikely(khugepaged_test_exit(mm)))
                vma = NULL;
        else
                vma = find_vma(mm, khugepaged_scan.address);

        progress++;
        for (; vma; vma = vma->vm_next) {
                unsigned long hstart, hend;

                cond_resched();
                if (unlikely(khugepaged_test_exit(mm))) {
                        progress++;
                        break;
                }

                if ((!(vma->vm_flags & VM_HUGEPAGE) &&
                     !khugepaged_always()) ||
                    (vma->vm_flags & VM_NOHUGEPAGE)) {
                        progress++;
                        continue;
                }

                /* VM_PFNMAP vmas may have vm_ops null but vm_file set */
                if (!vma->anon_vma || vma->vm_ops || vma->vm_file) {
                        khugepaged_scan.address = vma->vm_end;
                        progress++;
                        continue;
                }
                VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));

                hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
                hend = vma->vm_end & HPAGE_PMD_MASK;
                if (hstart >= hend) {
                        progress++;
                        continue;
                }
                if (khugepaged_scan.address < hstart)
                        khugepaged_scan.address = hstart;
                if (khugepaged_scan.address > hend) {
                        khugepaged_scan.address = hend + HPAGE_PMD_SIZE;
                        progress++;
                        continue;
                }
                BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);

                while (khugepaged_scan.address < hend) {
                        int ret;
                        cond_resched();
                        if (unlikely(khugepaged_test_exit(mm)))
                                goto breakouterloop;

                        VM_BUG_ON(khugepaged_scan.address < hstart ||
                                  khugepaged_scan.address + HPAGE_PMD_SIZE >
                                  hend);
                        ret = khugepaged_scan_pmd(mm, vma,
                                                  khugepaged_scan.address,
                                                  hpage);
                        /* move to next address */
                        khugepaged_scan.address += HPAGE_PMD_SIZE;
                        progress += HPAGE_PMD_NR;
                        if (ret)
                                /* we released mmap_sem so break loop */
                                goto breakouterloop_mmap_sem;
                        if (progress >= pages)
                                goto breakouterloop;
                }
        }
breakouterloop:
        up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_sem:

        spin_lock(&khugepaged_mm_lock);
        BUG_ON(khugepaged_scan.mm_slot != mm_slot);
        /*
         * Release the current mm_slot if this mm is about to die, or
         * if we scanned all vmas of this mm.
         */
        if (khugepaged_test_exit(mm) || !vma) {
                /*
                 * Make sure that if mm_users is reaching zero while
                 * khugepaged runs here, khugepaged_exit will find
                 * mm_slot not pointing to the exiting mm.
                 */
                if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
                        khugepaged_scan.mm_slot = list_entry(
                                mm_slot->mm_node.next,
                                struct mm_slot, mm_node);
                        khugepaged_scan.address = 0;
                } else {
                        khugepaged_scan.mm_slot = NULL;
                        khugepaged_full_scans++;
                }

                collect_mm_slot(mm_slot);
        }

        return progress;
}

static int khugepaged_has_work(void)
{
        return !list_empty(&khugepaged_scan.mm_head) &&
                khugepaged_enabled();
}

static int khugepaged_wait_event(void)
{
        return !list_empty(&khugepaged_scan.mm_head) ||
                !khugepaged_enabled();
}

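/*
 * One scan pass of up to khugepaged_pages_to_scan small pages.  On !NUMA
 * kernels a hugepage is preallocated here and kept in *hpage until a
 * collapse consumes it; on NUMA it is allocated per-collapse in
 * collapse_huge_page().
 */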
static void khugepaged_do_scan(struct page **hpage)
{
        unsigned int progress = 0, pass_through_head = 0;
        unsigned int pages = khugepaged_pages_to_scan;

        barrier(); /* write khugepaged_pages_to_scan to local stack */

        while (progress < pages) {
                cond_resched();

#ifndef CONFIG_NUMA
                if (!*hpage) {
                        *hpage = alloc_hugepage(khugepaged_defrag());
                        if (unlikely(!*hpage))
                                break;
                }
#else
                if (IS_ERR(*hpage))
                        break;
#endif

                if (unlikely(kthread_should_stop() || freezing(current)))
                        break;

                spin_lock(&khugepaged_mm_lock);
                if (!khugepaged_scan.mm_slot)
                        pass_through_head++;
                if (khugepaged_has_work() &&
                    pass_through_head < 2)
                        progress += khugepaged_scan_mm_slot(pages - progress,
                                                            hpage);
                else
                        progress = pages;
                spin_unlock(&khugepaged_mm_lock);
        }
}

static void khugepaged_alloc_sleep(void)
{
        DEFINE_WAIT(wait);
        add_wait_queue(&khugepaged_wait, &wait);
        schedule_timeout_interruptible(
                msecs_to_jiffies(
                        khugepaged_alloc_sleep_millisecs));
        remove_wait_queue(&khugepaged_wait, &wait);
}

#ifndef CONFIG_NUMA
static struct page *khugepaged_alloc_hugepage(void)
{
        struct page *hpage;

        do {
                hpage = alloc_hugepage(khugepaged_defrag());
                if (!hpage)
                        khugepaged_alloc_sleep();
        } while (unlikely(!hpage) &&
                 likely(khugepaged_enabled()));
        return hpage;
}
#endif

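/*
 * Main loop of the khugepaged kernel thread: run scan passes while the
 * daemon is enabled, sleeping between passes or waiting until there is
 * work to do.
 */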
static void khugepaged_loop(void)
{
        struct page *hpage;

#ifdef CONFIG_NUMA
        hpage = NULL;
#endif
        while (likely(khugepaged_enabled())) {
#ifndef CONFIG_NUMA
                hpage = khugepaged_alloc_hugepage();
                if (unlikely(!hpage))
                        break;
#else
                if (IS_ERR(hpage)) {
                        khugepaged_alloc_sleep();
                        hpage = NULL;
                }
#endif

                khugepaged_do_scan(&hpage);
#ifndef CONFIG_NUMA
                if (hpage)
                        put_page(hpage);
#endif
                try_to_freeze();
                if (unlikely(kthread_should_stop()))
                        break;
                if (khugepaged_has_work()) {
                        DEFINE_WAIT(wait);
                        if (!khugepaged_scan_sleep_millisecs)
                                continue;
                        add_wait_queue(&khugepaged_wait, &wait);
                        schedule_timeout_interruptible(
                                msecs_to_jiffies(
                                        khugepaged_scan_sleep_millisecs));
                        remove_wait_queue(&khugepaged_wait, &wait);
                } else if (khugepaged_enabled())
                        wait_event_freezable(khugepaged_wait,
                                             khugepaged_wait_event());
        }
}

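/*
 * Entry point of the khugepaged kernel thread.  Serializes with
 * start_khugepaged() via khugepaged_mutex and releases the last mm_slot
 * when the thread is stopped or the daemon is disabled.
 */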
static int khugepaged(void *none)
{
        struct mm_slot *mm_slot;

        set_freezable();
        set_user_nice(current, 19);

        /* serialize with start_khugepaged() */
        mutex_lock(&khugepaged_mutex);

        for (;;) {
                mutex_unlock(&khugepaged_mutex);
                BUG_ON(khugepaged_thread != current);
                khugepaged_loop();
                BUG_ON(khugepaged_thread != current);

                mutex_lock(&khugepaged_mutex);
                if (!khugepaged_enabled())
                        break;
                if (unlikely(kthread_should_stop()))
                        break;
        }

        spin_lock(&khugepaged_mm_lock);
        mm_slot = khugepaged_scan.mm_slot;
        khugepaged_scan.mm_slot = NULL;
        if (mm_slot)
                collect_mm_slot(mm_slot);
        spin_unlock(&khugepaged_mm_lock);

        khugepaged_thread = NULL;
        mutex_unlock(&khugepaged_mutex);

        return 0;
}

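/*
 * Split the huge pmd at *pmd back into a regular page table.  A reference
 * is taken on the huge page under page_table_lock so it cannot vanish
 * while split_huge_page() runs with the lock dropped.
 */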
void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
{
        struct page *page;

        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_trans_huge(*pmd))) {
                spin_unlock(&mm->page_table_lock);
                return;
        }
        page = pmd_page(*pmd);
        VM_BUG_ON(!page_count(page));
        get_page(page);
        spin_unlock(&mm->page_table_lock);

        split_huge_page(page);
        put_page(page);
        BUG_ON(pmd_trans_huge(*pmd));
}

static void split_huge_page_address(struct mm_struct *mm,
                                    unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                return;

        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return;

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return;
        /*
         * Caller holds the mmap_sem write mode, so a huge pmd cannot
         * materialize from under us.
         */
        split_huge_page_pmd(mm, pmd);
}

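/*
 * Called when a vma's boundaries are being adjusted: if a new start or
 * end address (or the adjusted vm_next->vm_start) lands in the middle of
 * a huge pmd, split that pmd first so no hugepage straddles a vma
 * boundary.
 */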
void __vma_adjust_trans_huge(struct vm_area_struct *vma,
                             unsigned long start,
                             unsigned long end,
                             long adjust_next)
{
        /*
         * If the new start address isn't hpage aligned and it could
         * previously contain a hugepage: check if we need to split
         * a huge pmd.
         */
        if (start & ~HPAGE_PMD_MASK &&
            (start & HPAGE_PMD_MASK) >= vma->vm_start &&
            (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
                split_huge_page_address(vma->vm_mm, start);

        /*
         * If the new end address isn't hpage aligned and it could
         * previously contain a hugepage: check if we need to split
         * a huge pmd.
         */
        if (end & ~HPAGE_PMD_MASK &&
            (end & HPAGE_PMD_MASK) >= vma->vm_start &&
            (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
                split_huge_page_address(vma->vm_mm, end);

        /*
         * If we're also updating the vma->vm_next->vm_start, if the new
         * vm_next->vm_start isn't hpage aligned and it could previously
         * contain a hugepage: check if we need to split a huge pmd.
         */
        if (adjust_next > 0) {
                struct vm_area_struct *next = vma->vm_next;
                unsigned long nstart = next->vm_start;
                nstart += adjust_next << PAGE_SHIFT;
                if (nstart & ~HPAGE_PMD_MASK &&
                    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
                    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
                        split_huge_page_address(next->vm_mm, nstart);
        }
}