huge_memory.c

  1. /*
  2. * Copyright (C) 2009 Red Hat, Inc.
  3. *
  4. * This work is licensed under the terms of the GNU GPL, version 2. See
  5. * the COPYING file in the top-level directory.
  6. */
  7. #include <linux/mm.h>
  8. #include <linux/sched.h>
  9. #include <linux/highmem.h>
  10. #include <linux/hugetlb.h>
  11. #include <linux/mmu_notifier.h>
  12. #include <linux/rmap.h>
  13. #include <linux/swap.h>
  14. #include <linux/mm_inline.h>
  15. #include <linux/kthread.h>
  16. #include <linux/khugepaged.h>
  17. #include <linux/freezer.h>
  18. #include <linux/mman.h>
  19. #include <linux/pagemap.h>
  20. #include <asm/tlb.h>
  21. #include <asm/pgalloc.h>
  22. #include "internal.h"
  23. /*
  24. * By default transparent hugepage support is enabled for all mappings
  25. * and khugepaged scans all mappings. Defrag is only invoked by
  26. * khugepaged hugepage allocations and by page faults inside
  27. * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived
  28. * allocations.
  29. */
  30. unsigned long transparent_hugepage_flags __read_mostly =
  31. #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
  32. (1<<TRANSPARENT_HUGEPAGE_FLAG)|
  33. #endif
  34. #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
  35. (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
  36. #endif
  37. (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
  38. (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
  39. /* default scan 8*512 ptes (or vmas) every 30 seconds */
  40. static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
  41. static unsigned int khugepaged_pages_collapsed;
  42. static unsigned int khugepaged_full_scans;
  43. static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
  44. /* during fragmentation poll the hugepage allocator once every minute */
  45. static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
  46. static struct task_struct *khugepaged_thread __read_mostly;
  47. static DEFINE_MUTEX(khugepaged_mutex);
  48. static DEFINE_SPINLOCK(khugepaged_mm_lock);
  49. static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
  50. /*
  51. * By default, collapse into a hugepage if at least one pte is mapped,
  52. * just as would have happened if the vma had been large enough at
  53. * page fault time.
  54. */
  55. static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
  56. static int khugepaged(void *none);
  57. static int mm_slots_hash_init(void);
  58. static int khugepaged_slab_init(void);
  59. static void khugepaged_slab_free(void);
  60. #define MM_SLOTS_HASH_HEADS 1024
  61. static struct hlist_head *mm_slots_hash __read_mostly;
  62. static struct kmem_cache *mm_slot_cache __read_mostly;
  63. /**
  64. * struct mm_slot - hash lookup from mm to mm_slot
  65. * @hash: hash collision list
  66. * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
  67. * @mm: the mm that this information is valid for
  68. */
  69. struct mm_slot {
  70. struct hlist_node hash;
  71. struct list_head mm_node;
  72. struct mm_struct *mm;
  73. };
  74. /**
  75. * struct khugepaged_scan - cursor for scanning
  76. * @mm_head: the head of the mm list to scan
  77. * @mm_slot: the current mm_slot we are scanning
  78. * @address: the next address inside that to be scanned
  79. *
  80. * There is only the one khugepaged_scan instance of this cursor structure.
  81. */
  82. struct khugepaged_scan {
  83. struct list_head mm_head;
  84. struct mm_slot *mm_slot;
  85. unsigned long address;
  86. };
  87. static struct khugepaged_scan khugepaged_scan = {
  88. .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
  89. };
  90. static int set_recommended_min_free_kbytes(void)
  91. {
  92. struct zone *zone;
  93. int nr_zones = 0;
  94. unsigned long recommended_min;
  95. extern int min_free_kbytes;
  96. if (!khugepaged_enabled())
  97. return 0;
  98. for_each_populated_zone(zone)
  99. nr_zones++;
  100. /* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
  101. recommended_min = pageblock_nr_pages * nr_zones * 2;
  102. /*
  103. * Make sure that on average at least two pageblocks are almost free
  104. * of another type, one for a migratetype to fall back to and a
  105. * second to avoid subsequent fallbacks of other types. There are 3
  106. * MIGRATE_TYPES we care about.
  107. */
  108. recommended_min += pageblock_nr_pages * nr_zones *
  109. MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
  110. /* never allow reserving more than 5% of the lowmem */
  111. recommended_min = min(recommended_min,
  112. (unsigned long) nr_free_buffer_pages() / 20);
  113. recommended_min <<= (PAGE_SHIFT-10);
  114. if (recommended_min > min_free_kbytes)
  115. min_free_kbytes = recommended_min;
  116. setup_per_zone_wmarks();
  117. return 0;
  118. }
  119. late_initcall(set_recommended_min_free_kbytes);
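/*
 * Illustrative worked example (editor's note, not from the source): on an
 * x86-64 box with 4 KB pages, 2 MB pageblocks (pageblock_nr_pages == 512),
 * MIGRATE_PCPTYPES == 3 and four populated zones, the formula above gives
 *
 *   recommended_min = 512 * 4 * 2         =  4096 pages
 *                   + 512 * 4 * 3 * 3     = 18432 pages  -> 22528 pages
 *   recommended_min <<= (PAGE_SHIFT - 10) -> 22528 * 4   = 90112 kB
 *
 * i.e. roughly 88 MB, unless 5% of lowmem (in pages) is smaller; the
 * min(..., nr_free_buffer_pages() / 20) clamp is applied before the kB
 * conversion.
 */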
  120. static int start_khugepaged(void)
  121. {
  122. int err = 0;
  123. if (khugepaged_enabled()) {
  124. if (!khugepaged_thread)
  125. khugepaged_thread = kthread_run(khugepaged, NULL,
  126. "khugepaged");
  127. if (unlikely(IS_ERR(khugepaged_thread))) {
  128. printk(KERN_ERR
  129. "khugepaged: kthread_run(khugepaged) failed\n");
  130. err = PTR_ERR(khugepaged_thread);
  131. khugepaged_thread = NULL;
  132. }
  133. if (!list_empty(&khugepaged_scan.mm_head))
  134. wake_up_interruptible(&khugepaged_wait);
  135. set_recommended_min_free_kbytes();
  136. } else if (khugepaged_thread) {
  137. kthread_stop(khugepaged_thread);
  138. khugepaged_thread = NULL;
  139. }
  140. return err;
  141. }
  142. #ifdef CONFIG_SYSFS
  143. static ssize_t double_flag_show(struct kobject *kobj,
  144. struct kobj_attribute *attr, char *buf,
  145. enum transparent_hugepage_flag enabled,
  146. enum transparent_hugepage_flag req_madv)
  147. {
  148. if (test_bit(enabled, &transparent_hugepage_flags)) {
  149. VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
  150. return sprintf(buf, "[always] madvise never\n");
  151. } else if (test_bit(req_madv, &transparent_hugepage_flags))
  152. return sprintf(buf, "always [madvise] never\n");
  153. else
  154. return sprintf(buf, "always madvise [never]\n");
  155. }
  156. static ssize_t double_flag_store(struct kobject *kobj,
  157. struct kobj_attribute *attr,
  158. const char *buf, size_t count,
  159. enum transparent_hugepage_flag enabled,
  160. enum transparent_hugepage_flag req_madv)
  161. {
  162. if (!memcmp("always", buf,
  163. min(sizeof("always")-1, count))) {
  164. set_bit(enabled, &transparent_hugepage_flags);
  165. clear_bit(req_madv, &transparent_hugepage_flags);
  166. } else if (!memcmp("madvise", buf,
  167. min(sizeof("madvise")-1, count))) {
  168. clear_bit(enabled, &transparent_hugepage_flags);
  169. set_bit(req_madv, &transparent_hugepage_flags);
  170. } else if (!memcmp("never", buf,
  171. min(sizeof("never")-1, count))) {
  172. clear_bit(enabled, &transparent_hugepage_flags);
  173. clear_bit(req_madv, &transparent_hugepage_flags);
  174. } else
  175. return -EINVAL;
  176. return count;
  177. }
  178. static ssize_t enabled_show(struct kobject *kobj,
  179. struct kobj_attribute *attr, char *buf)
  180. {
  181. return double_flag_show(kobj, attr, buf,
  182. TRANSPARENT_HUGEPAGE_FLAG,
  183. TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
  184. }
  185. static ssize_t enabled_store(struct kobject *kobj,
  186. struct kobj_attribute *attr,
  187. const char *buf, size_t count)
  188. {
  189. ssize_t ret;
  190. ret = double_flag_store(kobj, attr, buf, count,
  191. TRANSPARENT_HUGEPAGE_FLAG,
  192. TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
  193. if (ret > 0) {
  194. int err;
  195. mutex_lock(&khugepaged_mutex);
  196. err = start_khugepaged();
  197. mutex_unlock(&khugepaged_mutex);
  198. if (err)
  199. ret = err;
  200. }
  201. return ret;
  202. }
  203. static struct kobj_attribute enabled_attr =
  204. __ATTR(enabled, 0644, enabled_show, enabled_store);
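/*
 * Usage sketch (editor's addition): with CONFIG_SYSFS this attribute shows
 * up as /sys/kernel/mm/transparent_hugepage/enabled and accepts the same
 * three keywords that double_flag_show() prints, e.g.
 *
 *   echo always  > /sys/kernel/mm/transparent_hugepage/enabled
 *   echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 *   echo never   > /sys/kernel/mm/transparent_hugepage/enabled
 *
 * A successful store also starts or stops khugepaged via start_khugepaged().
 */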
  205. static ssize_t single_flag_show(struct kobject *kobj,
  206. struct kobj_attribute *attr, char *buf,
  207. enum transparent_hugepage_flag flag)
  208. {
  209. return sprintf(buf, "%d\n",
  210. !!test_bit(flag, &transparent_hugepage_flags));
  211. }
  212. static ssize_t single_flag_store(struct kobject *kobj,
  213. struct kobj_attribute *attr,
  214. const char *buf, size_t count,
  215. enum transparent_hugepage_flag flag)
  216. {
  217. unsigned long value;
  218. int ret;
  219. ret = kstrtoul(buf, 10, &value);
  220. if (ret < 0)
  221. return ret;
  222. if (value > 1)
  223. return -EINVAL;
  224. if (value)
  225. set_bit(flag, &transparent_hugepage_flags);
  226. else
  227. clear_bit(flag, &transparent_hugepage_flags);
  228. return count;
  229. }
  230. /*
  231. * Currently defrag only disables __GFP_NOWAIT for allocation. A blind
  232. * __GFP_REPEAT is too aggressive, it's never worth swapping tons of
  233. * memory just to allocate one more hugepage.
  234. */
  235. static ssize_t defrag_show(struct kobject *kobj,
  236. struct kobj_attribute *attr, char *buf)
  237. {
  238. return double_flag_show(kobj, attr, buf,
  239. TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
  240. TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
  241. }
  242. static ssize_t defrag_store(struct kobject *kobj,
  243. struct kobj_attribute *attr,
  244. const char *buf, size_t count)
  245. {
  246. return double_flag_store(kobj, attr, buf, count,
  247. TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
  248. TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
  249. }
  250. static struct kobj_attribute defrag_attr =
  251. __ATTR(defrag, 0644, defrag_show, defrag_store);
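/*
 * Editor's note: this attribute appears as
 * /sys/kernel/mm/transparent_hugepage/defrag and takes the same
 * always/madvise/never keywords as "enabled" above.
 */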
  252. #ifdef CONFIG_DEBUG_VM
  253. static ssize_t debug_cow_show(struct kobject *kobj,
  254. struct kobj_attribute *attr, char *buf)
  255. {
  256. return single_flag_show(kobj, attr, buf,
  257. TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
  258. }
  259. static ssize_t debug_cow_store(struct kobject *kobj,
  260. struct kobj_attribute *attr,
  261. const char *buf, size_t count)
  262. {
  263. return single_flag_store(kobj, attr, buf, count,
  264. TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
  265. }
  266. static struct kobj_attribute debug_cow_attr =
  267. __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
  268. #endif /* CONFIG_DEBUG_VM */
  269. static struct attribute *hugepage_attr[] = {
  270. &enabled_attr.attr,
  271. &defrag_attr.attr,
  272. #ifdef CONFIG_DEBUG_VM
  273. &debug_cow_attr.attr,
  274. #endif
  275. NULL,
  276. };
  277. static struct attribute_group hugepage_attr_group = {
  278. .attrs = hugepage_attr,
  279. };
  280. static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
  281. struct kobj_attribute *attr,
  282. char *buf)
  283. {
  284. return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
  285. }
  286. static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
  287. struct kobj_attribute *attr,
  288. const char *buf, size_t count)
  289. {
  290. unsigned long msecs;
  291. int err;
  292. err = strict_strtoul(buf, 10, &msecs);
  293. if (err || msecs > UINT_MAX)
  294. return -EINVAL;
  295. khugepaged_scan_sleep_millisecs = msecs;
  296. wake_up_interruptible(&khugepaged_wait);
  297. return count;
  298. }
  299. static struct kobj_attribute scan_sleep_millisecs_attr =
  300. __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
  301. scan_sleep_millisecs_store);
  302. static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
  303. struct kobj_attribute *attr,
  304. char *buf)
  305. {
  306. return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
  307. }
  308. static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
  309. struct kobj_attribute *attr,
  310. const char *buf, size_t count)
  311. {
  312. unsigned long msecs;
  313. int err;
  314. err = strict_strtoul(buf, 10, &msecs);
  315. if (err || msecs > UINT_MAX)
  316. return -EINVAL;
  317. khugepaged_alloc_sleep_millisecs = msecs;
  318. wake_up_interruptible(&khugepaged_wait);
  319. return count;
  320. }
  321. static struct kobj_attribute alloc_sleep_millisecs_attr =
  322. __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
  323. alloc_sleep_millisecs_store);
  324. static ssize_t pages_to_scan_show(struct kobject *kobj,
  325. struct kobj_attribute *attr,
  326. char *buf)
  327. {
  328. return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
  329. }
  330. static ssize_t pages_to_scan_store(struct kobject *kobj,
  331. struct kobj_attribute *attr,
  332. const char *buf, size_t count)
  333. {
  334. int err;
  335. unsigned long pages;
  336. err = strict_strtoul(buf, 10, &pages);
  337. if (err || !pages || pages > UINT_MAX)
  338. return -EINVAL;
  339. khugepaged_pages_to_scan = pages;
  340. return count;
  341. }
  342. static struct kobj_attribute pages_to_scan_attr =
  343. __ATTR(pages_to_scan, 0644, pages_to_scan_show,
  344. pages_to_scan_store);
  345. static ssize_t pages_collapsed_show(struct kobject *kobj,
  346. struct kobj_attribute *attr,
  347. char *buf)
  348. {
  349. return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
  350. }
  351. static struct kobj_attribute pages_collapsed_attr =
  352. __ATTR_RO(pages_collapsed);
  353. static ssize_t full_scans_show(struct kobject *kobj,
  354. struct kobj_attribute *attr,
  355. char *buf)
  356. {
  357. return sprintf(buf, "%u\n", khugepaged_full_scans);
  358. }
  359. static struct kobj_attribute full_scans_attr =
  360. __ATTR_RO(full_scans);
  361. static ssize_t khugepaged_defrag_show(struct kobject *kobj,
  362. struct kobj_attribute *attr, char *buf)
  363. {
  364. return single_flag_show(kobj, attr, buf,
  365. TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
  366. }
  367. static ssize_t khugepaged_defrag_store(struct kobject *kobj,
  368. struct kobj_attribute *attr,
  369. const char *buf, size_t count)
  370. {
  371. return single_flag_store(kobj, attr, buf, count,
  372. TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
  373. }
  374. static struct kobj_attribute khugepaged_defrag_attr =
  375. __ATTR(defrag, 0644, khugepaged_defrag_show,
  376. khugepaged_defrag_store);
  377. /*
  378. * max_ptes_none controls if khugepaged should collapse hugepages over
  379. * any unmapped ptes in turn potentially increasing the memory
  380. * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
  381. * reduce the available free memory in the system as it
  382. * runs. Increasing max_ptes_none will instead potentially reduce the
  383. * free memory in the system during the khugepaged scan.
  384. */
  385. static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
  386. struct kobj_attribute *attr,
  387. char *buf)
  388. {
  389. return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
  390. }
  391. static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
  392. struct kobj_attribute *attr,
  393. const char *buf, size_t count)
  394. {
  395. int err;
  396. unsigned long max_ptes_none;
  397. err = strict_strtoul(buf, 10, &max_ptes_none);
  398. if (err || max_ptes_none > HPAGE_PMD_NR-1)
  399. return -EINVAL;
  400. khugepaged_max_ptes_none = max_ptes_none;
  401. return count;
  402. }
  403. static struct kobj_attribute khugepaged_max_ptes_none_attr =
  404. __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
  405. khugepaged_max_ptes_none_store);
  406. static struct attribute *khugepaged_attr[] = {
  407. &khugepaged_defrag_attr.attr,
  408. &khugepaged_max_ptes_none_attr.attr,
  409. &pages_to_scan_attr.attr,
  410. &pages_collapsed_attr.attr,
  411. &full_scans_attr.attr,
  412. &scan_sleep_millisecs_attr.attr,
  413. &alloc_sleep_millisecs_attr.attr,
  414. NULL,
  415. };
  416. static struct attribute_group khugepaged_attr_group = {
  417. .attrs = khugepaged_attr,
  418. .name = "khugepaged",
  419. };
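/*
 * Usage sketch (editor's addition): because the group is named "khugepaged",
 * the attributes above are exposed under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/, e.g.
 *
 *   echo 100 > /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 *   cat /sys/kernel/mm/transparent_hugepage/khugepaged/pages_collapsed
 */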
  420. static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
  421. {
  422. int err;
  423. *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
  424. if (unlikely(!*hugepage_kobj)) {
  425. printk(KERN_ERR "hugepage: failed kobject create\n");
  426. return -ENOMEM;
  427. }
  428. err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
  429. if (err) {
  430. printk(KERN_ERR "hugepage: failed to register hugepage group\n");
  431. goto delete_obj;
  432. }
  433. err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
  434. if (err) {
  435. printk(KERN_ERR "hugepage: failed to register khugepaged group\n");
  436. goto remove_hp_group;
  437. }
  438. return 0;
  439. remove_hp_group:
  440. sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
  441. delete_obj:
  442. kobject_put(*hugepage_kobj);
  443. return err;
  444. }
  445. static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
  446. {
  447. sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
  448. sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
  449. kobject_put(hugepage_kobj);
  450. }
  451. #else
  452. static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
  453. {
  454. return 0;
  455. }
  456. static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
  457. {
  458. }
  459. #endif /* CONFIG_SYSFS */
  460. static int __init hugepage_init(void)
  461. {
  462. int err;
  463. struct kobject *hugepage_kobj;
  464. if (!has_transparent_hugepage()) {
  465. transparent_hugepage_flags = 0;
  466. return -EINVAL;
  467. }
  468. err = hugepage_init_sysfs(&hugepage_kobj);
  469. if (err)
  470. return err;
  471. err = khugepaged_slab_init();
  472. if (err)
  473. goto out;
  474. err = mm_slots_hash_init();
  475. if (err) {
  476. khugepaged_slab_free();
  477. goto out;
  478. }
  479. /*
  480. * By default disable transparent hugepages on smaller systems,
  481. * where the extra memory used could hurt more than TLB overhead
  482. * is likely to save. The admin can still enable it through /sys.
  483. */
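/* 512 << (20 - PAGE_SHIFT) is 512 MB expressed in units of pages. */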
  484. if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
  485. transparent_hugepage_flags = 0;
  486. start_khugepaged();
  487. return 0;
  488. out:
  489. hugepage_exit_sysfs(hugepage_kobj);
  490. return err;
  491. }
  492. module_init(hugepage_init)
  493. static int __init setup_transparent_hugepage(char *str)
  494. {
  495. int ret = 0;
  496. if (!str)
  497. goto out;
  498. if (!strcmp(str, "always")) {
  499. set_bit(TRANSPARENT_HUGEPAGE_FLAG,
  500. &transparent_hugepage_flags);
  501. clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
  502. &transparent_hugepage_flags);
  503. ret = 1;
  504. } else if (!strcmp(str, "madvise")) {
  505. clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
  506. &transparent_hugepage_flags);
  507. set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
  508. &transparent_hugepage_flags);
  509. ret = 1;
  510. } else if (!strcmp(str, "never")) {
  511. clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
  512. &transparent_hugepage_flags);
  513. clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
  514. &transparent_hugepage_flags);
  515. ret = 1;
  516. }
  517. out:
  518. if (!ret)
  519. printk(KERN_WARNING
  520. "transparent_hugepage= cannot parse, ignored\n");
  521. return ret;
  522. }
  523. __setup("transparent_hugepage=", setup_transparent_hugepage);
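/*
 * Boot-time example (editor's addition): the parameter parsed above mirrors
 * the sysfs "enabled" knob, so booting with e.g.
 *
 *   transparent_hugepage=madvise
 *
 * restricts huge page faults to MADV_HUGEPAGE regions before userspace or
 * init scripts get a chance to write to sysfs.
 */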
  524. static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
  525. {
  526. if (likely(vma->vm_flags & VM_WRITE))
  527. pmd = pmd_mkwrite(pmd);
  528. return pmd;
  529. }
  530. static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
  531. struct vm_area_struct *vma,
  532. unsigned long haddr, pmd_t *pmd,
  533. struct page *page)
  534. {
  535. pgtable_t pgtable;
  536. VM_BUG_ON(!PageCompound(page));
  537. pgtable = pte_alloc_one(mm, haddr);
  538. if (unlikely(!pgtable))
  539. return VM_FAULT_OOM;
  540. clear_huge_page(page, haddr, HPAGE_PMD_NR);
  541. __SetPageUptodate(page);
  542. spin_lock(&mm->page_table_lock);
  543. if (unlikely(!pmd_none(*pmd))) {
  544. spin_unlock(&mm->page_table_lock);
  545. mem_cgroup_uncharge_page(page);
  546. put_page(page);
  547. pte_free(mm, pgtable);
  548. } else {
  549. pmd_t entry;
  550. entry = mk_pmd(page, vma->vm_page_prot);
  551. entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
  552. entry = pmd_mkhuge(entry);
  553. /*
  554. * The spinlocking to take the lru_lock inside
  555. * page_add_new_anon_rmap() acts as a full memory
  556. * barrier to be sure clear_huge_page writes become
  557. * visible after the set_pmd_at() write.
  558. */
  559. page_add_new_anon_rmap(page, vma, haddr);
  560. set_pmd_at(mm, haddr, pmd, entry);
  561. pgtable_trans_huge_deposit(mm, pgtable);
  562. add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
  563. mm->nr_ptes++;
  564. spin_unlock(&mm->page_table_lock);
  565. }
  566. return 0;
  567. }
  568. static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
  569. {
  570. return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
  571. }
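/*
 * Editor's note: when defrag is off the mask above drops __GFP_WAIT, so the
 * huge page allocation cannot enter direct reclaim/compaction and the fault
 * quickly falls back to ordinary 4k pages instead of stalling, e.g.
 *
 *   alloc_hugepage_gfpmask(0, 0) == (GFP_TRANSHUGE & ~__GFP_WAIT)
 *   alloc_hugepage_gfpmask(1, 0) ==  GFP_TRANSHUGE
 */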
  572. static inline struct page *alloc_hugepage_vma(int defrag,
  573. struct vm_area_struct *vma,
  574. unsigned long haddr, int nd,
  575. gfp_t extra_gfp)
  576. {
  577. return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
  578. HPAGE_PMD_ORDER, vma, haddr, nd);
  579. }
  580. #ifndef CONFIG_NUMA
  581. static inline struct page *alloc_hugepage(int defrag)
  582. {
  583. return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
  584. HPAGE_PMD_ORDER);
  585. }
  586. #endif
  587. int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
  588. unsigned long address, pmd_t *pmd,
  589. unsigned int flags)
  590. {
  591. struct page *page;
  592. unsigned long haddr = address & HPAGE_PMD_MASK;
  593. pte_t *pte;
  594. if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
  595. if (unlikely(anon_vma_prepare(vma)))
  596. return VM_FAULT_OOM;
  597. if (unlikely(khugepaged_enter(vma)))
  598. return VM_FAULT_OOM;
  599. page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
  600. vma, haddr, numa_node_id(), 0);
  601. if (unlikely(!page)) {
  602. count_vm_event(THP_FAULT_FALLBACK);
  603. goto out;
  604. }
  605. count_vm_event(THP_FAULT_ALLOC);
  606. if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
  607. put_page(page);
  608. goto out;
  609. }
  610. if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd,
  611. page))) {
  612. mem_cgroup_uncharge_page(page);
  613. put_page(page);
  614. goto out;
  615. }
  616. return 0;
  617. }
  618. out:
  619. /*
  620. * Use __pte_alloc instead of pte_alloc_map, because we can't
  621. * run pte_offset_map on the pmd, if a huge pmd could
  622. * materialize from under us from a different thread.
  623. */
  624. if (unlikely(__pte_alloc(mm, vma, pmd, address)))
  625. return VM_FAULT_OOM;
  626. /* if a huge pmd materialized from under us, just retry later */
  627. if (unlikely(pmd_trans_huge(*pmd)))
  628. return 0;
  629. /*
  630. * A regular pmd is established and it can't morph into a huge pmd
  631. * from under us anymore at this point because we hold the mmap_sem
  632. * read mode and khugepaged takes it in write mode. So now it's
  633. * safe to run pte_offset_map().
  634. */
  635. pte = pte_offset_map(pmd, address);
  636. return handle_pte_fault(mm, vma, address, pte, pmd, flags);
  637. }
  638. int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  639. pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
  640. struct vm_area_struct *vma)
  641. {
  642. struct page *src_page;
  643. pmd_t pmd;
  644. pgtable_t pgtable;
  645. int ret;
  646. ret = -ENOMEM;
  647. pgtable = pte_alloc_one(dst_mm, addr);
  648. if (unlikely(!pgtable))
  649. goto out;
  650. spin_lock(&dst_mm->page_table_lock);
  651. spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);
  652. ret = -EAGAIN;
  653. pmd = *src_pmd;
  654. if (unlikely(!pmd_trans_huge(pmd))) {
  655. pte_free(dst_mm, pgtable);
  656. goto out_unlock;
  657. }
  658. if (unlikely(pmd_trans_splitting(pmd))) {
  659. /* split huge page running from under us */
  660. spin_unlock(&src_mm->page_table_lock);
  661. spin_unlock(&dst_mm->page_table_lock);
  662. pte_free(dst_mm, pgtable);
  663. wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
  664. goto out;
  665. }
  666. src_page = pmd_page(pmd);
  667. VM_BUG_ON(!PageHead(src_page));
  668. get_page(src_page);
  669. page_dup_rmap(src_page);
  670. add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
  671. pmdp_set_wrprotect(src_mm, addr, src_pmd);
  672. pmd = pmd_mkold(pmd_wrprotect(pmd));
  673. set_pmd_at(dst_mm, addr, dst_pmd, pmd);
  674. pgtable_trans_huge_deposit(dst_mm, pgtable);
  675. dst_mm->nr_ptes++;
  676. ret = 0;
  677. out_unlock:
  678. spin_unlock(&src_mm->page_table_lock);
  679. spin_unlock(&dst_mm->page_table_lock);
  680. out:
  681. return ret;
  682. }
  683. static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
  684. struct vm_area_struct *vma,
  685. unsigned long address,
  686. pmd_t *pmd, pmd_t orig_pmd,
  687. struct page *page,
  688. unsigned long haddr)
  689. {
  690. pgtable_t pgtable;
  691. pmd_t _pmd;
  692. int ret = 0, i;
  693. struct page **pages;
  694. unsigned long mmun_start; /* For mmu_notifiers */
  695. unsigned long mmun_end; /* For mmu_notifiers */
  696. pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
  697. GFP_KERNEL);
  698. if (unlikely(!pages)) {
  699. ret |= VM_FAULT_OOM;
  700. goto out;
  701. }
  702. for (i = 0; i < HPAGE_PMD_NR; i++) {
  703. pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
  704. __GFP_OTHER_NODE,
  705. vma, address, page_to_nid(page));
  706. if (unlikely(!pages[i] ||
  707. mem_cgroup_newpage_charge(pages[i], mm,
  708. GFP_KERNEL))) {
  709. if (pages[i])
  710. put_page(pages[i]);
  711. mem_cgroup_uncharge_start();
  712. while (--i >= 0) {
  713. mem_cgroup_uncharge_page(pages[i]);
  714. put_page(pages[i]);
  715. }
  716. mem_cgroup_uncharge_end();
  717. kfree(pages);
  718. ret |= VM_FAULT_OOM;
  719. goto out;
  720. }
  721. }
  722. for (i = 0; i < HPAGE_PMD_NR; i++) {
  723. copy_user_highpage(pages[i], page + i,
  724. haddr + PAGE_SIZE * i, vma);
  725. __SetPageUptodate(pages[i]);
  726. cond_resched();
  727. }
  728. mmun_start = haddr;
  729. mmun_end = haddr + HPAGE_PMD_SIZE;
  730. mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
  731. spin_lock(&mm->page_table_lock);
  732. if (unlikely(!pmd_same(*pmd, orig_pmd)))
  733. goto out_free_pages;
  734. VM_BUG_ON(!PageHead(page));
  735. pmdp_clear_flush(vma, haddr, pmd);
  736. /* leave pmd empty until pte is filled */
  737. pgtable = pgtable_trans_huge_withdraw(mm);
  738. pmd_populate(mm, &_pmd, pgtable);
  739. for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
  740. pte_t *pte, entry;
  741. entry = mk_pte(pages[i], vma->vm_page_prot);
  742. entry = maybe_mkwrite(pte_mkdirty(entry), vma);
  743. page_add_new_anon_rmap(pages[i], vma, haddr);
  744. pte = pte_offset_map(&_pmd, haddr);
  745. VM_BUG_ON(!pte_none(*pte));
  746. set_pte_at(mm, haddr, pte, entry);
  747. pte_unmap(pte);
  748. }
  749. kfree(pages);
  750. smp_wmb(); /* make pte visible before pmd */
  751. pmd_populate(mm, pmd, pgtable);
  752. page_remove_rmap(page);
  753. spin_unlock(&mm->page_table_lock);
  754. mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
  755. ret |= VM_FAULT_WRITE;
  756. put_page(page);
  757. out:
  758. return ret;
  759. out_free_pages:
  760. spin_unlock(&mm->page_table_lock);
  761. mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
  762. mem_cgroup_uncharge_start();
  763. for (i = 0; i < HPAGE_PMD_NR; i++) {
  764. mem_cgroup_uncharge_page(pages[i]);
  765. put_page(pages[i]);
  766. }
  767. mem_cgroup_uncharge_end();
  768. kfree(pages);
  769. goto out;
  770. }
  771. int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
  772. unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
  773. {
  774. int ret = 0;
  775. struct page *page, *new_page;
  776. unsigned long haddr;
  777. unsigned long mmun_start; /* For mmu_notifiers */
  778. unsigned long mmun_end; /* For mmu_notifiers */
  779. VM_BUG_ON(!vma->anon_vma);
  780. spin_lock(&mm->page_table_lock);
  781. if (unlikely(!pmd_same(*pmd, orig_pmd)))
  782. goto out_unlock;
  783. page = pmd_page(orig_pmd);
  784. VM_BUG_ON(!PageCompound(page) || !PageHead(page));
  785. haddr = address & HPAGE_PMD_MASK;
  786. if (page_mapcount(page) == 1) {
  787. pmd_t entry;
  788. entry = pmd_mkyoung(orig_pmd);
  789. entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
  790. if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
  791. update_mmu_cache_pmd(vma, address, pmd);
  792. ret |= VM_FAULT_WRITE;
  793. goto out_unlock;
  794. }
  795. get_page(page);
  796. spin_unlock(&mm->page_table_lock);
  797. if (transparent_hugepage_enabled(vma) &&
  798. !transparent_hugepage_debug_cow())
  799. new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
  800. vma, haddr, numa_node_id(), 0);
  801. else
  802. new_page = NULL;
  803. if (unlikely(!new_page)) {
  804. count_vm_event(THP_FAULT_FALLBACK);
  805. ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
  806. pmd, orig_pmd, page, haddr);
  807. if (ret & VM_FAULT_OOM)
  808. split_huge_page(page);
  809. put_page(page);
  810. goto out;
  811. }
  812. count_vm_event(THP_FAULT_ALLOC);
  813. if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
  814. put_page(new_page);
  815. split_huge_page(page);
  816. put_page(page);
  817. ret |= VM_FAULT_OOM;
  818. goto out;
  819. }
  820. copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
  821. __SetPageUptodate(new_page);
  822. mmun_start = haddr;
  823. mmun_end = haddr + HPAGE_PMD_SIZE;
  824. mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
  825. spin_lock(&mm->page_table_lock);
  826. put_page(page);
  827. if (unlikely(!pmd_same(*pmd, orig_pmd))) {
  828. spin_unlock(&mm->page_table_lock);
  829. mem_cgroup_uncharge_page(new_page);
  830. put_page(new_page);
  831. goto out_mn;
  832. } else {
  833. pmd_t entry;
  834. VM_BUG_ON(!PageHead(page));
  835. entry = mk_pmd(new_page, vma->vm_page_prot);
  836. entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
  837. entry = pmd_mkhuge(entry);
  838. pmdp_clear_flush(vma, haddr, pmd);
  839. page_add_new_anon_rmap(new_page, vma, haddr);
  840. set_pmd_at(mm, haddr, pmd, entry);
  841. update_mmu_cache_pmd(vma, address, pmd);
  842. page_remove_rmap(page);
  843. put_page(page);
  844. ret |= VM_FAULT_WRITE;
  845. }
  846. spin_unlock(&mm->page_table_lock);
  847. out_mn:
  848. mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
  849. out:
  850. return ret;
  851. out_unlock:
  852. spin_unlock(&mm->page_table_lock);
  853. return ret;
  854. }
  855. struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
  856. unsigned long addr,
  857. pmd_t *pmd,
  858. unsigned int flags)
  859. {
  860. struct mm_struct *mm = vma->vm_mm;
  861. struct page *page = NULL;
  862. assert_spin_locked(&mm->page_table_lock);
  863. if (flags & FOLL_WRITE && !pmd_write(*pmd))
  864. goto out;
  865. page = pmd_page(*pmd);
  866. VM_BUG_ON(!PageHead(page));
  867. if (flags & FOLL_TOUCH) {
  868. pmd_t _pmd;
  869. /*
  870. * We should set the dirty bit only for FOLL_WRITE but
  871. * for now the dirty bit in the pmd is meaningless.
  872. * And if the dirty bit ever becomes meaningful and
  873. * we only set it with FOLL_WRITE, an atomic
  874. * set_bit will be required on the pmd to set the
  875. * young bit, instead of the current set_pmd_at.
  876. */
  877. _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
  878. set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
  879. }
  880. if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
  881. if (page->mapping && trylock_page(page)) {
  882. lru_add_drain();
  883. if (page->mapping)
  884. mlock_vma_page(page);
  885. unlock_page(page);
  886. }
  887. }
  888. page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
  889. VM_BUG_ON(!PageCompound(page));
  890. if (flags & FOLL_GET)
  891. get_page_foll(page);
  892. out:
  893. return page;
  894. }
  895. int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
  896. pmd_t *pmd, unsigned long addr)
  897. {
  898. int ret = 0;
  899. if (__pmd_trans_huge_lock(pmd, vma) == 1) {
  900. struct page *page;
  901. pgtable_t pgtable;
  902. pmd_t orig_pmd;
  903. pgtable = pgtable_trans_huge_withdraw(tlb->mm);
  904. orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd);
  905. page = pmd_page(orig_pmd);
  906. tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
  907. page_remove_rmap(page);
  908. VM_BUG_ON(page_mapcount(page) < 0);
  909. add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
  910. VM_BUG_ON(!PageHead(page));
  911. tlb->mm->nr_ptes--;
  912. spin_unlock(&tlb->mm->page_table_lock);
  913. tlb_remove_page(tlb, page);
  914. pte_free(tlb->mm, pgtable);
  915. ret = 1;
  916. }
  917. return ret;
  918. }
  919. int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
  920. unsigned long addr, unsigned long end,
  921. unsigned char *vec)
  922. {
  923. int ret = 0;
  924. if (__pmd_trans_huge_lock(pmd, vma) == 1) {
  925. /*
  926. * All logical pages in the range are present
  927. * if backed by a huge page.
  928. */
  929. spin_unlock(&vma->vm_mm->page_table_lock);
  930. memset(vec, 1, (end - addr) >> PAGE_SHIFT);
  931. ret = 1;
  932. }
  933. return ret;
  934. }
  935. int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
  936. unsigned long old_addr,
  937. unsigned long new_addr, unsigned long old_end,
  938. pmd_t *old_pmd, pmd_t *new_pmd)
  939. {
  940. int ret = 0;
  941. pmd_t pmd;
  942. struct mm_struct *mm = vma->vm_mm;
  943. if ((old_addr & ~HPAGE_PMD_MASK) ||
  944. (new_addr & ~HPAGE_PMD_MASK) ||
  945. old_end - old_addr < HPAGE_PMD_SIZE ||
  946. (new_vma->vm_flags & VM_NOHUGEPAGE))
  947. goto out;
  948. /*
  949. * The destination pmd shouldn't be established, free_pgtables()
  950. * should have released it.
  951. */
  952. if (WARN_ON(!pmd_none(*new_pmd))) {
  953. VM_BUG_ON(pmd_trans_huge(*new_pmd));
  954. goto out;
  955. }
  956. ret = __pmd_trans_huge_lock(old_pmd, vma);
  957. if (ret == 1) {
  958. pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
  959. VM_BUG_ON(!pmd_none(*new_pmd));
  960. set_pmd_at(mm, new_addr, new_pmd, pmd);
  961. spin_unlock(&mm->page_table_lock);
  962. }
  963. out:
  964. return ret;
  965. }
  966. int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
  967. unsigned long addr, pgprot_t newprot)
  968. {
  969. struct mm_struct *mm = vma->vm_mm;
  970. int ret = 0;
  971. if (__pmd_trans_huge_lock(pmd, vma) == 1) {
  972. pmd_t entry;
  973. entry = pmdp_get_and_clear(mm, addr, pmd);
  974. entry = pmd_modify(entry, newprot);
  975. set_pmd_at(mm, addr, pmd, entry);
  976. spin_unlock(&vma->vm_mm->page_table_lock);
  977. ret = 1;
  978. }
  979. return ret;
  980. }
  981. /*
  982. * Returns 1 if a given pmd maps a stable (not under splitting) thp.
  983. * Returns -1 if it maps a thp under splitting. Returns 0 otherwise.
  984. *
  985. * Note that if it returns 1, this routine returns without unlocking page
  986. * table locks. So callers must unlock them.
  987. */
  988. int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
  989. {
  990. spin_lock(&vma->vm_mm->page_table_lock);
  991. if (likely(pmd_trans_huge(*pmd))) {
  992. if (unlikely(pmd_trans_splitting(*pmd))) {
  993. spin_unlock(&vma->vm_mm->page_table_lock);
  994. wait_split_huge_page(vma->anon_vma, pmd);
  995. return -1;
  996. } else {
  997. /* Thp mapped by 'pmd' is stable, so we can
  998. * handle it as it is. */
  999. return 1;
  1000. }
  1001. }
  1002. spin_unlock(&vma->vm_mm->page_table_lock);
  1003. return 0;
  1004. }
  1005. pmd_t *page_check_address_pmd(struct page *page,
  1006. struct mm_struct *mm,
  1007. unsigned long address,
  1008. enum page_check_address_pmd_flag flag)
  1009. {
  1010. pgd_t *pgd;
  1011. pud_t *pud;
  1012. pmd_t *pmd, *ret = NULL;
  1013. if (address & ~HPAGE_PMD_MASK)
  1014. goto out;
  1015. pgd = pgd_offset(mm, address);
  1016. if (!pgd_present(*pgd))
  1017. goto out;
  1018. pud = pud_offset(pgd, address);
  1019. if (!pud_present(*pud))
  1020. goto out;
  1021. pmd = pmd_offset(pud, address);
  1022. if (pmd_none(*pmd))
  1023. goto out;
  1024. if (pmd_page(*pmd) != page)
  1025. goto out;
  1026. /*
  1027. * split_vma() may create temporary aliased mappings. There is
  1028. * no risk as long as all huge pmd are found and have their
  1029. * splitting bit set before __split_huge_page_refcount
  1030. * runs. Finding the same huge pmd more than once during the
  1031. * same rmap walk is not a problem.
  1032. */
  1033. if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
  1034. pmd_trans_splitting(*pmd))
  1035. goto out;
  1036. if (pmd_trans_huge(*pmd)) {
  1037. VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
  1038. !pmd_trans_splitting(*pmd));
  1039. ret = pmd;
  1040. }
  1041. out:
  1042. return ret;
  1043. }
  1044. static int __split_huge_page_splitting(struct page *page,
  1045. struct vm_area_struct *vma,
  1046. unsigned long address)
  1047. {
  1048. struct mm_struct *mm = vma->vm_mm;
  1049. pmd_t *pmd;
  1050. int ret = 0;
  1051. /* For mmu_notifiers */
  1052. const unsigned long mmun_start = address;
  1053. const unsigned long mmun_end = address + HPAGE_PMD_SIZE;
  1054. mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
  1055. spin_lock(&mm->page_table_lock);
  1056. pmd = page_check_address_pmd(page, mm, address,
  1057. PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
  1058. if (pmd) {
  1059. /*
  1060. * We can't temporarily set the pmd to null in order
  1061. * to split it, the pmd must remain marked huge at all
  1062. * times or the VM won't take the pmd_trans_huge paths
  1063. * and it won't wait on the anon_vma->root->mutex to
  1064. * serialize against split_huge_page*.
  1065. */
  1066. pmdp_splitting_flush(vma, address, pmd);
  1067. ret = 1;
  1068. }
  1069. spin_unlock(&mm->page_table_lock);
  1070. mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
  1071. return ret;
  1072. }
  1073. static void __split_huge_page_refcount(struct page *page)
  1074. {
  1075. int i;
  1076. struct zone *zone = page_zone(page);
  1077. struct lruvec *lruvec;
  1078. int tail_count = 0;
  1079. /* prevent PageLRU to go away from under us, and freeze lru stats */
  1080. spin_lock_irq(&zone->lru_lock);
  1081. lruvec = mem_cgroup_page_lruvec(page, zone);
  1082. compound_lock(page);
  1083. /* complete memcg works before add pages to LRU */
  1084. mem_cgroup_split_huge_fixup(page);
  1085. for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
  1086. struct page *page_tail = page + i;
  1087. /* tail_page->_mapcount cannot change */
  1088. BUG_ON(page_mapcount(page_tail) < 0);
  1089. tail_count += page_mapcount(page_tail);
  1090. /* check for overflow */
  1091. BUG_ON(tail_count < 0);
  1092. BUG_ON(atomic_read(&page_tail->_count) != 0);
  1093. /*
  1094. * tail_page->_count is zero and not changing from
  1095. * under us. But get_page_unless_zero() may be running
  1096. * from under us on the tail_page. If we used
  1097. * atomic_set() below instead of atomic_add(), we
  1098. * would then run atomic_set() concurrently with
  1099. * get_page_unless_zero(), and atomic_set() is
  1100. * implemented in C not using locked ops. spin_unlock
  1101. * on x86 sometimes uses locked ops because of PPro
  1102. * errata 66, 92, so unless somebody can guarantee
  1103. * atomic_set() here would be safe on all archs (and
  1104. * not only on x86), it's safer to use atomic_add().
  1105. */
  1106. atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
  1107. &page_tail->_count);
  1108. /* after clearing PageTail the gup refcount can be released */
  1109. smp_mb();
  1110. /*
  1111. * retain hwpoison flag of the poisoned tail page:
  1112. * fix for an unsuitable process being killed on the guest machine (KVM)
  1113. * by memory-failure.
  1114. */
  1115. page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
  1116. page_tail->flags |= (page->flags &
  1117. ((1L << PG_referenced) |
  1118. (1L << PG_swapbacked) |
  1119. (1L << PG_mlocked) |
  1120. (1L << PG_uptodate)));
  1121. page_tail->flags |= (1L << PG_dirty);
  1122. /* clear PageTail before overwriting first_page */
  1123. smp_wmb();
  1124. /*
  1125. * __split_huge_page_splitting() already set the
  1126. * splitting bit in all pmd that could map this
  1127. * hugepage, that will ensure no CPU can alter the
  1128. * mapcount on the head page. The mapcount is only
  1129. * accounted in the head page and it has to be
  1130. * transferred to all tail pages in the below code. So
  1131. * for this code to be safe, the mapcount can't change during
  1132. * the split. But that doesn't mean userland can't
  1133. * keep changing and reading the page contents while
  1134. * we transfer the mapcount, so the pmd splitting
  1135. * status is achieved setting a reserved bit in the
  1136. * pmd, not by clearing the present bit.
  1137. */
  1138. page_tail->_mapcount = page->_mapcount;
  1139. BUG_ON(page_tail->mapping);
  1140. page_tail->mapping = page->mapping;
  1141. page_tail->index = page->index + i;
  1142. BUG_ON(!PageAnon(page_tail));
  1143. BUG_ON(!PageUptodate(page_tail));
  1144. BUG_ON(!PageDirty(page_tail));
  1145. BUG_ON(!PageSwapBacked(page_tail));
  1146. lru_add_page_tail(page, page_tail, lruvec);
  1147. }
  1148. atomic_sub(tail_count, &page->_count);
  1149. BUG_ON(atomic_read(&page->_count) <= 0);
  1150. __mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
  1151. __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
  1152. ClearPageCompound(page);
  1153. compound_unlock(page);
  1154. spin_unlock_irq(&zone->lru_lock);
  1155. for (i = 1; i < HPAGE_PMD_NR; i++) {
  1156. struct page *page_tail = page + i;
  1157. BUG_ON(page_count(page_tail) <= 0);
  1158. /*
  1159. * Tail pages may be freed if there wasn't any mapping left,
  1160. * e.g. if add_to_swap() is running on a lru page that
  1161. * had its mapping zapped. And freeing these pages
  1162. * requires taking the lru_lock so we do the put_page
  1163. * of the tail pages after the split is complete.
  1164. */
  1165. put_page(page_tail);
  1166. }
  1167. /*
  1168. * Only the head page (now become a regular page) is required
  1169. * to be pinned by the caller.
  1170. */
  1171. BUG_ON(page_count(page) <= 0);
  1172. }
  1173. static int __split_huge_page_map(struct page *page,
  1174. struct vm_area_struct *vma,
  1175. unsigned long address)
  1176. {
  1177. struct mm_struct *mm = vma->vm_mm;
  1178. pmd_t *pmd, _pmd;
  1179. int ret = 0, i;
  1180. pgtable_t pgtable;
  1181. unsigned long haddr;
  1182. spin_lock(&mm->page_table_lock);
  1183. pmd = page_check_address_pmd(page, mm, address,
  1184. PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
  1185. if (pmd) {
  1186. pgtable = pgtable_trans_huge_withdraw(mm);
  1187. pmd_populate(mm, &_pmd, pgtable);
  1188. haddr = address;
  1189. for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
  1190. pte_t *pte, entry;
  1191. BUG_ON(PageCompound(page+i));
  1192. entry = mk_pte(page + i, vma->vm_page_prot);
  1193. entry = maybe_mkwrite(pte_mkdirty(entry), vma);
  1194. if (!pmd_write(*pmd))
  1195. entry = pte_wrprotect(entry);
  1196. else
  1197. BUG_ON(page_mapcount(page) != 1);
  1198. if (!pmd_young(*pmd))
  1199. entry = pte_mkold(entry);
  1200. pte = pte_offset_map(&_pmd, haddr);
  1201. BUG_ON(!pte_none(*pte));
  1202. set_pte_at(mm, haddr, pte, entry);
  1203. pte_unmap(pte);
  1204. }
  1205. smp_wmb(); /* make pte visible before pmd */
  1206. /*
  1207. * Up to this point the pmd is present and huge and
  1208. * userland has full access to the hugepage
  1209. * during the split (which happens in place). If we
  1210. * overwrite the pmd with the not-huge version
  1211. * pointing to the pte here (which of course we could
  1212. * if all CPUs were bug free), userland could trigger
  1213. * a small page size TLB miss on the small sized TLB
  1214. * while the hugepage TLB entry is still established
  1215. * in the huge TLB. Some CPUs don't like that. See
  1216. * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
  1217. * Erratum 383 on page 93. Intel should be safe but it
  1218. * also warns that it's only safe if the permission
  1219. * and cache attributes of the two entries loaded in
  1220. * the two TLB is identical (which should be the case
  1221. * here). But it is generally safer to never allow
  1222. * small and huge TLB entries for the same virtual
  1223. * address to be loaded simultaneously. So instead of
  1224. * doing "pmd_populate(); flush_tlb_range();" we first
  1225. * mark the current pmd notpresent (atomically because
  1226. * here the pmd_trans_huge and pmd_trans_splitting
  1227. * must remain set at all times on the pmd until the
  1228. * split is complete for this pmd), then we flush the
  1229. * SMP TLB and finally we write the non-huge version
  1230. * of the pmd entry with pmd_populate.
  1231. */
  1232. pmdp_invalidate(vma, address, pmd);
  1233. pmd_populate(mm, pmd, pgtable);
  1234. ret = 1;
  1235. }
  1236. spin_unlock(&mm->page_table_lock);
  1237. return ret;
  1238. }
  1239. /* must be called with anon_vma->root->mutex held */
  1240. static void __split_huge_page(struct page *page,
  1241. struct anon_vma *anon_vma)
  1242. {
  1243. int mapcount, mapcount2;
  1244. pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
  1245. struct anon_vma_chain *avc;
  1246. BUG_ON(!PageHead(page));
  1247. BUG_ON(PageTail(page));
  1248. mapcount = 0;
  1249. anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
  1250. struct vm_area_struct *vma = avc->vma;
  1251. unsigned long addr = vma_address(page, vma);
  1252. BUG_ON(is_vma_temporary_stack(vma));
  1253. mapcount += __split_huge_page_splitting(page, vma, addr);
  1254. }
  1255. /*
  1256. * It is critical that new vmas are added to the tail of the
  1257. * anon_vma list. This guarantees that if copy_huge_pmd() runs
  1258. * and establishes a child pmd before
  1259. * __split_huge_page_splitting() freezes the parent pmd (so if
  1260. * we fail to prevent copy_huge_pmd() from running until the
  1261. * whole __split_huge_page() is complete), we will still see
  1262. * the newly established pmd of the child later during the
  1263. * walk, to be able to set it as pmd_trans_splitting too.
  1264. */
  1265. if (mapcount != page_mapcount(page))
  1266. printk(KERN_ERR "mapcount %d page_mapcount %d\n",
  1267. mapcount, page_mapcount(page));
  1268. BUG_ON(mapcount != page_mapcount(page));
  1269. __split_huge_page_refcount(page);
  1270. mapcount2 = 0;
  1271. anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
  1272. struct vm_area_struct *vma = avc->vma;
  1273. unsigned long addr = vma_address(page, vma);
  1274. BUG_ON(is_vma_temporary_stack(vma));
  1275. mapcount2 += __split_huge_page_map(page, vma, addr);
  1276. }
  1277. if (mapcount != mapcount2)
  1278. printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
  1279. mapcount, mapcount2, page_mapcount(page));
  1280. BUG_ON(mapcount != mapcount2);
  1281. }
  1282. int split_huge_page(struct page *page)
  1283. {
  1284. struct anon_vma *anon_vma;
  1285. int ret = 1;
  1286. BUG_ON(!PageAnon(page));
  1287. anon_vma = page_lock_anon_vma(page);
  1288. if (!anon_vma)
  1289. goto out;
  1290. ret = 0;
  1291. if (!PageCompound(page))
  1292. goto out_unlock;
  1293. BUG_ON(!PageSwapBacked(page));
  1294. __split_huge_page(page, anon_vma);
  1295. count_vm_event(THP_SPLIT);
  1296. BUG_ON(PageCompound(page));
  1297. out_unlock:
  1298. page_unlock_anon_vma(anon_vma);
  1299. out:
  1300. return ret;
  1301. }
#define VM_NO_THP (VM_SPECIAL|VM_MIXEDMAP|VM_HUGETLB|VM_SHARED|VM_MAYSHARE)

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	struct mm_struct *mm = vma->vm_mm;

	switch (advice) {
	case MADV_HUGEPAGE:
		/*
		 * Be somewhat over-protective like KSM for now!
		 */
		if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
			return -EINVAL;
		if (mm->def_flags & VM_NOHUGEPAGE)
			return -EINVAL;
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault
		 * that may not happen any time soon.
		 */
		if (unlikely(khugepaged_enter_vma_merge(vma)))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		/*
		 * Be somewhat over-protective like KSM for now!
		 */
		if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
			return -EINVAL;
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from
		 * scanning this vma even if the mm stays registered in
		 * khugepaged (it may have been registered before
		 * VM_NOHUGEPAGE was set).
		 */
		break;
	}

	return 0;
}
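
/*
 * For reference, hugepage_madvise() is reached from userspace via
 * madvise(2).  A minimal, hypothetical caller marking an anonymous
 * mapping as a THP candidate would look roughly like:
 *
 *	void *buf = mmap(NULL, 4UL << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (madvise(buf, 4UL << 20, MADV_HUGEPAGE))
 *		perror("madvise(MADV_HUGEPAGE)");
 *
 * MADV_NOHUGEPAGE undoes this and also keeps khugepaged away from the
 * range, as described above.
 */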
static int __init khugepaged_slab_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	return 0;
}

static void __init khugepaged_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	mm_slot_cache = NULL;
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static int __init mm_slots_hash_init(void)
{
	mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
				GFP_KERNEL);
	if (!mm_slots_hash)
		return -ENOMEM;
	return 0;
}

#if 0
static void __init mm_slots_hash_free(void)
{
	kfree(mm_slots_hash);
	mm_slots_hash = NULL;
}
#endif

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	struct hlist_head *bucket;
	struct hlist_node *node;

	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
				% MM_SLOTS_HASH_HEADS];
	hlist_for_each_entry(mm_slot, node, bucket, hash) {
		if (mm == mm_slot->mm)
			return mm_slot;
	}
	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	struct hlist_head *bucket;

	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
				% MM_SLOTS_HASH_HEADS];
	mm_slot->mm = mm;
	hlist_add_head(&mm_slot->hash, bucket);
}
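
/*
 * Note on the bucket computation above: mm_structs come from a slab
 * cache, so dividing the pointer by sizeof(struct mm_struct) before the
 * modulo looks intended to turn object addresses (spaced roughly one
 * object apart) into small, well-spread indices.  A sketch of the same
 * index calculation:
 *
 *	unsigned long idx = ((unsigned long)mm / sizeof(struct mm_struct))
 *			    % MM_SLOTS_HASH_HEADS;
 *	bucket = &mm_slots_hash[idx];
 */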
static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON(khugepaged_test_exit(mm));
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	atomic_inc(&mm->mm_count);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}
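
/*
 * Note: the atomic_inc(&mm->mm_count) above pins the mm_struct itself
 * (not its address space) while the slot sits on khugepaged's scan
 * list; the matching mmdrop() happens in __khugepaged_exit() or
 * collect_mm_slot() once the slot is released.
 */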
int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
{
	unsigned long hstart, hend;

	if (!vma->anon_vma)
		/*
		 * Not yet faulted in so we will register later in the
		 * page fault if needed.
		 */
		return 0;
	if (vma->vm_ops)
		/* khugepaged not yet working on file or special mappings */
		return 0;
	VM_BUG_ON(vma->vm_flags & VM_NO_THP);
	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma);
	return 0;
}
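
/*
 * Worked example of the hstart/hend rounding above, assuming
 * HPAGE_PMD_SIZE is 2MB (the usual x86-64 value): for a vma covering
 * [0x00201000, 0x00803000),
 *
 *	hstart = (0x00201000 + 0x001fffff) & ~0x001fffff = 0x00400000;
 *	hend   =  0x00803000              & ~0x001fffff = 0x00800000;
 *
 * hstart < hend, so the vma contains at least one fully aligned
 * hugepage-sized range and is worth registering with khugepaged.
 */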
void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hlist_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap_sem read mode). Stop here (after we return,
		 * all pagetables will be destroyed) until khugepaged
		 * has finished working on the pagetables under the
		 * mmap_sem.
		 */
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}
}
static void release_pte_page(struct page *page)
{
	/* 0 stands for page_is_file_cache(page) == false */
	dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte)
{
	while (--_pte >= pte) {
		pte_t pteval = *_pte;
		if (!pte_none(pteval))
			release_pte_page(pte_page(pteval));
	}
}

static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte)
{
	struct page *page;
	pte_t *_pte;
	int referenced = 0, none = 0;

	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval)) {
			if (++none <= khugepaged_max_ptes_none)
				continue;
			else
				goto out;
		}
		if (!pte_present(pteval) || !pte_write(pteval))
			goto out;
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page))
			goto out;
		VM_BUG_ON(PageCompound(page));
		BUG_ON(!PageAnon(page));
		VM_BUG_ON(!PageSwapBacked(page));

		/* cannot use mapcount: can't collapse if there's a gup pin */
		if (page_count(page) != 1)
			goto out;
		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page))
			goto out;
		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			goto out;
		}
		/* 0 stands for page_is_file_cache(page) == false */
		inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
		VM_BUG_ON(!PageLocked(page));
		VM_BUG_ON(PageLRU(page));

		/* If no mapped pte is young, don't collapse the page */
		if (pte_young(pteval) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced = 1;
	}
	if (likely(referenced))
		return 1;
out:
	release_pte_pages(pte, _pte);
	return 0;
}
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl)
{
	pte_t *_pte;
	for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
		pte_t pteval = *_pte;
		struct page *src_page;

		if (pte_none(pteval)) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			VM_BUG_ON(page_mapcount(src_page) != 1);
			release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}

		address += PAGE_SIZE;
		page++;
	}
}

static void khugepaged_alloc_sleep(void)
{
	wait_event_freezable_timeout(khugepaged_wait, false,
			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
}
#ifdef CONFIG_NUMA
static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page
*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
		       struct vm_area_struct *vma, unsigned long address,
		       int node)
{
	VM_BUG_ON(*hpage);
	/*
	 * Allocate the page while the vma is still valid and under
	 * the mmap_sem read mode so there is no memory allocation
	 * later when we take the mmap_sem in write mode. This is more
	 * friendly behavior (OTOH it may actually hide bugs) to
	 * filesystems in userland with daemons allocating memory in
	 * the userland I/O paths. Allocating memory with the
	 * mmap_sem in read mode is also a good idea to allow greater
	 * scalability.
	 */
	*hpage = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
				    node, __GFP_OTHER_NODE);

	/*
	 * After allocating the hugepage, release the mmap_sem read lock in
	 * preparation for taking it in write mode.
	 */
	up_read(&mm->mmap_sem);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_hugepage(khugepaged_defrag());
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page
*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
		       struct vm_area_struct *vma, unsigned long address,
		       int node)
{
	up_read(&mm->mmap_sem);
	VM_BUG_ON(!*hpage);
	return *hpage;
}
#endif
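
/*
 * Overview of the collapse path below, summarizing the code in order:
 * allocate the hugepage and drop the mmap_sem read lock, retake the
 * mmap_sem for write and revalidate the vma and pmd, clear and flush
 * the pmd so gup_fast can no longer run, isolate and copy the small
 * pages into the hugepage, then install the huge pmd and deposit the
 * now-unused pte page table for a later split.
 */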
static void collapse_huge_page(struct mm_struct *mm,
			       unsigned long address,
			       struct page **hpage,
			       struct vm_area_struct *vma,
			       int node)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *ptl;
	int isolated;
	unsigned long hstart, hend;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* release the mmap_sem read lock. */
	new_page = khugepaged_alloc_page(hpage, mm, vma, address, node);
	if (!new_page)
		return;

	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
		return;

	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	down_write(&mm->mmap_sem);
	if (unlikely(khugepaged_test_exit(mm)))
		goto out;

	vma = find_vma(mm, address);
	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		goto out;

	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
	    (vma->vm_flags & VM_NOHUGEPAGE))
		goto out;

	if (!vma->anon_vma || vma->vm_ops)
		goto out;
	if (is_vma_temporary_stack(vma))
		goto out;
	VM_BUG_ON(vma->vm_flags & VM_NO_THP);

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	/* pmd can't go away or become huge under us */
	if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
		goto out;

	anon_vma_lock(vma->anon_vma);

	pte = pte_offset_map(pmd, address);
	ptl = pte_lockptr(mm, pmd);

	mmun_start = address;
	mmun_end   = address + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	spin_lock(&mm->page_table_lock); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_clear_flush(vma, address, pmd);
	spin_unlock(&mm->page_table_lock);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	spin_lock(ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte);
	spin_unlock(ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(&mm->page_table_lock);
		BUG_ON(!pmd_none(*pmd));
		set_pmd_at(mm, address, pmd, _pmd);
		spin_unlock(&mm->page_table_lock);
		anon_vma_unlock(vma->anon_vma);
		goto out;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, ptl);
	pte_unmap(pte);
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
	_pmd = pmd_mkhuge(_pmd);

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to prevent the copy_huge_page writes from
	 * becoming visible after the set_pmd_at() write.
	 */
	smp_wmb();

	spin_lock(&mm->page_table_lock);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	pgtable_trans_huge_deposit(mm, pgtable);
	spin_unlock(&mm->page_table_lock);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
out_up_write:
	up_write(&mm->mmap_sem);
	return;

out:
	mem_cgroup_uncharge_page(new_page);
	goto out_up_write;
}
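
/*
 * khugepaged_scan_pmd() below decides whether the hugepage-aligned
 * range at "address" is worth collapsing: every pte must be present,
 * writable and map an anonymous, unlocked, LRU page with no extra
 * (e.g. gup) reference, at most khugepaged_max_ptes_none ptes may be
 * empty, and at least one pte or page must look recently referenced.
 * Only then is collapse_huge_page() called, which drops the mmap_sem.
 */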
static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, referenced = 0, none = 0;
	struct page *page;
	unsigned long _address;
	spinlock_t *ptl;
	int node = -1;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
		goto out;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval)) {
			if (++none <= khugepaged_max_ptes_none)
				continue;
			else
				goto out_unmap;
		}
		if (!pte_present(pteval) || !pte_write(pteval))
			goto out_unmap;
		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page))
			goto out_unmap;
		/*
		 * Choose the node of the first page. This could
		 * be more sophisticated and look at more pages,
		 * but isn't for now.
		 */
		if (node == -1)
			node = page_to_nid(page);
		VM_BUG_ON(PageCompound(page));
		if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
			goto out_unmap;
		/* cannot use mapcount: can't collapse if there's a gup pin */
		if (page_count(page) != 1)
			goto out_unmap;
		if (pte_young(pteval) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced = 1;
	}
	if (referenced)
		ret = 1;
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret)
		/* collapse_huge_page will return with the mmap_sem released */
		collapse_huge_page(mm, address, hpage, vma, node);
out:
	return ret;
}
static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hlist_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}
static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
					    struct page **hpage)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);

	mm = mm_slot->mm;
	down_read(&mm->mmap_sem);
	if (unlikely(khugepaged_test_exit(mm)))
		vma = NULL;
	else
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(khugepaged_test_exit(mm))) {
			progress++;
			break;
		}

		if ((!(vma->vm_flags & VM_HUGEPAGE) &&
		     !khugepaged_always()) ||
		    (vma->vm_flags & VM_NOHUGEPAGE)) {
		skip:
			progress++;
			continue;
		}
		if (!vma->anon_vma || vma->vm_ops)
			goto skip;
		if (is_vma_temporary_stack(vma))
			goto skip;
		VM_BUG_ON(vma->vm_flags & VM_NO_THP);

		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		hend = vma->vm_end & HPAGE_PMD_MASK;
		if (hstart >= hend)
			goto skip;
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);

		while (khugepaged_scan.address < hend) {
			int ret;
			cond_resched();
			if (unlikely(khugepaged_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			ret = khugepaged_scan_pmd(mm, vma,
						  khugepaged_scan.address,
						  hpage);
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (ret)
				/* we released mmap_sem so break loop */
				goto breakouterloop_mmap_sem;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_sem:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (khugepaged_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}
static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}

static void khugepaged_do_scan(void)
{
	struct page *hpage = NULL;
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = khugepaged_pages_to_scan;
	bool wait = true;

	barrier(); /* write khugepaged_pages_to_scan to local stack */

	while (progress < pages) {
		if (!khugepaged_prealloc_page(&hpage, &wait))
			break;

		cond_resched();

		if (unlikely(kthread_should_stop() || freezing(current)))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}

	if (!IS_ERR_OR_NULL(hpage))
		put_page(hpage);
}
static void khugepaged_wait_work(void)
{
	try_to_freeze();

	if (khugepaged_has_work()) {
		if (!khugepaged_scan_sleep_millisecs)
			return;

		wait_event_freezable_timeout(khugepaged_wait,
					     kthread_should_stop(),
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
		return;
	}

	if (khugepaged_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}

static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, 19);

	while (!kthread_should_stop()) {
		khugepaged_do_scan();
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}
void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
{
	struct page *page;

	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(&mm->page_table_lock);
		return;
	}
	page = pmd_page(*pmd);
	VM_BUG_ON(!page_count(page));
	get_page(page);
	spin_unlock(&mm->page_table_lock);

	split_huge_page(page);

	put_page(page);
	BUG_ON(pmd_trans_huge(*pmd));
}

static void split_huge_page_address(struct mm_struct *mm,
				    unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return;
	/*
	 * Caller holds the mmap_sem write mode, so a huge pmd cannot
	 * materialize from under us.
	 */
	split_huge_page_pmd(mm, pmd);
}
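
/*
 * __vma_adjust_trans_huge() below runs while vma boundaries are being
 * moved (mmap/munmap/mremap adjusting vm_start, vm_end or the next
 * vma's vm_start).  A sketch of when it matters, assuming 2MB
 * hugepages: if a vma backed by a THP at [0x00400000, 0x00600000) is
 * shrunk so that its new end becomes 0x00500000, the end is no longer
 * hugepage aligned, so the huge pmd covering that range must first be
 * split back into ptes via split_huge_page_address().
 */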
void __vma_adjust_trans_huge(struct vm_area_struct *vma,
			     unsigned long start,
			     unsigned long end,
			     long adjust_next)
{
	/*
	 * If the new start address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (start & ~HPAGE_PMD_MASK &&
	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_page_address(vma->vm_mm, start);

	/*
	 * If the new end address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (end & ~HPAGE_PMD_MASK &&
	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_page_address(vma->vm_mm, end);

	/*
	 * If we're also updating vma->vm_next->vm_start, and the new
	 * vm_next->vm_start isn't hpage aligned and it could previously
	 * contain a hugepage: check if we need to split a huge pmd.
	 */
	if (adjust_next > 0) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long nstart = next->vm_start;
		nstart += adjust_next << PAGE_SHIFT;
		if (nstart & ~HPAGE_PMD_MASK &&
		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
			split_huge_page_address(next->vm_mm, nstart);
	}
}