hugetlb.c
/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/io.h>

#include <linux/hugetlb.h>
#include <linux/node.h>
#include "internal.h"
const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

static int max_hstate;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;

#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);
/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are protected by a combination of the mmap_sem
 * and the hugetlb_instantiation_mutex. To access or modify a region the
 * caller must either hold the mmap_sem for write, or the mmap_sem for read
 * and the hugetlb_instantiation mutex:
 *
 *	down_write(&mm->mmap_sem);
 * or
 *	down_read(&mm->mmap_sem);
 *	mutex_lock(&hugetlb_instantiation_mutex);
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};
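
/*
 * Add the range [f, t) to the region list, merging with any existing
 * regions it overlaps.  Assumes a prior region_chg() call for this range
 * has already inserted a region to extend, so no allocation is needed
 * here and the call cannot fail.
 */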
static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher, then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}
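
/*
 * Return the number of additional pages that reserving the range [f, t)
 * would add to the region list, without committing the change.  May
 * insert a zero-size placeholder region so that a subsequent
 * region_add() for the same range cannot fail; returns -ENOMEM if that
 * allocation fails.
 */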
static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}
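
/*
 * Truncate the region list at 'end': trim any region that straddles the
 * cut point and free all regions beyond it.  Returns the number of
 * pages removed from the list.
 */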
static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}
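
/*
 * Return the number of pages in the range [f, t) that are covered by
 * existing regions in the list.
 */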
static long region_count(struct list_head *head, long f, long t)
{
	struct file_region *rg;
	long chg = 0;

	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	return chg;
}
/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as that used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	struct hstate *hstate;

	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	hstate = hstate_vma(vma);

	return 1UL << (hstate->order + PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific version of this
 * function is required.
 */
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}
#endif

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have its future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and it persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have
 * consumed reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

struct resv_map {
	struct kref refs;
	struct list_head regions;
};

static struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	if (!resv_map)
		return NULL;

	kref_init(&resv_map->refs);
	INIT_LIST_HEAD(&resv_map->regions);

	return resv_map;
}

static void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);

	/* Clear out any active regions before we release the map. */
	region_truncate(&resv_map->regions, 0);
	kfree(resv_map);
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_MAYSHARE))
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	return NULL;
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Decrement the reserved pages in the hugepage pool by one */
static void decrement_hugepage_resv_vma(struct hstate *h,
			struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NORESERVE)
		return;

	if (vma->vm_flags & VM_MAYSHARE) {
		/* Shared mappings always use reserves */
		h->resv_huge_pages--;
	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Only the process that called mmap() has reserves for
		 * private mappings.
		 */
		h->resv_huge_pages--;
	}
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_MAYSHARE)
		return 1;
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return 1;
	return 0;
}
static void clear_gigantic_page(struct page *page,
			unsigned long addr, unsigned long sz)
{
	int i;
	struct page *p = page;

	might_sleep();
	for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
		cond_resched();
		clear_user_highpage(p, addr + i * PAGE_SIZE);
	}
}

static void clear_huge_page(struct page *page,
			unsigned long addr, unsigned long sz)
{
	int i;

	if (unlikely(sz/PAGE_SIZE > MAX_ORDER_NR_PAGES)) {
		clear_gigantic_page(page, addr, sz);
		return;
	}

	might_sleep();
	for (i = 0; i < sz/PAGE_SIZE; i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_gigantic_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;
	struct hstate *h = hstate_vma(vma);
	struct page *dst_base = dst;
	struct page *src_base = src;

	might_sleep();
	for (i = 0; i < pages_per_huge_page(h); ) {
		cond_resched();
		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;
	struct hstate *h = hstate_vma(vma);

	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
		copy_gigantic_page(dst, src, addr, vma);
		return;
	}

	might_sleep();
	for (i = 0; i < pages_per_huge_page(h); i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}
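
/*
 * Free-list helpers: huge pages sit on per-node free lists, with the
 * global and per-node free counters kept in sync under hugetlb_lock.
 */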
static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	if (list_empty(&h->hugepage_freelists[nid]))
		return NULL;
	page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
	list_del(&page->lru);
	h->free_huge_pages--;
	h->free_huge_pages_node[nid]--;
	return page;
}
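
/*
 * Dequeue a free huge page for the given VMA and fault address, walking
 * the zonelist dictated by the VMA's memory policy and honouring (or,
 * if avoid_reserve is set, deliberately bypassing) reserve accounting.
 */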
static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve)
{
	struct page *page = NULL;
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;

	get_mems_allowed();
	zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol, &nodemask);
	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						MAX_NR_ZONES - 1, nodemask) {
		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
			page = dequeue_huge_page_node(h, zone_to_nid(zone));
			if (page) {
				if (!avoid_reserve)
					decrement_hugepage_resv_vma(h, vma);
				break;
			}
		}
	}
err:
	mpol_cond_put(mpol);
	put_mems_allowed();
	return page;
}
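
/*
 * Return a huge page to the buddy allocator: clear stale page flags on
 * each constituent page, drop the compound destructor and free the
 * whole order.  Not usable for gigantic (order >= MAX_ORDER) pages.
 */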
static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	VM_BUG_ON(h->order >= MAX_ORDER);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	arch_release_hugepage(page);
	__free_pages(page, huge_page_order(h));
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}
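
/*
 * Compound page destructor for huge pages: either return the page to
 * the free lists or, if it is an unneeded surplus page, release it back
 * to the buddy allocator, then drop any quota charged against the
 * backing mapping.
 */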
static void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct address_space *mapping;

	mapping = (struct address_space *) page_private(page);
	set_page_private(page, 0);
	page->mapping = NULL;
	BUG_ON(page_count(page));
	BUG_ON(page_mapcount(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
	if (mapping)
		hugetlb_put_quota(mapping, 1);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	set_compound_page_dtor(page, free_huge_page);
	spin_lock(&hugetlb_lock);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
	put_page(page); /* free it into the hugepage allocator */
}

static void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		__SetPageTail(p);
		p->first_page = page;
	}
}
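
/*
 * Test whether a compound page is a hugetlb page: a page is one exactly
 * when its compound head's destructor is free_huge_page.
 */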
int PageHuge(struct page *page)
{
	compound_page_dtor *dtor;

	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	dtor = get_compound_page_dtor(page);

	return dtor == free_huge_page;
}
EXPORT_SYMBOL_GPL(PageHuge);

static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	if (h->order >= MAX_ORDER)
		return NULL;

	page = alloc_pages_exact_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
						__GFP_REPEAT|__GFP_NOWARN,
		huge_page_order(h));
	if (page) {
		if (arch_prepare_hugepage(page)) {
			__free_pages(page, huge_page_order(h));
			return NULL;
		}
		prep_new_huge_page(h, page, nid);
	}

	return page;
}
/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node(nid, *nodes_allowed);
	if (nid == MAX_NUMNODES)
		nid = first_node(*nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
	next_nid = start_nid;

	do {
		page = alloc_fresh_huge_page_node(h, next_nid);
		if (page) {
			ret = 1;
			break;
		}
		next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
	} while (next_nid != start_nid);

	if (ret)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return ret;
}
/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
							 bool acct_surplus)
{
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hstate_next_node_to_free(h, nodes_allowed);
	next_nid = start_nid;

	do {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
		    !list_empty(&h->hugepage_freelists[next_nid])) {
			struct page *page =
				list_entry(h->hugepage_freelists[next_nid].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[next_nid]--;
			if (acct_surplus) {
				h->surplus_huge_pages--;
				h->surplus_huge_pages_node[next_nid]--;
			}
			update_and_free_page(h, page);
			ret = 1;
			break;
		}
		next_nid = hstate_next_node_to_free(h, nodes_allowed);
	} while (next_nid != start_nid);

	return ret;
}
static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
{
	struct page *page;
	unsigned int r_nid;

	if (h->order >= MAX_ORDER)
		return NULL;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit.
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		h->nr_huge_pages++;
		h->surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	if (nid == NUMA_NO_NODE)
		page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
				   __GFP_REPEAT|__GFP_NOWARN,
				   huge_page_order(h));
	else
		page = alloc_pages_exact_node(nid,
			htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
			__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));

	if (page && arch_prepare_hugepage(page)) {
		__free_pages(page, huge_page_order(h));
		return NULL;
	}

	spin_lock(&hugetlb_lock);
	if (page) {
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON(page_count(page));
		r_nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		/*
		 * We incremented the global counters already
		 */
		h->nr_huge_pages_node[r_nid]++;
		h->surplus_huge_pages_node[r_nid]++;
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	} else {
		h->nr_huge_pages--;
		h->surplus_huge_pages--;
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
	}
	spin_unlock(&hugetlb_lock);

	return page;
}
/*
 * This allocation function is useful in the context where vma is irrelevant.
 * E.g. soft-offlining uses this function because it only cares about the
 * physical address of the error page.
 */
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_node(h, nid);
	spin_unlock(&hugetlb_lock);

	if (!page)
		page = alloc_buddy_huge_page(h, nid);

	return page;
}
/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
		if (!page) {
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			spin_lock(&hugetlb_lock);
			needed = 0;
			goto free;
		}

		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;
free:
	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		list_del(&page->lru);
		enqueue_huge_page(h, page);
	}

	/* Free unnecessary surplus pages to the buddy allocator */
	if (!list_empty(&surplus_list)) {
		spin_unlock(&hugetlb_lock);
		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
			list_del(&page->lru);
			/*
			 * The page has a reference count of zero already, so
			 * call free_huge_page directly instead of using
			 * put_page.  This must be done with hugetlb_lock
			 * unlocked which is safe because free_huge_page takes
			 * hugetlb_lock before deciding how to free the page.
			 */
			free_huge_page(page);
		}
		spin_lock(&hugetlb_lock);
	}

	return ret;
}
/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they
 * were never used.
 * Called with hugetlb_lock held.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	unsigned long nr_pages;

	/* Uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;

	/* Cannot return gigantic pages currently */
	if (h->order >= MAX_ORDER)
		return;

	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes with memory. Iterate across these nodes
	 * until we can no longer free unreserved surplus pages. This occurs
	 * when the nodes with surplus pages have no free pages.
	 * free_pool_huge_page() will balance the freed pages across the
	 * on-line nodes with memory and will handle the hstate accounting.
	 */
	while (nr_pages--) {
		if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
			break;
	}
}
/*
 * Determine if the huge page at addr within the vma has an associated
 * reservation.  Where it does not we will need to logically increase
 * the reservation and actually increase the quota before an allocation
 * can occur.  Where any new reservation would be required the
 * reservation change is prepared, but not committed.  Once the page
 * has been quota'd, allocated and instantiated, the change should be
 * committed via vma_commit_reservation.  No action is required on
 * failure.
 */
static long vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_MAYSHARE) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		return region_chg(&inode->i_mapping->private_list,
							idx, idx + 1);

	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		return 1;

	} else {
		long err;
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		err = region_chg(&reservations->regions, idx, idx + 1);
		if (err < 0)
			return err;
		return 0;
	}
}
static void vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_MAYSHARE) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		region_add(&inode->i_mapping->private_list, idx, idx + 1);

	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		/* Mark this page used in the map. */
		region_add(&reservations->regions, idx, idx + 1);
	}
}
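
/*
 * Allocate a huge page for the given VMA and fault address: charge
 * quota if a new reservation is needed, prefer a reserved page from the
 * free lists, fall back to a surplus page from the buddy allocator, and
 * finally commit the reservation.
 */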
static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct hstate *h = hstate_vma(vma);
	struct page *page;
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	long chg;

	/*
	 * Processes that did not create the mapping will have no reserves
	 * and will not have accounted against quota. Check that the quota
	 * can be made before satisfying the allocation.
	 * MAP_NORESERVE mappings may also need pages and quota allocated
	 * if no reserve mapping overlaps.
	 */
	chg = vma_needs_reservation(h, vma, addr);
	if (chg < 0)
		return ERR_PTR(chg);
	if (chg)
		if (hugetlb_get_quota(inode->i_mapping, chg))
			return ERR_PTR(-ENOSPC);

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
	spin_unlock(&hugetlb_lock);

	if (!page) {
		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
		if (!page) {
			hugetlb_put_quota(inode->i_mapping, chg);
			return ERR_PTR(-VM_FAULT_SIGBUS);
		}
	}

	set_page_refcounted(page);
	set_page_private(page, (unsigned long) mapping);

	vma_commit_reservation(h, vma, addr);

	return page;
}
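
/*
 * Allocate a gigantic huge page from bootmem, spreading attempts across
 * the nodes with memory.  The start of the page itself temporarily
 * holds the huge_bootmem_page bookkeeping until
 * gather_bootmem_prealloc() moves the pages into the mem_map.
 */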
int __weak alloc_bootmem_huge_page(struct hstate *h)
{
	struct huge_bootmem_page *m;
	int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);

	while (nr_nodes) {
		void *addr;

		addr = __alloc_bootmem_node_nopanic(
				NODE_DATA(hstate_next_node_to_alloc(h,
						&node_states[N_HIGH_MEMORY])),
				huge_page_size(h), huge_page_size(h), 0);

		if (addr) {
			/*
			 * Use the beginning of the huge page to store the
			 * huge_bootmem_page struct (until gather_bootmem
			 * puts them into the mem_map).
			 */
			m = addr;
			goto found;
		}
		nr_nodes--;
	}
	return 0;

found:
	BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
	/* Put them into a private list first because mem_map is not up yet */
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}

static void prep_compound_huge_page(struct page *page, int order)
{
	if (unlikely(order > (MAX_ORDER - 1)))
		prep_compound_gigantic_page(page, order);
	else
		prep_compound_page(page, order);
}

/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
	struct huge_bootmem_page *m;

	list_for_each_entry(m, &huge_boot_pages, list) {
		struct page *page = virt_to_page(m);
		struct hstate *h = m->hstate;
		__ClearPageReserved(page);
		WARN_ON(page_count(page) != 1);
		prep_compound_huge_page(page, h->order);
		prep_new_huge_page(h, page, page_to_nid(page));
	}
}

static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
	unsigned long i;

	for (i = 0; i < h->max_huge_pages; ++i) {
		if (h->order >= MAX_ORDER) {
			if (!alloc_bootmem_huge_page(h))
				break;
		} else if (!alloc_fresh_huge_page(h,
					 &node_states[N_HIGH_MEMORY]))
			break;
	}
	h->max_huge_pages = i;
}
static void __init hugetlb_init_hstates(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/* oversize hugepages were init'ed in early boot */
		if (h->order < MAX_ORDER)
			hugetlb_hstate_alloc_pages(h);
	}
}

static char * __init memfmt(char *buf, unsigned long n)
{
	if (n >= (1UL << 30))
		sprintf(buf, "%lu GB", n >> 30);
	else if (n >= (1UL << 20))
		sprintf(buf, "%lu MB", n >> 20);
	else
		sprintf(buf, "%lu KB", n >> 10);
	return buf;
}

static void __init report_hugepages(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		char buf[32];
		printk(KERN_INFO "HugeTLB registered %s page size, "
				 "pre-allocated %ld pages\n",
			memfmt(buf, huge_page_size(h)),
			h->free_huge_pages);
	}
}
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	int i;

	if (h->order >= MAX_ORDER)
		return;

	for_each_node_mask(i, *nodes_allowed) {
		struct page *page, *next;
		struct list_head *freel = &h->hugepage_freelists[i];
		list_for_each_entry_safe(page, next, freel, lru) {
			if (count >= h->nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(h, page);
			h->free_huge_pages--;
			h->free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
}
#endif
/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
				int delta)
{
	int start_nid, next_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);

	if (delta < 0)
		start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
	else
		start_nid = hstate_next_node_to_free(h, nodes_allowed);
	next_nid = start_nid;

	do {
		int nid = next_nid;
		if (delta < 0) {
			/*
			 * To shrink on this node, there must be a surplus page
			 */
			if (!h->surplus_huge_pages_node[nid]) {
				next_nid = hstate_next_node_to_alloc(h,
								nodes_allowed);
				continue;
			}
		}
		if (delta > 0) {
			/*
			 * Surplus cannot exceed the total number of pages
			 */
			if (h->surplus_huge_pages_node[nid] >=
						h->nr_huge_pages_node[nid]) {
				next_nid = hstate_next_node_to_free(h,
								nodes_allowed);
				continue;
			}
		}

		h->surplus_huge_pages += delta;
		h->surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (next_nid != start_nid);

	return ret;
}
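
/*
 * Grow or shrink the persistent huge page pool to 'count' pages:
 * convert surplus pages and allocate fresh ones when growing; return
 * free pages to the buddy allocator and mark the remainder surplus when
 * shrinking.  Returns the resulting persistent pool size.
 */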
#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	unsigned long min_count, ret;

	if (h->order >= MAX_ORDER)
		return h->max_huge_pages;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, -1))
			break;
	}

	while (count > persistent_huge_pages(h)) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page(h, nodes_allowed);
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;

		/* Bail for signals. Probably ctrl-c from user */
		if (signal_pending(current))
			goto out;
	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_buddy_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls is changed, or the surplus pages go out of use.
	 */
	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(h, min_count, nodes_allowed);
	while (min_count < persistent_huge_pages(h)) {
		if (!free_pool_huge_page(h, nodes_allowed, 0))
			break;
	}
	while (count < persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, 1))
			break;
	}
out:
	ret = persistent_huge_pages(h);
	spin_unlock(&hugetlb_lock);
	return ret;
}
  1180. #define HSTATE_ATTR_RO(_name) \
  1181. static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
  1182. #define HSTATE_ATTR(_name) \
  1183. static struct kobj_attribute _name##_attr = \
  1184. __ATTR(_name, 0644, _name##_show, _name##_store)
  1185. static struct kobject *hugepages_kobj;
  1186. static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
  1187. static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
  1188. static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
  1189. {
  1190. int i;
  1191. for (i = 0; i < HUGE_MAX_HSTATE; i++)
  1192. if (hstate_kobjs[i] == kobj) {
  1193. if (nidp)
  1194. *nidp = NUMA_NO_NODE;
  1195. return &hstates[i];
  1196. }
  1197. return kobj_to_node_hstate(kobj, nidp);
  1198. }
  1199. static ssize_t nr_hugepages_show_common(struct kobject *kobj,
  1200. struct kobj_attribute *attr, char *buf)
  1201. {
  1202. struct hstate *h;
  1203. unsigned long nr_huge_pages;
  1204. int nid;
  1205. h = kobj_to_hstate(kobj, &nid);
  1206. if (nid == NUMA_NO_NODE)
  1207. nr_huge_pages = h->nr_huge_pages;
  1208. else
  1209. nr_huge_pages = h->nr_huge_pages_node[nid];
  1210. return sprintf(buf, "%lu\n", nr_huge_pages);
  1211. }
  1212. static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
  1213. struct kobject *kobj, struct kobj_attribute *attr,
  1214. const char *buf, size_t len)
  1215. {
  1216. int err;
  1217. int nid;
  1218. unsigned long count;
  1219. struct hstate *h;
  1220. NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
  1221. err = strict_strtoul(buf, 10, &count);
  1222. if (err)
  1223. return 0;
  1224. h = kobj_to_hstate(kobj, &nid);
  1225. if (nid == NUMA_NO_NODE) {
  1226. /*
  1227. * global hstate attribute
  1228. */
  1229. if (!(obey_mempolicy &&
  1230. init_nodemask_of_mempolicy(nodes_allowed))) {
  1231. NODEMASK_FREE(nodes_allowed);
  1232. nodes_allowed = &node_states[N_HIGH_MEMORY];
  1233. }
  1234. } else if (nodes_allowed) {
  1235. /*
  1236. * per node hstate attribute: adjust count to global,
  1237. * but restrict alloc/free to the specified node.
  1238. */
  1239. count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
  1240. init_nodemask_of_node(nodes_allowed, nid);
  1241. } else
  1242. nodes_allowed = &node_states[N_HIGH_MEMORY];
  1243. h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
  1244. if (nodes_allowed != &node_states[N_HIGH_MEMORY])
  1245. NODEMASK_FREE(nodes_allowed);
  1246. return len;
  1247. }
  1248. static ssize_t nr_hugepages_show(struct kobject *kobj,
  1249. struct kobj_attribute *attr, char *buf)
  1250. {
  1251. return nr_hugepages_show_common(kobj, attr, buf);
  1252. }
  1253. static ssize_t nr_hugepages_store(struct kobject *kobj,
  1254. struct kobj_attribute *attr, const char *buf, size_t len)
  1255. {
  1256. return nr_hugepages_store_common(false, kobj, attr, buf, len);
  1257. }
  1258. HSTATE_ATTR(nr_hugepages);
  1259. #ifdef CONFIG_NUMA
  1260. /*
  1261. * hstate attribute for optionally mempolicy-based constraint on persistent
  1262. * huge page alloc/free.
  1263. */
  1264. static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
  1265. struct kobj_attribute *attr, char *buf)
  1266. {
  1267. return nr_hugepages_show_common(kobj, attr, buf);
  1268. }
  1269. static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
  1270. struct kobj_attribute *attr, const char *buf, size_t len)
  1271. {
  1272. return nr_hugepages_store_common(true, kobj, attr, buf, len);
  1273. }
  1274. HSTATE_ATTR(nr_hugepages_mempolicy);
  1275. #endif

static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
}
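
/*
 * Cap on transient "surplus" huge pages.  For example:
 *
 *	echo 8 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 *
 * allows up to 8 extra 2MB pages to come from the buddy allocator once
 * the persistent pool is exhausted.
 */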
static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int err;
	unsigned long input;
	struct hstate *h = kobj_to_hstate(kobj, NULL);

	err = strict_strtoul(buf, 10, &input);
	if (err)
		return err;

	spin_lock(&hugetlb_lock);
	h->nr_overcommit_huge_pages = input;
	spin_unlock(&hugetlb_lock);

	return count;
}
HSTATE_ATTR(nr_overcommit_hugepages);

static ssize_t free_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long free_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		free_huge_pages = h->free_huge_pages;
	else
		free_huge_pages = h->free_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", free_huge_pages);
}
HSTATE_ATTR_RO(free_hugepages);

static ssize_t resv_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	return sprintf(buf, "%lu\n", h->resv_huge_pages);
}
HSTATE_ATTR_RO(resv_hugepages);

static ssize_t surplus_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long surplus_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		surplus_huge_pages = h->surplus_huge_pages;
	else
		surplus_huge_pages = h->surplus_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", surplus_huge_pages);
}
HSTATE_ATTR_RO(surplus_hugepages);

static struct attribute *hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&nr_overcommit_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&resv_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
#ifdef CONFIG_NUMA
	&nr_hugepages_mempolicy_attr.attr,
#endif
	NULL,
};

static struct attribute_group hstate_attr_group = {
	.attrs = hstate_attrs,
};
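
/*
 * Create the sysfs directory for one hstate under @parent, e.g.
 * /sys/kernel/mm/hugepages/hugepages-2048kB/, and populate it with the
 * given attribute group.
 */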
static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
				    struct kobject **hstate_kobjs,
				    struct attribute_group *hstate_attr_group)
{
	int retval;
	int hi = h - hstates;

	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
	if (!hstate_kobjs[hi])
		return -ENOMEM;

	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
	if (retval)
		kobject_put(hstate_kobjs[hi]);

	return retval;
}

static void __init hugetlb_sysfs_init(void)
{
	struct hstate *h;
	int err;

	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
	if (!hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
					 hstate_kobjs, &hstate_attr_group);
		if (err)
			printk(KERN_ERR "Hugetlb: Unable to add hstate %s\n",
								h->name);
	}
}

#ifdef CONFIG_NUMA

/*
 * node_hstate/s - associate per node hstate attributes, via their kobjects,
 * with node sysdevs in node_devices[] using a parallel array.  The array
 * index of a node sysdev or _hstate == node id.
 * This is here to avoid any static dependency of the node sysdev driver, in
 * the base kernel, on the hugetlb module.
 */
struct node_hstate {
	struct kobject		*hugepages_kobj;
	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
};
struct node_hstate node_hstates[MAX_NUMNODES];

/*
 * A subset of global hstate attributes for node sysdevs
 */
static struct attribute *per_node_hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
	NULL,
};

static struct attribute_group per_node_hstate_attr_group = {
	.attrs = per_node_hstate_attrs,
};

/*
 * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj.
 * Returns node id via non-NULL nidp.
 */
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	int nid;

	for (nid = 0; nid < nr_node_ids; nid++) {
		struct node_hstate *nhs = &node_hstates[nid];
		int i;
		for (i = 0; i < HUGE_MAX_HSTATE; i++)
			if (nhs->hstate_kobjs[i] == kobj) {
				if (nidp)
					*nidp = nid;
				return &hstates[i];
			}
	}

	BUG();
	return NULL;
}

/*
 * Unregister hstate attributes from a single node sysdev.
 * No-op if no hstate attributes attached.
 */
void hugetlb_unregister_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->sysdev.id];

	if (!nhs->hugepages_kobj)
		return;		/* no hstate attributes */

	for_each_hstate(h)
		if (nhs->hstate_kobjs[h - hstates]) {
			kobject_put(nhs->hstate_kobjs[h - hstates]);
			nhs->hstate_kobjs[h - hstates] = NULL;
		}

	kobject_put(nhs->hugepages_kobj);
	nhs->hugepages_kobj = NULL;
}

/*
 * hugetlb module exit:  unregister hstate attributes from node sysdevs
 * that have them.
 */
static void hugetlb_unregister_all_nodes(void)
{
	int nid;

	/*
	 * disable node sysdev registrations.
	 */
	register_hugetlbfs_with_node(NULL, NULL);

	/*
	 * remove hstate attributes from any nodes that have them.
	 */
	for (nid = 0; nid < nr_node_ids; nid++)
		hugetlb_unregister_node(&node_devices[nid]);
}

/*
 * Register hstate attributes for a single node sysdev.
 * No-op if attributes already registered.
 */
void hugetlb_register_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->sysdev.id];
	int err;

	if (nhs->hugepages_kobj)
		return;		/* already allocated */

	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
							&node->sysdev.kobj);
	if (!nhs->hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
						nhs->hstate_kobjs,
						&per_node_hstate_attr_group);
		if (err) {
			printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
					" for node %d\n",
						h->name, node->sysdev.id);
			hugetlb_unregister_node(node);
			break;
		}
	}
}

/*
 * hugetlb init time:  register hstate attributes for all registered node
 * sysdevs of nodes that have memory.  All on-line nodes should have
 * registered their associated sysdev by this time.
 */
static void hugetlb_register_all_nodes(void)
{
	int nid;

	for_each_node_state(nid, N_HIGH_MEMORY) {
		struct node *node = &node_devices[nid];
		if (node->sysdev.id == nid)
			hugetlb_register_node(node);
	}

	/*
	 * Let the node sysdev driver know we're here so it can
	 * [un]register hstate attributes on node hotplug.
	 */
	register_hugetlbfs_with_node(hugetlb_register_node,
				     hugetlb_unregister_node);
}
#else	/* !CONFIG_NUMA */

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	BUG();
	if (nidp)
		*nidp = -1;
	return NULL;
}

static void hugetlb_unregister_all_nodes(void) { }

static void hugetlb_register_all_nodes(void) { }

#endif

static void __exit hugetlb_exit(void)
{
	struct hstate *h;

	hugetlb_unregister_all_nodes();

	for_each_hstate(h) {
		kobject_put(hstate_kobjs[h - hstates]);
	}

	kobject_put(hugepages_kobj);
}
module_exit(hugetlb_exit);

static int __init hugetlb_init(void)
{
	/* Some platforms decide whether they support huge pages at boot
	 * time.  On these, such as powerpc, HPAGE_SHIFT is set to 0 when
	 * there is no such support.
	 */
	if (HPAGE_SHIFT == 0)
		return 0;

	if (!size_to_hstate(default_hstate_size)) {
		default_hstate_size = HPAGE_SIZE;
		if (!size_to_hstate(default_hstate_size))
			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
	}
	default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
	if (default_hstate_max_huge_pages)
		default_hstate.max_huge_pages = default_hstate_max_huge_pages;

	hugetlb_init_hstates();

	gather_bootmem_prealloc();

	report_hugepages();

	hugetlb_sysfs_init();

	hugetlb_register_all_nodes();

	return 0;
}
module_init(hugetlb_init);

/* Should be called on processing a hugepagesz=... option */
void __init hugetlb_add_hstate(unsigned order)
{
	struct hstate *h;
	unsigned long i;

	if (size_to_hstate(PAGE_SIZE << order)) {
		printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
		return;
	}
	BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
	BUG_ON(order == 0);
	h = &hstates[max_hstate++];
	h->order = order;
	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
	h->nr_huge_pages = 0;
	h->free_huge_pages = 0;
	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
	h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
	h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
					huge_page_size(h)/1024);

	parsed_hstate = h;
}
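
/*
 * Example boot command line, on hardware that supports both sizes
 * (e.g. x86_64 with 1GB page support):
 *
 *	hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512
 *
 * Each hugepagesz= registers an hstate via hugetlb_add_hstate(); the
 * hugepages= that follows sizes that hstate's pool.  A bare hugepages=
 * with no preceding hugepagesz= applies to the default hstate.
 */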
static int __init hugetlb_nrpages_setup(char *s)
{
	unsigned long *mhp;
	static unsigned long *last_mhp;

	/*
	 * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
	 * so this hugepages= parameter goes to the "default hstate".
	 */
	if (!max_hstate)
		mhp = &default_hstate_max_huge_pages;
	else
		mhp = &parsed_hstate->max_huge_pages;

	if (mhp == last_mhp) {
		printk(KERN_WARNING "hugepages= specified twice without "
			"interleaving hugepagesz=, ignoring\n");
		return 1;
	}

	if (sscanf(s, "%lu", mhp) <= 0)
		*mhp = 0;

	/*
	 * Global state is always initialized later in hugetlb_init.
	 * But we need to allocate the pages of >= MAX_ORDER hstates
	 * here early, while the bootmem allocator is still usable.
	 */
	if (max_hstate && parsed_hstate->order >= MAX_ORDER)
		hugetlb_hstate_alloc_pages(parsed_hstate);

	last_mhp = mhp;

	return 1;
}
__setup("hugepages=", hugetlb_nrpages_setup);

static int __init hugetlb_default_setup(char *s)
{
	default_hstate_size = memparse(s, &s);
	return 1;
}
__setup("default_hugepagesz=", hugetlb_default_setup);
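
/*
 * Sum an hstate's per-node counter array over just the nodes allowed
 * by the current task's cpuset.
 */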
static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
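
/*
 * Common handler for the vm.nr_hugepages and vm.nr_hugepages_mempolicy
 * sysctls: on write, resize the default hstate's pool, optionally
 * constrained to the nodes of the caller's mempolicy.
 */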
static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
			 struct ctl_table *table, int write,
			 void __user *buffer, size_t *length, loff_t *ppos)
{
	struct hstate *h = &default_hstate;
	unsigned long tmp;

	if (!write)
		tmp = h->max_huge_pages;

	table->data = &tmp;
	table->maxlen = sizeof(unsigned long);
	proc_doulongvec_minmax(table, write, buffer, length, ppos);

	if (write) {
		NODEMASK_ALLOC(nodemask_t, nodes_allowed,
						GFP_KERNEL | __GFP_NORETRY);
		if (!(obey_mempolicy &&
			       init_nodemask_of_mempolicy(nodes_allowed))) {
			NODEMASK_FREE(nodes_allowed);
			nodes_allowed = &node_states[N_HIGH_MEMORY];
		}
		h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);

		if (nodes_allowed != &node_states[N_HIGH_MEMORY])
			NODEMASK_FREE(nodes_allowed);
	}

	return 0;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			  void __user *buffer, size_t *length, loff_t *ppos)
{
	return hugetlb_sysctl_handler_common(false, table, write,
							buffer, length, ppos);
}

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
			  void __user *buffer, size_t *length, loff_t *ppos)
{
	return hugetlb_sysctl_handler_common(true, table, write,
							buffer, length, ppos);
}
#endif /* CONFIG_NUMA */

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

int hugetlb_overcommit_handler(struct ctl_table *table, int write,
			void __user *buffer,
			size_t *length, loff_t *ppos)
{
	struct hstate *h = &default_hstate;
	unsigned long tmp;

	if (!write)
		tmp = h->nr_overcommit_huge_pages;

	table->data = &tmp;
	table->maxlen = sizeof(unsigned long);
	proc_doulongvec_minmax(table, write, buffer, length, ppos);

	if (write) {
		spin_lock(&hugetlb_lock);
		h->nr_overcommit_huge_pages = tmp;
		spin_unlock(&hugetlb_lock);
	}

	return 0;
}

#endif /* CONFIG_SYSCTL */

void hugetlb_report_meminfo(struct seq_file *m)
{
	struct hstate *h = &default_hstate;
	seq_printf(m,
			"HugePages_Total:   %5lu\n"
			"HugePages_Free:    %5lu\n"
			"HugePages_Rsvd:    %5lu\n"
			"HugePages_Surp:    %5lu\n"
			"Hugepagesize:   %8lu kB\n",
			h->nr_huge_pages,
			h->free_huge_pages,
			h->resv_huge_pages,
			h->surplus_huge_pages,
			1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	struct hstate *h = &default_hstate;
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n"
		"Node %d HugePages_Surp:  %5u\n",
		nid, h->nr_huge_pages_node[nid],
		nid, h->free_huge_pages_node[nid],
		nid, h->surplus_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	struct hstate *h = &default_hstate;
	return h->nr_huge_pages * pages_per_huge_page(h);
}
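
/*
 * Charge (delta > 0) or uncharge (delta < 0) a reservation of @delta
 * huge pages against the pool, growing it with surplus pages from the
 * buddy allocator when the persistent pool alone cannot satisfy it.
 */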
static int hugetlb_acct_memory(struct hstate *h, long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * reservation is completely rubbish in the presence of cpuset because
	 * the reservation is not checked against page availability for the
	 * current cpuset. Applications can still potentially be OOM'ed by the
	 * kernel for lack of free htlb pages in the cpuset that the task is
	 * in. Attempting to enforce strict accounting with cpuset is almost
	 * impossible (or too ugly) because cpusets are too fluid: tasks or
	 * memory nodes can be dynamically moved between cpusets.
	 *
	 * The change of semantics for shared hugetlb mapping with cpuset is
	 * undesirable. However, in order to preserve some of the semantics,
	 * we fall back to check against current free page availability as
	 * a best attempt and hopefully to minimize the impact of changing
	 * semantics that cpuset has.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(h, delta) < 0)
			goto out;

		if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
			return_unused_surplus_pages(h, delta);
			goto out;
		}
	}

	ret = 0;
	if (delta < 0)
		return_unused_surplus_pages(h, (unsigned long) -delta);

out:
	spin_unlock(&hugetlb_lock);
	return ret;
}

static void hugetlb_vm_op_open(struct vm_area_struct *vma)
{
	struct resv_map *reservations = vma_resv_map(vma);

	/*
	 * This new VMA should share its sibling's reservation map if present.
	 * The VMA will only ever have a valid reservation map pointer where
	 * it is being copied for another still existing VMA.  As that VMA
	 * has a reference to the reservation map it cannot disappear until
	 * after this open call completes.  It is therefore safe to take a
	 * new reference here without additional locking.
	 */
	if (reservations)
		kref_get(&reservations->refs);
}

static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
	struct hstate *h = hstate_vma(vma);
	struct resv_map *reservations = vma_resv_map(vma);
	unsigned long reserve;
	unsigned long start;
	unsigned long end;

	if (reservations) {
		start = vma_hugecache_offset(h, vma, vma->vm_start);
		end = vma_hugecache_offset(h, vma, vma->vm_end);

		reserve = (end - start) -
			region_count(&reservations->regions, start, end);

		kref_put(&reservations->refs, resv_map_release);

		if (reserve) {
			hugetlb_acct_memory(h, -reserve);
			hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
		}
	}
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

const struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
	.open = hugetlb_vm_op_open,
	.close = hugetlb_vm_op_close,
};
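
/*
 * Construct a huge PTE for @page.  Writable mappings are created dirty
 * up front; hugetlbfs pages are RAM-backed and never written back, so
 * there is no dirty state worth tracking lazily.
 */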
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, ptep);
	}
}
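
/*
 * Duplicate the parent's huge PTEs into the child at fork().  For
 * private writable (COW) mappings the parent's PTEs are write-protected
 * too, so the next write from either mm goes through hugetlb_cow().
 */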
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr, sz);
		if (!dst_pte)
			goto nomem;

		/* If the pagetables are shared don't copy or take references */
		if (dst_pte == src_pte)
			continue;

		spin_lock(&dst->page_table_lock);
		spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
		if (!huge_pte_none(huge_ptep_get(src_pte))) {
			if (cow)
				huge_ptep_set_wrprotect(src, addr, src_pte);
			entry = huge_ptep_get(src_pte);
			ptepage = pte_page(entry);
			get_page(ptepage);
			page_dup_rmap(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

static int is_hugetlb_entry_hwpoisoned(pte_t pte)
{
	swp_entry_t swp;

	if (huge_pte_none(pte) || pte_present(pte))
		return 0;
	swp = pte_to_swp_entry(pte);
	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
		return 1;
	return 0;
}

void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end, struct page *ref_page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);

	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~huge_page_mask(h));
	BUG_ON(end & ~huge_page_mask(h));

	mmu_notifier_invalidate_range_start(mm, start, end);
	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += sz) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		/*
		 * If a reference page is supplied, it is because a specific
		 * page is being unmapped, not a range. Ensure the page we
		 * are about to unmap is the actual page of interest.
		 */
		if (ref_page) {
			pte = huge_ptep_get(ptep);
			if (huge_pte_none(pte))
				continue;
			page = pte_page(pte);
			if (page != ref_page)
				continue;

			/*
			 * Mark the VMA as having unmapped its page so that
			 * future faults in this VMA will fail rather than
			 * looking like data was lost
			 */
			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
		}

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (huge_pte_none(pte))
			continue;

		/*
		 * HWPoisoned hugepage is already unmapped and dropped reference
		 */
		if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	mmu_notifier_invalidate_range_end(mm, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		page_remove_rmap(page);
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page)
{
	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	__unmap_hugepage_range(vma, start, end, ref_page);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
}

/*
 * This is called when the original mapper is failing to COW a MAP_PRIVATE
 * mapping it owns the reserve page for. The intention is to unmap the page
 * from other VMAs and let the children be SIGKILLed if they are faulting the
 * same region.
 */
static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
				struct page *page, unsigned long address)
{
	struct hstate *h = hstate_vma(vma);
	struct vm_area_struct *iter_vma;
	struct address_space *mapping;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
	 * from page cache lookup which is in HPAGE_SIZE units.
	 */
	address = address & huge_page_mask(h);
	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
		+ (vma->vm_pgoff >> PAGE_SHIFT);
	mapping = (struct address_space *)page_private(page);

	/*
	 * Take the mapping lock for the duration of the table walk. As
	 * this mapping should be shared between all the VMAs,
	 * __unmap_hugepage_range() is called as the lock is already held
	 */
	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/* Do not unmap the current VMA */
		if (iter_vma == vma)
			continue;

		/*
		 * Unmap the page from other VMAs without their own reserves.
		 * They get marked to be SIGKILLed if they fault in these
		 * areas. This is because a future no-page fault on this VMA
		 * could insert a zeroed page instead of the data existing
		 * from the time of fork. This would look like data corruption
		 */
		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
			__unmap_hugepage_range(iter_vma,
				address, address + huge_page_size(h),
				page);
	}
	spin_unlock(&mapping->i_mmap_lock);

	return 1;
}

/*
 * hugetlb_cow() should be called with page lock of the original hugepage held.
 */
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte,
			struct page *pagecache_page)
{
	struct hstate *h = hstate_vma(vma);
	struct page *old_page, *new_page;
	int avoidcopy;
	int outside_reserve = 0;

	old_page = pte_page(pte);

retry_avoidcopy:
	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_mapcount(old_page) == 1);
	if (avoidcopy) {
		if (PageAnon(old_page))
			page_move_anon_rmap(old_page, vma, address);
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	/*
	 * If the process that created a MAP_PRIVATE mapping is about to
	 * perform a COW due to a shared page count, attempt to satisfy
	 * the allocation without using the existing reserves. The pagecache
	 * page is used to determine if the reserve at this address was
	 * consumed or not. If reserves were used, a partial faulted mapping
	 * at the time of fork() could consume its reserves on COW instead
	 * of the full address range.
	 */
	if (!(vma->vm_flags & VM_MAYSHARE) &&
			is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
			old_page != pagecache_page)
		outside_reserve = 1;

	page_cache_get(old_page);

	/* Drop page_table_lock as buddy allocator may be called */
	spin_unlock(&mm->page_table_lock);
	new_page = alloc_huge_page(vma, address, outside_reserve);

	if (IS_ERR(new_page)) {
		page_cache_release(old_page);

		/*
		 * If a process owning a MAP_PRIVATE mapping fails to COW,
		 * it is due to references held by a child and an insufficient
		 * huge page pool. To guarantee the original mappers
		 * reliability, unmap the page from child processes. The child
		 * may get SIGKILLed if it later faults.
		 */
		if (outside_reserve) {
			BUG_ON(huge_pte_none(pte));
			if (unmap_ref_private(mm, vma, old_page, address)) {
				BUG_ON(page_count(old_page) != 1);
				BUG_ON(huge_pte_none(pte));
				spin_lock(&mm->page_table_lock);
				goto retry_avoidcopy;
			}
			WARN_ON_ONCE(1);
		}

		/* Caller expects lock to be held */
		spin_lock(&mm->page_table_lock);
		return -PTR_ERR(new_page);
	}

	/*
	 * When the original hugepage is shared one, it does not have
	 * anon_vma prepared.
	 */
	if (unlikely(anon_vma_prepare(vma))) {
		/* Drop the references taken above and retake the lock,
		 * as the caller expects it to be held on return. */
		page_cache_release(new_page);
		page_cache_release(old_page);
		spin_lock(&mm->page_table_lock);
		return VM_FAULT_OOM;
	}

	copy_huge_page(new_page, old_page, address, vma);
	__SetPageUptodate(new_page);

	/*
	 * Retake the page_table_lock to check for racing updates
	 * before the page tables are altered
	 */
	spin_lock(&mm->page_table_lock);
	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
		/* Break COW */
		mmu_notifier_invalidate_range_start(mm,
			address & huge_page_mask(h),
			(address & huge_page_mask(h)) + huge_page_size(h));
		huge_ptep_clear_flush(vma, address, ptep);
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		page_remove_rmap(old_page);
		hugepage_add_new_anon_rmap(new_page, vma, address);
		/* Make the old page be freed below */
		new_page = old_page;
		mmu_notifier_invalidate_range_end(mm,
			address & huge_page_mask(h),
			(address & huge_page_mask(h)) + huge_page_size(h));
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}

/* Return the pagecache page at a given address within a VMA */
static struct page *hugetlbfs_pagecache_page(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping;
	pgoff_t idx;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	return find_lock_page(mapping, idx);
}

/*
 * Return whether there is a pagecache page to back given address within VMA.
 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
 */
static bool hugetlbfs_pagecache_present(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping;
	pgoff_t idx;
	struct page *page;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	page = find_get_page(mapping, idx);
	if (page)
		put_page(page);
	return page != NULL;
}
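
/*
 * Handle a fault on a huge PTE that is not yet present: look the page
 * up in the hugetlbfs page cache or allocate a fresh one, add it to the
 * page cache (shared) or anon rmap (private), then install the new PTE
 * under page_table_lock.
 */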
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, unsigned int flags)
{
	struct hstate *h = hstate_vma(vma);
	int ret = VM_FAULT_SIGBUS;
	pgoff_t idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	/*
	 * Currently, we are forced to kill the process in the event the
	 * original mapper has unmapped pages from the child due to a failed
	 * COW. Warn that such a situation has occurred as it may not be obvious
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
		printk(KERN_WARNING
			"PID %d killed due to inadequate hugepage pool\n",
			current->pid);
		return ret;
	}

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> huge_page_shift(h);
		if (idx >= size)
			goto out;
		page = alloc_huge_page(vma, address, 0);
		if (IS_ERR(page)) {
			ret = -PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, address, huge_page_size(h));
		__SetPageUptodate(page);

		if (vma->vm_flags & VM_MAYSHARE) {
			int err;
			struct inode *inode = mapping->host;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}

			spin_lock(&inode->i_lock);
			inode->i_blocks += blocks_per_huge_page(h);
			spin_unlock(&inode->i_lock);
			page_dup_rmap(page);
		} else {
			lock_page(page);
			if (unlikely(anon_vma_prepare(vma))) {
				ret = VM_FAULT_OOM;
				goto backout_unlocked;
			}
			hugepage_add_new_anon_rmap(page, vma, address);
		}
	} else {
		/*
		 * If a memory error occurs between mmap() and fault, some
		 * processes don't have a hwpoisoned swap entry for the errored
		 * virtual address, so we need to block hugepage faults with
		 * a PG_hwpoison check.
		 */
		if (unlikely(PageHWPoison(page))) {
			ret = VM_FAULT_HWPOISON;
			goto backout_unlocked;
		}
		page_dup_rmap(page);
	}

	/*
	 * If we are going to COW a private mapping later, we examine the
	 * pending reservations for this page now. This will ensure that
	 * any allocations necessary to record that reservation occur outside
	 * the spinlock.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
		if (vma_needs_reservation(h, vma, address) < 0) {
			ret = VM_FAULT_OOM;
			goto backout_unlocked;
		}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> huge_page_shift(h);
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!huge_pte_none(huge_ptep_get(ptep)))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
backout_unlocked:
	unlock_page(page);
	put_page(page);
	goto out;
}

int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	struct page *page = NULL;
	struct page *pagecache_page = NULL;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
	struct hstate *h = hstate_vma(vma);

	ptep = huge_pte_offset(mm, address);
	if (ptep) {
		entry = huge_ptep_get(ptep);
		if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
			return VM_FAULT_HWPOISON;
	}

	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = huge_ptep_get(ptep);
	if (huge_pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, flags);
		goto out_mutex;
	}

	ret = 0;

	/*
	 * If we are going to COW the mapping later, we examine the pending
	 * reservations for this page now. This will ensure that any
	 * allocations necessary to record that reservation occur outside the
	 * spinlock. For private mappings, we also lookup the pagecache
	 * page now as it is used to determine if a reservation has been
	 * consumed.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
		if (vma_needs_reservation(h, vma, address) < 0) {
			ret = VM_FAULT_OOM;
			goto out_mutex;
		}

		if (!(vma->vm_flags & VM_MAYSHARE))
			pagecache_page = hugetlbfs_pagecache_page(h,
								vma, address);
	}

	/*
	 * hugetlb_cow() requires page locks of pte_page(entry) and
	 * pagecache_page, so here we need to take the former one
	 * when page != pagecache_page or !pagecache_page.
	 * Note that locking order is always pagecache_page -> page,
	 * so no worry about deadlock.
	 */
	page = pte_page(entry);
	if (page != pagecache_page)
		lock_page(page);

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
		goto out_page_table_lock;

	if (flags & FAULT_FLAG_WRITE) {
		if (!pte_write(entry)) {
			ret = hugetlb_cow(mm, vma, address, ptep, entry,
							pagecache_page);
			goto out_page_table_lock;
		}
		entry = pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
						flags & FAULT_FLAG_WRITE))
		update_mmu_cache(vma, address, ptep);

out_page_table_lock:
	spin_unlock(&mm->page_table_lock);

	if (pagecache_page) {
		unlock_page(pagecache_page);
		put_page(pagecache_page);
	}
	unlock_page(page);

out_mutex:
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

/* Can be overridden by architectures */
__attribute__((weak)) struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
	       pud_t *pud, int write)
{
	BUG();
	return NULL;
}
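
/*
 * The hugetlb back end of get_user_pages(): walk the huge PTEs backing
 * *length base pages starting at *position, faulting pages in where
 * needed and permitted, and fill @pages/@vmas one PAGE_SIZE subpage at
 * a time.
 */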
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i,
			unsigned int flags)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;
	struct hstate *h = hstate_vma(vma);

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		int absent;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
		absent = !pte || huge_pte_none(huge_ptep_get(pte));

		/*
		 * When coredumping, it suits get_dump_page if we just return
		 * an error where there's an empty slot with no huge pagecache
		 * to back it.  This way, we avoid allocating a hugepage, and
		 * the sparse dumpfile avoids allocating disk blocks, but its
		 * huge holes still show up with zeroes where they need to be.
		 */
		if (absent && (flags & FOLL_DUMP) &&
		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
			remainder = 0;
			break;
		}

		if (absent ||
		    ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr,
				(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
			spin_lock(&mm->page_table_lock);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			break;
		}

		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
		page = pte_page(huge_ptep_get(pte));
same_page:
		if (pages) {
			pages[i] = mem_map_offset(page, pfn_offset);
			get_page(pages[i]);
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < pages_per_huge_page(h)) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i ? i : -EFAULT;
}
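
/*
 * mprotect() for hugetlb VMAs: rewrite every present huge PTE in the
 * range with the new protections, then flush the affected range from
 * the TLB.
 */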
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;
	struct hstate *h = hstate_vma(vma);

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += huge_page_size(h)) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!huge_pte_none(huge_ptep_get(ptep))) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}
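
/*
 * Called at mmap()/shmget() time to reserve huge pages for the range
 * [from, to) of the file.  Shared mappings charge the shared region map
 * attached to the inode; private mappings get a fresh reservation map
 * and charge the full range, since mprotect() may later make them
 * writable.
 */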
int hugetlb_reserve_pages(struct inode *inode,
					long from, long to,
					struct vm_area_struct *vma,
					int acctflag)
{
	long ret, chg;
	struct hstate *h = hstate_inode(inode);

	/*
	 * Only apply hugepage reservation if asked. At fault time, an
	 * attempt will be made for VM_NORESERVE to allocate a page
	 * and filesystem quota without using reserves
	 */
	if (acctflag & VM_NORESERVE)
		return 0;

	/*
	 * Shared mappings base their reservation on the number of pages that
	 * are already allocated on behalf of the file. Private mappings need
	 * to reserve the full area even if read-only as mprotect() may be
	 * called to make the mapping read-write. Assume !vma is a shm mapping
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE)
		chg = region_chg(&inode->i_mapping->private_list, from, to);
	else {
		struct resv_map *resv_map = resv_map_alloc();
		if (!resv_map)
			return -ENOMEM;

		chg = to - from;

		set_vma_resv_map(vma, resv_map);
		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
	}

	if (chg < 0)
		return chg;

	/* There must be enough filesystem quota for the mapping */
	if (hugetlb_get_quota(inode->i_mapping, chg))
		return -ENOSPC;

	/*
	 * Check enough hugepages are available for the reservation.
	 * Hand back the quota if there are not
	 */
	ret = hugetlb_acct_memory(h, chg);
	if (ret < 0) {
		hugetlb_put_quota(inode->i_mapping, chg);
		return ret;
	}

	/*
	 * Account for the reservations made. Shared mappings record regions
	 * that have reservations as they are shared by multiple VMAs.
	 * When the last VMA disappears, the region map says how much
	 * the reservation was and the page cache tells how much of
	 * the reservation was consumed. Private mappings are per-VMA and
	 * only the consumed reservations are tracked. When the VMA
	 * disappears, the original reservation is the VMA size and the
	 * consumed reservations are stored in the map. Hence, nothing
	 * else has to be done for private mappings here
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE)
		region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	struct hstate *h = hstate_inode(inode);
	long chg = region_truncate(&inode->i_mapping->private_list, offset);

	spin_lock(&inode->i_lock);
	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
	spin_unlock(&inode->i_lock);

	hugetlb_put_quota(inode->i_mapping, (chg - freed));
	hugetlb_acct_memory(h, -(chg - freed));
}

/*
 * This function is called from memory failure code.
 * Assume the caller holds page lock of the head page.
 */
void __isolate_hwpoisoned_huge_page(struct page *hpage)
{
	struct hstate *h = page_hstate(hpage);
	int nid = page_to_nid(hpage);

	spin_lock(&hugetlb_lock);
	list_del(&hpage->lru);
	h->free_huge_pages--;
	h->free_huge_pages_node[nid]--;
	spin_unlock(&hugetlb_lock);
}