hugetlb.c

/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/io.h>

#include <linux/hugetlb.h>
#include <linux/node.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

static int max_hstate;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;

#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);
/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are protected by a combination of the mmap_sem
 * and the hugetlb_instantiation_mutex.  To access or modify a region the
 * caller must either hold the mmap_sem for write, or the mmap_sem for read
 * and the hugetlb_instantiation_mutex:
 *
 *	down_write(&mm->mmap_sem);
 * or
 *	down_read(&mm->mmap_sem);
 *	mutex_lock(&hugetlb_instantiation_mutex);
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};
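
/*
 * Add the range [f, t) to the reservation map, merging it with any
 * overlapping regions already present.  Callers are expected to have
 * made a matching region_chg() call first, so an entry to extend is
 * guaranteed to exist and no allocation is needed here.
 */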
static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}
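
/*
 * Return how many huge pages in the range [f, t) are not yet covered
 * by the reservation map.  If a new entry will be needed, a zero-sized
 * placeholder is allocated now so that a later region_add() for the
 * same range cannot fail.
 */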
static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}
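
/*
 * Truncate the reservation map at 'end', trimming the region that
 * straddles it and dropping every region beyond it.  Returns the
 * number of pages released.
 */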
static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}
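
/*
 * Count how many pages in the range [f, t) overlap existing regions,
 * i.e. how many reservations in that range have already been recorded.
 */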
static long region_count(struct list_head *head, long f, long t)
{
	struct file_region *rg;
	long chg = 0;

	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		int seg_from;
		int seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}
/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	struct hstate *hstate;

	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	hstate = hstate_vma(vma);

	return 1UL << (hstate->order + PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific version of this
 * function is required.
 */
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}
#endif

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have its future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and it persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

struct resv_map {
	struct kref refs;
	struct list_head regions;
};

static struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	if (!resv_map)
		return NULL;

	kref_init(&resv_map->refs);
	INIT_LIST_HEAD(&resv_map->regions);

	return resv_map;
}

static void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);

	/* Clear out any active regions before we release the map. */
	region_truncate(&resv_map->regions, 0);
	kfree(resv_map);
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_MAYSHARE))
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	return NULL;
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));

	return (get_vma_private_data(vma) & flag) != 0;
}
/* Decrement the reserved pages in the hugepage pool by one */
static void decrement_hugepage_resv_vma(struct hstate *h,
			struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NORESERVE)
		return;

	if (vma->vm_flags & VM_MAYSHARE) {
		/* Shared mappings always use reserves */
		h->resv_huge_pages--;
	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Only the process that called mmap() has reserves for
		 * private mappings.
		 */
		h->resv_huge_pages--;
	}
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_MAYSHARE)
		return 1;
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return 1;
	return 0;
}

static void clear_gigantic_page(struct page *page,
			unsigned long addr, unsigned long sz)
{
	int i;
	struct page *p = page;

	might_sleep();
	for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
		cond_resched();
		clear_user_highpage(p, addr + i * PAGE_SIZE);
	}
}

static void clear_huge_page(struct page *page,
			unsigned long addr, unsigned long sz)
{
	int i;

	if (unlikely(sz/PAGE_SIZE > MAX_ORDER_NR_PAGES)) {
		clear_gigantic_page(page, addr, sz);
		return;
	}

	might_sleep();
	for (i = 0; i < sz/PAGE_SIZE; i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}
static void copy_gigantic_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;
	struct hstate *h = hstate_vma(vma);
	struct page *dst_base = dst;
	struct page *src_base = src;

	might_sleep();
	for (i = 0; i < pages_per_huge_page(h); ) {
		cond_resched();
		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;
	struct hstate *h = hstate_vma(vma);

	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
		copy_gigantic_page(dst, src, addr, vma);
		return;
	}

	might_sleep();
	for (i = 0; i < pages_per_huge_page(h); i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}
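
/* Return a huge page to its node's free list; caller holds hugetlb_lock. */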
static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}
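
/*
 * Take a free huge page from a node allowed by the VMA's memory policy
 * and cpuset, consuming a reservation unless the caller asked to avoid
 * reserves.  Returns NULL if no suitable page is available.
 */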
static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;

	get_mems_allowed();
	zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol, &nodemask);
	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						MAX_NR_ZONES - 1, nodemask) {
		nid = zone_to_nid(zone);
		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
		    !list_empty(&h->hugepage_freelists[nid])) {
			page = list_entry(h->hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[nid]--;

			if (!avoid_reserve)
				decrement_hugepage_resv_vma(h, vma);

			break;
		}
	}
err:
	mpol_cond_put(mpol);
	put_mems_allowed();
	return page;
}
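
/*
 * Release a huge page back to the buddy allocator: drop it from the
 * hstate counters, clear the per-page flags and the compound
 * destructor, and free it at the hstate's order.
 */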
static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	VM_BUG_ON(h->order >= MAX_ORDER);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	arch_release_hugepage(page);
	__free_pages(page, huge_page_order(h));
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

static void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct address_space *mapping;

	mapping = (struct address_space *) page_private(page);
	set_page_private(page, 0);
	page->mapping = NULL;
	BUG_ON(page_count(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
	if (mapping)
		hugetlb_put_quota(mapping, 1);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	set_compound_page_dtor(page, free_huge_page);
	spin_lock(&hugetlb_lock);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
	put_page(page); /* free it into the hugepage allocator */
}
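
/*
 * Assemble a gigantic compound page by hand.  The constituent pages may
 * span discontiguous sections of mem_map, hence the use of mem_map_next()
 * rather than simple pointer arithmetic over the tail pages.
 */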
static void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		__SetPageTail(p);
		p->first_page = page;
	}
}
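
/*
 * Test whether a page belongs to a hugetlb compound page, identified by
 * its compound destructor being free_huge_page.
 */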
int PageHuge(struct page *page)
{
	compound_page_dtor *dtor;

	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	dtor = get_compound_page_dtor(page);

	return dtor == free_huge_page;
}
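
/*
 * Allocate a fresh huge page from the buddy allocator on a specific node
 * and add it to the pool.  Gigantic orders cannot come from the buddy
 * allocator, so NULL is returned for them.
 */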
static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	if (h->order >= MAX_ORDER)
		return NULL;

	page = alloc_pages_exact_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
						__GFP_REPEAT|__GFP_NOWARN,
		huge_page_order(h));
	if (page) {
		if (arch_prepare_hugepage(page)) {
			__free_pages(page, huge_page_order(h));
			return NULL;
		}
		prep_new_huge_page(h, page, nid);
	}

	return page;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node(nid, *nodes_allowed);
	if (nid == MAX_NUMNODES)
		nid = first_node(*nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}
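
/*
 * Allocate one fresh huge page, trying each allowed node in turn starting
 * from the hstate's round-robin cursor.  Returns 1 on success, 0 if every
 * node failed.
 */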
static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
	next_nid = start_nid;

	do {
		page = alloc_fresh_huge_page_node(h, next_nid);
		if (page) {
			ret = 1;
			break;
		}
		next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
	} while (next_nid != start_nid);

	if (ret)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return ret;
}

/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
							 bool acct_surplus)
{
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hstate_next_node_to_free(h, nodes_allowed);
	next_nid = start_nid;

	do {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
		    !list_empty(&h->hugepage_freelists[next_nid])) {
			struct page *page =
				list_entry(h->hugepage_freelists[next_nid].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[next_nid]--;
			if (acct_surplus) {
				h->surplus_huge_pages--;
				h->surplus_huge_pages_node[next_nid]--;
			}
			update_and_free_page(h, page);
			ret = 1;
			break;
		}
		next_nid = hstate_next_node_to_free(h, nodes_allowed);
	} while (next_nid != start_nid);

	return ret;
}

static struct page *alloc_buddy_huge_page(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct page *page;
	unsigned int nid;

	if (h->order >= MAX_ORDER)
		return NULL;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		h->nr_huge_pages++;
		h->surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
					__GFP_REPEAT|__GFP_NOWARN,
					huge_page_order(h));

	if (page && arch_prepare_hugepage(page)) {
		__free_pages(page, huge_page_order(h));
		return NULL;
	}

	spin_lock(&hugetlb_lock);
	if (page) {
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON(page_count(page));
		nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		/*
		 * We incremented the global counters already
		 */
		h->nr_huge_pages_node[nid]++;
		h->surplus_huge_pages_node[nid]++;
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	} else {
		h->nr_huge_pages--;
		h->surplus_huge_pages--;
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
	}
	spin_unlock(&hugetlb_lock);

	return page;
}
/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(h, NULL, 0);
		if (!page) {
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			spin_lock(&hugetlb_lock);
			needed = 0;
			goto free;
		}

		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;
free:
	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		list_del(&page->lru);
		enqueue_huge_page(h, page);
	}

	/* Free unnecessary surplus pages to the buddy allocator */
	if (!list_empty(&surplus_list)) {
		spin_unlock(&hugetlb_lock);
		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
			list_del(&page->lru);
			/*
			 * The page has a reference count of zero already, so
			 * call free_huge_page directly instead of using
			 * put_page.  This must be done with hugetlb_lock
			 * unlocked which is safe because free_huge_page takes
			 * hugetlb_lock before deciding how to free the page.
			 */
			free_huge_page(page);
		}
		spin_lock(&hugetlb_lock);
	}

	return ret;
}
/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 * Called with hugetlb_lock held.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	unsigned long nr_pages;

	/* Uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;

	/* Cannot return gigantic pages currently */
	if (h->order >= MAX_ORDER)
		return;

	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes with memory. Iterate across these nodes
	 * until we can no longer free unreserved surplus pages. This occurs
	 * when the nodes with surplus pages have no free pages.
	 * free_pool_huge_page() will balance the freed pages across the
	 * on-line nodes with memory and will handle the hstate accounting.
	 */
	while (nr_pages--) {
		if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
			break;
	}
}
/*
 * Determine if the huge page at addr within the vma has an associated
 * reservation.  Where it does not, we will need to logically increase
 * the reservation and actually increase the quota before an allocation
 * can occur.  Where any new reservation would be required, the
 * reservation change is prepared, but not committed.  Once the page has
 * been quota'd, allocated and instantiated, the change should be
 * committed via vma_commit_reservation.  No action is required on
 * failure.
 */
static long vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_MAYSHARE) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		return region_chg(&inode->i_mapping->private_list,
							idx, idx + 1);

	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		return 1;

	} else {
		long err;
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		err = region_chg(&reservations->regions, idx, idx + 1);
		if (err < 0)
			return err;
		return 0;
	}
}
static void vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_MAYSHARE) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		region_add(&inode->i_mapping->private_list, idx, idx + 1);

	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		/* Mark this page used in the map. */
		region_add(&reservations->regions, idx, idx + 1);
	}
}
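
/*
 * Allocate a huge page for a fault or mmap at 'addr': charge the quota
 * if a new reservation is needed, prefer a reserved page from the pool,
 * and fall back to a surplus page from the buddy allocator.
 */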
static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct hstate *h = hstate_vma(vma);
	struct page *page;
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	long chg;

	/*
	 * Processes that did not create the mapping will have no reserves and
	 * will not have accounted against quota. Check that the quota can be
	 * made before satisfying the allocation.
	 * MAP_NORESERVE mappings may also need pages and quota allocated
	 * if no reserve mapping overlaps.
	 */
	chg = vma_needs_reservation(h, vma, addr);
	if (chg < 0)
		return ERR_PTR(chg);
	if (chg)
		if (hugetlb_get_quota(inode->i_mapping, chg))
			return ERR_PTR(-ENOSPC);

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
	spin_unlock(&hugetlb_lock);

	if (!page) {
		page = alloc_buddy_huge_page(h, vma, addr);
		if (!page) {
			hugetlb_put_quota(inode->i_mapping, chg);
			return ERR_PTR(-VM_FAULT_SIGBUS);
		}
	}

	set_page_refcounted(page);
	set_page_private(page, (unsigned long) mapping);

	vma_commit_reservation(h, vma, addr);

	return page;
}

int __weak alloc_bootmem_huge_page(struct hstate *h)
{
	struct huge_bootmem_page *m;
	int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);

	while (nr_nodes) {
		void *addr;

		addr = __alloc_bootmem_node_nopanic(
				NODE_DATA(hstate_next_node_to_alloc(h,
						&node_states[N_HIGH_MEMORY])),
				huge_page_size(h), huge_page_size(h), 0);

		if (addr) {
			/*
			 * Use the beginning of the huge page to store the
			 * huge_bootmem_page struct (until gather_bootmem
			 * puts them into the mem_map).
			 */
			m = addr;
			goto found;
		}
		nr_nodes--;
	}
	return 0;

found:
	BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
	/* Put them into a private list first because mem_map is not up yet */
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}
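
/*
 * Set up a compound page of the given order, using the gigantic variant
 * when the order exceeds what the buddy allocator handles.
 */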
static void prep_compound_huge_page(struct page *page, int order)
{
	if (unlikely(order > (MAX_ORDER - 1)))
		prep_compound_gigantic_page(page, order);
	else
		prep_compound_page(page, order);
}

/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
	struct huge_bootmem_page *m;

	list_for_each_entry(m, &huge_boot_pages, list) {
		struct page *page = virt_to_page(m);
		struct hstate *h = m->hstate;
		__ClearPageReserved(page);
		WARN_ON(page_count(page) != 1);
		prep_compound_huge_page(page, h->order);
		prep_new_huge_page(h, page, page_to_nid(page));
	}
}
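
/*
 * Preallocate max_huge_pages pages for an hstate at boot: from bootmem
 * for gigantic sizes, from the buddy allocator otherwise.  Clamp
 * max_huge_pages to the number actually obtained.
 */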
static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
	unsigned long i;

	for (i = 0; i < h->max_huge_pages; ++i) {
		if (h->order >= MAX_ORDER) {
			if (!alloc_bootmem_huge_page(h))
				break;
		} else if (!alloc_fresh_huge_page(h,
					 &node_states[N_HIGH_MEMORY]))
			break;
	}
	h->max_huge_pages = i;
}

static void __init hugetlb_init_hstates(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/* oversize hugepages were init'ed in early boot */
		if (h->order < MAX_ORDER)
			hugetlb_hstate_alloc_pages(h);
	}
}

static char * __init memfmt(char *buf, unsigned long n)
{
	if (n >= (1UL << 30))
		sprintf(buf, "%lu GB", n >> 30);
	else if (n >= (1UL << 20))
		sprintf(buf, "%lu MB", n >> 20);
	else
		sprintf(buf, "%lu KB", n >> 10);
	return buf;
}

static void __init report_hugepages(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		char buf[32];
		printk(KERN_INFO "HugeTLB registered %s page size, "
				 "pre-allocated %ld pages\n",
			memfmt(buf, huge_page_size(h)),
			h->free_huge_pages);
	}
}

#ifdef CONFIG_HIGHMEM
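/*
 * When shrinking the pool on a CONFIG_HIGHMEM system, prefer to free
 * huge pages that sit in lowmem, which is the scarcer resource.
 */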
static void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	int i;

	if (h->order >= MAX_ORDER)
		return;

	for_each_node_mask(i, *nodes_allowed) {
		struct page *page, *next;
		struct list_head *freel = &h->hugepage_freelists[i];
		list_for_each_entry_safe(page, next, freel, lru) {
			if (count >= h->nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(h, page);
			h->free_huge_pages--;
			h->free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
}
#endif

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
				int delta)
{
	int start_nid, next_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);

	if (delta < 0)
		start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
	else
		start_nid = hstate_next_node_to_free(h, nodes_allowed);
	next_nid = start_nid;

	do {
		int nid = next_nid;
		if (delta < 0) {
			/*
			 * To shrink on this node, there must be a surplus page
			 */
			if (!h->surplus_huge_pages_node[nid]) {
				next_nid = hstate_next_node_to_alloc(h,
								nodes_allowed);
				continue;
			}
		}
		if (delta > 0) {
			/*
			 * Surplus cannot exceed the total number of pages
			 */
			if (h->surplus_huge_pages_node[nid] >=
						h->nr_huge_pages_node[nid]) {
				next_nid = hstate_next_node_to_free(h,
								nodes_allowed);
				continue;
			}
		}

		h->surplus_huge_pages += delta;
		h->surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (next_nid != start_nid);

	return ret;
}

#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
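/*
 * Resize the persistent huge page pool to 'count' pages, converting
 * surplus pages or allocating/freeing fresh ones as needed within
 * nodes_allowed.  Returns the resulting persistent pool size.
 */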
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	unsigned long min_count, ret;

	if (h->order >= MAX_ORDER)
		return h->max_huge_pages;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, -1))
			break;
	}

	while (count > persistent_huge_pages(h)) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page(h, nodes_allowed);
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;

		/* Bail for signals. Probably ctrl-c from user */
		if (signal_pending(current))
			goto out;
	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_buddy_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls are changed, or the surplus pages go out of use.
	 */
	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(h, min_count, nodes_allowed);
	while (min_count < persistent_huge_pages(h)) {
		if (!free_pool_huge_page(h, nodes_allowed, 0))
			break;
	}
	while (count < persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, 1))
			break;
	}
out:
	ret = persistent_huge_pages(h);
	spin_unlock(&hugetlb_lock);
	return ret;
}

#define HSTATE_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define HSTATE_ATTR(_name) \
	static struct kobj_attribute _name##_attr = \
		__ATTR(_name, 0644, _name##_show, _name##_store)

static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);

static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
{
	int i;

	for (i = 0; i < HUGE_MAX_HSTATE; i++)
		if (hstate_kobjs[i] == kobj) {
			if (nidp)
				*nidp = NUMA_NO_NODE;
			return &hstates[i];
		}

	return kobj_to_node_hstate(kobj, nidp);
}

static ssize_t nr_hugepages_show_common(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long nr_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		nr_huge_pages = h->nr_huge_pages;
	else
		nr_huge_pages = h->nr_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", nr_huge_pages);
}
static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
			struct kobject *kobj, struct kobj_attribute *attr,
			const char *buf, size_t len)
{
	int err;
	int nid;
	unsigned long count;
	struct hstate *h;
	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);

	err = strict_strtoul(buf, 10, &count);
	if (err)
		return err;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE) {
		/*
		 * global hstate attribute
		 */
		if (!(obey_mempolicy &&
				init_nodemask_of_mempolicy(nodes_allowed))) {
			NODEMASK_FREE(nodes_allowed);
			nodes_allowed = &node_states[N_HIGH_MEMORY];
		}
	} else if (nodes_allowed) {
		/*
		 * per node hstate attribute: adjust count to global,
		 * but restrict alloc/free to the specified node.
		 */
		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
		init_nodemask_of_node(nodes_allowed, nid);
	} else
		nodes_allowed = &node_states[N_HIGH_MEMORY];

	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);

	if (nodes_allowed != &node_states[N_HIGH_MEMORY])
		NODEMASK_FREE(nodes_allowed);

	return len;
}
static ssize_t nr_hugepages_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_store(struct kobject *kobj,
	       struct kobj_attribute *attr, const char *buf, size_t len)
{
	return nr_hugepages_store_common(false, kobj, attr, buf, len);
}
HSTATE_ATTR(nr_hugepages);

#ifdef CONFIG_NUMA

/*
 * hstate attribute for optionally mempolicy-based constraint on persistent
 * huge page alloc/free.
 */
static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
	       struct kobj_attribute *attr, const char *buf, size_t len)
{
	return nr_hugepages_store_common(true, kobj, attr, buf, len);
}
HSTATE_ATTR(nr_hugepages_mempolicy);
#endif
static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
}

static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int err;
	unsigned long input;
	struct hstate *h = kobj_to_hstate(kobj, NULL);

	err = strict_strtoul(buf, 10, &input);
	if (err)
		return err;

	spin_lock(&hugetlb_lock);
	h->nr_overcommit_huge_pages = input;
	spin_unlock(&hugetlb_lock);

	return count;
}
HSTATE_ATTR(nr_overcommit_hugepages);
  1262. static ssize_t free_hugepages_show(struct kobject *kobj,
  1263. struct kobj_attribute *attr, char *buf)
  1264. {
  1265. struct hstate *h;
  1266. unsigned long free_huge_pages;
  1267. int nid;
  1268. h = kobj_to_hstate(kobj, &nid);
  1269. if (nid == NUMA_NO_NODE)
  1270. free_huge_pages = h->free_huge_pages;
  1271. else
  1272. free_huge_pages = h->free_huge_pages_node[nid];
  1273. return sprintf(buf, "%lu\n", free_huge_pages);
  1274. }
  1275. HSTATE_ATTR_RO(free_hugepages);
  1276. static ssize_t resv_hugepages_show(struct kobject *kobj,
  1277. struct kobj_attribute *attr, char *buf)
  1278. {
  1279. struct hstate *h = kobj_to_hstate(kobj, NULL);
  1280. return sprintf(buf, "%lu\n", h->resv_huge_pages);
  1281. }
  1282. HSTATE_ATTR_RO(resv_hugepages);
  1283. static ssize_t surplus_hugepages_show(struct kobject *kobj,
  1284. struct kobj_attribute *attr, char *buf)
  1285. {
  1286. struct hstate *h;
  1287. unsigned long surplus_huge_pages;
  1288. int nid;
  1289. h = kobj_to_hstate(kobj, &nid);
  1290. if (nid == NUMA_NO_NODE)
  1291. surplus_huge_pages = h->surplus_huge_pages;
  1292. else
  1293. surplus_huge_pages = h->surplus_huge_pages_node[nid];
  1294. return sprintf(buf, "%lu\n", surplus_huge_pages);
  1295. }
  1296. HSTATE_ATTR_RO(surplus_hugepages);
  1297. static struct attribute *hstate_attrs[] = {
  1298. &nr_hugepages_attr.attr,
  1299. &nr_overcommit_hugepages_attr.attr,
  1300. &free_hugepages_attr.attr,
  1301. &resv_hugepages_attr.attr,
  1302. &surplus_hugepages_attr.attr,
  1303. #ifdef CONFIG_NUMA
  1304. &nr_hugepages_mempolicy_attr.attr,
  1305. #endif
  1306. NULL,
  1307. };
  1308. static struct attribute_group hstate_attr_group = {
  1309. .attrs = hstate_attrs,
  1310. };
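
/*
 * Create a "hugepages-<size>kB" kobject below @parent and attach the
 * given attribute group to it.  Used both for the global
 * /sys/kernel/mm/hugepages directory and for the per-node copies.
 */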
static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
				    struct kobject **hstate_kobjs,
				    struct attribute_group *hstate_attr_group)
{
	int retval;
	int hi = h - hstates;

	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
	if (!hstate_kobjs[hi])
		return -ENOMEM;

	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
	if (retval)
		kobject_put(hstate_kobjs[hi]);

	return retval;
}

static void __init hugetlb_sysfs_init(void)
{
	struct hstate *h;
	int err;

	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
	if (!hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
					 hstate_kobjs, &hstate_attr_group);
		if (err)
			printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
								h->name);
	}
}

#ifdef CONFIG_NUMA

/*
 * node_hstate/s - associate per node hstate attributes, via their kobjects,
 * with node sysdevs in node_devices[] using a parallel array.  The array
 * index of a node sysdev or _hstate == node id.
 * This is here to avoid any static dependency of the node sysdev driver, in
 * the base kernel, on the hugetlb module.
 */
struct node_hstate {
	struct kobject *hugepages_kobj;
	struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
};
struct node_hstate node_hstates[MAX_NUMNODES];

/*
 * A subset of global hstate attributes for node sysdevs
 */
static struct attribute *per_node_hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
	NULL,
};

static struct attribute_group per_node_hstate_attr_group = {
	.attrs = per_node_hstate_attrs,
};

/*
 * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj.
 * Returns node id via non-NULL nidp.
 */
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	int nid;

	for (nid = 0; nid < nr_node_ids; nid++) {
		struct node_hstate *nhs = &node_hstates[nid];
		int i;
		for (i = 0; i < HUGE_MAX_HSTATE; i++)
			if (nhs->hstate_kobjs[i] == kobj) {
				if (nidp)
					*nidp = nid;
				return &hstates[i];
			}
	}

	BUG();
	return NULL;
}

/*
 * Unregister hstate attributes from a single node sysdev.
 * No-op if no hstate attributes attached.
 */
void hugetlb_unregister_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->sysdev.id];

	if (!nhs->hugepages_kobj)
		return;		/* no hstate attributes */

	for_each_hstate(h)
		if (nhs->hstate_kobjs[h - hstates]) {
			kobject_put(nhs->hstate_kobjs[h - hstates]);
			nhs->hstate_kobjs[h - hstates] = NULL;
		}

	kobject_put(nhs->hugepages_kobj);
	nhs->hugepages_kobj = NULL;
}

/*
 * hugetlb module exit:  unregister hstate attributes from node sysdevs
 * that have them.
 */
static void hugetlb_unregister_all_nodes(void)
{
	int nid;

	/*
	 * disable node sysdev registrations.
	 */
	register_hugetlbfs_with_node(NULL, NULL);

	/*
	 * remove hstate attributes from any nodes that have them.
	 */
	for (nid = 0; nid < nr_node_ids; nid++)
		hugetlb_unregister_node(&node_devices[nid]);
}

/*
 * Register hstate attributes for a single node sysdev.
 * No-op if attributes already registered.
 */
void hugetlb_register_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->sysdev.id];
	int err;

	if (nhs->hugepages_kobj)
		return;		/* already allocated */

	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
							&node->sysdev.kobj);
	if (!nhs->hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
						nhs->hstate_kobjs,
						&per_node_hstate_attr_group);
		if (err) {
			printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
					" for node %d\n",
						h->name, node->sysdev.id);
			hugetlb_unregister_node(node);
			break;
		}
	}
}

/*
 * hugetlb init time:  register hstate attributes for all registered node
 * sysdevs of nodes that have memory.  All on-line nodes should have
 * registered their associated sysdev by this time.
 */
static void hugetlb_register_all_nodes(void)
{
	int nid;

	for_each_node_state(nid, N_HIGH_MEMORY) {
		struct node *node = &node_devices[nid];
		if (node->sysdev.id == nid)
			hugetlb_register_node(node);
	}

	/*
	 * Let the node sysdev driver know we're here so it can
	 * [un]register hstate attributes on node hotplug.
	 */
	register_hugetlbfs_with_node(hugetlb_register_node,
				     hugetlb_unregister_node);
}
#else	/* !CONFIG_NUMA */

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	BUG();
	if (nidp)
		*nidp = -1;
	return NULL;
}

static void hugetlb_unregister_all_nodes(void) { }

static void hugetlb_register_all_nodes(void) { }

#endif

static void __exit hugetlb_exit(void)
{
	struct hstate *h;

	hugetlb_unregister_all_nodes();

	for_each_hstate(h) {
		kobject_put(hstate_kobjs[h - hstates]);
	}

	kobject_put(hugepages_kobj);
}
module_exit(hugetlb_exit);

static int __init hugetlb_init(void)
{
	/* Some platforms decide whether they support huge pages at boot
	 * time.  On these, such as powerpc, HPAGE_SHIFT is set to 0 when
	 * there is no such support.
	 */
	if (HPAGE_SHIFT == 0)
		return 0;

	if (!size_to_hstate(default_hstate_size)) {
		default_hstate_size = HPAGE_SIZE;
		if (!size_to_hstate(default_hstate_size))
			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
	}
	default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
	if (default_hstate_max_huge_pages)
		default_hstate.max_huge_pages = default_hstate_max_huge_pages;

	hugetlb_init_hstates();

	gather_bootmem_prealloc();

	report_hugepages();

	hugetlb_sysfs_init();

	hugetlb_register_all_nodes();

	return 0;
}
module_init(hugetlb_init);

/* Should be called on processing a hugepagesz=... option */
void __init hugetlb_add_hstate(unsigned order)
{
	struct hstate *h;
	unsigned long i;

	if (size_to_hstate(PAGE_SIZE << order)) {
		printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
		return;
	}
	BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
	BUG_ON(order == 0);
	h = &hstates[max_hstate++];
	h->order = order;
	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
	h->nr_huge_pages = 0;
	h->free_huge_pages = 0;
	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
	h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
	h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
					huge_page_size(h)/1024);

	parsed_hstate = h;
}

static int __init hugetlb_nrpages_setup(char *s)
{
	unsigned long *mhp;
	static unsigned long *last_mhp;

	/*
	 * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
	 * so this hugepages= parameter goes to the "default hstate".
	 */
	if (!max_hstate)
		mhp = &default_hstate_max_huge_pages;
	else
		mhp = &parsed_hstate->max_huge_pages;

	if (mhp == last_mhp) {
		printk(KERN_WARNING "hugepages= specified twice without "
			"interleaving hugepagesz=, ignoring\n");
		return 1;
	}

	if (sscanf(s, "%lu", mhp) <= 0)
		*mhp = 0;

	/*
	 * Global state is always initialized later in hugetlb_init.
	 * But we need to allocate >= MAX_ORDER hstates here early to still
	 * use the bootmem allocator.
	 */
	if (max_hstate && parsed_hstate->order >= MAX_ORDER)
		hugetlb_hstate_alloc_pages(parsed_hstate);

	last_mhp = mhp;

	return 1;
}
__setup("hugepages=", hugetlb_nrpages_setup);

static int __init hugetlb_default_setup(char *s)
{
	default_hstate_size = memparse(s, &s);
	return 1;
}
__setup("default_hugepagesz=", hugetlb_default_setup);
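
/*
 * Sum a per-node counter array (e.g. free_huge_pages_node[]) over the
 * nodes allowed by the current task's cpuset.  Used by
 * hugetlb_acct_memory() to sanity-check a reservation against what the
 * cpuset can actually provide.
 */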
static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
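/*
 * Common handler for the vm.nr_hugepages and vm.nr_hugepages_mempolicy
 * sysctls: reads report the persistent pool size of the default hstate,
 * writes resize the pool via set_max_huge_pages(), optionally
 * constrained to the nodes of the caller's mempolicy.
 */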
static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
			 struct ctl_table *table, int write,
			 void __user *buffer, size_t *length, loff_t *ppos)
{
	struct hstate *h = &default_hstate;
	unsigned long tmp;

	if (!write)
		tmp = h->max_huge_pages;

	table->data = &tmp;
	table->maxlen = sizeof(unsigned long);
	proc_doulongvec_minmax(table, write, buffer, length, ppos);

	if (write) {
		NODEMASK_ALLOC(nodemask_t, nodes_allowed,
						GFP_KERNEL | __GFP_NORETRY);
		if (!(obey_mempolicy &&
			       init_nodemask_of_mempolicy(nodes_allowed))) {
			NODEMASK_FREE(nodes_allowed);
			nodes_allowed = &node_states[N_HIGH_MEMORY];
		}
		h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);

		if (nodes_allowed != &node_states[N_HIGH_MEMORY])
			NODEMASK_FREE(nodes_allowed);
	}

	return 0;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			  void __user *buffer, size_t *length, loff_t *ppos)
{
	return hugetlb_sysctl_handler_common(false, table, write,
							buffer, length, ppos);
}

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
			  void __user *buffer, size_t *length, loff_t *ppos)
{
	return hugetlb_sysctl_handler_common(true, table, write,
							buffer, length, ppos);
}
#endif /* CONFIG_NUMA */

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}
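
/*
 * vm.nr_overcommit_hugepages sysctl: like the sysfs attribute, writes
 * simply update the surplus-allocation limit for the default hstate
 * under hugetlb_lock; existing surplus pages are not reclaimed here.
 */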
int hugetlb_overcommit_handler(struct ctl_table *table, int write,
			void __user *buffer,
			size_t *length, loff_t *ppos)
{
	struct hstate *h = &default_hstate;
	unsigned long tmp;

	if (!write)
		tmp = h->nr_overcommit_huge_pages;

	table->data = &tmp;
	table->maxlen = sizeof(unsigned long);
	proc_doulongvec_minmax(table, write, buffer, length, ppos);

	if (write) {
		spin_lock(&hugetlb_lock);
		h->nr_overcommit_huge_pages = tmp;
		spin_unlock(&hugetlb_lock);
	}

	return 0;
}

#endif /* CONFIG_SYSCTL */

void hugetlb_report_meminfo(struct seq_file *m)
{
	struct hstate *h = &default_hstate;
	seq_printf(m,
			"HugePages_Total:   %5lu\n"
			"HugePages_Free:    %5lu\n"
			"HugePages_Rsvd:    %5lu\n"
			"HugePages_Surp:    %5lu\n"
			"Hugepagesize:   %8lu kB\n",
			h->nr_huge_pages,
			h->free_huge_pages,
			h->resv_huge_pages,
			h->surplus_huge_pages,
			1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	struct hstate *h = &default_hstate;
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n"
		"Node %d HugePages_Surp:  %5u\n",
		nid, h->nr_huge_pages_node[nid],
		nid, h->free_huge_pages_node[nid],
		nid, h->surplus_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	struct hstate *h = &default_hstate;
	return h->nr_huge_pages * pages_per_huge_page(h);
}

static int hugetlb_acct_memory(struct hstate *h, long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	/*
	 * When cpusets are configured, they break strict hugetlb page
	 * reservation because the accounting is done on a global variable;
	 * such a reservation is never checked against page availability for
	 * the current cpuset.  An application can therefore still be OOM'ed
	 * by the kernel if the cpuset its task runs in has no free hugetlb
	 * pages.  Enforcing strict accounting with cpusets is nearly
	 * impossible (or too ugly) because cpusets are too fluid: tasks and
	 * memory nodes can be moved between cpusets at any time.
	 *
	 * Changing the semantics of shared hugetlb mappings under cpusets is
	 * undesirable.  However, to preserve some of those semantics, we
	 * fall back to checking against current free page availability as a
	 * best attempt, hopefully minimizing the impact of the semantics
	 * cpusets already change.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(h, delta) < 0)
			goto out;

		if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
			return_unused_surplus_pages(h, delta);
			goto out;
		}
	}

	ret = 0;
	if (delta < 0)
		return_unused_surplus_pages(h, (unsigned long) -delta);

out:
	spin_unlock(&hugetlb_lock);
	return ret;
}

static void hugetlb_vm_op_open(struct vm_area_struct *vma)
{
	struct resv_map *reservations = vma_resv_map(vma);

	/*
	 * This new VMA should share its sibling's reservation map if present.
	 * The VMA will only ever have a valid reservation map pointer where
	 * it is being copied for another still existing VMA.  As that VMA
	 * has a reference to the reservation map it cannot disappear until
	 * after this open call completes.  It is therefore safe to take a
	 * new reference here without additional locking.
	 */
	if (reservations)
		kref_get(&reservations->refs);
}

static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
	struct hstate *h = hstate_vma(vma);
	struct resv_map *reservations = vma_resv_map(vma);
	unsigned long reserve;
	unsigned long start;
	unsigned long end;

	if (reservations) {
		start = vma_hugecache_offset(h, vma, vma->vm_start);
		end = vma_hugecache_offset(h, vma, vma->vm_end);

		reserve = (end - start) -
			region_count(&reservations->regions, start, end);

		kref_put(&reservations->refs, resv_map_release);

		if (reserve) {
			hugetlb_acct_memory(h, -reserve);
			hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
		}
	}
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

const struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
	.open = hugetlb_vm_op_open,
	.close = hugetlb_vm_op_close,
};
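
/*
 * Build the huge PTE for @page with the VMA's protection bits.  Writable
 * mappings get a dirty, writable entry up front; read-only mappings are
 * write-protected so that a later write triggers hugetlb_cow().
 */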
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, ptep);
	}
}
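
/*
 * Duplicate the hugetlb portion of the page tables at fork().  For
 * private mappings that may be written (COW), the parent's PTEs are
 * write-protected as they are copied so that either side faults before
 * modifying the shared page.
 */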
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr, sz);
		if (!dst_pte)
			goto nomem;

		/* If the pagetables are shared don't copy or take references */
		if (dst_pte == src_pte)
			continue;

		spin_lock(&dst->page_table_lock);
		spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
		if (!huge_pte_none(huge_ptep_get(src_pte))) {
			if (cow)
				huge_ptep_set_wrprotect(src, addr, src_pte);
			entry = huge_ptep_get(src_pte);
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}
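
/*
 * Tear down huge PTEs in [start, end) and free the underlying pages.
 * The caller must hold the file's i_mmap_lock; pages are collected on a
 * local list and only released after the TLB has been flushed.
 */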
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end, struct page *ref_page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);

	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~huge_page_mask(h));
	BUG_ON(end & ~huge_page_mask(h));

	mmu_notifier_invalidate_range_start(mm, start, end);
	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += sz) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		/*
		 * If a reference page is supplied, it is because a specific
		 * page is being unmapped, not a range. Ensure the page we
		 * are about to unmap is the actual page of interest.
		 */
		if (ref_page) {
			pte = huge_ptep_get(ptep);
			if (huge_pte_none(pte))
				continue;
			page = pte_page(pte);
			if (page != ref_page)
				continue;

			/*
			 * Mark the VMA as having unmapped its page so that
			 * future faults in this VMA will fail rather than
			 * looking like data was lost
			 */
			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
		}

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (huge_pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	mmu_notifier_invalidate_range_end(mm, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page)
{
	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	__unmap_hugepage_range(vma, start, end, ref_page);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
}

/*
 * This is called when the original mapper is failing to COW a MAP_PRIVATE
 * mapping it owns the reserve page for. The intention is to unmap the page
 * from other VMAs and let the children be SIGKILLed if they are faulting the
 * same region.
 */
static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
				struct page *page, unsigned long address)
{
	struct hstate *h = hstate_vma(vma);
	struct vm_area_struct *iter_vma;
	struct address_space *mapping;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
	 * from page cache lookup which is in HPAGE_SIZE units.
	 */
	address = address & huge_page_mask(h);
	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
		+ (vma->vm_pgoff >> PAGE_SHIFT);
	mapping = (struct address_space *)page_private(page);

	/*
	 * Take the mapping lock for the duration of the table walk. As
	 * this mapping should be shared between all the VMAs,
	 * __unmap_hugepage_range() is called as the lock is already held
	 */
	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/* Do not unmap the current VMA */
		if (iter_vma == vma)
			continue;

		/*
		 * Unmap the page from other VMAs without their own reserves.
		 * They get marked to be SIGKILLed if they fault in these
		 * areas. This is because a future no-page fault on this VMA
		 * could insert a zeroed page instead of the data existing
		 * from the time of fork. This would look like data corruption
		 */
		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
			__unmap_hugepage_range(iter_vma,
				address, address + huge_page_size(h),
				page);
	}
	spin_unlock(&mapping->i_mmap_lock);

	return 1;
}
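
/*
 * Break copy-on-write for a huge page: allocate a new huge page, copy
 * the old contents into it and re-point the PTE at the copy.  Called
 * with mm->page_table_lock held; the lock is dropped around the
 * allocation and retaken before the page tables are updated.
 */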
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte,
			struct page *pagecache_page)
{
	struct hstate *h = hstate_vma(vma);
	struct page *old_page, *new_page;
	int avoidcopy;
	int outside_reserve = 0;

	old_page = pte_page(pte);

retry_avoidcopy:
	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	/*
	 * If the process that created a MAP_PRIVATE mapping is about to
	 * perform a COW due to a shared page count, attempt to satisfy
	 * the allocation without using the existing reserves. The pagecache
	 * page is used to determine if the reserve at this address was
	 * consumed or not. If reserves were used, a partial faulted mapping
	 * at the time of fork() could consume its reserves on COW instead
	 * of the full address range.
	 */
	if (!(vma->vm_flags & VM_MAYSHARE) &&
			is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
			old_page != pagecache_page)
		outside_reserve = 1;

	page_cache_get(old_page);

	/* Drop page_table_lock as buddy allocator may be called */
	spin_unlock(&mm->page_table_lock);
	new_page = alloc_huge_page(vma, address, outside_reserve);

	if (IS_ERR(new_page)) {
		page_cache_release(old_page);

		/*
		 * If a process owning a MAP_PRIVATE mapping fails to COW,
		 * it is due to references held by a child and an insufficient
		 * huge page pool. To guarantee the original mappers
		 * reliability, unmap the page from child processes. The child
		 * may get SIGKILLed if it later faults.
		 */
		if (outside_reserve) {
			BUG_ON(huge_pte_none(pte));
			if (unmap_ref_private(mm, vma, old_page, address)) {
				BUG_ON(page_count(old_page) != 1);
				BUG_ON(huge_pte_none(pte));
				spin_lock(&mm->page_table_lock);
				goto retry_avoidcopy;
			}
			WARN_ON_ONCE(1);
		}

		/* Caller expects lock to be held */
		spin_lock(&mm->page_table_lock);
		return -PTR_ERR(new_page);
	}

	copy_huge_page(new_page, old_page, address, vma);
	__SetPageUptodate(new_page);

	/*
	 * Retake the page_table_lock to check for racing updates
	 * before the page tables are altered
	 */
	spin_lock(&mm->page_table_lock);
	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
		/* Break COW */
		huge_ptep_clear_flush(vma, address, ptep);
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}

/* Return the pagecache page at a given address within a VMA */
static struct page *hugetlbfs_pagecache_page(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping;
	pgoff_t idx;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	return find_lock_page(mapping, idx);
}

/*
 * Return whether there is a pagecache page to back given address within VMA.
 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
 */
static bool hugetlbfs_pagecache_present(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping;
	pgoff_t idx;
	struct page *page;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	page = find_get_page(mapping, idx);
	if (page)
		put_page(page);
	return page != NULL;
}
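
/*
 * Handle a fault on a not-yet-present huge PTE: look the page up in (or
 * add it to) the hugetlbfs page cache, allocating and zeroing a fresh
 * huge page if necessary, then install the PTE under page_table_lock.
 */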
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, unsigned int flags)
{
	struct hstate *h = hstate_vma(vma);
	int ret = VM_FAULT_SIGBUS;
	pgoff_t idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	/*
	 * Currently, we are forced to kill the process in the event the
	 * original mapper has unmapped pages from the child due to a failed
	 * COW. Warn that such a situation has occurred as it may not be obvious
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
		printk(KERN_WARNING
			"PID %d killed due to inadequate hugepage pool\n",
			current->pid);
		return ret;
	}

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> huge_page_shift(h);
		if (idx >= size)
			goto out;
		page = alloc_huge_page(vma, address, 0);
		if (IS_ERR(page)) {
			ret = -PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, address, huge_page_size(h));
		__SetPageUptodate(page);

		if (vma->vm_flags & VM_MAYSHARE) {
			int err;
			struct inode *inode = mapping->host;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}

			spin_lock(&inode->i_lock);
			inode->i_blocks += blocks_per_huge_page(h);
			spin_unlock(&inode->i_lock);
		} else {
			lock_page(page);
			page->mapping = HUGETLB_POISON;
		}
	}

	/*
	 * If we are going to COW a private mapping later, we examine the
	 * pending reservations for this page now. This will ensure that
	 * any allocations necessary to record that reservation occur outside
	 * the spinlock.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
		if (vma_needs_reservation(h, vma, address) < 0) {
			ret = VM_FAULT_OOM;
			goto backout_unlocked;
		}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> huge_page_shift(h);
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!huge_pte_none(huge_ptep_get(ptep)))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
backout_unlocked:
	unlock_page(page);
	put_page(page);
	goto out;
}
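
/*
 * Top-level huge page fault handler.  Faults are serialized by
 * hugetlb_instantiation_mutex so two CPUs racing on the same page do
 * not both try to allocate a huge page; write faults on write-protected
 * entries are handed to hugetlb_cow().
 */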
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	struct page *pagecache_page = NULL;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
	struct hstate *h = hstate_vma(vma);

	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = huge_ptep_get(ptep);
	if (huge_pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, flags);
		goto out_mutex;
	}

	ret = 0;

	/*
	 * If we are going to COW the mapping later, we examine the pending
	 * reservations for this page now. This will ensure that any
	 * allocations necessary to record that reservation occur outside the
	 * spinlock. For private mappings, we also lookup the pagecache
	 * page now as it is used to determine if a reservation has been
	 * consumed.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
		if (vma_needs_reservation(h, vma, address) < 0) {
			ret = VM_FAULT_OOM;
			goto out_mutex;
		}

		if (!(vma->vm_flags & VM_MAYSHARE))
			pagecache_page = hugetlbfs_pagecache_page(h,
								vma, address);
	}

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
		goto out_page_table_lock;

	if (flags & FAULT_FLAG_WRITE) {
		if (!pte_write(entry)) {
			ret = hugetlb_cow(mm, vma, address, ptep, entry,
							pagecache_page);
			goto out_page_table_lock;
		}
		entry = pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
						flags & FAULT_FLAG_WRITE))
		update_mmu_cache(vma, address, ptep);

out_page_table_lock:
	spin_unlock(&mm->page_table_lock);

	if (pagecache_page) {
		unlock_page(pagecache_page);
		put_page(pagecache_page);
	}

out_mutex:
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

/* Can be overridden by architectures */
__attribute__((weak)) struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
	       pud_t *pud, int write)
{
	BUG();
	return NULL;
}
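
/*
 * get_user_pages() worker for hugetlb VMAs: walk from *position filling
 * in @pages/@vmas one PAGE_SIZE subpage at a time, faulting pages in
 * via hugetlb_fault() where needed.  Returns the number of subpages
 * pinned and updates *position and *length for the caller.
 */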
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i,
			unsigned int flags)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;
	struct hstate *h = hstate_vma(vma);

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		int absent;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage. We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
		absent = !pte || huge_pte_none(huge_ptep_get(pte));

		/*
		 * When coredumping, it suits get_dump_page if we just return
		 * an error where there's an empty slot with no huge pagecache
		 * to back it. This way, we avoid allocating a hugepage, and
		 * the sparse dumpfile avoids allocating disk blocks, but its
		 * huge holes still show up with zeroes where they need to be.
		 */
		if (absent && (flags & FOLL_DUMP) &&
		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
			remainder = 0;
			break;
		}

		if (absent ||
		    ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr,
				(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
			spin_lock(&mm->page_table_lock);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			break;
		}

		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
		page = pte_page(huge_ptep_get(pte));
same_page:
		if (pages) {
			pages[i] = mem_map_offset(page, pfn_offset);
			get_page(pages[i]);
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < pages_per_huge_page(h)) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i ? i : -EFAULT;
}
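
/*
 * mprotect() for hugetlb VMAs: rewrite every present huge PTE in
 * [address, end) with the new protection bits and flush the TLB for the
 * range afterwards.
 */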
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;
	struct hstate *h = hstate_vma(vma);

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += huge_page_size(h)) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!huge_pte_none(huge_ptep_get(ptep))) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}
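
/*
 * Reserve huge pages (and filesystem quota) for the range [from, to) of
 * a hugetlbfs file.  Shared mappings track reservations in the inode's
 * region list; private mappings get a per-VMA resv_map and the VMA is
 * marked HPAGE_RESV_OWNER.
 */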
int hugetlb_reserve_pages(struct inode *inode,
					long from, long to,
					struct vm_area_struct *vma,
					int acctflag)
{
	long ret, chg;
	struct hstate *h = hstate_inode(inode);

	/*
	 * Only apply hugepage reservation if asked. At fault time, an
	 * attempt will be made for VM_NORESERVE to allocate a page
	 * and filesystem quota without using reserves
	 */
	if (acctflag & VM_NORESERVE)
		return 0;

	/*
	 * Shared mappings base their reservation on the number of pages that
	 * are already allocated on behalf of the file. Private mappings need
	 * to reserve the full area even if read-only as mprotect() may be
	 * called to make the mapping read-write. Assume !vma is a shm mapping
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE)
		chg = region_chg(&inode->i_mapping->private_list, from, to);
	else {
		struct resv_map *resv_map = resv_map_alloc();
		if (!resv_map)
			return -ENOMEM;

		chg = to - from;

		set_vma_resv_map(vma, resv_map);
		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
	}

	if (chg < 0)
		return chg;

	/* There must be enough filesystem quota for the mapping */
	if (hugetlb_get_quota(inode->i_mapping, chg))
		return -ENOSPC;

	/*
	 * Check enough hugepages are available for the reservation.
	 * Hand back the quota if there are not
	 */
	ret = hugetlb_acct_memory(h, chg);
	if (ret < 0) {
		hugetlb_put_quota(inode->i_mapping, chg);
		return ret;
	}

	/*
	 * Account for the reservations made. Shared mappings record regions
	 * that have reservations as they are shared by multiple VMAs.
	 * When the last VMA disappears, the region map says how much
	 * the reservation was and the page cache tells how much of
	 * the reservation was consumed. Private mappings are per-VMA and
	 * only the consumed reservations are tracked. When the VMA
	 * disappears, the original reservation is the VMA size and the
	 * consumed reservations are stored in the map. Hence, nothing
	 * else has to be done for private mappings here
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE)
		region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}
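
/*
 * Release reservations when pages are removed from a hugetlbfs file
 * (truncate/unlink): trim the inode's region list from @offset, adjust
 * i_blocks, and hand back the unused quota and pool reservation.
 */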
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	struct hstate *h = hstate_inode(inode);
	long chg = region_truncate(&inode->i_mapping->private_list, offset);

	spin_lock(&inode->i_lock);
	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
	spin_unlock(&inode->i_lock);

	hugetlb_put_quota(inode->i_mapping, (chg - freed));
	hugetlb_acct_memory(h, -(chg - freed));
}