hugetlb.c

  1. /*
  2. * Generic hugetlb support.
  3. * (C) William Irwin, April 2004
  4. */
  5. #include <linux/gfp.h>
  6. #include <linux/list.h>
  7. #include <linux/init.h>
  8. #include <linux/module.h>
  9. #include <linux/mm.h>
  10. #include <linux/seq_file.h>
  11. #include <linux/sysctl.h>
  12. #include <linux/highmem.h>
  13. #include <linux/mmu_notifier.h>
  14. #include <linux/nodemask.h>
  15. #include <linux/pagemap.h>
  16. #include <linux/mempolicy.h>
  17. #include <linux/cpuset.h>
  18. #include <linux/mutex.h>
  19. #include <linux/bootmem.h>
  20. #include <linux/sysfs.h>
  21. #include <asm/page.h>
  22. #include <asm/pgtable.h>
  23. #include <asm/io.h>
  24. #include <linux/hugetlb.h>
  25. #include "internal.h"
  26. const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
  27. static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
  28. unsigned long hugepages_treat_as_movable;
  29. static int max_hstate;
  30. unsigned int default_hstate_idx;
  31. struct hstate hstates[HUGE_MAX_HSTATE];
  32. __initdata LIST_HEAD(huge_boot_pages);
  33. /* for command line parsing */
  34. static struct hstate * __initdata parsed_hstate;
  35. static unsigned long __initdata default_hstate_max_huge_pages;
  36. static unsigned long __initdata default_hstate_size;
  37. #define for_each_hstate(h) \
  38. for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)
  39. /*
  40. * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
  41. */
  42. static DEFINE_SPINLOCK(hugetlb_lock);
  43. /*
  44. * Region tracking -- allows tracking of reservations and instantiated pages
  45. * across the pages in a mapping.
  46. *
  47. * The region data structures are protected by a combination of the mmap_sem
  48. * and the hugetlb_instantiation_mutex. To access or modify a region the caller
  49. * must either hold the mmap_sem for write, or the mmap_sem for read and
  50. * the hugetlb_instantiation mutex:
  51. *
  52. * down_write(&mm->mmap_sem);
  53. * or
  54. * down_read(&mm->mmap_sem);
  55. * mutex_lock(&hugetlb_instantiation_mutex);
  56. */
  57. struct file_region {
  58. struct list_head link;
  59. long from;
  60. long to;
  61. };
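/*
 * Editor's illustration (not part of the original source): a mapping whose
 * huge-page offsets [0, 3) and [5, 6) carry reservations is represented by
 * two file_region entries on the per-mapping list:
 *
 *	{ .from = 0, .to = 3 } -> { .from = 5, .to = 6 }
 *
 * region_chg() computes (and prepares for) the number of pages a new range
 * would add to such a list, and region_add() commits the change; the
 * reservation code below uses them as a prepare/commit pair.
 */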
  62. static long region_add(struct list_head *head, long f, long t)
  63. {
  64. struct file_region *rg, *nrg, *trg;
  65. /* Locate the region we are either in or before. */
  66. list_for_each_entry(rg, head, link)
  67. if (f <= rg->to)
  68. break;
  69. /* Round our left edge to the current segment if it encloses us. */
  70. if (f > rg->from)
  71. f = rg->from;
  72. /* Check for and consume any regions we now overlap with. */
  73. nrg = rg;
  74. list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
  75. if (&rg->link == head)
  76. break;
  77. if (rg->from > t)
  78. break;
  79. /* If this area reaches higher, then extend our area to
  80. * include it completely. If this is not the first area
  81. * which we intend to reuse, free it. */
  82. if (rg->to > t)
  83. t = rg->to;
  84. if (rg != nrg) {
  85. list_del(&rg->link);
  86. kfree(rg);
  87. }
  88. }
  89. nrg->from = f;
  90. nrg->to = t;
  91. return 0;
  92. }
  93. static long region_chg(struct list_head *head, long f, long t)
  94. {
  95. struct file_region *rg, *nrg;
  96. long chg = 0;
  97. /* Locate the region we are before or in. */
  98. list_for_each_entry(rg, head, link)
  99. if (f <= rg->to)
  100. break;
  101. /* If we are below the current region then a new region is required.
  102. * Subtle: allocate a new region at the position, but make it zero
  103. * size such that we can guarantee to record the reservation. */
  104. if (&rg->link == head || t < rg->from) {
  105. nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
  106. if (!nrg)
  107. return -ENOMEM;
  108. nrg->from = f;
  109. nrg->to = f;
  110. INIT_LIST_HEAD(&nrg->link);
  111. list_add(&nrg->link, rg->link.prev);
  112. return t - f;
  113. }
  114. /* Round our left edge to the current segment if it encloses us. */
  115. if (f > rg->from)
  116. f = rg->from;
  117. chg = t - f;
  118. /* Check for and consume any regions we now overlap with. */
  119. list_for_each_entry(rg, rg->link.prev, link) {
  120. if (&rg->link == head)
  121. break;
  122. if (rg->from > t)
  123. return chg;
  124. /* We overlap with this area; if it extends further than
  125. * us then we must extend ourselves. Account for its
  126. * existing reservation. */
  127. if (rg->to > t) {
  128. chg += rg->to - t;
  129. t = rg->to;
  130. }
  131. chg -= rg->to - rg->from;
  132. }
  133. return chg;
  134. }
  135. static long region_truncate(struct list_head *head, long end)
  136. {
  137. struct file_region *rg, *trg;
  138. long chg = 0;
  139. /* Locate the region we are either in or before. */
  140. list_for_each_entry(rg, head, link)
  141. if (end <= rg->to)
  142. break;
  143. if (&rg->link == head)
  144. return 0;
  145. /* If we are in the middle of a region then adjust it. */
  146. if (end > rg->from) {
  147. chg = rg->to - end;
  148. rg->to = end;
  149. rg = list_entry(rg->link.next, typeof(*rg), link);
  150. }
  151. /* Drop any remaining regions. */
  152. list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
  153. if (&rg->link == head)
  154. break;
  155. chg += rg->to - rg->from;
  156. list_del(&rg->link);
  157. kfree(rg);
  158. }
  159. return chg;
  160. }
  161. static long region_count(struct list_head *head, long f, long t)
  162. {
  163. struct file_region *rg;
  164. long chg = 0;
  165. /* Locate each segment we overlap with, and count that overlap. */
  166. list_for_each_entry(rg, head, link) {
  167. long seg_from;
  168. long seg_to;
  169. if (rg->to <= f)
  170. continue;
  171. if (rg->from >= t)
  172. break;
  173. seg_from = max(rg->from, f);
  174. seg_to = min(rg->to, t);
  175. chg += seg_to - seg_from;
  176. }
  177. return chg;
  178. }
  179. /*
  180. * Convert the address within this vma to the page offset within
  181. * the mapping, in pagecache page units; huge pages here.
  182. */
  183. static pgoff_t vma_hugecache_offset(struct hstate *h,
  184. struct vm_area_struct *vma, unsigned long address)
  185. {
  186. return ((address - vma->vm_start) >> huge_page_shift(h)) +
  187. (vma->vm_pgoff >> huge_page_order(h));
  188. }
  189. /*
  190. * Flags for MAP_PRIVATE reservations. These are stored in the bottom
  191. * bits of the reservation map pointer, which are always clear due to
  192. * alignment.
  193. */
  194. #define HPAGE_RESV_OWNER (1UL << 0)
  195. #define HPAGE_RESV_UNMAPPED (1UL << 1)
  196. #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
  197. /*
  198. * These helpers are used to track how many pages are reserved for
  199. * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
  201. * is guaranteed to have its future faults succeed.
  201. *
  202. * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
  203. * the reserve counters are updated with the hugetlb_lock held. It is safe
  204. * to reset the VMA at fork() time as it is not in use yet and there is no
  205. * chance of the global counters getting corrupted as a result of the values.
  206. *
  207. * The private mapping reservation is represented in a subtly different
  208. * manner to a shared mapping. A shared mapping has a region map associated
  209. * with the underlying file; this region map represents the backing file
  210. * pages which have ever had a reservation assigned, and it persists even
  211. * after the pages are instantiated. A private mapping has a region map
  212. * associated with the original mmap() which is attached to all VMAs that
  213. * reference it; this region map represents those offsets which have consumed
  214. * reservation, i.e. where pages have been instantiated.
  215. */
  216. static unsigned long get_vma_private_data(struct vm_area_struct *vma)
  217. {
  218. return (unsigned long)vma->vm_private_data;
  219. }
  220. static void set_vma_private_data(struct vm_area_struct *vma,
  221. unsigned long value)
  222. {
  223. vma->vm_private_data = (void *)value;
  224. }
  225. struct resv_map {
  226. struct kref refs;
  227. struct list_head regions;
  228. };
  229. static struct resv_map *resv_map_alloc(void)
  230. {
  231. struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
  232. if (!resv_map)
  233. return NULL;
  234. kref_init(&resv_map->refs);
  235. INIT_LIST_HEAD(&resv_map->regions);
  236. return resv_map;
  237. }
  238. static void resv_map_release(struct kref *ref)
  239. {
  240. struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
  241. /* Clear out any active regions before we release the map. */
  242. region_truncate(&resv_map->regions, 0);
  243. kfree(resv_map);
  244. }
  245. static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
  246. {
  247. VM_BUG_ON(!is_vm_hugetlb_page(vma));
  248. if (!(vma->vm_flags & VM_SHARED))
  249. return (struct resv_map *)(get_vma_private_data(vma) &
  250. ~HPAGE_RESV_MASK);
  251. return NULL;
  252. }
  253. static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
  254. {
  255. VM_BUG_ON(!is_vm_hugetlb_page(vma));
  256. VM_BUG_ON(vma->vm_flags & VM_SHARED);
  257. set_vma_private_data(vma, (get_vma_private_data(vma) &
  258. HPAGE_RESV_MASK) | (unsigned long)map);
  259. }
  260. static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
  261. {
  262. VM_BUG_ON(!is_vm_hugetlb_page(vma));
  263. VM_BUG_ON(vma->vm_flags & VM_SHARED);
  264. set_vma_private_data(vma, get_vma_private_data(vma) | flags);
  265. }
  266. static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
  267. {
  268. VM_BUG_ON(!is_vm_hugetlb_page(vma));
  269. return (get_vma_private_data(vma) & flag) != 0;
  270. }
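/*
 * Illustrative sketch (editorial, not in the original source): the task that
 * owns a MAP_PRIVATE mapping packs a resv_map pointer together with the
 * HPAGE_RESV_OWNER flag into vm_private_data, roughly:
 *
 *	set_vma_resv_map(vma, resv_map_alloc());
 *	set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
 *
 * The pointer and the flag bits can share the word because the kmalloc'ed
 * map is at least word aligned, so its low bits are always clear.
 */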
  271. /* Decrement the reserved pages in the hugepage pool by one */
  272. static void decrement_hugepage_resv_vma(struct hstate *h,
  273. struct vm_area_struct *vma)
  274. {
  275. if (vma->vm_flags & VM_NORESERVE)
  276. return;
  277. if (vma->vm_flags & VM_SHARED) {
  278. /* Shared mappings always use reserves */
  279. h->resv_huge_pages--;
  280. } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
  281. /*
  282. * Only the process that called mmap() has reserves for
  283. * private mappings.
  284. */
  285. h->resv_huge_pages--;
  286. }
  287. }
  288. /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
  289. void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
  290. {
  291. VM_BUG_ON(!is_vm_hugetlb_page(vma));
  292. if (!(vma->vm_flags & VM_SHARED))
  293. vma->vm_private_data = (void *)0;
  294. }
  295. /* Returns true if the VMA has associated reserve pages */
  296. static int vma_has_reserves(struct vm_area_struct *vma)
  297. {
  298. if (vma->vm_flags & VM_SHARED)
  299. return 1;
  300. if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
  301. return 1;
  302. return 0;
  303. }
  304. static void clear_huge_page(struct page *page,
  305. unsigned long addr, unsigned long sz)
  306. {
  307. int i;
  308. might_sleep();
  309. for (i = 0; i < sz/PAGE_SIZE; i++) {
  310. cond_resched();
  311. clear_user_highpage(page + i, addr + i * PAGE_SIZE);
  312. }
  313. }
  314. static void copy_huge_page(struct page *dst, struct page *src,
  315. unsigned long addr, struct vm_area_struct *vma)
  316. {
  317. int i;
  318. struct hstate *h = hstate_vma(vma);
  319. might_sleep();
  320. for (i = 0; i < pages_per_huge_page(h); i++) {
  321. cond_resched();
  322. copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
  323. }
  324. }
  325. static void enqueue_huge_page(struct hstate *h, struct page *page)
  326. {
  327. int nid = page_to_nid(page);
  328. list_add(&page->lru, &h->hugepage_freelists[nid]);
  329. h->free_huge_pages++;
  330. h->free_huge_pages_node[nid]++;
  331. }
  332. static struct page *dequeue_huge_page(struct hstate *h)
  333. {
  334. int nid;
  335. struct page *page = NULL;
  336. for (nid = 0; nid < MAX_NUMNODES; ++nid) {
  337. if (!list_empty(&h->hugepage_freelists[nid])) {
  338. page = list_entry(h->hugepage_freelists[nid].next,
  339. struct page, lru);
  340. list_del(&page->lru);
  341. h->free_huge_pages--;
  342. h->free_huge_pages_node[nid]--;
  343. break;
  344. }
  345. }
  346. return page;
  347. }
  348. static struct page *dequeue_huge_page_vma(struct hstate *h,
  349. struct vm_area_struct *vma,
  350. unsigned long address, int avoid_reserve)
  351. {
  352. int nid;
  353. struct page *page = NULL;
  354. struct mempolicy *mpol;
  355. nodemask_t *nodemask;
  356. struct zonelist *zonelist = huge_zonelist(vma, address,
  357. htlb_alloc_mask, &mpol, &nodemask);
  358. struct zone *zone;
  359. struct zoneref *z;
  360. /*
  361. * A child process with MAP_PRIVATE mappings created by its parent
  362. * has no page reserves. This check ensures that reservations are
  363. * not "stolen". The child may still get SIGKILLed.
  364. */
  365. if (!vma_has_reserves(vma) &&
  366. h->free_huge_pages - h->resv_huge_pages == 0)
  367. return NULL;
  368. /* If reserves cannot be used, ensure enough pages are in the pool */
  369. if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
  370. return NULL;
  371. for_each_zone_zonelist_nodemask(zone, z, zonelist,
  372. MAX_NR_ZONES - 1, nodemask) {
  373. nid = zone_to_nid(zone);
  374. if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
  375. !list_empty(&h->hugepage_freelists[nid])) {
  376. page = list_entry(h->hugepage_freelists[nid].next,
  377. struct page, lru);
  378. list_del(&page->lru);
  379. h->free_huge_pages--;
  380. h->free_huge_pages_node[nid]--;
  381. if (!avoid_reserve)
  382. decrement_hugepage_resv_vma(h, vma);
  383. break;
  384. }
  385. }
  386. mpol_cond_put(mpol);
  387. return page;
  388. }
  389. static void update_and_free_page(struct hstate *h, struct page *page)
  390. {
  391. int i;
  392. h->nr_huge_pages--;
  393. h->nr_huge_pages_node[page_to_nid(page)]--;
  394. for (i = 0; i < pages_per_huge_page(h); i++) {
  395. page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
  396. 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
  397. 1 << PG_private | 1<< PG_writeback);
  398. }
  399. set_compound_page_dtor(page, NULL);
  400. set_page_refcounted(page);
  401. arch_release_hugepage(page);
  402. __free_pages(page, huge_page_order(h));
  403. }
  404. struct hstate *size_to_hstate(unsigned long size)
  405. {
  406. struct hstate *h;
  407. for_each_hstate(h) {
  408. if (huge_page_size(h) == size)
  409. return h;
  410. }
  411. return NULL;
  412. }
  413. static void free_huge_page(struct page *page)
  414. {
  415. /*
  416. * Can't pass hstate in here because it is called from the
  417. * compound page destructor.
  418. */
  419. struct hstate *h = page_hstate(page);
  420. int nid = page_to_nid(page);
  421. struct address_space *mapping;
  422. mapping = (struct address_space *) page_private(page);
  423. set_page_private(page, 0);
  424. BUG_ON(page_count(page));
  425. INIT_LIST_HEAD(&page->lru);
  426. spin_lock(&hugetlb_lock);
  427. if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
  428. update_and_free_page(h, page);
  429. h->surplus_huge_pages--;
  430. h->surplus_huge_pages_node[nid]--;
  431. } else {
  432. enqueue_huge_page(h, page);
  433. }
  434. spin_unlock(&hugetlb_lock);
  435. if (mapping)
  436. hugetlb_put_quota(mapping, 1);
  437. }
  438. /*
  439. * Increment or decrement surplus_huge_pages. Keep node-specific counters
  440. * balanced by operating on them in a round-robin fashion.
  441. * Returns 1 if an adjustment was made.
  442. */
  443. static int adjust_pool_surplus(struct hstate *h, int delta)
  444. {
  445. static int prev_nid;
  446. int nid = prev_nid;
  447. int ret = 0;
  448. VM_BUG_ON(delta != -1 && delta != 1);
  449. do {
  450. nid = next_node(nid, node_online_map);
  451. if (nid == MAX_NUMNODES)
  452. nid = first_node(node_online_map);
  453. /* To shrink on this node, there must be a surplus page */
  454. if (delta < 0 && !h->surplus_huge_pages_node[nid])
  455. continue;
  456. /* Surplus cannot exceed the total number of pages */
  457. if (delta > 0 && h->surplus_huge_pages_node[nid] >=
  458. h->nr_huge_pages_node[nid])
  459. continue;
  460. h->surplus_huge_pages += delta;
  461. h->surplus_huge_pages_node[nid] += delta;
  462. ret = 1;
  463. break;
  464. } while (nid != prev_nid);
  465. prev_nid = nid;
  466. return ret;
  467. }
  468. static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
  469. {
  470. set_compound_page_dtor(page, free_huge_page);
  471. spin_lock(&hugetlb_lock);
  472. h->nr_huge_pages++;
  473. h->nr_huge_pages_node[nid]++;
  474. spin_unlock(&hugetlb_lock);
  475. put_page(page); /* free it into the hugepage allocator */
  476. }
  477. static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
  478. {
  479. struct page *page;
  480. if (h->order >= MAX_ORDER)
  481. return NULL;
  482. page = alloc_pages_node(nid,
  483. htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
  484. __GFP_REPEAT|__GFP_NOWARN,
  485. huge_page_order(h));
  486. if (page) {
  487. if (arch_prepare_hugepage(page)) {
  488. __free_pages(page, huge_page_order(h));
  489. return NULL;
  490. }
  491. prep_new_huge_page(h, page, nid);
  492. }
  493. return page;
  494. }
  495. /*
  496. * Use a helper variable to find the next node and then
  497. * copy it back to hugetlb_next_nid afterwards:
  498. * otherwise there's a window in which a racer might
  499. * pass invalid nid MAX_NUMNODES to alloc_pages_node.
  500. * But we don't need to use a spin_lock here: it really
  501. * doesn't matter if occasionally a racer chooses the
  502. * same nid as we do. Move nid forward in the mask even
  503. * if we just successfully allocated a hugepage so that
  504. * the next caller gets hugepages on the next node.
  505. */
  506. static int hstate_next_node(struct hstate *h)
  507. {
  508. int next_nid;
  509. next_nid = next_node(h->hugetlb_next_nid, node_online_map);
  510. if (next_nid == MAX_NUMNODES)
  511. next_nid = first_node(node_online_map);
  512. h->hugetlb_next_nid = next_nid;
  513. return next_nid;
  514. }
  515. static int alloc_fresh_huge_page(struct hstate *h)
  516. {
  517. struct page *page;
  518. int start_nid;
  519. int next_nid;
  520. int ret = 0;
  521. start_nid = h->hugetlb_next_nid;
  522. do {
  523. page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid);
  524. if (page)
  525. ret = 1;
  526. next_nid = hstate_next_node(h);
  527. } while (!page && h->hugetlb_next_nid != start_nid);
  528. if (ret)
  529. count_vm_event(HTLB_BUDDY_PGALLOC);
  530. else
  531. count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
  532. return ret;
  533. }
  534. static struct page *alloc_buddy_huge_page(struct hstate *h,
  535. struct vm_area_struct *vma, unsigned long address)
  536. {
  537. struct page *page;
  538. unsigned int nid;
  539. if (h->order >= MAX_ORDER)
  540. return NULL;
  541. /*
  542. * Assume we will successfully allocate the surplus page to
  543. * prevent racing processes from causing the surplus to exceed
  544. * overcommit
  545. *
  546. * This however introduces a different race, where a process B
  547. * tries to grow the static hugepage pool while alloc_pages() is
  548. * called by process A. B will only examine the per-node
  549. * counters in determining if surplus huge pages can be
  550. * converted to normal huge pages in adjust_pool_surplus(). A
  551. * won't be able to increment the per-node counter, until the
  552. * lock is dropped by B, but B doesn't drop hugetlb_lock until
  553. * no more huge pages can be converted from surplus to normal
  554. * state (and doesn't try to convert again). Thus, we have a
  555. * case where a surplus huge page exists, the pool is grown, and
  556. * the surplus huge page still exists after, even though it
  557. * should just have been converted to a normal huge page. This
  558. * does not leak memory, though, as the hugepage will be freed
  559. * once it is out of use. It also does not allow the counters to
  560. * go out of whack in adjust_pool_surplus() as we don't modify
  561. * the node values until we've gotten the hugepage and only the
  562. * per-node value is checked there.
  563. */
  564. spin_lock(&hugetlb_lock);
  565. if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
  566. spin_unlock(&hugetlb_lock);
  567. return NULL;
  568. } else {
  569. h->nr_huge_pages++;
  570. h->surplus_huge_pages++;
  571. }
  572. spin_unlock(&hugetlb_lock);
  573. page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
  574. __GFP_REPEAT|__GFP_NOWARN,
  575. huge_page_order(h));
  576. if (page && arch_prepare_hugepage(page)) {
  577. __free_pages(page, huge_page_order(h));
  578. return NULL;
  579. }
  580. spin_lock(&hugetlb_lock);
  581. if (page) {
  582. /*
  583. * This page is now managed by the hugetlb allocator and has
  584. * no users -- drop the buddy allocator's reference.
  585. */
  586. put_page_testzero(page);
  587. VM_BUG_ON(page_count(page));
  588. nid = page_to_nid(page);
  589. set_compound_page_dtor(page, free_huge_page);
  590. /*
  591. * We incremented the global counters already
  592. */
  593. h->nr_huge_pages_node[nid]++;
  594. h->surplus_huge_pages_node[nid]++;
  595. __count_vm_event(HTLB_BUDDY_PGALLOC);
  596. } else {
  597. h->nr_huge_pages--;
  598. h->surplus_huge_pages--;
  599. __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
  600. }
  601. spin_unlock(&hugetlb_lock);
  602. return page;
  603. }
  604. /*
  605. * Increase the hugetlb pool such that it can accommodate a reservation
  606. * of size 'delta'.
  607. */
  608. static int gather_surplus_pages(struct hstate *h, int delta)
  609. {
  610. struct list_head surplus_list;
  611. struct page *page, *tmp;
  612. int ret, i;
  613. int needed, allocated;
  614. needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
  615. if (needed <= 0) {
  616. h->resv_huge_pages += delta;
  617. return 0;
  618. }
  619. allocated = 0;
  620. INIT_LIST_HEAD(&surplus_list);
  621. ret = -ENOMEM;
  622. retry:
  623. spin_unlock(&hugetlb_lock);
  624. for (i = 0; i < needed; i++) {
  625. page = alloc_buddy_huge_page(h, NULL, 0);
  626. if (!page) {
  627. /*
  628. * We were not able to allocate enough pages to
  629. * satisfy the entire reservation so we free what
  630. * we've allocated so far.
  631. */
  632. spin_lock(&hugetlb_lock);
  633. needed = 0;
  634. goto free;
  635. }
  636. list_add(&page->lru, &surplus_list);
  637. }
  638. allocated += needed;
  639. /*
  640. * After retaking hugetlb_lock, we need to recalculate 'needed'
  641. * because either resv_huge_pages or free_huge_pages may have changed.
  642. */
  643. spin_lock(&hugetlb_lock);
  644. needed = (h->resv_huge_pages + delta) -
  645. (h->free_huge_pages + allocated);
  646. if (needed > 0)
  647. goto retry;
  648. /*
  649. * The surplus_list now contains _at_least_ the number of extra pages
  650. * needed to accommodate the reservation. Add the appropriate number
  651. * of pages to the hugetlb pool and free the extras back to the buddy
  652. * allocator. Commit the entire reservation here to prevent another
  653. * process from stealing the pages as they are added to the pool but
  654. * before they are reserved.
  655. */
  656. needed += allocated;
  657. h->resv_huge_pages += delta;
  658. ret = 0;
  659. free:
  660. /* Free the needed pages to the hugetlb pool */
  661. list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
  662. if ((--needed) < 0)
  663. break;
  664. list_del(&page->lru);
  665. enqueue_huge_page(h, page);
  666. }
  667. /* Free unnecessary surplus pages to the buddy allocator */
  668. if (!list_empty(&surplus_list)) {
  669. spin_unlock(&hugetlb_lock);
  670. list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
  671. list_del(&page->lru);
  672. /*
  673. * The page has a reference count of zero already, so
  674. * call free_huge_page directly instead of using
  675. * put_page. This must be done with hugetlb_lock
  676. * unlocked which is safe because free_huge_page takes
  677. * hugetlb_lock before deciding how to free the page.
  678. */
  679. free_huge_page(page);
  680. }
  681. spin_lock(&hugetlb_lock);
  682. }
  683. return ret;
  684. }
  685. /*
  686. * When releasing a hugetlb pool reservation, any surplus pages that were
  687. * allocated to satisfy the reservation must be explicitly freed if they were
  688. * never used.
  689. */
  690. static void return_unused_surplus_pages(struct hstate *h,
  691. unsigned long unused_resv_pages)
  692. {
  693. static int nid = -1;
  694. struct page *page;
  695. unsigned long nr_pages;
  696. /*
  697. * We want to release as many surplus pages as possible, spread
  698. * evenly across all nodes. Iterate across all nodes until we
  699. * can no longer free unreserved surplus pages. This occurs when
  700. * the nodes with surplus pages have no free pages.
  701. */
  702. unsigned long remaining_iterations = num_online_nodes();
  703. /* Uncommit the reservation */
  704. h->resv_huge_pages -= unused_resv_pages;
  705. /* Cannot return gigantic pages currently */
  706. if (h->order >= MAX_ORDER)
  707. return;
  708. nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
  709. while (remaining_iterations-- && nr_pages) {
  710. nid = next_node(nid, node_online_map);
  711. if (nid == MAX_NUMNODES)
  712. nid = first_node(node_online_map);
  713. if (!h->surplus_huge_pages_node[nid])
  714. continue;
  715. if (!list_empty(&h->hugepage_freelists[nid])) {
  716. page = list_entry(h->hugepage_freelists[nid].next,
  717. struct page, lru);
  718. list_del(&page->lru);
  719. update_and_free_page(h, page);
  720. h->free_huge_pages--;
  721. h->free_huge_pages_node[nid]--;
  722. h->surplus_huge_pages--;
  723. h->surplus_huge_pages_node[nid]--;
  724. nr_pages--;
  725. remaining_iterations = num_online_nodes();
  726. }
  727. }
  728. }
  729. /*
  730. * Determine if the huge page at addr within the vma has an associated
  731. * reservation. Where it does not, we will need to logically increase the
  732. * reservation and actually increase quota before an allocation can occur.
  733. * Where any new reservation would be required the reservation change is
  734. * prepared, but not committed. Once the page has been quota'd, allocated,
  735. * and instantiated, the change should be committed via vma_commit_reservation().
  736. * No action is required on failure.
  737. */
  738. static int vma_needs_reservation(struct hstate *h,
  739. struct vm_area_struct *vma, unsigned long addr)
  740. {
  741. struct address_space *mapping = vma->vm_file->f_mapping;
  742. struct inode *inode = mapping->host;
  743. if (vma->vm_flags & VM_SHARED) {
  744. pgoff_t idx = vma_hugecache_offset(h, vma, addr);
  745. return region_chg(&inode->i_mapping->private_list,
  746. idx, idx + 1);
  747. } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
  748. return 1;
  749. } else {
  750. int err;
  751. pgoff_t idx = vma_hugecache_offset(h, vma, addr);
  752. struct resv_map *reservations = vma_resv_map(vma);
  753. err = region_chg(&reservations->regions, idx, idx + 1);
  754. if (err < 0)
  755. return err;
  756. return 0;
  757. }
  758. }
  759. static void vma_commit_reservation(struct hstate *h,
  760. struct vm_area_struct *vma, unsigned long addr)
  761. {
  762. struct address_space *mapping = vma->vm_file->f_mapping;
  763. struct inode *inode = mapping->host;
  764. if (vma->vm_flags & VM_SHARED) {
  765. pgoff_t idx = vma_hugecache_offset(h, vma, addr);
  766. region_add(&inode->i_mapping->private_list, idx, idx + 1);
  767. } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
  768. pgoff_t idx = vma_hugecache_offset(h, vma, addr);
  769. struct resv_map *reservations = vma_resv_map(vma);
  770. /* Mark this page used in the map. */
  771. region_add(&reservations->regions, idx, idx + 1);
  772. }
  773. }
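/*
 * Usage sketch (editorial; it mirrors alloc_huge_page() below): callers
 * prepare the reservation change, charge quota, allocate, then commit:
 *
 *	chg = vma_needs_reservation(h, vma, addr);
 *	if (chg < 0)
 *		return ERR_PTR(chg);
 *	if (chg && hugetlb_get_quota(mapping, chg))
 *		return ERR_PTR(-ENOSPC);
 *	... allocate and prepare the page ...
 *	vma_commit_reservation(h, vma, addr);
 */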
  774. static struct page *alloc_huge_page(struct vm_area_struct *vma,
  775. unsigned long addr, int avoid_reserve)
  776. {
  777. struct hstate *h = hstate_vma(vma);
  778. struct page *page;
  779. struct address_space *mapping = vma->vm_file->f_mapping;
  780. struct inode *inode = mapping->host;
  781. long chg; /* signed: vma_needs_reservation() may return a negative errno */
  782. /*
  783. * Processes that did not create the mapping will have no reserves and
  784. * will not have accounted against quota. Check that the quota can be
  785. * made before satisfying the allocation.
  786. * MAP_NORESERVE mappings may also need pages and quota allocated
  787. * if no reserve mapping overlaps.
  788. */
  789. chg = vma_needs_reservation(h, vma, addr);
  790. if (chg < 0)
  791. return ERR_PTR(chg);
  792. if (chg)
  793. if (hugetlb_get_quota(inode->i_mapping, chg))
  794. return ERR_PTR(-ENOSPC);
  795. spin_lock(&hugetlb_lock);
  796. page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
  797. spin_unlock(&hugetlb_lock);
  798. if (!page) {
  799. page = alloc_buddy_huge_page(h, vma, addr);
  800. if (!page) {
  801. hugetlb_put_quota(inode->i_mapping, chg);
  802. return ERR_PTR(-VM_FAULT_OOM);
  803. }
  804. }
  805. set_page_refcounted(page);
  806. set_page_private(page, (unsigned long) mapping);
  807. vma_commit_reservation(h, vma, addr);
  808. return page;
  809. }
  810. __attribute__((weak)) int alloc_bootmem_huge_page(struct hstate *h)
  811. {
  812. struct huge_bootmem_page *m;
  813. int nr_nodes = nodes_weight(node_online_map);
  814. while (nr_nodes) {
  815. void *addr;
  816. addr = __alloc_bootmem_node_nopanic(
  817. NODE_DATA(h->hugetlb_next_nid),
  818. huge_page_size(h), huge_page_size(h), 0);
  819. if (addr) {
  820. /*
  821. * Use the beginning of the huge page to store the
  822. * huge_bootmem_page struct (until gather_bootmem
  823. * puts them into the mem_map).
  824. */
  825. m = addr;
  826. if (m)
  827. goto found;
  828. }
  829. hstate_next_node(h);
  830. nr_nodes--;
  831. }
  832. return 0;
  833. found:
  834. BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
  835. /* Put them into a private list first because mem_map is not up yet */
  836. list_add(&m->list, &huge_boot_pages);
  837. m->hstate = h;
  838. return 1;
  839. }
  840. /* Put bootmem huge pages into the standard lists after mem_map is up */
  841. static void __init gather_bootmem_prealloc(void)
  842. {
  843. struct huge_bootmem_page *m;
  844. list_for_each_entry(m, &huge_boot_pages, list) {
  845. struct page *page = virt_to_page(m);
  846. struct hstate *h = m->hstate;
  847. __ClearPageReserved(page);
  848. WARN_ON(page_count(page) != 1);
  849. prep_compound_page(page, h->order);
  850. prep_new_huge_page(h, page, page_to_nid(page));
  851. }
  852. }
  853. static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
  854. {
  855. unsigned long i;
  856. for (i = 0; i < h->max_huge_pages; ++i) {
  857. if (h->order >= MAX_ORDER) {
  858. if (!alloc_bootmem_huge_page(h))
  859. break;
  860. } else if (!alloc_fresh_huge_page(h))
  861. break;
  862. }
  863. h->max_huge_pages = i;
  864. }
  865. static void __init hugetlb_init_hstates(void)
  866. {
  867. struct hstate *h;
  868. for_each_hstate(h) {
  869. /* oversize hugepages were init'ed in early boot */
  870. if (h->order < MAX_ORDER)
  871. hugetlb_hstate_alloc_pages(h);
  872. }
  873. }
  874. static char * __init memfmt(char *buf, unsigned long n)
  875. {
  876. if (n >= (1UL << 30))
  877. sprintf(buf, "%lu GB", n >> 30);
  878. else if (n >= (1UL << 20))
  879. sprintf(buf, "%lu MB", n >> 20);
  880. else
  881. sprintf(buf, "%lu KB", n >> 10);
  882. return buf;
  883. }
  884. static void __init report_hugepages(void)
  885. {
  886. struct hstate *h;
  887. for_each_hstate(h) {
  888. char buf[32];
  889. printk(KERN_INFO "HugeTLB registered %s page size, "
  890. "pre-allocated %ld pages\n",
  891. memfmt(buf, huge_page_size(h)),
  892. h->free_huge_pages);
  893. }
  894. }
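/*
 * Example of the boot-time message printed above (values are illustrative
 * only and depend on the architecture and command line):
 *
 *	HugeTLB registered 2 MB page size, pre-allocated 512 pages
 */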
  895. #ifdef CONFIG_HIGHMEM
  896. static void try_to_free_low(struct hstate *h, unsigned long count)
  897. {
  898. int i;
  899. if (h->order >= MAX_ORDER)
  900. return;
  901. for (i = 0; i < MAX_NUMNODES; ++i) {
  902. struct page *page, *next;
  903. struct list_head *freel = &h->hugepage_freelists[i];
  904. list_for_each_entry_safe(page, next, freel, lru) {
  905. if (count >= h->nr_huge_pages)
  906. return;
  907. if (PageHighMem(page))
  908. continue;
  909. list_del(&page->lru);
  910. update_and_free_page(h, page);
  911. h->free_huge_pages--;
  912. h->free_huge_pages_node[page_to_nid(page)]--;
  913. }
  914. }
  915. }
  916. #else
  917. static inline void try_to_free_low(struct hstate *h, unsigned long count)
  918. {
  919. }
  920. #endif
  921. #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
  922. static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
  923. {
  924. unsigned long min_count, ret;
  925. if (h->order >= MAX_ORDER)
  926. return h->max_huge_pages;
  927. /*
  928. * Increase the pool size
  929. * First take pages out of surplus state. Then make up the
  930. * remaining difference by allocating fresh huge pages.
  931. *
  932. * We might race with alloc_buddy_huge_page() here and be unable
  933. * to convert a surplus huge page to a normal huge page. That is
  934. * not critical, though, it just means the overall size of the
  935. * pool might be one hugepage larger than it needs to be, but
  936. * within all the constraints specified by the sysctls.
  937. */
  938. spin_lock(&hugetlb_lock);
  939. while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
  940. if (!adjust_pool_surplus(h, -1))
  941. break;
  942. }
  943. while (count > persistent_huge_pages(h)) {
  944. /*
  945. * If this allocation races such that we no longer need the
  946. * page, free_huge_page will handle it by freeing the page
  947. * and reducing the surplus.
  948. */
  949. spin_unlock(&hugetlb_lock);
  950. ret = alloc_fresh_huge_page(h);
  951. spin_lock(&hugetlb_lock);
  952. if (!ret)
  953. goto out;
  954. }
  955. /*
  956. * Decrease the pool size
  957. * First return free pages to the buddy allocator (being careful
  958. * to keep enough around to satisfy reservations). Then place
  959. * pages into surplus state as needed so the pool will shrink
  960. * to the desired size as pages become free.
  961. *
  962. * By placing pages into the surplus state independent of the
  963. * overcommit value, we are allowing the surplus pool size to
  964. * exceed overcommit. There are few sane options here. Since
  965. * alloc_buddy_huge_page() is checking the global counter,
  966. * though, we'll note that we're not allowed to exceed surplus
  967. * and won't grow the pool anywhere else. Not until one of the
  968. * sysctls are changed, or the surplus pages go out of use.
  969. */
  970. min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
  971. min_count = max(count, min_count);
  972. try_to_free_low(h, min_count);
  973. while (min_count < persistent_huge_pages(h)) {
  974. struct page *page = dequeue_huge_page(h);
  975. if (!page)
  976. break;
  977. update_and_free_page(h, page);
  978. }
  979. while (count < persistent_huge_pages(h)) {
  980. if (!adjust_pool_surplus(h, 1))
  981. break;
  982. }
  983. out:
  984. ret = persistent_huge_pages(h);
  985. spin_unlock(&hugetlb_lock);
  986. return ret;
  987. }
  988. #define HSTATE_ATTR_RO(_name) \
  989. static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
  990. #define HSTATE_ATTR(_name) \
  991. static struct kobj_attribute _name##_attr = \
  992. __ATTR(_name, 0644, _name##_show, _name##_store)
  993. static struct kobject *hugepages_kobj;
  994. static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
  995. static struct hstate *kobj_to_hstate(struct kobject *kobj)
  996. {
  997. int i;
  998. for (i = 0; i < HUGE_MAX_HSTATE; i++)
  999. if (hstate_kobjs[i] == kobj)
  1000. return &hstates[i];
  1001. BUG();
  1002. return NULL;
  1003. }
  1004. static ssize_t nr_hugepages_show(struct kobject *kobj,
  1005. struct kobj_attribute *attr, char *buf)
  1006. {
  1007. struct hstate *h = kobj_to_hstate(kobj);
  1008. return sprintf(buf, "%lu\n", h->nr_huge_pages);
  1009. }
  1010. static ssize_t nr_hugepages_store(struct kobject *kobj,
  1011. struct kobj_attribute *attr, const char *buf, size_t count)
  1012. {
  1013. int err;
  1014. unsigned long input;
  1015. struct hstate *h = kobj_to_hstate(kobj);
  1016. err = strict_strtoul(buf, 10, &input);
  1017. if (err)
  1018. return err;
  1019. h->max_huge_pages = set_max_huge_pages(h, input);
  1020. return count;
  1021. }
  1022. HSTATE_ATTR(nr_hugepages);
  1023. static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
  1024. struct kobj_attribute *attr, char *buf)
  1025. {
  1026. struct hstate *h = kobj_to_hstate(kobj);
  1027. return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
  1028. }
  1029. static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
  1030. struct kobj_attribute *attr, const char *buf, size_t count)
  1031. {
  1032. int err;
  1033. unsigned long input;
  1034. struct hstate *h = kobj_to_hstate(kobj);
  1035. err = strict_strtoul(buf, 10, &input);
  1036. if (err)
  1037. return err;
  1038. spin_lock(&hugetlb_lock);
  1039. h->nr_overcommit_huge_pages = input;
  1040. spin_unlock(&hugetlb_lock);
  1041. return count;
  1042. }
  1043. HSTATE_ATTR(nr_overcommit_hugepages);
  1044. static ssize_t free_hugepages_show(struct kobject *kobj,
  1045. struct kobj_attribute *attr, char *buf)
  1046. {
  1047. struct hstate *h = kobj_to_hstate(kobj);
  1048. return sprintf(buf, "%lu\n", h->free_huge_pages);
  1049. }
  1050. HSTATE_ATTR_RO(free_hugepages);
  1051. static ssize_t resv_hugepages_show(struct kobject *kobj,
  1052. struct kobj_attribute *attr, char *buf)
  1053. {
  1054. struct hstate *h = kobj_to_hstate(kobj);
  1055. return sprintf(buf, "%lu\n", h->resv_huge_pages);
  1056. }
  1057. HSTATE_ATTR_RO(resv_hugepages);
  1058. static ssize_t surplus_hugepages_show(struct kobject *kobj,
  1059. struct kobj_attribute *attr, char *buf)
  1060. {
  1061. struct hstate *h = kobj_to_hstate(kobj);
  1062. return sprintf(buf, "%lu\n", h->surplus_huge_pages);
  1063. }
  1064. HSTATE_ATTR_RO(surplus_hugepages);
  1065. static struct attribute *hstate_attrs[] = {
  1066. &nr_hugepages_attr.attr,
  1067. &nr_overcommit_hugepages_attr.attr,
  1068. &free_hugepages_attr.attr,
  1069. &resv_hugepages_attr.attr,
  1070. &surplus_hugepages_attr.attr,
  1071. NULL,
  1072. };
  1073. static struct attribute_group hstate_attr_group = {
  1074. .attrs = hstate_attrs,
  1075. };
  1076. static int __init hugetlb_sysfs_add_hstate(struct hstate *h)
  1077. {
  1078. int retval;
  1079. hstate_kobjs[h - hstates] = kobject_create_and_add(h->name,
  1080. hugepages_kobj);
  1081. if (!hstate_kobjs[h - hstates])
  1082. return -ENOMEM;
  1083. retval = sysfs_create_group(hstate_kobjs[h - hstates],
  1084. &hstate_attr_group);
  1085. if (retval)
  1086. kobject_put(hstate_kobjs[h - hstates]);
  1087. return retval;
  1088. }
  1089. static void __init hugetlb_sysfs_init(void)
  1090. {
  1091. struct hstate *h;
  1092. int err;
  1093. hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
  1094. if (!hugepages_kobj)
  1095. return;
  1096. for_each_hstate(h) {
  1097. err = hugetlb_sysfs_add_hstate(h);
  1098. if (err)
  1099. printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
  1100. h->name);
  1101. }
  1102. }
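/*
 * Resulting sysfs layout, for illustration (one directory per hstate, named
 * from h->name; a 2 MB hstate is assumed here):
 *
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/resv_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/surplus_hugepages
 */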
  1103. static void __exit hugetlb_exit(void)
  1104. {
  1105. struct hstate *h;
  1106. for_each_hstate(h) {
  1107. kobject_put(hstate_kobjs[h - hstates]);
  1108. }
  1109. kobject_put(hugepages_kobj);
  1110. }
  1111. module_exit(hugetlb_exit);
  1112. static int __init hugetlb_init(void)
  1113. {
  1114. /* Some platforms decide whether they support huge pages at boot
  1115. * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
  1116. * there is no such support
  1117. */
  1118. if (HPAGE_SHIFT == 0)
  1119. return 0;
  1120. if (!size_to_hstate(default_hstate_size)) {
  1121. default_hstate_size = HPAGE_SIZE;
  1122. if (!size_to_hstate(default_hstate_size))
  1123. hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
  1124. }
  1125. default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
  1126. if (default_hstate_max_huge_pages)
  1127. default_hstate.max_huge_pages = default_hstate_max_huge_pages;
  1128. hugetlb_init_hstates();
  1129. gather_bootmem_prealloc();
  1130. report_hugepages();
  1131. hugetlb_sysfs_init();
  1132. return 0;
  1133. }
  1134. module_init(hugetlb_init);
  1135. /* Should be called on processing a hugepagesz=... option */
  1136. void __init hugetlb_add_hstate(unsigned order)
  1137. {
  1138. struct hstate *h;
  1139. unsigned long i;
  1140. if (size_to_hstate(PAGE_SIZE << order)) {
  1141. printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
  1142. return;
  1143. }
  1144. BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
  1145. BUG_ON(order == 0);
  1146. h = &hstates[max_hstate++];
  1147. h->order = order;
  1148. h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
  1149. h->nr_huge_pages = 0;
  1150. h->free_huge_pages = 0;
  1151. for (i = 0; i < MAX_NUMNODES; ++i)
  1152. INIT_LIST_HEAD(&h->hugepage_freelists[i]);
  1153. h->hugetlb_next_nid = first_node(node_online_map);
  1154. snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
  1155. huge_page_size(h)/1024);
  1156. parsed_hstate = h;
  1157. }
  1158. static int __init hugetlb_nrpages_setup(char *s)
  1159. {
  1160. unsigned long *mhp;
  1161. static unsigned long *last_mhp;
  1162. /*
  1163. * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
  1164. * so this hugepages= parameter goes to the "default hstate".
  1165. */
  1166. if (!max_hstate)
  1167. mhp = &default_hstate_max_huge_pages;
  1168. else
  1169. mhp = &parsed_hstate->max_huge_pages;
  1170. if (mhp == last_mhp) {
  1171. printk(KERN_WARNING "hugepages= specified twice without "
  1172. "interleaving hugepagesz=, ignoring\n");
  1173. return 1;
  1174. }
  1175. if (sscanf(s, "%lu", mhp) <= 0)
  1176. *mhp = 0;
  1177. /*
  1178. * Global state is always initialized later in hugetlb_init.
  1179. * But we need to allocate >= MAX_ORDER hstates here early to still
  1180. * use the bootmem allocator.
  1181. */
  1182. if (max_hstate && parsed_hstate->order >= MAX_ORDER)
  1183. hugetlb_hstate_alloc_pages(parsed_hstate);
  1184. last_mhp = mhp;
  1185. return 1;
  1186. }
  1187. __setup("hugepages=", hugetlb_nrpages_setup);
  1188. static int __init hugetlb_default_setup(char *s)
  1189. {
  1190. default_hstate_size = memparse(s, &s);
  1191. return 1;
  1192. }
  1193. __setup("default_hugepagesz=", hugetlb_default_setup);
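/*
 * Kernel command line example (editorial; the sizes shown are illustrative
 * and architecture dependent). hugepages= applies to the hstate selected by
 * the preceding hugepagesz=, or to the default hstate if none was given:
 *
 *	default_hugepagesz=2M hugepagesz=2M hugepages=512 hugepagesz=1G hugepages=4
 */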
  1194. static unsigned int cpuset_mems_nr(unsigned int *array)
  1195. {
  1196. int node;
  1197. unsigned int nr = 0;
  1198. for_each_node_mask(node, cpuset_current_mems_allowed)
  1199. nr += array[node];
  1200. return nr;
  1201. }
  1202. #ifdef CONFIG_SYSCTL
  1203. int hugetlb_sysctl_handler(struct ctl_table *table, int write,
  1204. struct file *file, void __user *buffer,
  1205. size_t *length, loff_t *ppos)
  1206. {
  1207. struct hstate *h = &default_hstate;
  1208. unsigned long tmp;
  1209. if (!write)
  1210. tmp = h->max_huge_pages;
  1211. table->data = &tmp;
  1212. table->maxlen = sizeof(unsigned long);
  1213. proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
  1214. if (write)
  1215. h->max_huge_pages = set_max_huge_pages(h, tmp);
  1216. return 0;
  1217. }
  1218. int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
  1219. struct file *file, void __user *buffer,
  1220. size_t *length, loff_t *ppos)
  1221. {
  1222. proc_dointvec(table, write, file, buffer, length, ppos);
  1223. if (hugepages_treat_as_movable)
  1224. htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
  1225. else
  1226. htlb_alloc_mask = GFP_HIGHUSER;
  1227. return 0;
  1228. }
  1229. int hugetlb_overcommit_handler(struct ctl_table *table, int write,
  1230. struct file *file, void __user *buffer,
  1231. size_t *length, loff_t *ppos)
  1232. {
  1233. struct hstate *h = &default_hstate;
  1234. unsigned long tmp;
  1235. if (!write)
  1236. tmp = h->nr_overcommit_huge_pages;
  1237. table->data = &tmp;
  1238. table->maxlen = sizeof(unsigned long);
  1239. proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
  1240. if (write) {
  1241. spin_lock(&hugetlb_lock);
  1242. h->nr_overcommit_huge_pages = tmp;
  1243. spin_unlock(&hugetlb_lock);
  1244. }
  1245. return 0;
  1246. }
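/*
 * The handlers above back the vm.nr_hugepages, vm.hugepages_treat_as_movable
 * and vm.nr_overcommit_hugepages sysctls and operate on the default hstate
 * only. Usage sketch (illustrative):
 *
 *	echo 128 > /proc/sys/vm/nr_hugepages
 *	echo 16 > /proc/sys/vm/nr_overcommit_hugepages
 */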
  1247. #endif /* CONFIG_SYSCTL */
  1248. void hugetlb_report_meminfo(struct seq_file *m)
  1249. {
  1250. struct hstate *h = &default_hstate;
  1251. seq_printf(m,
  1252. "HugePages_Total: %5lu\n"
  1253. "HugePages_Free: %5lu\n"
  1254. "HugePages_Rsvd: %5lu\n"
  1255. "HugePages_Surp: %5lu\n"
  1256. "Hugepagesize: %8lu kB\n",
  1257. h->nr_huge_pages,
  1258. h->free_huge_pages,
  1259. h->resv_huge_pages,
  1260. h->surplus_huge_pages,
  1261. 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
  1262. }
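/*
 * Example of the /proc/meminfo block produced above (numbers are
 * illustrative; the exact column widths come from the format string):
 *
 *	HugePages_Total:      64
 *	HugePages_Free:       32
 *	HugePages_Rsvd:        8
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 */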
  1263. int hugetlb_report_node_meminfo(int nid, char *buf)
  1264. {
  1265. struct hstate *h = &default_hstate;
  1266. return sprintf(buf,
  1267. "Node %d HugePages_Total: %5u\n"
  1268. "Node %d HugePages_Free: %5u\n"
  1269. "Node %d HugePages_Surp: %5u\n",
  1270. nid, h->nr_huge_pages_node[nid],
  1271. nid, h->free_huge_pages_node[nid],
  1272. nid, h->surplus_huge_pages_node[nid]);
  1273. }
  1274. /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
  1275. unsigned long hugetlb_total_pages(void)
  1276. {
  1277. struct hstate *h = &default_hstate;
  1278. return h->nr_huge_pages * pages_per_huge_page(h);
  1279. }
  1280. static int hugetlb_acct_memory(struct hstate *h, long delta)
  1281. {
  1282. int ret = -ENOMEM;
  1283. spin_lock(&hugetlb_lock);
  1284. /*
  1285. * When cpuset is configured, it breaks the strict hugetlb page
  1286. * reservation as the accounting is done on a global variable. Such
  1287. * reservation is completely rubbish in the presence of cpuset because
  1288. * the reservation is not checked against page availability for the
  1289. * current cpuset. An application can still potentially be OOM'ed by the
  1290. * kernel for lack of free htlb pages in the cpuset that the task is in.
  1291. * Attempting to enforce strict accounting with cpusets is almost
  1292. * impossible (or too ugly) because cpusets are so fluid that
  1293. * tasks or memory nodes can be dynamically moved between cpusets.
  1294. *
  1295. * The change of semantics for shared hugetlb mapping with cpuset is
  1296. * undesirable. However, in order to preserve some of the semantics,
  1297. * we fall back to checking against the current free page availability as
  1298. * a best attempt, hopefully minimizing the impact of the semantic
  1299. * changes that cpusets introduce.
  1300. */
  1301. if (delta > 0) {
  1302. if (gather_surplus_pages(h, delta) < 0)
  1303. goto out;
  1304. if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
  1305. return_unused_surplus_pages(h, delta);
  1306. goto out;
  1307. }
  1308. }
  1309. ret = 0;
  1310. if (delta < 0)
  1311. return_unused_surplus_pages(h, (unsigned long) -delta);
  1312. out:
  1313. spin_unlock(&hugetlb_lock);
  1314. return ret;
  1315. }
  1316. static void hugetlb_vm_op_open(struct vm_area_struct *vma)
  1317. {
  1318. struct resv_map *reservations = vma_resv_map(vma);
  1319. /*
  1320. * This new VMA should share its sibling's reservation map if present.
  1321. * The VMA will only ever have a valid reservation map pointer where
  1322. * it is being copied for another still existing VMA. As that VMA
  1323. * has a reference to the reservation map it cannot disappear until
  1324. * after this open call completes. It is therefore safe to take a
  1325. * new reference here without additional locking.
  1326. */
  1327. if (reservations)
  1328. kref_get(&reservations->refs);
  1329. }
static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
	struct hstate *h = hstate_vma(vma);
	struct resv_map *reservations = vma_resv_map(vma);
	unsigned long reserve;
	unsigned long start;
	unsigned long end;

	if (reservations) {
		start = vma_hugecache_offset(h, vma, vma->vm_start);
		end = vma_hugecache_offset(h, vma, vma->vm_end);

		reserve = (end - start) -
			region_count(&reservations->regions, start, end);

		kref_put(&reservations->refs, resv_map_release);

		if (reserve) {
			hugetlb_acct_memory(h, -reserve);
			hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
		}
	}
}
/*
 * We cannot handle pagefaults against hugetlb pages at all. They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}
struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
	.open = hugetlb_vm_op_open,
	.close = hugetlb_vm_op_close,
};
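/*
 * Build a huge page PTE for @page using @vma's page protection.  Writable
 * mappings get a dirty, writable entry; read-only mappings get a
 * write-protected one.  The entry is always marked young and huge.
 */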
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}
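/*
 * Make an already-present huge PTE writable and dirty, updating the MMU
 * cache if the access flags actually changed.
 */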
static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
	}
}
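/*
 * Copy the huge page mappings of @vma from the parent mm to the child at
 * fork() time.  For private mappings that may be written (VM_MAYWRITE
 * without VM_SHARED), both parent and child PTEs are write-protected so
 * that the first write triggers copy-on-write.
 */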
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr, sz);
		if (!dst_pte)
			goto nomem;

		/* If the pagetables are shared don't copy or take references */
		if (dst_pte == src_pte)
			continue;

		spin_lock(&dst->page_table_lock);
		spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
		if (!huge_pte_none(huge_ptep_get(src_pte))) {
			if (cow)
				huge_ptep_set_wrprotect(src, addr, src_pte);
			entry = huge_ptep_get(src_pte);
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}
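/*
 * Tear down the huge page mappings in [@start, @end).  Callers must hold
 * the file's i_mmap_lock (see unmap_hugepage_range() below), which also
 * protects the page gathering list used here.  If @ref_page is non-NULL,
 * only mappings of that specific page are removed.
 */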
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end, struct page *ref_page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);

	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~huge_page_mask(h));
	BUG_ON(end & ~huge_page_mask(h));

	mmu_notifier_invalidate_range_start(mm, start, end);
	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += sz) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		/*
		 * If a reference page is supplied, it is because a specific
		 * page is being unmapped, not a range. Ensure the page we
		 * are about to unmap is the actual page of interest.
		 */
		if (ref_page) {
			pte = huge_ptep_get(ptep);
			if (huge_pte_none(pte))
				continue;
			page = pte_page(pte);
			if (page != ref_page)
				continue;

			/*
			 * Mark the VMA as having unmapped its page so that
			 * future faults in this VMA will fail rather than
			 * looking like data was lost
			 */
			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
		}

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (huge_pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	mmu_notifier_invalidate_range_end(mm, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page)
{
	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	__unmap_hugepage_range(vma, start, end, ref_page);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
}
/*
 * This is called when the original mapper is failing to COW a MAP_PRIVATE
 * mapping it owns the reserve page for. The intention is to unmap the page
 * from other VMAs and let the children be SIGKILLed if they are faulting the
 * same region.
 */
static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
				struct page *page, unsigned long address)
{
	struct vm_area_struct *iter_vma;
	struct address_space *mapping;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
	 * from page cache lookup which is in HPAGE_SIZE units.
	 */
	address = address & huge_page_mask(hstate_vma(vma));
	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
		+ (vma->vm_pgoff >> PAGE_SHIFT);
	mapping = (struct address_space *)page_private(page);

	vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/* Do not unmap the current VMA */
		if (iter_vma == vma)
			continue;

		/*
		 * Unmap the page from other VMAs without their own reserves.
		 * They get marked to be SIGKILLed if they fault in these
		 * areas. This is because a future no-page fault on this VMA
		 * could insert a zeroed page instead of the data existing
		 * from the time of fork. This would look like data corruption.
		 */
		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
			unmap_hugepage_range(iter_vma,
				address, address + HPAGE_SIZE,
				page);
	}

	return 1;
}
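/*
 * Handle a copy-on-write fault on a huge page.  If the faulting task is the
 * only user of the old page the PTE is simply made writable; otherwise a new
 * huge page is allocated, the data is copied, and the PTE is switched to the
 * new page.  Called with mm->page_table_lock held; the lock is dropped
 * around the copy and re-taken before the PTE is updated.
 */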
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte,
			struct page *pagecache_page)
{
	struct hstate *h = hstate_vma(vma);
	struct page *old_page, *new_page;
	int avoidcopy;
	int outside_reserve = 0;

	old_page = pte_page(pte);

retry_avoidcopy:
	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	/*
	 * If the process that created a MAP_PRIVATE mapping is about to
	 * perform a COW due to a shared page count, attempt to satisfy
	 * the allocation without using the existing reserves. The pagecache
	 * page is used to determine if the reserve at this address was
	 * consumed or not. If reserves were used, a partial faulted mapping
	 * at the time of fork() could consume its reserves on COW instead
	 * of the full address range.
	 */
	if (!(vma->vm_flags & VM_SHARED) &&
			is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
			old_page != pagecache_page)
		outside_reserve = 1;

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address, outside_reserve);

	if (IS_ERR(new_page)) {
		page_cache_release(old_page);

		/*
		 * If a process owning a MAP_PRIVATE mapping fails to COW,
		 * it is due to references held by a child and an insufficient
		 * huge page pool. To guarantee the original mapper's
		 * reliability, unmap the page from child processes. The child
		 * may get SIGKILLed if it later faults.
		 */
		if (outside_reserve) {
			BUG_ON(huge_pte_none(pte));
			if (unmap_ref_private(mm, vma, old_page, address)) {
				BUG_ON(page_count(old_page) != 1);
				BUG_ON(huge_pte_none(pte));
				goto retry_avoidcopy;
			}
			WARN_ON_ONCE(1);
		}

		return -PTR_ERR(new_page);
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	__SetPageUptodate(new_page);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
		/* Break COW */
		huge_ptep_clear_flush(vma, address, ptep);
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}
/* Return the pagecache page at a given address within a VMA */
static struct page *hugetlbfs_pagecache_page(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping;
	pgoff_t idx;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	return find_lock_page(mapping, idx);
}
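/*
 * Handle a fault on a huge PTE that is currently empty: look the page up in
 * the page cache or allocate a fresh one (adding it to the page cache for
 * shared mappings), then install the new PTE under page_table_lock.  Faults
 * beyond i_size, or in a VMA whose pages were unmapped after a failed COW,
 * return VM_FAULT_SIGBUS.
 */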
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	struct hstate *h = hstate_vma(vma);
	int ret = VM_FAULT_SIGBUS;
	pgoff_t idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	/*
	 * Currently, we are forced to kill the process in the event the
	 * original mapper has unmapped pages from the child due to a failed
	 * COW. Warn that such a situation has occurred as it may not be obvious
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
		printk(KERN_WARNING
			"PID %d killed due to inadequate hugepage pool\n",
			current->pid);
		return ret;
	}

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> huge_page_shift(h);
		if (idx >= size)
			goto out;
		page = alloc_huge_page(vma, address, 0);
		if (IS_ERR(page)) {
			ret = -PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, address, huge_page_size(h));
		__SetPageUptodate(page);

		if (vma->vm_flags & VM_SHARED) {
			int err;
			struct inode *inode = mapping->host;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}

			spin_lock(&inode->i_lock);
			inode->i_blocks += blocks_per_huge_page(h);
			spin_unlock(&inode->i_lock);
		} else
			lock_page(page);
	}

	/*
	 * If we are going to COW a private mapping later, we examine the
	 * pending reservations for this page now. This will ensure that
	 * any allocations necessary to record that reservation occur outside
	 * the spinlock.
	 */
	if (write_access && !(vma->vm_flags & VM_SHARED))
		if (vma_needs_reservation(h, vma, address) < 0) {
			ret = VM_FAULT_OOM;
			goto backout_unlocked;
		}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> huge_page_shift(h);
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!huge_pte_none(huge_ptep_get(ptep)))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
backout_unlocked:
	unlock_page(page);
	put_page(page);
	goto out;
}
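/*
 * Top level huge page fault handler.  An empty PTE is handed to
 * hugetlb_no_page(); a write fault on a read-only PTE goes through
 * hugetlb_cow().  Page instantiation is serialized system-wide by
 * hugetlb_instantiation_mutex.
 */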
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	struct page *pagecache_page = NULL;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
	struct hstate *h = hstate_vma(vma);

	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = huge_ptep_get(ptep);
	if (huge_pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		goto out_mutex;
	}

	ret = 0;

	/*
	 * If we are going to COW the mapping later, we examine the pending
	 * reservations for this page now. This will ensure that any
	 * allocations necessary to record that reservation occur outside the
	 * spinlock. For private mappings, we also lookup the pagecache
	 * page now as it is used to determine if a reservation has been
	 * consumed.
	 */
	if (write_access && !pte_write(entry)) {
		if (vma_needs_reservation(h, vma, address) < 0) {
			ret = VM_FAULT_OOM;
			goto out_mutex;
		}

		if (!(vma->vm_flags & VM_SHARED))
			pagecache_page = hugetlbfs_pagecache_page(h,
								vma, address);
	}

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
		goto out_page_table_lock;

	if (write_access) {
		if (!pte_write(entry)) {
			ret = hugetlb_cow(mm, vma, address, ptep, entry,
							pagecache_page);
			goto out_page_table_lock;
		}
		entry = pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	if (huge_ptep_set_access_flags(vma, address, ptep, entry, write_access))
		update_mmu_cache(vma, address, entry);

out_page_table_lock:
	spin_unlock(&mm->page_table_lock);

	if (pagecache_page) {
		unlock_page(pagecache_page);
		put_page(pagecache_page);
	}

out_mutex:
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}
/* Can be overridden by architectures */
__attribute__((weak)) struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
	       pud_t *pud, int write)
{
	BUG();
	return NULL;
}
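/*
 * The huge zero page may only be used when get_user_pages() is reading a
 * private mapping and the PTE has not yet been populated.
 */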
static int huge_zeropage_ok(pte_t *ptep, int write, int shared)
{
	if (!ptep || write || shared)
		return 0;
	else
		return huge_pte_none(huge_ptep_get(ptep));
}
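/*
 * get_user_pages() support for hugetlb VMAs: walk up to @*length small
 * pages starting at @*position, faulting huge pages in as needed and
 * filling @pages and @vmas one PAGE_SIZE subpage at a time.  Returns the
 * number of pages handled so far, or -EFAULT if nothing could be faulted in.
 */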
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i,
			int write)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;
	struct hstate *h = hstate_vma(vma);
	int zeropage_ok = 0;
	int shared = vma->vm_flags & VM_SHARED;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage. We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
		if (huge_zeropage_ok(pte, write, shared))
			zeropage_ok = 1;

		if (!pte ||
		    (huge_pte_none(huge_ptep_get(pte)) && !zeropage_ok) ||
		    (write && !pte_write(huge_ptep_get(pte)))) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, write);
			spin_lock(&mm->page_table_lock);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
		page = pte_page(huge_ptep_get(pte));
same_page:
		if (pages) {
			if (zeropage_ok)
				pages[i] = ZERO_PAGE(0);
			else
				pages[i] = page + pfn_offset;
			get_page(pages[i]);
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < pages_per_huge_page(h)) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}
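/*
 * mprotect() support for hugetlb VMAs: rewrite every present huge PTE in
 * [@address, @end) with @newprot and flush the TLB for the range.
 */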
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;
	struct hstate *h = hstate_vma(vma);

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += huge_page_size(h)) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!huge_pte_none(huge_ptep_get(ptep))) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}
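/*
 * Reserve huge pages for the range [@from, @to) of a hugetlbfs file at
 * mmap() time.  Shared mappings track reservations per inode in the
 * mapping's region list; private mappings get their own resv_map covering
 * the whole range and are marked HPAGE_RESV_OWNER.  VM_NORESERVE mappings
 * reserve nothing.
 */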
int hugetlb_reserve_pages(struct inode *inode,
					long from, long to,
					struct vm_area_struct *vma)
{
	long ret, chg;
	struct hstate *h = hstate_inode(inode);

	if (vma && vma->vm_flags & VM_NORESERVE)
		return 0;

	/*
	 * Shared mappings base their reservation on the number of pages that
	 * are already allocated on behalf of the file. Private mappings need
	 * to reserve the full area even if read-only as mprotect() may be
	 * called to make the mapping read-write. Assume !vma is a shm mapping
	 */
	if (!vma || vma->vm_flags & VM_SHARED)
		chg = region_chg(&inode->i_mapping->private_list, from, to);
	else {
		struct resv_map *resv_map = resv_map_alloc();
		if (!resv_map)
			return -ENOMEM;

		chg = to - from;

		set_vma_resv_map(vma, resv_map);
		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
	}

	if (chg < 0)
		return chg;

	if (hugetlb_get_quota(inode->i_mapping, chg))
		return -ENOSPC;
	ret = hugetlb_acct_memory(h, chg);
	if (ret < 0) {
		hugetlb_put_quota(inode->i_mapping, chg);
		return ret;
	}
	if (!vma || vma->vm_flags & VM_SHARED)
		region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	struct hstate *h = hstate_inode(inode);
	long chg = region_truncate(&inode->i_mapping->private_list, offset);

	spin_lock(&inode->i_lock);
	inode->i_blocks -= blocks_per_huge_page(h);
	spin_unlock(&inode->i_lock);

	hugetlb_put_quota(inode->i_mapping, (chg - freed));
	hugetlb_acct_memory(h, -(chg - freed));
}