hugetlb.c

  1. /*
  2. * Generic hugetlb support.
  3. * (C) William Irwin, April 2004
  4. */
  5. #include <linux/gfp.h>
  6. #include <linux/list.h>
  7. #include <linux/init.h>
  8. #include <linux/module.h>
  9. #include <linux/mm.h>
  10. #include <linux/sysctl.h>
  11. #include <linux/highmem.h>
  12. #include <linux/nodemask.h>
  13. #include <linux/pagemap.h>
  14. #include <linux/mempolicy.h>
  15. #include <linux/cpuset.h>
  16. #include <linux/mutex.h>
  17. #include <linux/bootmem.h>
  18. #include <linux/sysfs.h>
  19. #include <asm/io.h>
  20. #include <asm/page.h>
  21. #include <asm/pgtable.h>
  22. #include <linux/hugetlb.h>
  23. #include "internal.h"
  24. const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
  25. static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
  26. unsigned long hugepages_treat_as_movable;
  27. static int max_hstate;
  28. unsigned int default_hstate_idx;
  29. struct hstate hstates[HUGE_MAX_HSTATE];
  30. __initdata LIST_HEAD(huge_boot_pages);
  31. /* for command line parsing */
  32. static struct hstate * __initdata parsed_hstate;
  33. static unsigned long __initdata default_hstate_max_huge_pages;
  34. static unsigned long __initdata default_hstate_size;
  35. #define for_each_hstate(h) \
  36. for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)
  37. /*
  38. * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
  39. */
  40. static DEFINE_SPINLOCK(hugetlb_lock);
  41. /*
  42. * Region tracking -- allows tracking of reservations and instantiated pages
  43. * across the pages in a mapping.
  44. *
  45. * The region data structures are protected by a combination of the mmap_sem
  46. * and the hugetlb_instantiation_mutex. To access or modify a region the caller
  47. * must either hold the mmap_sem for write, or the mmap_sem for read and
  48. * the hugetlb_instantiation mutex:
  49. *
  50. * down_write(&mm->mmap_sem);
  51. * or
  52. * down_read(&mm->mmap_sem);
  53. * mutex_lock(&hugetlb_instantiation_mutex);
  54. */
  55. struct file_region {
  56. struct list_head link;
  57. long from;
  58. long to;
  59. };
  60. static long region_add(struct list_head *head, long f, long t)
  61. {
  62. struct file_region *rg, *nrg, *trg;
  63. /* Locate the region we are either in or before. */
  64. list_for_each_entry(rg, head, link)
  65. if (f <= rg->to)
  66. break;
  67. /* Round our left edge to the current segment if it encloses us. */
  68. if (f > rg->from)
  69. f = rg->from;
  70. /* Check for and consume any regions we now overlap with. */
  71. nrg = rg;
  72. list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
  73. if (&rg->link == head)
  74. break;
  75. if (rg->from > t)
  76. break;
  77. /* If this area reaches higher then extend our area to
  78. * include it completely. If this is not the first area
  79. * which we intend to reuse, free it. */
  80. if (rg->to > t)
  81. t = rg->to;
  82. if (rg != nrg) {
  83. list_del(&rg->link);
  84. kfree(rg);
  85. }
  86. }
  87. nrg->from = f;
  88. nrg->to = t;
  89. return 0;
  90. }
  91. static long region_chg(struct list_head *head, long f, long t)
  92. {
  93. struct file_region *rg, *nrg;
  94. long chg = 0;
  95. /* Locate the region we are before or in. */
  96. list_for_each_entry(rg, head, link)
  97. if (f <= rg->to)
  98. break;
  99. /* If we are below the current region then a new region is required.
  100. * Subtle, allocate a new region at the position but make it zero
  101. * size such that we can guarantee to record the reservation. */
  102. if (&rg->link == head || t < rg->from) {
  103. nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
  104. if (!nrg)
  105. return -ENOMEM;
  106. nrg->from = f;
  107. nrg->to = f;
  108. INIT_LIST_HEAD(&nrg->link);
  109. list_add(&nrg->link, rg->link.prev);
  110. return t - f;
  111. }
  112. /* Round our left edge to the current segment if it encloses us. */
  113. if (f > rg->from)
  114. f = rg->from;
  115. chg = t - f;
  116. /* Check for and consume any regions we now overlap with. */
  117. list_for_each_entry(rg, rg->link.prev, link) {
  118. if (&rg->link == head)
  119. break;
  120. if (rg->from > t)
  121. return chg;
  122. /* We overlap with this area, if it extends further than
  123. * us then we must extend ourselves. Account for its
  124. * existing reservation. */
  125. if (rg->to > t) {
  126. chg += rg->to - t;
  127. t = rg->to;
  128. }
  129. chg -= rg->to - rg->from;
  130. }
  131. return chg;
  132. }
  133. static long region_truncate(struct list_head *head, long end)
  134. {
  135. struct file_region *rg, *trg;
  136. long chg = 0;
  137. /* Locate the region we are either in or before. */
  138. list_for_each_entry(rg, head, link)
  139. if (end <= rg->to)
  140. break;
  141. if (&rg->link == head)
  142. return 0;
  143. /* If we are in the middle of a region then adjust it. */
  144. if (end > rg->from) {
  145. chg = rg->to - end;
  146. rg->to = end;
  147. rg = list_entry(rg->link.next, typeof(*rg), link);
  148. }
  149. /* Drop any remaining regions. */
  150. list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
  151. if (&rg->link == head)
  152. break;
  153. chg += rg->to - rg->from;
  154. list_del(&rg->link);
  155. kfree(rg);
  156. }
  157. return chg;
  158. }
  159. static long region_count(struct list_head *head, long f, long t)
  160. {
  161. struct file_region *rg;
  162. long chg = 0;
  163. /* Locate each segment we overlap with, and count that overlap. */
  164. list_for_each_entry(rg, head, link) {
  165. long seg_from;
  166. long seg_to;
  167. if (rg->to <= f)
  168. continue;
  169. if (rg->from >= t)
  170. break;
  171. seg_from = max(rg->from, f);
  172. seg_to = min(rg->to, t);
  173. chg += seg_to - seg_from;
  174. }
  175. return chg;
  176. }
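/*
 * Worked example of the region calls above (illustrative only, not part of
 * the original source): starting from an empty map, region_chg(head, 2, 5)
 * returns a charge of 3 pages and leaves a zero-sized placeholder at [2,2);
 * region_add(head, 2, 5) then extends that placeholder to [2,5). A later
 * region_chg(head, 4, 8) returns 3, because offset 4 is already covered by
 * [2,5) and only offsets 5, 6 and 7 need a new reservation.
 */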
  177. /*
  178. * Convert the address within this vma to the page offset within
  179. * the mapping, in pagecache page units; huge pages here.
  180. */
  181. static pgoff_t vma_hugecache_offset(struct hstate *h,
  182. struct vm_area_struct *vma, unsigned long address)
  183. {
  184. return ((address - vma->vm_start) >> huge_page_shift(h)) +
  185. (vma->vm_pgoff >> huge_page_order(h));
  186. }
  187. /*
  188. * Flags for MAP_PRIVATE reservations. These are stored in the bottom
  189. * bits of the reservation map pointer, which are always clear due to
  190. * alignment.
  191. */
  192. #define HPAGE_RESV_OWNER (1UL << 0)
  193. #define HPAGE_RESV_UNMAPPED (1UL << 1)
  194. #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
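/*
 * Illustrative sketch (not in the original source): a kmalloc'd resv_map is
 * at least word aligned, so its low two bits are free to carry the flags
 * above inside vm_private_data. For a hypothetical struct resv_map *map:
 *
 *     vma->vm_private_data = (void *)((unsigned long)map | HPAGE_RESV_OWNER);
 *     map = (struct resv_map *)(get_vma_private_data(vma) & ~HPAGE_RESV_MASK);
 *
 * which mirrors what set_vma_resv_map(), set_vma_resv_flags() and
 * vma_resv_map() below do.
 */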
  195. /*
  196. * These helpers are used to track how many pages are reserved for
  197. * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
  198. * is guaranteed to have its future faults succeed.
  199. *
  200. * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
  201. * the reserve counters are updated with the hugetlb_lock held. It is safe
  202. * to reset the VMA at fork() time as it is not in use yet and there is no
  203. * chance of the global counters getting corrupted as a result of the values.
  204. *
  205. * The private mapping reservation is represented in a subtly different
  206. * manner to a shared mapping. A shared mapping has a region map associated
  207. * with the underlying file; this region map represents the backing file
  208. * pages which have ever had a reservation assigned, and it persists even
  209. * after the page is instantiated. A private mapping has a region map
  210. * associated with the original mmap which is attached to all VMAs which
  211. * reference it; this region map represents those offsets which have consumed
  212. * a reservation, i.e. where pages have been instantiated.
  213. */
  214. static unsigned long get_vma_private_data(struct vm_area_struct *vma)
  215. {
  216. return (unsigned long)vma->vm_private_data;
  217. }
  218. static void set_vma_private_data(struct vm_area_struct *vma,
  219. unsigned long value)
  220. {
  221. vma->vm_private_data = (void *)value;
  222. }
  223. struct resv_map {
  224. struct kref refs;
  225. struct list_head regions;
  226. };
  227. struct resv_map *resv_map_alloc(void)
  228. {
  229. struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
  230. if (!resv_map)
  231. return NULL;
  232. kref_init(&resv_map->refs);
  233. INIT_LIST_HEAD(&resv_map->regions);
  234. return resv_map;
  235. }
  236. void resv_map_release(struct kref *ref)
  237. {
  238. struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
  239. /* Clear out any active regions before we release the map. */
  240. region_truncate(&resv_map->regions, 0);
  241. kfree(resv_map);
  242. }
  243. static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
  244. {
  245. VM_BUG_ON(!is_vm_hugetlb_page(vma));
  246. if (!(vma->vm_flags & VM_SHARED))
  247. return (struct resv_map *)(get_vma_private_data(vma) &
  248. ~HPAGE_RESV_MASK);
  249. return 0;
  250. }
  251. static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
  252. {
  253. VM_BUG_ON(!is_vm_hugetlb_page(vma));
  254. VM_BUG_ON(vma->vm_flags & VM_SHARED);
  255. set_vma_private_data(vma, (get_vma_private_data(vma) &
  256. HPAGE_RESV_MASK) | (unsigned long)map);
  257. }
  258. static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
  259. {
  260. VM_BUG_ON(!is_vm_hugetlb_page(vma));
  261. VM_BUG_ON(vma->vm_flags & VM_SHARED);
  262. set_vma_private_data(vma, get_vma_private_data(vma) | flags);
  263. }
  264. static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
  265. {
  266. VM_BUG_ON(!is_vm_hugetlb_page(vma));
  267. return (get_vma_private_data(vma) & flag) != 0;
  268. }
  269. /* Decrement the reserved pages in the hugepage pool by one */
  270. static void decrement_hugepage_resv_vma(struct hstate *h,
  271. struct vm_area_struct *vma)
  272. {
  273. if (vma->vm_flags & VM_NORESERVE)
  274. return;
  275. if (vma->vm_flags & VM_SHARED) {
  276. /* Shared mappings always use reserves */
  277. h->resv_huge_pages--;
  278. } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
  279. /*
  280. * Only the process that called mmap() has reserves for
  281. * private mappings.
  282. */
  283. h->resv_huge_pages--;
  284. }
  285. }
  286. /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
  287. void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
  288. {
  289. VM_BUG_ON(!is_vm_hugetlb_page(vma));
  290. if (!(vma->vm_flags & VM_SHARED))
  291. vma->vm_private_data = (void *)0;
  292. }
  293. /* Returns true if the VMA has associated reserve pages */
  294. static int vma_has_reserves(struct vm_area_struct *vma)
  295. {
  296. if (vma->vm_flags & VM_SHARED)
  297. return 1;
  298. if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
  299. return 1;
  300. return 0;
  301. }
  302. static void clear_huge_page(struct page *page,
  303. unsigned long addr, unsigned long sz)
  304. {
  305. int i;
  306. might_sleep();
  307. for (i = 0; i < sz/PAGE_SIZE; i++) {
  308. cond_resched();
  309. clear_user_highpage(page + i, addr + i * PAGE_SIZE);
  310. }
  311. }
  312. static void copy_huge_page(struct page *dst, struct page *src,
  313. unsigned long addr, struct vm_area_struct *vma)
  314. {
  315. int i;
  316. struct hstate *h = hstate_vma(vma);
  317. might_sleep();
  318. for (i = 0; i < pages_per_huge_page(h); i++) {
  319. cond_resched();
  320. copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
  321. }
  322. }
  323. static void enqueue_huge_page(struct hstate *h, struct page *page)
  324. {
  325. int nid = page_to_nid(page);
  326. list_add(&page->lru, &h->hugepage_freelists[nid]);
  327. h->free_huge_pages++;
  328. h->free_huge_pages_node[nid]++;
  329. }
  330. static struct page *dequeue_huge_page(struct hstate *h)
  331. {
  332. int nid;
  333. struct page *page = NULL;
  334. for (nid = 0; nid < MAX_NUMNODES; ++nid) {
  335. if (!list_empty(&h->hugepage_freelists[nid])) {
  336. page = list_entry(h->hugepage_freelists[nid].next,
  337. struct page, lru);
  338. list_del(&page->lru);
  339. h->free_huge_pages--;
  340. h->free_huge_pages_node[nid]--;
  341. break;
  342. }
  343. }
  344. return page;
  345. }
  346. static struct page *dequeue_huge_page_vma(struct hstate *h,
  347. struct vm_area_struct *vma,
  348. unsigned long address, int avoid_reserve)
  349. {
  350. int nid;
  351. struct page *page = NULL;
  352. struct mempolicy *mpol;
  353. nodemask_t *nodemask;
  354. struct zonelist *zonelist = huge_zonelist(vma, address,
  355. htlb_alloc_mask, &mpol, &nodemask);
  356. struct zone *zone;
  357. struct zoneref *z;
  358. /*
  359. * A child process with MAP_PRIVATE mappings created by its parent
  360. * has no page reserves. This check ensures that reservations are
  361. * not "stolen". The child may still get SIGKILLed.
  362. */
  363. if (!vma_has_reserves(vma) &&
  364. h->free_huge_pages - h->resv_huge_pages == 0)
  365. return NULL;
  366. /* If reserves cannot be used, ensure enough pages are in the pool */
  367. if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
  368. return NULL;
  369. for_each_zone_zonelist_nodemask(zone, z, zonelist,
  370. MAX_NR_ZONES - 1, nodemask) {
  371. nid = zone_to_nid(zone);
  372. if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
  373. !list_empty(&h->hugepage_freelists[nid])) {
  374. page = list_entry(h->hugepage_freelists[nid].next,
  375. struct page, lru);
  376. list_del(&page->lru);
  377. h->free_huge_pages--;
  378. h->free_huge_pages_node[nid]--;
  379. if (!avoid_reserve)
  380. decrement_hugepage_resv_vma(h, vma);
  381. break;
  382. }
  383. }
  384. mpol_cond_put(mpol);
  385. return page;
  386. }
  387. static void update_and_free_page(struct hstate *h, struct page *page)
  388. {
  389. int i;
  390. h->nr_huge_pages--;
  391. h->nr_huge_pages_node[page_to_nid(page)]--;
  392. for (i = 0; i < pages_per_huge_page(h); i++) {
  393. page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
  394. 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
  395. 1 << PG_private | 1<< PG_writeback);
  396. }
  397. set_compound_page_dtor(page, NULL);
  398. set_page_refcounted(page);
  399. arch_release_hugepage(page);
  400. __free_pages(page, huge_page_order(h));
  401. }
  402. struct hstate *size_to_hstate(unsigned long size)
  403. {
  404. struct hstate *h;
  405. for_each_hstate(h) {
  406. if (huge_page_size(h) == size)
  407. return h;
  408. }
  409. return NULL;
  410. }
  411. static void free_huge_page(struct page *page)
  412. {
  413. /*
  414. * Can't pass hstate in here because it is called from the
  415. * compound page destructor.
  416. */
  417. struct hstate *h = page_hstate(page);
  418. int nid = page_to_nid(page);
  419. struct address_space *mapping;
  420. mapping = (struct address_space *) page_private(page);
  421. set_page_private(page, 0);
  422. BUG_ON(page_count(page));
  423. INIT_LIST_HEAD(&page->lru);
  424. spin_lock(&hugetlb_lock);
  425. if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
  426. update_and_free_page(h, page);
  427. h->surplus_huge_pages--;
  428. h->surplus_huge_pages_node[nid]--;
  429. } else {
  430. enqueue_huge_page(h, page);
  431. }
  432. spin_unlock(&hugetlb_lock);
  433. if (mapping)
  434. hugetlb_put_quota(mapping, 1);
  435. }
  436. /*
  437. * Increment or decrement surplus_huge_pages. Keep node-specific counters
  438. * balanced by operating on them in a round-robin fashion.
  439. * Returns 1 if an adjustment was made.
  440. */
  441. static int adjust_pool_surplus(struct hstate *h, int delta)
  442. {
  443. static int prev_nid;
  444. int nid = prev_nid;
  445. int ret = 0;
  446. VM_BUG_ON(delta != -1 && delta != 1);
  447. do {
  448. nid = next_node(nid, node_online_map);
  449. if (nid == MAX_NUMNODES)
  450. nid = first_node(node_online_map);
  451. /* To shrink on this node, there must be a surplus page */
  452. if (delta < 0 && !h->surplus_huge_pages_node[nid])
  453. continue;
  454. /* Surplus cannot exceed the total number of pages */
  455. if (delta > 0 && h->surplus_huge_pages_node[nid] >=
  456. h->nr_huge_pages_node[nid])
  457. continue;
  458. h->surplus_huge_pages += delta;
  459. h->surplus_huge_pages_node[nid] += delta;
  460. ret = 1;
  461. break;
  462. } while (nid != prev_nid);
  463. prev_nid = nid;
  464. return ret;
  465. }
  466. static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
  467. {
  468. set_compound_page_dtor(page, free_huge_page);
  469. spin_lock(&hugetlb_lock);
  470. h->nr_huge_pages++;
  471. h->nr_huge_pages_node[nid]++;
  472. spin_unlock(&hugetlb_lock);
  473. put_page(page); /* free it into the hugepage allocator */
  474. }
  475. static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
  476. {
  477. struct page *page;
  478. if (h->order >= MAX_ORDER)
  479. return NULL;
  480. page = alloc_pages_node(nid,
  481. htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
  482. __GFP_REPEAT|__GFP_NOWARN,
  483. huge_page_order(h));
  484. if (page) {
  485. if (arch_prepare_hugepage(page)) {
  486. __free_pages(page, huge_page_order(h));
  487. return NULL;
  488. }
  489. prep_new_huge_page(h, page, nid);
  490. }
  491. return page;
  492. }
  493. /*
  494. * Use a helper variable to find the next node and then
  495. * copy it back to hugetlb_next_nid afterwards:
  496. * otherwise there's a window in which a racer might
  497. * pass invalid nid MAX_NUMNODES to alloc_pages_node.
  498. * But we don't need to use a spin_lock here: it really
  499. * doesn't matter if occasionally a racer chooses the
  500. * same nid as we do. Move nid forward in the mask even
  501. * if we just successfully allocated a hugepage so that
  502. * the next caller gets hugepages on the next node.
  503. */
  504. static int hstate_next_node(struct hstate *h)
  505. {
  506. int next_nid;
  507. next_nid = next_node(h->hugetlb_next_nid, node_online_map);
  508. if (next_nid == MAX_NUMNODES)
  509. next_nid = first_node(node_online_map);
  510. h->hugetlb_next_nid = next_nid;
  511. return next_nid;
  512. }
  513. static int alloc_fresh_huge_page(struct hstate *h)
  514. {
  515. struct page *page;
  516. int start_nid;
  517. int next_nid;
  518. int ret = 0;
  519. start_nid = h->hugetlb_next_nid;
  520. do {
  521. page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid);
  522. if (page)
  523. ret = 1;
  524. next_nid = hstate_next_node(h);
  525. } while (!page && h->hugetlb_next_nid != start_nid);
  526. if (ret)
  527. count_vm_event(HTLB_BUDDY_PGALLOC);
  528. else
  529. count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
  530. return ret;
  531. }
  532. static struct page *alloc_buddy_huge_page(struct hstate *h,
  533. struct vm_area_struct *vma, unsigned long address)
  534. {
  535. struct page *page;
  536. unsigned int nid;
  537. if (h->order >= MAX_ORDER)
  538. return NULL;
  539. /*
  540. * Assume we will successfully allocate the surplus page to
  541. * prevent racing processes from causing the surplus to exceed
  542. * overcommit
  543. *
  544. * This however introduces a different race, where a process B
  545. * tries to grow the static hugepage pool while alloc_pages() is
  546. * called by process A. B will only examine the per-node
  547. * counters in determining if surplus huge pages can be
  548. * converted to normal huge pages in adjust_pool_surplus(). A
  549. * won't be able to increment the per-node counter, until the
  550. * lock is dropped by B, but B doesn't drop hugetlb_lock until
  551. * no more huge pages can be converted from surplus to normal
  552. * state (and doesn't try to convert again). Thus, we have a
  553. * case where a surplus huge page exists, the pool is grown, and
  554. * the surplus huge page still exists after, even though it
  555. * should just have been converted to a normal huge page. This
  556. * does not leak memory, though, as the hugepage will be freed
  557. * once it is out of use. It also does not allow the counters to
  558. * go out of whack in adjust_pool_surplus() as we don't modify
  559. * the node values until we've gotten the hugepage and only the
  560. * per-node value is checked there.
  561. */
  562. spin_lock(&hugetlb_lock);
  563. if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
  564. spin_unlock(&hugetlb_lock);
  565. return NULL;
  566. } else {
  567. h->nr_huge_pages++;
  568. h->surplus_huge_pages++;
  569. }
  570. spin_unlock(&hugetlb_lock);
  571. page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
  572. __GFP_REPEAT|__GFP_NOWARN,
  573. huge_page_order(h));
  574. spin_lock(&hugetlb_lock);
  575. if (page) {
  576. /*
  577. * This page is now managed by the hugetlb allocator and has
  578. * no users -- drop the buddy allocator's reference.
  579. */
  580. put_page_testzero(page);
  581. VM_BUG_ON(page_count(page));
  582. nid = page_to_nid(page);
  583. set_compound_page_dtor(page, free_huge_page);
  584. /*
  585. * We incremented the global counters already
  586. */
  587. h->nr_huge_pages_node[nid]++;
  588. h->surplus_huge_pages_node[nid]++;
  589. __count_vm_event(HTLB_BUDDY_PGALLOC);
  590. } else {
  591. h->nr_huge_pages--;
  592. h->surplus_huge_pages--;
  593. __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
  594. }
  595. spin_unlock(&hugetlb_lock);
  596. return page;
  597. }
  598. /*
  599. * Increase the hugetlb pool such that it can accommodate a reservation
  600. * of size 'delta'.
  601. */
  602. static int gather_surplus_pages(struct hstate *h, int delta)
  603. {
  604. struct list_head surplus_list;
  605. struct page *page, *tmp;
  606. int ret, i;
  607. int needed, allocated;
  608. needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
  609. if (needed <= 0) {
  610. h->resv_huge_pages += delta;
  611. return 0;
  612. }
  613. allocated = 0;
  614. INIT_LIST_HEAD(&surplus_list);
  615. ret = -ENOMEM;
  616. retry:
  617. spin_unlock(&hugetlb_lock);
  618. for (i = 0; i < needed; i++) {
  619. page = alloc_buddy_huge_page(h, NULL, 0);
  620. if (!page) {
  621. /*
  622. * We were not able to allocate enough pages to
  623. * satisfy the entire reservation so we free what
  624. * we've allocated so far.
  625. */
  626. spin_lock(&hugetlb_lock);
  627. needed = 0;
  628. goto free;
  629. }
  630. list_add(&page->lru, &surplus_list);
  631. }
  632. allocated += needed;
  633. /*
  634. * After retaking hugetlb_lock, we need to recalculate 'needed'
  635. * because either resv_huge_pages or free_huge_pages may have changed.
  636. */
  637. spin_lock(&hugetlb_lock);
  638. needed = (h->resv_huge_pages + delta) -
  639. (h->free_huge_pages + allocated);
  640. if (needed > 0)
  641. goto retry;
  642. /*
  643. * The surplus_list now contains _at_least_ the number of extra pages
  644. * needed to accommodate the reservation. Add the appropriate number
  645. * of pages to the hugetlb pool and free the extras back to the buddy
  646. * allocator. Commit the entire reservation here to prevent another
  647. * process from stealing the pages as they are added to the pool but
  648. * before they are reserved.
  649. */
  650. needed += allocated;
  651. h->resv_huge_pages += delta;
  652. ret = 0;
  653. free:
  654. /* Free the needed pages to the hugetlb pool */
  655. list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
  656. if ((--needed) < 0)
  657. break;
  658. list_del(&page->lru);
  659. enqueue_huge_page(h, page);
  660. }
  661. /* Free unnecessary surplus pages to the buddy allocator */
  662. if (!list_empty(&surplus_list)) {
  663. spin_unlock(&hugetlb_lock);
  664. list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
  665. list_del(&page->lru);
  666. /*
  667. * The page has a reference count of zero already, so
  668. * call free_huge_page directly instead of using
  669. * put_page. This must be done with hugetlb_lock
  670. * unlocked which is safe because free_huge_page takes
  671. * hugetlb_lock before deciding how to free the page.
  672. */
  673. free_huge_page(page);
  674. }
  675. spin_lock(&hugetlb_lock);
  676. }
  677. return ret;
  678. }
  679. /*
  680. * When releasing a hugetlb pool reservation, any surplus pages that were
  681. * allocated to satisfy the reservation must be explicitly freed if they were
  682. * never used.
  683. */
  684. static void return_unused_surplus_pages(struct hstate *h,
  685. unsigned long unused_resv_pages)
  686. {
  687. static int nid = -1;
  688. struct page *page;
  689. unsigned long nr_pages;
  690. /*
  691. * We want to release as many surplus pages as possible, spread
  692. * evenly across all nodes. Iterate across all nodes until we
  693. * can no longer free unreserved surplus pages. This occurs when
  694. * the nodes with surplus pages have no free pages.
  695. */
  696. unsigned long remaining_iterations = num_online_nodes();
  697. /* Uncommit the reservation */
  698. h->resv_huge_pages -= unused_resv_pages;
  699. /* Cannot return gigantic pages currently */
  700. if (h->order >= MAX_ORDER)
  701. return;
  702. nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
  703. while (remaining_iterations-- && nr_pages) {
  704. nid = next_node(nid, node_online_map);
  705. if (nid == MAX_NUMNODES)
  706. nid = first_node(node_online_map);
  707. if (!h->surplus_huge_pages_node[nid])
  708. continue;
  709. if (!list_empty(&h->hugepage_freelists[nid])) {
  710. page = list_entry(h->hugepage_freelists[nid].next,
  711. struct page, lru);
  712. list_del(&page->lru);
  713. update_and_free_page(h, page);
  714. h->free_huge_pages--;
  715. h->free_huge_pages_node[nid]--;
  716. h->surplus_huge_pages--;
  717. h->surplus_huge_pages_node[nid]--;
  718. nr_pages--;
  719. remaining_iterations = num_online_nodes();
  720. }
  721. }
  722. }
  723. /*
  724. * Determine if the huge page at addr within the vma has an associated
  725. * reservation. Where it does not, we will need to logically increase the
  726. * reservation and actually increase quota before an allocation can occur.
  727. * Where any new reservation would be required, the reservation change is
  728. * prepared, but not committed. Once the page has been quota'd, allocated
  729. * and instantiated, the change should be committed via vma_commit_reservation.
  730. * No action is required on failure.
  731. */
  732. static int vma_needs_reservation(struct hstate *h,
  733. struct vm_area_struct *vma, unsigned long addr)
  734. {
  735. struct address_space *mapping = vma->vm_file->f_mapping;
  736. struct inode *inode = mapping->host;
  737. if (vma->vm_flags & VM_SHARED) {
  738. pgoff_t idx = vma_hugecache_offset(h, vma, addr);
  739. return region_chg(&inode->i_mapping->private_list,
  740. idx, idx + 1);
  741. } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
  742. return 1;
  743. } else {
  744. int err;
  745. pgoff_t idx = vma_hugecache_offset(h, vma, addr);
  746. struct resv_map *reservations = vma_resv_map(vma);
  747. err = region_chg(&reservations->regions, idx, idx + 1);
  748. if (err < 0)
  749. return err;
  750. return 0;
  751. }
  752. }
  753. static void vma_commit_reservation(struct hstate *h,
  754. struct vm_area_struct *vma, unsigned long addr)
  755. {
  756. struct address_space *mapping = vma->vm_file->f_mapping;
  757. struct inode *inode = mapping->host;
  758. if (vma->vm_flags & VM_SHARED) {
  759. pgoff_t idx = vma_hugecache_offset(h, vma, addr);
  760. region_add(&inode->i_mapping->private_list, idx, idx + 1);
  761. } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
  762. pgoff_t idx = vma_hugecache_offset(h, vma, addr);
  763. struct resv_map *reservations = vma_resv_map(vma);
  764. /* Mark this page used in the map. */
  765. region_add(&reservations->regions, idx, idx + 1);
  766. }
  767. }
  768. static struct page *alloc_huge_page(struct vm_area_struct *vma,
  769. unsigned long addr, int avoid_reserve)
  770. {
  771. struct hstate *h = hstate_vma(vma);
  772. struct page *page;
  773. struct address_space *mapping = vma->vm_file->f_mapping;
  774. struct inode *inode = mapping->host;
  775. long chg;
  776. /*
  777. * Processes that did not create the mapping will have no reserves and
  778. * will not have accounted against quota. Check that the quota can be
  779. * made before satisfying the allocation.
  780. * MAP_NORESERVE mappings may also need pages and quota allocated
  781. * if no reserve mapping overlaps.
  782. */
  783. chg = vma_needs_reservation(h, vma, addr);
  784. if (chg < 0)
  785. return ERR_PTR(chg);
  786. if (chg)
  787. if (hugetlb_get_quota(inode->i_mapping, chg))
  788. return ERR_PTR(-ENOSPC);
  789. spin_lock(&hugetlb_lock);
  790. page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
  791. spin_unlock(&hugetlb_lock);
  792. if (!page) {
  793. page = alloc_buddy_huge_page(h, vma, addr);
  794. if (!page) {
  795. hugetlb_put_quota(inode->i_mapping, chg);
  796. return ERR_PTR(-VM_FAULT_OOM);
  797. }
  798. }
  799. set_page_refcounted(page);
  800. set_page_private(page, (unsigned long) mapping);
  801. vma_commit_reservation(h, vma, addr);
  802. return page;
  803. }
  804. __attribute__((weak)) int alloc_bootmem_huge_page(struct hstate *h)
  805. {
  806. struct huge_bootmem_page *m;
  807. int nr_nodes = nodes_weight(node_online_map);
  808. while (nr_nodes) {
  809. void *addr;
  810. addr = __alloc_bootmem_node_nopanic(
  811. NODE_DATA(h->hugetlb_next_nid),
  812. huge_page_size(h), huge_page_size(h), 0);
  813. if (addr) {
  814. /*
  815. * Use the beginning of the huge page to store the
  816. * huge_bootmem_page struct (until gather_bootmem
  817. * puts them into the mem_map).
  818. */
  819. m = addr;
  820. if (m)
  821. goto found;
  822. }
  823. hstate_next_node(h);
  824. nr_nodes--;
  825. }
  826. return 0;
  827. found:
  828. BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
  829. /* Put them into a private list first because mem_map is not up yet */
  830. list_add(&m->list, &huge_boot_pages);
  831. m->hstate = h;
  832. return 1;
  833. }
  834. /* Put bootmem huge pages into the standard lists after mem_map is up */
  835. static void __init gather_bootmem_prealloc(void)
  836. {
  837. struct huge_bootmem_page *m;
  838. list_for_each_entry(m, &huge_boot_pages, list) {
  839. struct page *page = virt_to_page(m);
  840. struct hstate *h = m->hstate;
  841. __ClearPageReserved(page);
  842. WARN_ON(page_count(page) != 1);
  843. prep_compound_page(page, h->order);
  844. prep_new_huge_page(h, page, page_to_nid(page));
  845. }
  846. }
  847. static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
  848. {
  849. unsigned long i;
  850. for (i = 0; i < h->max_huge_pages; ++i) {
  851. if (h->order >= MAX_ORDER) {
  852. if (!alloc_bootmem_huge_page(h))
  853. break;
  854. } else if (!alloc_fresh_huge_page(h))
  855. break;
  856. }
  857. h->max_huge_pages = i;
  858. }
  859. static void __init hugetlb_init_hstates(void)
  860. {
  861. struct hstate *h;
  862. for_each_hstate(h) {
  863. /* oversize hugepages were init'ed in early boot */
  864. if (h->order < MAX_ORDER)
  865. hugetlb_hstate_alloc_pages(h);
  866. }
  867. }
  868. static char * __init memfmt(char *buf, unsigned long n)
  869. {
  870. if (n >= (1UL << 30))
  871. sprintf(buf, "%lu GB", n >> 30);
  872. else if (n >= (1UL << 20))
  873. sprintf(buf, "%lu MB", n >> 20);
  874. else
  875. sprintf(buf, "%lu KB", n >> 10);
  876. return buf;
  877. }
  878. static void __init report_hugepages(void)
  879. {
  880. struct hstate *h;
  881. for_each_hstate(h) {
  882. char buf[32];
  883. printk(KERN_INFO "HugeTLB registered %s page size, "
  884. "pre-allocated %ld pages\n",
  885. memfmt(buf, huge_page_size(h)),
  886. h->free_huge_pages);
  887. }
  888. }
  889. #ifdef CONFIG_HIGHMEM
  890. static void try_to_free_low(struct hstate *h, unsigned long count)
  891. {
  892. int i;
  893. if (h->order >= MAX_ORDER)
  894. return;
  895. for (i = 0; i < MAX_NUMNODES; ++i) {
  896. struct page *page, *next;
  897. struct list_head *freel = &h->hugepage_freelists[i];
  898. list_for_each_entry_safe(page, next, freel, lru) {
  899. if (count >= h->nr_huge_pages)
  900. return;
  901. if (PageHighMem(page))
  902. continue;
  903. list_del(&page->lru);
  904. update_and_free_page(h, page);
  905. h->free_huge_pages--;
  906. h->free_huge_pages_node[page_to_nid(page)]--;
  907. }
  908. }
  909. }
  910. #else
  911. static inline void try_to_free_low(struct hstate *h, unsigned long count)
  912. {
  913. }
  914. #endif
  915. #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
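/*
 * Worked example (illustrative only): with nr_huge_pages = 10 and
 * surplus_huge_pages = 2, persistent_huge_pages(h) is 8. When shrinking the
 * pool, set_max_huge_pages() below never frees past
 * min_count = resv_huge_pages + nr_huge_pages - free_huge_pages,
 * i.e. pages that are currently in use or reserved stay allocated.
 */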
  916. static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
  917. {
  918. unsigned long min_count, ret;
  919. if (h->order >= MAX_ORDER)
  920. return h->max_huge_pages;
  921. /*
  922. * Increase the pool size
  923. * First take pages out of surplus state. Then make up the
  924. * remaining difference by allocating fresh huge pages.
  925. *
  926. * We might race with alloc_buddy_huge_page() here and be unable
  927. * to convert a surplus huge page to a normal huge page. That is
  928. * not critical, though, it just means the overall size of the
  929. * pool might be one hugepage larger than it needs to be, but
  930. * within all the constraints specified by the sysctls.
  931. */
  932. spin_lock(&hugetlb_lock);
  933. while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
  934. if (!adjust_pool_surplus(h, -1))
  935. break;
  936. }
  937. while (count > persistent_huge_pages(h)) {
  938. /*
  939. * If this allocation races such that we no longer need the
  940. * page, free_huge_page will handle it by freeing the page
  941. * and reducing the surplus.
  942. */
  943. spin_unlock(&hugetlb_lock);
  944. ret = alloc_fresh_huge_page(h);
  945. spin_lock(&hugetlb_lock);
  946. if (!ret)
  947. goto out;
  948. }
  949. /*
  950. * Decrease the pool size
  951. * First return free pages to the buddy allocator (being careful
  952. * to keep enough around to satisfy reservations). Then place
  953. * pages into surplus state as needed so the pool will shrink
  954. * to the desired size as pages become free.
  955. *
  956. * By placing pages into the surplus state independent of the
  957. * overcommit value, we are allowing the surplus pool size to
  958. * exceed overcommit. There are few sane options here. Since
  959. * alloc_buddy_huge_page() is checking the global counter,
  960. * though, we'll note that we're not allowed to exceed surplus
  961. * and won't grow the pool anywhere else. Not until one of the
  962. * sysctls are changed, or the surplus pages go out of use.
  963. */
  964. min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
  965. min_count = max(count, min_count);
  966. try_to_free_low(h, min_count);
  967. while (min_count < persistent_huge_pages(h)) {
  968. struct page *page = dequeue_huge_page(h);
  969. if (!page)
  970. break;
  971. update_and_free_page(h, page);
  972. }
  973. while (count < persistent_huge_pages(h)) {
  974. if (!adjust_pool_surplus(h, 1))
  975. break;
  976. }
  977. out:
  978. ret = persistent_huge_pages(h);
  979. spin_unlock(&hugetlb_lock);
  980. return ret;
  981. }
  982. #define HSTATE_ATTR_RO(_name) \
  983. static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
  984. #define HSTATE_ATTR(_name) \
  985. static struct kobj_attribute _name##_attr = \
  986. __ATTR(_name, 0644, _name##_show, _name##_store)
  987. static struct kobject *hugepages_kobj;
  988. static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
  989. static struct hstate *kobj_to_hstate(struct kobject *kobj)
  990. {
  991. int i;
  992. for (i = 0; i < HUGE_MAX_HSTATE; i++)
  993. if (hstate_kobjs[i] == kobj)
  994. return &hstates[i];
  995. BUG();
  996. return NULL;
  997. }
  998. static ssize_t nr_hugepages_show(struct kobject *kobj,
  999. struct kobj_attribute *attr, char *buf)
  1000. {
  1001. struct hstate *h = kobj_to_hstate(kobj);
  1002. return sprintf(buf, "%lu\n", h->nr_huge_pages);
  1003. }
  1004. static ssize_t nr_hugepages_store(struct kobject *kobj,
  1005. struct kobj_attribute *attr, const char *buf, size_t count)
  1006. {
  1007. int err;
  1008. unsigned long input;
  1009. struct hstate *h = kobj_to_hstate(kobj);
  1010. err = strict_strtoul(buf, 10, &input);
  1011. if (err)
  1012. return 0;
  1013. h->max_huge_pages = set_max_huge_pages(h, input);
  1014. return count;
  1015. }
  1016. HSTATE_ATTR(nr_hugepages);
  1017. static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
  1018. struct kobj_attribute *attr, char *buf)
  1019. {
  1020. struct hstate *h = kobj_to_hstate(kobj);
  1021. return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
  1022. }
  1023. static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
  1024. struct kobj_attribute *attr, const char *buf, size_t count)
  1025. {
  1026. int err;
  1027. unsigned long input;
  1028. struct hstate *h = kobj_to_hstate(kobj);
  1029. err = strict_strtoul(buf, 10, &input);
  1030. if (err)
  1031. return 0;
  1032. spin_lock(&hugetlb_lock);
  1033. h->nr_overcommit_huge_pages = input;
  1034. spin_unlock(&hugetlb_lock);
  1035. return count;
  1036. }
  1037. HSTATE_ATTR(nr_overcommit_hugepages);
  1038. static ssize_t free_hugepages_show(struct kobject *kobj,
  1039. struct kobj_attribute *attr, char *buf)
  1040. {
  1041. struct hstate *h = kobj_to_hstate(kobj);
  1042. return sprintf(buf, "%lu\n", h->free_huge_pages);
  1043. }
  1044. HSTATE_ATTR_RO(free_hugepages);
  1045. static ssize_t resv_hugepages_show(struct kobject *kobj,
  1046. struct kobj_attribute *attr, char *buf)
  1047. {
  1048. struct hstate *h = kobj_to_hstate(kobj);
  1049. return sprintf(buf, "%lu\n", h->resv_huge_pages);
  1050. }
  1051. HSTATE_ATTR_RO(resv_hugepages);
  1052. static ssize_t surplus_hugepages_show(struct kobject *kobj,
  1053. struct kobj_attribute *attr, char *buf)
  1054. {
  1055. struct hstate *h = kobj_to_hstate(kobj);
  1056. return sprintf(buf, "%lu\n", h->surplus_huge_pages);
  1057. }
  1058. HSTATE_ATTR_RO(surplus_hugepages);
  1059. static struct attribute *hstate_attrs[] = {
  1060. &nr_hugepages_attr.attr,
  1061. &nr_overcommit_hugepages_attr.attr,
  1062. &free_hugepages_attr.attr,
  1063. &resv_hugepages_attr.attr,
  1064. &surplus_hugepages_attr.attr,
  1065. NULL,
  1066. };
  1067. static struct attribute_group hstate_attr_group = {
  1068. .attrs = hstate_attrs,
  1069. };
  1070. static int __init hugetlb_sysfs_add_hstate(struct hstate *h)
  1071. {
  1072. int retval;
  1073. hstate_kobjs[h - hstates] = kobject_create_and_add(h->name,
  1074. hugepages_kobj);
  1075. if (!hstate_kobjs[h - hstates])
  1076. return -ENOMEM;
  1077. retval = sysfs_create_group(hstate_kobjs[h - hstates],
  1078. &hstate_attr_group);
  1079. if (retval)
  1080. kobject_put(hstate_kobjs[h - hstates]);
  1081. return retval;
  1082. }
  1083. static void __init hugetlb_sysfs_init(void)
  1084. {
  1085. struct hstate *h;
  1086. int err;
  1087. hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
  1088. if (!hugepages_kobj)
  1089. return;
  1090. for_each_hstate(h) {
  1091. err = hugetlb_sysfs_add_hstate(h);
  1092. if (err)
  1093. printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
  1094. h->name);
  1095. }
  1096. }
  1097. static void __exit hugetlb_exit(void)
  1098. {
  1099. struct hstate *h;
  1100. for_each_hstate(h) {
  1101. kobject_put(hstate_kobjs[h - hstates]);
  1102. }
  1103. kobject_put(hugepages_kobj);
  1104. }
  1105. module_exit(hugetlb_exit);
  1106. static int __init hugetlb_init(void)
  1107. {
  1108. BUILD_BUG_ON(HPAGE_SHIFT == 0);
  1109. if (!size_to_hstate(default_hstate_size)) {
  1110. default_hstate_size = HPAGE_SIZE;
  1111. if (!size_to_hstate(default_hstate_size))
  1112. hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
  1113. }
  1114. default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
  1115. if (default_hstate_max_huge_pages)
  1116. default_hstate.max_huge_pages = default_hstate_max_huge_pages;
  1117. hugetlb_init_hstates();
  1118. gather_bootmem_prealloc();
  1119. report_hugepages();
  1120. hugetlb_sysfs_init();
  1121. return 0;
  1122. }
  1123. module_init(hugetlb_init);
  1124. /* Should be called on processing a hugepagesz=... option */
  1125. void __init hugetlb_add_hstate(unsigned order)
  1126. {
  1127. struct hstate *h;
  1128. unsigned long i;
  1129. if (size_to_hstate(PAGE_SIZE << order)) {
  1130. printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
  1131. return;
  1132. }
  1133. BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
  1134. BUG_ON(order == 0);
  1135. h = &hstates[max_hstate++];
  1136. h->order = order;
  1137. h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
  1138. h->nr_huge_pages = 0;
  1139. h->free_huge_pages = 0;
  1140. for (i = 0; i < MAX_NUMNODES; ++i)
  1141. INIT_LIST_HEAD(&h->hugepage_freelists[i]);
  1142. h->hugetlb_next_nid = first_node(node_online_map);
  1143. snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
  1144. huge_page_size(h)/1024);
  1145. parsed_hstate = h;
  1146. }
  1147. static int __init hugetlb_nrpages_setup(char *s)
  1148. {
  1149. unsigned long *mhp;
  1150. static unsigned long *last_mhp;
  1151. /*
  1152. * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
  1153. * so this hugepages= parameter goes to the "default hstate".
  1154. */
  1155. if (!max_hstate)
  1156. mhp = &default_hstate_max_huge_pages;
  1157. else
  1158. mhp = &parsed_hstate->max_huge_pages;
  1159. if (mhp == last_mhp) {
  1160. printk(KERN_WARNING "hugepages= specified twice without "
  1161. "interleaving hugepagesz=, ignoring\n");
  1162. return 1;
  1163. }
  1164. if (sscanf(s, "%lu", mhp) <= 0)
  1165. *mhp = 0;
  1166. /*
  1167. * Global state is always initialized later in hugetlb_init.
  1168. * But we need to allocate >= MAX_ORDER hstates here early to still
  1169. * use the bootmem allocator.
  1170. */
  1171. if (max_hstate && parsed_hstate->order >= MAX_ORDER)
  1172. hugetlb_hstate_alloc_pages(parsed_hstate);
  1173. last_mhp = mhp;
  1174. return 1;
  1175. }
  1176. __setup("hugepages=", hugetlb_nrpages_setup);
  1177. static int __init hugetlb_default_setup(char *s)
  1178. {
  1179. default_hstate_size = memparse(s, &s);
  1180. return 1;
  1181. }
  1182. __setup("default_hugepagesz=", hugetlb_default_setup);
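/*
 * Usage example (illustrative only; the sizes assume x86, and hugepagesz= is
 * parsed by arch code which calls hugetlb_add_hstate() above): a command line
 * such as
 *
 *     hugepagesz=2M hugepages=512 hugepagesz=1G hugepages=4 default_hugepagesz=1G
 *
 * registers two hstates, pre-allocates 512 2MB pages and 4 1GB pages (the
 * latter from bootmem, since their order is >= MAX_ORDER), and makes the
 * 1GB hstate the default.
 */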
  1183. static unsigned int cpuset_mems_nr(unsigned int *array)
  1184. {
  1185. int node;
  1186. unsigned int nr = 0;
  1187. for_each_node_mask(node, cpuset_current_mems_allowed)
  1188. nr += array[node];
  1189. return nr;
  1190. }
  1191. #ifdef CONFIG_SYSCTL
  1192. int hugetlb_sysctl_handler(struct ctl_table *table, int write,
  1193. struct file *file, void __user *buffer,
  1194. size_t *length, loff_t *ppos)
  1195. {
  1196. struct hstate *h = &default_hstate;
  1197. unsigned long tmp;
  1198. if (!write)
  1199. tmp = h->max_huge_pages;
  1200. table->data = &tmp;
  1201. table->maxlen = sizeof(unsigned long);
  1202. proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
  1203. if (write)
  1204. h->max_huge_pages = set_max_huge_pages(h, tmp);
  1205. return 0;
  1206. }
  1207. int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
  1208. struct file *file, void __user *buffer,
  1209. size_t *length, loff_t *ppos)
  1210. {
  1211. proc_dointvec(table, write, file, buffer, length, ppos);
  1212. if (hugepages_treat_as_movable)
  1213. htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
  1214. else
  1215. htlb_alloc_mask = GFP_HIGHUSER;
  1216. return 0;
  1217. }
  1218. int hugetlb_overcommit_handler(struct ctl_table *table, int write,
  1219. struct file *file, void __user *buffer,
  1220. size_t *length, loff_t *ppos)
  1221. {
  1222. struct hstate *h = &default_hstate;
  1223. unsigned long tmp;
  1224. if (!write)
  1225. tmp = h->nr_overcommit_huge_pages;
  1226. table->data = &tmp;
  1227. table->maxlen = sizeof(unsigned long);
  1228. proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
  1229. if (write) {
  1230. spin_lock(&hugetlb_lock);
  1231. h->nr_overcommit_huge_pages = tmp;
  1232. spin_unlock(&hugetlb_lock);
  1233. }
  1234. return 0;
  1235. }
  1236. #endif /* CONFIG_SYSCTL */
  1237. int hugetlb_report_meminfo(char *buf)
  1238. {
  1239. struct hstate *h = &default_hstate;
  1240. return sprintf(buf,
  1241. "HugePages_Total: %5lu\n"
  1242. "HugePages_Free: %5lu\n"
  1243. "HugePages_Rsvd: %5lu\n"
  1244. "HugePages_Surp: %5lu\n"
  1245. "Hugepagesize: %5lu kB\n",
  1246. h->nr_huge_pages,
  1247. h->free_huge_pages,
  1248. h->resv_huge_pages,
  1249. h->surplus_huge_pages,
  1250. 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
  1251. }
  1252. int hugetlb_report_node_meminfo(int nid, char *buf)
  1253. {
  1254. struct hstate *h = &default_hstate;
  1255. return sprintf(buf,
  1256. "Node %d HugePages_Total: %5u\n"
  1257. "Node %d HugePages_Free: %5u\n"
  1258. "Node %d HugePages_Surp: %5u\n",
  1259. nid, h->nr_huge_pages_node[nid],
  1260. nid, h->free_huge_pages_node[nid],
  1261. nid, h->surplus_huge_pages_node[nid]);
  1262. }
  1263. /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
  1264. unsigned long hugetlb_total_pages(void)
  1265. {
  1266. struct hstate *h = &default_hstate;
  1267. return h->nr_huge_pages * pages_per_huge_page(h);
  1268. }
  1269. static int hugetlb_acct_memory(struct hstate *h, long delta)
  1270. {
  1271. int ret = -ENOMEM;
  1272. spin_lock(&hugetlb_lock);
  1273. /*
  1274. * When cpuset is configured, it breaks the strict hugetlb page
  1275. * reservation as the accounting is done on a global variable. Such a
  1276. * reservation is completely rubbish in the presence of cpusets because
  1277. * the reservation is not checked against page availability for the
  1278. * current cpuset. An application can still potentially be OOM'ed by the
  1279. * kernel for lack of free htlb pages in the cpuset that the task is in.
  1280. * Attempting to enforce strict accounting with cpusets is almost
  1281. * impossible (or too ugly) because cpusets are so fluid that a
  1282. * task or memory node can be dynamically moved between cpusets.
  1283. *
  1284. * The change of semantics for shared hugetlb mappings with cpusets is
  1285. * undesirable. However, in order to preserve some of the semantics,
  1286. * we fall back to checking against current free page availability as
  1287. * a best attempt, hopefully minimizing the impact of the semantics
  1288. * change that cpusets introduce.
  1289. */
  1290. if (delta > 0) {
  1291. if (gather_surplus_pages(h, delta) < 0)
  1292. goto out;
  1293. if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
  1294. return_unused_surplus_pages(h, delta);
  1295. goto out;
  1296. }
  1297. }
  1298. ret = 0;
  1299. if (delta < 0)
  1300. return_unused_surplus_pages(h, (unsigned long) -delta);
  1301. out:
  1302. spin_unlock(&hugetlb_lock);
  1303. return ret;
  1304. }
  1305. static void hugetlb_vm_op_open(struct vm_area_struct *vma)
  1306. {
  1307. struct resv_map *reservations = vma_resv_map(vma);
  1308. /*
  1309. * This new VMA should share its sibling's reservation map if present.
  1310. * The VMA will only ever have a valid reservation map pointer where
  1311. * it is being copied for another still existing VMA. As that VMA
  1312. * has a reference to the reservation map it cannot disappear until
  1313. * after this open call completes. It is therefore safe to take a
  1314. * new reference here without additional locking.
  1315. */
  1316. if (reservations)
  1317. kref_get(&reservations->refs);
  1318. }
  1319. static void hugetlb_vm_op_close(struct vm_area_struct *vma)
  1320. {
  1321. struct hstate *h = hstate_vma(vma);
  1322. struct resv_map *reservations = vma_resv_map(vma);
  1323. unsigned long reserve;
  1324. unsigned long start;
  1325. unsigned long end;
  1326. if (reservations) {
  1327. start = vma_hugecache_offset(h, vma, vma->vm_start);
  1328. end = vma_hugecache_offset(h, vma, vma->vm_end);
  1329. reserve = (end - start) -
  1330. region_count(&reservations->regions, start, end);
  1331. kref_put(&reservations->refs, resv_map_release);
  1332. if (reserve) {
  1333. hugetlb_acct_memory(h, -reserve);
  1334. hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
  1335. }
  1336. }
  1337. }
  1338. /*
  1339. * We cannot handle pagefaults against hugetlb pages at all. They cause
  1340. * handle_mm_fault() to try to instantiate regular-sized pages in the
  1341. * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
  1342. * this far.
  1343. */
  1344. static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  1345. {
  1346. BUG();
  1347. return 0;
  1348. }
  1349. struct vm_operations_struct hugetlb_vm_ops = {
  1350. .fault = hugetlb_vm_op_fault,
  1351. .open = hugetlb_vm_op_open,
  1352. .close = hugetlb_vm_op_close,
  1353. };
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
                           int writable)
{
        pte_t entry;

        if (writable) {
                entry =
                    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
        } else {
                entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        }
        entry = pte_mkyoung(entry);
        entry = pte_mkhuge(entry);

        return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
                                   unsigned long address, pte_t *ptep)
{
        pte_t entry;

        entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
        if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
                update_mmu_cache(vma, address, entry);
        }
}

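/*
 * Copy the huge page table entries from a parent mm to a child at fork time.
 * For private, writable mappings the source entries are write-protected
 * first so that later writes from either side go through COW. Shared page
 * tables (dst_pte == src_pte) are left alone and take no extra references.
 */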
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
{
        pte_t *src_pte, *dst_pte, entry;
        struct page *ptepage;
        unsigned long addr;
        int cow;
        struct hstate *h = hstate_vma(vma);
        unsigned long sz = huge_page_size(h);

        cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

        for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
                src_pte = huge_pte_offset(src, addr);
                if (!src_pte)
                        continue;
                dst_pte = huge_pte_alloc(dst, addr, sz);
                if (!dst_pte)
                        goto nomem;

                /* If the pagetables are shared don't copy or take references */
                if (dst_pte == src_pte)
                        continue;

                spin_lock(&dst->page_table_lock);
                spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
                if (!huge_pte_none(huge_ptep_get(src_pte))) {
                        if (cow)
                                huge_ptep_set_wrprotect(src, addr, src_pte);
                        entry = huge_ptep_get(src_pte);
                        ptepage = pte_page(entry);
                        get_page(ptepage);
                        set_huge_pte_at(dst, addr, dst_pte, entry);
                }
                spin_unlock(&src->page_table_lock);
                spin_unlock(&dst->page_table_lock);
        }
        return 0;

nomem:
        return -ENOMEM;
}

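/*
 * Tear down the huge PTEs in [start, end) and free the pages they mapped.
 * If @ref_page is non-NULL, only that specific page is unmapped and the VMA
 * is flagged so that later faults on it fail instead of silently losing
 * data. The caller must hold the file's i_mmap_lock; see
 * unmap_hugepage_range() below.
 */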
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end, struct page *ref_page)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *ptep;
        pte_t pte;
        struct page *page;
        struct page *tmp;
        struct hstate *h = hstate_vma(vma);
        unsigned long sz = huge_page_size(h);
        /*
         * A page gathering list, protected by the per-file i_mmap_lock. The
         * lock is used to avoid list corruption from multiple unmapping
         * of the same page since we are using page->lru.
         */
        LIST_HEAD(page_list);

        WARN_ON(!is_vm_hugetlb_page(vma));
        BUG_ON(start & ~huge_page_mask(h));
        BUG_ON(end & ~huge_page_mask(h));

        spin_lock(&mm->page_table_lock);
        for (address = start; address < end; address += sz) {
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;

                if (huge_pmd_unshare(mm, &address, ptep))
                        continue;

                /*
                 * If a reference page is supplied, it is because a specific
                 * page is being unmapped, not a range. Ensure the page we
                 * are about to unmap is the actual page of interest.
                 */
                if (ref_page) {
                        pte = huge_ptep_get(ptep);
                        if (huge_pte_none(pte))
                                continue;
                        page = pte_page(pte);
                        if (page != ref_page)
                                continue;

                        /*
                         * Mark the VMA as having unmapped its page so that
                         * future faults in this VMA will fail rather than
                         * looking like data was lost.
                         */
                        set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
                }

                pte = huge_ptep_get_and_clear(mm, address, ptep);
                if (huge_pte_none(pte))
                        continue;

                page = pte_page(pte);
                if (pte_dirty(pte))
                        set_page_dirty(page);
                list_add(&page->lru, &page_list);
        }
        spin_unlock(&mm->page_table_lock);
        flush_tlb_range(vma, start, end);
        list_for_each_entry_safe(page, tmp, &page_list, lru) {
                list_del(&page->lru);
                put_page(page);
        }
}

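/*
 * Locked wrapper around __unmap_hugepage_range(): takes the file's
 * i_mmap_lock so that the page gathering list above cannot be corrupted
 * by concurrent unmaps of the same pages.
 */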
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end, struct page *ref_page)
{
        spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
        __unmap_hugepage_range(vma, start, end, ref_page);
        spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
}

/*
 * This is called when the original mapper is failing to COW a MAP_PRIVATE
 * mapping it owns the reserve page for. The intention is to unmap the page
 * from other VMAs and let the children be SIGKILLed if they are faulting the
 * same region.
 */
int unmap_ref_private(struct mm_struct *mm,
                      struct vm_area_struct *vma,
                      struct page *page,
                      unsigned long address)
{
        struct vm_area_struct *iter_vma;
        struct address_space *mapping;
        struct prio_tree_iter iter;
        pgoff_t pgoff;

        /*
         * vm_pgoff is in PAGE_SIZE units, hence the different calculation
         * from page cache lookup which is in HPAGE_SIZE units.
         */
        address = address & huge_page_mask(hstate_vma(vma));
        pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
                + (vma->vm_pgoff >> PAGE_SHIFT);
        mapping = (struct address_space *)page_private(page);

        vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                /* Do not unmap the current VMA */
                if (iter_vma == vma)
                        continue;

                /*
                 * Unmap the page from other VMAs without their own reserves.
                 * They get marked to be SIGKILLed if they fault in these
                 * areas. This is because a future no-page fault on this VMA
                 * could insert a zeroed page instead of the data existing
                 * from the time of fork. This would look like data corruption.
                 */
                if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
                        unmap_hugepage_range(iter_vma,
                                address, address + HPAGE_SIZE,
                                page);
        }

        return 1;
}

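/*
 * Handle a write fault on a huge page that is mapped read-only. If we are
 * the only user of the old page it is simply made writable in place;
 * otherwise a new huge page is allocated, the data is copied and the PTE
 * switched over. For MAP_PRIVATE reserve owners, a failed allocation falls
 * back to unmapping the page from child mappings so the owner can retry.
 */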
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
                       unsigned long address, pte_t *ptep, pte_t pte,
                       struct page *pagecache_page)
{
        struct hstate *h = hstate_vma(vma);
        struct page *old_page, *new_page;
        int avoidcopy;
        int outside_reserve = 0;

        old_page = pte_page(pte);

retry_avoidcopy:
        /* If no-one else is actually using this page, avoid the copy
         * and just make the page writable */
        avoidcopy = (page_count(old_page) == 1);
        if (avoidcopy) {
                set_huge_ptep_writable(vma, address, ptep);
                return 0;
        }

        /*
         * If the process that created a MAP_PRIVATE mapping is about to
         * perform a COW due to a shared page count, attempt to satisfy
         * the allocation without using the existing reserves. The pagecache
         * page is used to determine if the reserve at this address was
         * consumed or not. If reserves were used, a partially faulted
         * mapping at the time of fork() could consume its reserves on COW
         * instead of the full address range.
         */
        if (!(vma->vm_flags & VM_SHARED) &&
            is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
            old_page != pagecache_page)
                outside_reserve = 1;

        page_cache_get(old_page);
        new_page = alloc_huge_page(vma, address, outside_reserve);

        if (IS_ERR(new_page)) {
                page_cache_release(old_page);

                /*
                 * If a process owning a MAP_PRIVATE mapping fails to COW,
                 * it is due to references held by a child and an insufficient
                 * huge page pool. To guarantee the original mapper's
                 * reliability, unmap the page from child processes. The child
                 * may get SIGKILLed if it later faults.
                 */
                if (outside_reserve) {
                        BUG_ON(huge_pte_none(pte));
                        if (unmap_ref_private(mm, vma, old_page, address)) {
                                BUG_ON(page_count(old_page) != 1);
                                BUG_ON(huge_pte_none(pte));
                                goto retry_avoidcopy;
                        }
                        WARN_ON_ONCE(1);
                }

                return -PTR_ERR(new_page);
        }

        spin_unlock(&mm->page_table_lock);
        copy_huge_page(new_page, old_page, address, vma);
        __SetPageUptodate(new_page);
        spin_lock(&mm->page_table_lock);

        ptep = huge_pte_offset(mm, address & huge_page_mask(h));
        if (likely(pte_same(huge_ptep_get(ptep), pte))) {
                /* Break COW */
                huge_ptep_clear_flush(vma, address, ptep);
                set_huge_pte_at(mm, address, ptep,
                                make_huge_pte(vma, new_page, 1));
                /* Make the old page be freed below */
                new_page = old_page;
        }
        page_cache_release(new_page);
        page_cache_release(old_page);
        return 0;
}

/* Return the pagecache page at a given address within a VMA */
static struct page *hugetlbfs_pagecache_page(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long address)
{
        struct address_space *mapping;
        pgoff_t idx;

        mapping = vma->vm_file->f_mapping;
        idx = vma_hugecache_offset(h, vma, address);

        return find_lock_page(mapping, idx);
}

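/*
 * Fault in a huge page at @address where no PTE currently exists. Shared
 * mappings look the page up in (or add it to) the hugetlbfs page cache;
 * private mappings get a freshly allocated, zeroed page. The page lock
 * guards against racing truncation, and write faults on private mappings
 * do the COW immediately to avoid taking a second fault.
 */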
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pte_t *ptep, int write_access)
{
        struct hstate *h = hstate_vma(vma);
        int ret = VM_FAULT_SIGBUS;
        pgoff_t idx;
        unsigned long size;
        struct page *page;
        struct address_space *mapping;
        pte_t new_pte;

        /*
         * Currently, we are forced to kill the process in the event the
         * original mapper has unmapped pages from the child due to a failed
         * COW. Warn that such a situation has occurred as it may not be
         * obvious.
         */
        if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
                printk(KERN_WARNING
                        "PID %d killed due to inadequate hugepage pool\n",
                        current->pid);
                return ret;
        }

        mapping = vma->vm_file->f_mapping;
        idx = vma_hugecache_offset(h, vma, address);

        /*
         * Use page lock to guard against racing truncation
         * before we get page_table_lock.
         */
retry:
        page = find_lock_page(mapping, idx);
        if (!page) {
                size = i_size_read(mapping->host) >> huge_page_shift(h);
                if (idx >= size)
                        goto out;
                page = alloc_huge_page(vma, address, 0);
                if (IS_ERR(page)) {
                        ret = -PTR_ERR(page);
                        goto out;
                }
                clear_huge_page(page, address, huge_page_size(h));
                __SetPageUptodate(page);

                if (vma->vm_flags & VM_SHARED) {
                        int err;
                        struct inode *inode = mapping->host;

                        err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
                        if (err) {
                                put_page(page);
                                if (err == -EEXIST)
                                        goto retry;
                                goto out;
                        }

                        spin_lock(&inode->i_lock);
                        inode->i_blocks += blocks_per_huge_page(h);
                        spin_unlock(&inode->i_lock);
                } else
                        lock_page(page);
        }

        spin_lock(&mm->page_table_lock);
        size = i_size_read(mapping->host) >> huge_page_shift(h);
        if (idx >= size)
                goto backout;

        ret = 0;
        if (!huge_pte_none(huge_ptep_get(ptep)))
                goto backout;

        new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
                                && (vma->vm_flags & VM_SHARED)));
        set_huge_pte_at(mm, address, ptep, new_pte);

        if (write_access && !(vma->vm_flags & VM_SHARED)) {
                /* Optimization, do the COW without a second fault */
                ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
        }

        spin_unlock(&mm->page_table_lock);
        unlock_page(page);
out:
        return ret;

backout:
        spin_unlock(&mm->page_table_lock);
        unlock_page(page);
        put_page(page);
        goto out;
}

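/*
 * Top-level hugetlb fault handler: missing entries are handed to
 * hugetlb_no_page(), while write faults on read-only entries go through
 * hugetlb_cow() after re-checking the PTE under page_table_lock.
 */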
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, int write_access)
{
        pte_t *ptep;
        pte_t entry;
        int ret;
        static DEFINE_MUTEX(hugetlb_instantiation_mutex);
        struct hstate *h = hstate_vma(vma);

        ptep = huge_pte_alloc(mm, address, huge_page_size(h));
        if (!ptep)
                return VM_FAULT_OOM;

        /*
         * Serialize hugepage allocation and instantiation, so that we don't
         * get spurious allocation failures if two CPUs race to instantiate
         * the same page in the page cache.
         */
        mutex_lock(&hugetlb_instantiation_mutex);
        entry = huge_ptep_get(ptep);
        if (huge_pte_none(entry)) {
                ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
                mutex_unlock(&hugetlb_instantiation_mutex);
                return ret;
        }

        ret = 0;

        spin_lock(&mm->page_table_lock);
        /* Check for a racing update before calling hugetlb_cow */
        if (likely(pte_same(entry, huge_ptep_get(ptep))))
                if (write_access && !pte_write(entry)) {
                        struct page *page;
                        page = hugetlbfs_pagecache_page(h, vma, address);
                        ret = hugetlb_cow(mm, vma, address, ptep, entry, page);
                        if (page) {
                                unlock_page(page);
                                put_page(page);
                        }
                }
        spin_unlock(&mm->page_table_lock);
        mutex_unlock(&hugetlb_instantiation_mutex);

        return ret;
}

/* Can be overridden by architectures */
__attribute__((weak)) struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
                pud_t *pud, int write)
{
        BUG();
        return NULL;
}

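/*
 * get_user_pages() support for hugetlb VMAs: walk the user range starting
 * at *position, faulting huge pages in as needed, and fill successive
 * entries of pages[] and vmas[] from index i onwards. pfn_offset selects
 * the small page within each compound huge page so the tail page frames
 * never need to be touched individually. On return, *position and *length
 * reflect how far the walk got.
 */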
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        struct page **pages, struct vm_area_struct **vmas,
                        unsigned long *position, int *length, int i,
                        int write)
{
        unsigned long pfn_offset;
        unsigned long vaddr = *position;
        int remainder = *length;
        struct hstate *h = hstate_vma(vma);

        spin_lock(&mm->page_table_lock);
        while (vaddr < vma->vm_end && remainder) {
                pte_t *pte;
                struct page *page;

                /*
                 * Some archs (sparc64, sh*) have multiple pte_ts to
                 * each hugepage. We have to make sure we get the
                 * first, for the page indexing below to work.
                 */
                pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));

                if (!pte || huge_pte_none(huge_ptep_get(pte)) ||
                    (write && !pte_write(huge_ptep_get(pte)))) {
                        int ret;

                        spin_unlock(&mm->page_table_lock);
                        ret = hugetlb_fault(mm, vma, vaddr, write);
                        spin_lock(&mm->page_table_lock);
                        if (!(ret & VM_FAULT_ERROR))
                                continue;

                        remainder = 0;
                        if (!i)
                                i = -EFAULT;
                        break;
                }

                pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
                page = pte_page(huge_ptep_get(pte));
same_page:
                if (pages) {
                        get_page(page);
                        pages[i] = page + pfn_offset;
                }

                if (vmas)
                        vmas[i] = vma;

                vaddr += PAGE_SIZE;
                ++pfn_offset;
                --remainder;
                ++i;
                if (vaddr < vma->vm_end && remainder &&
                                pfn_offset < pages_per_huge_page(h)) {
                        /*
                         * We use pfn_offset to avoid touching the pageframes
                         * of this compound page.
                         */
                        goto same_page;
                }
        }
        spin_unlock(&mm->page_table_lock);
        *length = remainder;
        *position = vaddr;

        return i;
}

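/*
 * Apply a new protection to every populated huge PTE in [address, end),
 * as mprotect() does for normal pages. Shared PMDs are unshared rather
 * than modified, and the TLB is flushed for the whole range at the end.
 */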
void hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long start = address;
        pte_t *ptep;
        pte_t pte;
        struct hstate *h = hstate_vma(vma);

        BUG_ON(address >= end);
        flush_cache_range(vma, address, end);

        spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
        spin_lock(&mm->page_table_lock);
        for (; address < end; address += huge_page_size(h)) {
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;
                if (huge_pmd_unshare(mm, &address, ptep))
                        continue;
                if (!huge_pte_none(huge_ptep_get(ptep))) {
                        pte = huge_ptep_get_and_clear(mm, address, ptep);
                        pte = pte_mkhuge(pte_modify(pte, newprot));
                        set_huge_pte_at(mm, address, ptep, pte);
                }
        }
        spin_unlock(&mm->page_table_lock);
        spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

        flush_tlb_range(vma, start, end);
}

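/*
 * Reserve huge pages for the range [from, to) at mmap() time. Shared
 * mappings track reservations in the region list hanging off the inode,
 * so only pages not already reserved are charged; private mappings get a
 * per-VMA resv_map and reserve the full range up front. The hugetlbfs
 * quota and the global reserve are charged before the region is recorded.
 */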
int hugetlb_reserve_pages(struct inode *inode,
                                        long from, long to,
                                        struct vm_area_struct *vma)
{
        long ret, chg;
        struct hstate *h = hstate_inode(inode);

        if (vma && vma->vm_flags & VM_NORESERVE)
                return 0;

        /*
         * Shared mappings base their reservation on the number of pages that
         * are already allocated on behalf of the file. Private mappings need
         * to reserve the full area even if read-only as mprotect() may be
         * called to make the mapping read-write. Assume !vma is a shm mapping.
         */
        if (!vma || vma->vm_flags & VM_SHARED)
                chg = region_chg(&inode->i_mapping->private_list, from, to);
        else {
                struct resv_map *resv_map = resv_map_alloc();
                if (!resv_map)
                        return -ENOMEM;

                chg = to - from;

                set_vma_resv_map(vma, resv_map);
                set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
        }

        if (chg < 0)
                return chg;

        if (hugetlb_get_quota(inode->i_mapping, chg))
                return -ENOSPC;
        ret = hugetlb_acct_memory(h, chg);
        if (ret < 0) {
                hugetlb_put_quota(inode->i_mapping, chg);
                return ret;
        }
        if (!vma || vma->vm_flags & VM_SHARED)
                region_add(&inode->i_mapping->private_list, from, to);
        return 0;
}

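/*
 * Undo reservations when a hugetlbfs file is truncated at @offset. @freed
 * is the number of pages the truncate actually released; the reservations
 * removed from the region list but not backed by freed pages (chg - freed)
 * are handed back to the quota and to the global pool.
 */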
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
        struct hstate *h = hstate_inode(inode);
        long chg = region_truncate(&inode->i_mapping->private_list, offset);

        spin_lock(&inode->i_lock);
        inode->i_blocks -= blocks_per_huge_page(h);
        spin_unlock(&inode->i_lock);

        hugetlb_put_quota(inode->i_mapping, (chg - freed));
        hugetlb_acct_memory(h, -(chg - freed));
}