hugetlb.c

/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/io.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

static int max_hstate;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;

#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are protected by a combination of the mmap_sem
 * and the hugetlb_instantiation_mutex. To access or modify a region the caller
 * must either hold the mmap_sem for write, or the mmap_sem for read and
 * the hugetlb_instantiation_mutex:
 *
 *	down_write(&mm->mmap_sem);
 * or
 *	down_read(&mm->mmap_sem);
 *	mutex_lock(&hugetlb_instantiation_mutex);
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};
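
/*
 * Add the range [f, t) to the region map. region_chg() must have been
 * called beforehand so that a suitable region already exists; any regions
 * the new range overlaps are merged into it. Always returns 0.
 */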
static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher than us, extend our area to
		 * include it completely. If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}
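
/*
 * Work out how many additional huge pages a reservation over [f, t) would
 * need, given the regions already recorded, and pre-allocate the region
 * entry required so that a later region_add() cannot fail. Returns the
 * number of pages to reserve, or -ENOMEM.
 */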
static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves. Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}
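
/*
 * Cut the region map back so that it covers nothing at or above 'end',
 * trimming a region that straddles the boundary and freeing the rest.
 * Returns the number of reserved pages released.
 */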
static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}
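
/*
 * Count how many pages in [f, t) are covered by existing regions,
 * i.e. how many reserved pages the range already accounts for.
 */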
static long region_count(struct list_head *head, long f, long t)
{
	struct file_region *rg;
	long chg = 0;

	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		int seg_from;
		int seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}

	return chg;
}
/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

/*
 * Flags for MAP_PRIVATE reservations. These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping. A shared mapping has a region map associated
 * with the underlying file, this region map represents the backing file
 * pages which have ever had a reservation assigned and this persists even
 * after the page is instantiated. A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it, this region map represents those offsets which have consumed
 * a reservation, ie. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

struct resv_map {
	struct kref refs;
	struct list_head regions;
};

static struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	if (!resv_map)
		return NULL;

	kref_init(&resv_map->refs);
	INIT_LIST_HEAD(&resv_map->regions);

	return resv_map;
}

static void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);

	/* Clear out any active regions before we release the map. */
	region_truncate(&resv_map->regions, 0);
	kfree(resv_map);
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_SHARED))
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	return NULL;
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_SHARED);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_SHARED);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Decrement the reserved pages in the hugepage pool by one */
static void decrement_hugepage_resv_vma(struct hstate *h,
			struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NORESERVE)
		return;

	if (vma->vm_flags & VM_SHARED) {
		/* Shared mappings always use reserves */
		h->resv_huge_pages--;
	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Only the process that called mmap() has reserves for
		 * private mappings.
		 */
		h->resv_huge_pages--;
	}
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_SHARED))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_SHARED)
		return 1;
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return 1;
	return 0;
}

static void clear_gigantic_page(struct page *page,
			unsigned long addr, unsigned long sz)
{
	int i;
	struct page *p = page;

	might_sleep();
	for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
		cond_resched();
		clear_user_highpage(p, addr + i * PAGE_SIZE);
	}
}

static void clear_huge_page(struct page *page,
			unsigned long addr, unsigned long sz)
{
	int i;

	if (unlikely(sz > MAX_ORDER_NR_PAGES))
		return clear_gigantic_page(page, addr, sz);

	might_sleep();
	for (i = 0; i < sz/PAGE_SIZE; i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_gigantic_page(struct page *dst, struct page *src,
			unsigned long addr, struct vm_area_struct *vma)
{
	int i;
	struct hstate *h = hstate_vma(vma);
	struct page *dst_base = dst;
	struct page *src_base = src;

	might_sleep();
	for (i = 0; i < pages_per_huge_page(h); ) {
		cond_resched();
		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			unsigned long addr, struct vm_area_struct *vma)
{
	int i;
	struct hstate *h = hstate_vma(vma);

	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES))
		return copy_gigantic_page(dst, src, addr, vma);

	might_sleep();
	for (i = 0; i < pages_per_huge_page(h); i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}
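
/* Put a free huge page back on its node's free list; caller holds hugetlb_lock. */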
static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page(struct hstate *h)
{
	int nid;
	struct page *page = NULL;

	for (nid = 0; nid < MAX_NUMNODES; ++nid) {
		if (!list_empty(&h->hugepage_freelists[nid])) {
			page = list_entry(h->hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[nid]--;
			break;
		}
	}
	return page;
}
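
/*
 * Pick a free huge page for a fault at 'address': walk the zonelist built
 * from the VMA's mempolicy, respect cpuset constraints, and consume a
 * reservation unless 'avoid_reserve' is set. Caller holds hugetlb_lock.
 */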
static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol, &nodemask);
	struct zone *zone;
	struct zoneref *z;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		return NULL;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		return NULL;

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					MAX_NR_ZONES - 1, nodemask) {
		nid = zone_to_nid(zone);
		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
		    !list_empty(&h->hugepage_freelists[nid])) {
			page = list_entry(h->hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[nid]--;

			if (!avoid_reserve)
				decrement_hugepage_resv_vma(h, vma);

			break;
		}
	}
	mpol_cond_put(mpol);
	return page;
}
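
/*
 * Dissolve a huge page: clear the tail-page flags, drop the compound
 * destructor and hand the pages back to the buddy allocator. Caller holds
 * hugetlb_lock and the page must not be on any free list.
 */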
static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	VM_BUG_ON(h->order >= MAX_ORDER);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	arch_release_hugepage(page);
	__free_pages(page, huge_page_order(h));
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

static void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct address_space *mapping;

	mapping = (struct address_space *) page_private(page);
	set_page_private(page, 0);
	BUG_ON(page_count(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
	if (mapping)
		hugetlb_put_quota(mapping, 1);
}

/*
 * Increment or decrement surplus_huge_pages. Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(struct hstate *h, int delta)
{
	static int prev_nid;
	int nid = prev_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);
	do {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		/* To shrink on this node, there must be a surplus page */
		if (delta < 0 && !h->surplus_huge_pages_node[nid])
			continue;
		/* Surplus cannot exceed the total number of pages */
		if (delta > 0 && h->surplus_huge_pages_node[nid] >=
						h->nr_huge_pages_node[nid])
			continue;

		h->surplus_huge_pages += delta;
		h->surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (nid != prev_nid);

	prev_nid = nid;
	return ret;
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	set_compound_page_dtor(page, free_huge_page);
	spin_lock(&hugetlb_lock);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
	put_page(page); /* free it into the hugepage allocator */
}

static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	if (h->order >= MAX_ORDER)
		return NULL;

	page = alloc_pages_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
						__GFP_REPEAT|__GFP_NOWARN,
		huge_page_order(h));
	if (page) {
		if (arch_prepare_hugepage(page)) {
			__free_pages(page, huge_page_order(h));
			return NULL;
		}
		prep_new_huge_page(h, page, nid);
	}

	return page;
}

/*
 * Use a helper variable to find the next node and then
 * copy it back to hugetlb_next_nid afterwards:
 * otherwise there's a window in which a racer might
 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
 * But we don't need to use a spin_lock here: it really
 * doesn't matter if occasionally a racer chooses the
 * same nid as we do. Move nid forward in the mask even
 * if we just successfully allocated a hugepage so that
 * the next caller gets hugepages on the next node.
 */
static int hstate_next_node(struct hstate *h)
{
	int next_nid;
	next_nid = next_node(h->hugetlb_next_nid, node_online_map);
	if (next_nid == MAX_NUMNODES)
		next_nid = first_node(node_online_map);
	h->hugetlb_next_nid = next_nid;
	return next_nid;
}

static int alloc_fresh_huge_page(struct hstate *h)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = h->hugetlb_next_nid;

	do {
		page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid);
		if (page)
			ret = 1;
		next_nid = hstate_next_node(h);
	} while (!page && h->hugetlb_next_nid != start_nid);

	if (ret)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return ret;
}
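
/*
 * Allocate a surplus huge page directly from the buddy allocator, provided
 * the surplus would not exceed nr_overcommit_huge_pages. The page is
 * returned with a zero refcount, already accounted as a surplus page.
 */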
static struct page *alloc_buddy_huge_page(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct page *page;
	unsigned int nid;

	if (h->order >= MAX_ORDER)
		return NULL;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		h->nr_huge_pages++;
		h->surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
					__GFP_REPEAT|__GFP_NOWARN,
					huge_page_order(h));

	if (page && arch_prepare_hugepage(page)) {
		__free_pages(page, huge_page_order(h));
		return NULL;
	}

	spin_lock(&hugetlb_lock);
	if (page) {
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON(page_count(page));
		nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		/*
		 * We incremented the global counters already
		 */
		h->nr_huge_pages_node[nid]++;
		h->surplus_huge_pages_node[nid]++;
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	} else {
		h->nr_huge_pages--;
		h->surplus_huge_pages--;
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
	}
	spin_unlock(&hugetlb_lock);

	return page;
}
/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(h, NULL, 0);
		if (!page) {
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			spin_lock(&hugetlb_lock);
			needed = 0;
			goto free;
		}

		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation. Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator. Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;
free:
	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		list_del(&page->lru);
		enqueue_huge_page(h, page);
	}

	/* Free unnecessary surplus pages to the buddy allocator */
	if (!list_empty(&surplus_list)) {
		spin_unlock(&hugetlb_lock);
		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
			list_del(&page->lru);
			/*
			 * The page has a reference count of zero already, so
			 * call free_huge_page directly instead of using
			 * put_page. This must be done with hugetlb_lock
			 * unlocked which is safe because free_huge_page takes
			 * hugetlb_lock before deciding how to free the page.
			 */
			free_huge_page(page);
		}
		spin_lock(&hugetlb_lock);
	}

	return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	static int nid = -1;
	struct page *page;
	unsigned long nr_pages;

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes. Iterate across all nodes until we
	 * can no longer free unreserved surplus pages. This occurs when
	 * the nodes with surplus pages have no free pages.
	 */
	unsigned long remaining_iterations = num_online_nodes();

	/* Uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;

	/* Cannot return gigantic pages currently */
	if (h->order >= MAX_ORDER)
		return;

	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	while (remaining_iterations-- && nr_pages) {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		if (!h->surplus_huge_pages_node[nid])
			continue;

		if (!list_empty(&h->hugepage_freelists[nid])) {
			page = list_entry(h->hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			update_and_free_page(h, page);
			h->free_huge_pages--;
			h->free_huge_pages_node[nid]--;
			h->surplus_huge_pages--;
			h->surplus_huge_pages_node[nid]--;
			nr_pages--;
			remaining_iterations = num_online_nodes();
		}
	}
}
/*
 * Determine if the huge page at addr within the vma has an associated
 * reservation. Where it does not we will need to logically increase
 * the reservation and actually increase quota before an allocation can occur.
 * Where any new reservation would be required the reservation change is
 * prepared, but not committed. Once the page has been quota'd, allocated
 * and instantiated, the change should be committed via vma_commit_reservation.
 * No action is required on failure.
 */
static int vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_SHARED) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		return region_chg(&inode->i_mapping->private_list,
							idx, idx + 1);

	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		return 1;

	} else {
		int err;
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		err = region_chg(&reservations->regions, idx, idx + 1);
		if (err < 0)
			return err;
		return 0;
	}
}

static void vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_SHARED) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		region_add(&inode->i_mapping->private_list, idx, idx + 1);

	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		/* Mark this page used in the map. */
		region_add(&reservations->regions, idx, idx + 1);
	}
}

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct hstate *h = hstate_vma(vma);
	struct page *page;
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	long chg;

	/*
	 * Processes that did not create the mapping will have no reserves and
	 * will not have accounted against quota. Check that the quota can be
	 * made before satisfying the allocation.
	 * MAP_NORESERVE mappings may also need pages and quota allocated
	 * if no reserve mapping overlaps.
	 */
	chg = vma_needs_reservation(h, vma, addr);
	if (chg < 0)
		return ERR_PTR(chg);
	if (chg)
		if (hugetlb_get_quota(inode->i_mapping, chg))
			return ERR_PTR(-ENOSPC);

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
	spin_unlock(&hugetlb_lock);

	if (!page) {
		page = alloc_buddy_huge_page(h, vma, addr);
		if (!page) {
			hugetlb_put_quota(inode->i_mapping, chg);
			return ERR_PTR(-VM_FAULT_OOM);
		}
	}

	set_page_refcounted(page);
	set_page_private(page, (unsigned long) mapping);

	vma_commit_reservation(h, vma, addr);

	return page;
}
__attribute__((weak)) int alloc_bootmem_huge_page(struct hstate *h)
{
	struct huge_bootmem_page *m;
	int nr_nodes = nodes_weight(node_online_map);

	while (nr_nodes) {
		void *addr;

		addr = __alloc_bootmem_node_nopanic(
				NODE_DATA(h->hugetlb_next_nid),
				huge_page_size(h), huge_page_size(h), 0);

		if (addr) {
			/*
			 * Use the beginning of the huge page to store the
			 * huge_bootmem_page struct (until gather_bootmem
			 * puts them into the mem_map).
			 */
			m = addr;
			if (m)
				goto found;
		}
		hstate_next_node(h);
		nr_nodes--;
	}
	return 0;

found:
	BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
	/* Put them into a private list first because mem_map is not up yet */
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}

static void prep_compound_huge_page(struct page *page, int order)
{
	if (unlikely(order > (MAX_ORDER - 1)))
		prep_compound_gigantic_page(page, order);
	else
		prep_compound_page(page, order);
}

/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
	struct huge_bootmem_page *m;

	list_for_each_entry(m, &huge_boot_pages, list) {
		struct page *page = virt_to_page(m);
		struct hstate *h = m->hstate;
		__ClearPageReserved(page);
		WARN_ON(page_count(page) != 1);
		prep_compound_huge_page(page, h->order);
		prep_new_huge_page(h, page, page_to_nid(page));
	}
}

static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
	unsigned long i;

	for (i = 0; i < h->max_huge_pages; ++i) {
		if (h->order >= MAX_ORDER) {
			if (!alloc_bootmem_huge_page(h))
				break;
		} else if (!alloc_fresh_huge_page(h))
			break;
	}
	h->max_huge_pages = i;
}

static void __init hugetlb_init_hstates(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/* oversize hugepages were init'ed in early boot */
		if (h->order < MAX_ORDER)
			hugetlb_hstate_alloc_pages(h);
	}
}

static char * __init memfmt(char *buf, unsigned long n)
{
	if (n >= (1UL << 30))
		sprintf(buf, "%lu GB", n >> 30);
	else if (n >= (1UL << 20))
		sprintf(buf, "%lu MB", n >> 20);
	else
		sprintf(buf, "%lu KB", n >> 10);
	return buf;
}

static void __init report_hugepages(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		char buf[32];
		printk(KERN_INFO "HugeTLB registered %s page size, "
				 "pre-allocated %ld pages\n",
			memfmt(buf, huge_page_size(h)),
			h->free_huge_pages);
	}
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count)
{
	int i;

	if (h->order >= MAX_ORDER)
		return;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		struct list_head *freel = &h->hugepage_freelists[i];
		list_for_each_entry_safe(page, next, freel, lru) {
			if (count >= h->nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(h, page);
			h->free_huge_pages--;
			h->free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count)
{
}
#endif
#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
{
	unsigned long min_count, ret;

	if (h->order >= MAX_ORDER)
		return h->max_huge_pages;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state. Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, -1))
			break;
	}

	while (count > persistent_huge_pages(h)) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page(h);
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;
	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations). Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_buddy_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls are changed, or the surplus pages go out of use.
	 */
	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(h, min_count);
	while (min_count < persistent_huge_pages(h)) {
		struct page *page = dequeue_huge_page(h);
		if (!page)
			break;
		update_and_free_page(h, page);
	}
	while (count < persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, 1))
			break;
	}
out:
	ret = persistent_huge_pages(h);
	spin_unlock(&hugetlb_lock);
	return ret;
}

#define HSTATE_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define HSTATE_ATTR(_name) \
	static struct kobj_attribute _name##_attr = \
		__ATTR(_name, 0644, _name##_show, _name##_store)

static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];

static struct hstate *kobj_to_hstate(struct kobject *kobj)
{
	int i;

	for (i = 0; i < HUGE_MAX_HSTATE; i++)
		if (hstate_kobjs[i] == kobj)
			return &hstates[i];
	BUG();
	return NULL;
}
static ssize_t nr_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj);
	return sprintf(buf, "%lu\n", h->nr_huge_pages);
}
static ssize_t nr_hugepages_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int err;
	unsigned long input;
	struct hstate *h = kobj_to_hstate(kobj);

	err = strict_strtoul(buf, 10, &input);
	if (err)
		return 0;

	h->max_huge_pages = set_max_huge_pages(h, input);

	return count;
}
HSTATE_ATTR(nr_hugepages);

static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj);
	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
}
static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int err;
	unsigned long input;
	struct hstate *h = kobj_to_hstate(kobj);

	err = strict_strtoul(buf, 10, &input);
	if (err)
		return 0;

	spin_lock(&hugetlb_lock);
	h->nr_overcommit_huge_pages = input;
	spin_unlock(&hugetlb_lock);

	return count;
}
HSTATE_ATTR(nr_overcommit_hugepages);

static ssize_t free_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj);
	return sprintf(buf, "%lu\n", h->free_huge_pages);
}
HSTATE_ATTR_RO(free_hugepages);

static ssize_t resv_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj);
	return sprintf(buf, "%lu\n", h->resv_huge_pages);
}
HSTATE_ATTR_RO(resv_hugepages);

static ssize_t surplus_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj);
	return sprintf(buf, "%lu\n", h->surplus_huge_pages);
}
HSTATE_ATTR_RO(surplus_hugepages);

static struct attribute *hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&nr_overcommit_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&resv_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
	NULL,
};

static struct attribute_group hstate_attr_group = {
	.attrs = hstate_attrs,
};

static int __init hugetlb_sysfs_add_hstate(struct hstate *h)
{
	int retval;

	hstate_kobjs[h - hstates] = kobject_create_and_add(h->name,
							hugepages_kobj);
	if (!hstate_kobjs[h - hstates])
		return -ENOMEM;

	retval = sysfs_create_group(hstate_kobjs[h - hstates],
							&hstate_attr_group);
	if (retval)
		kobject_put(hstate_kobjs[h - hstates]);

	return retval;
}

static void __init hugetlb_sysfs_init(void)
{
	struct hstate *h;
	int err;

	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
	if (!hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h);
		if (err)
			printk(KERN_ERR "Hugetlb: Unable to add hstate %s\n",
								h->name);
	}
}

static void __exit hugetlb_exit(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		kobject_put(hstate_kobjs[h - hstates]);
	}

	kobject_put(hugepages_kobj);
}
module_exit(hugetlb_exit);
static int __init hugetlb_init(void)
{
	/* Some platforms decide whether they support huge pages at boot
	 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
	 * there is no such support.
	 */
	if (HPAGE_SHIFT == 0)
		return 0;

	if (!size_to_hstate(default_hstate_size)) {
		default_hstate_size = HPAGE_SIZE;
		if (!size_to_hstate(default_hstate_size))
			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
	}
	default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
	if (default_hstate_max_huge_pages)
		default_hstate.max_huge_pages = default_hstate_max_huge_pages;

	hugetlb_init_hstates();

	gather_bootmem_prealloc();

	report_hugepages();

	hugetlb_sysfs_init();

	return 0;
}
module_init(hugetlb_init);

/* Should be called on processing a hugepagesz=... option */
void __init hugetlb_add_hstate(unsigned order)
{
	struct hstate *h;
	unsigned long i;

	if (size_to_hstate(PAGE_SIZE << order)) {
		printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
		return;
	}
	BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
	BUG_ON(order == 0);
	h = &hstates[max_hstate++];
	h->order = order;
	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
	h->nr_huge_pages = 0;
	h->free_huge_pages = 0;
	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
	h->hugetlb_next_nid = first_node(node_online_map);
	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
					huge_page_size(h)/1024);

	parsed_hstate = h;
}

static int __init hugetlb_nrpages_setup(char *s)
{
	unsigned long *mhp;
	static unsigned long *last_mhp;

	/*
	 * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
	 * so this hugepages= parameter goes to the "default hstate".
	 */
	if (!max_hstate)
		mhp = &default_hstate_max_huge_pages;
	else
		mhp = &parsed_hstate->max_huge_pages;

	if (mhp == last_mhp) {
		printk(KERN_WARNING "hugepages= specified twice without "
			"interleaving hugepagesz=, ignoring\n");
		return 1;
	}

	if (sscanf(s, "%lu", mhp) <= 0)
		*mhp = 0;

	/*
	 * Global state is always initialized later in hugetlb_init.
	 * But we need to allocate >= MAX_ORDER hstates here early to still
	 * use the bootmem allocator.
	 */
	if (max_hstate && parsed_hstate->order >= MAX_ORDER)
		hugetlb_hstate_alloc_pages(parsed_hstate);

	last_mhp = mhp;

	return 1;
}
__setup("hugepages=", hugetlb_nrpages_setup);

static int __init hugetlb_default_setup(char *s)
{
	default_hstate_size = memparse(s, &s);
	return 1;
}
__setup("default_hugepagesz=", hugetlb_default_setup);

static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}
#ifdef CONFIG_SYSCTL
int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	struct hstate *h = &default_hstate;
	unsigned long tmp;

	if (!write)
		tmp = h->max_huge_pages;

	table->data = &tmp;
	table->maxlen = sizeof(unsigned long);
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);

	if (write)
		h->max_huge_pages = set_max_huge_pages(h, tmp);

	return 0;
}

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

int hugetlb_overcommit_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	struct hstate *h = &default_hstate;
	unsigned long tmp;

	if (!write)
		tmp = h->nr_overcommit_huge_pages;

	table->data = &tmp;
	table->maxlen = sizeof(unsigned long);
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);

	if (write) {
		spin_lock(&hugetlb_lock);
		h->nr_overcommit_huge_pages = tmp;
		spin_unlock(&hugetlb_lock);
	}

	return 0;
}

#endif /* CONFIG_SYSCTL */

void hugetlb_report_meminfo(struct seq_file *m)
{
	struct hstate *h = &default_hstate;
	seq_printf(m,
			"HugePages_Total: %5lu\n"
			"HugePages_Free: %5lu\n"
			"HugePages_Rsvd: %5lu\n"
			"HugePages_Surp: %5lu\n"
			"Hugepagesize: %8lu kB\n",
			h->nr_huge_pages,
			h->free_huge_pages,
			h->resv_huge_pages,
			h->surplus_huge_pages,
			1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	struct hstate *h = &default_hstate;
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free: %5u\n"
		"Node %d HugePages_Surp: %5u\n",
		nid, h->nr_huge_pages_node[nid],
		nid, h->free_huge_pages_node[nid],
		nid, h->surplus_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	struct hstate *h = &default_hstate;
	return h->nr_huge_pages * pages_per_huge_page(h);
}
static int hugetlb_acct_memory(struct hstate *h, long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * a reservation is effectively meaningless in the presence of cpusets
	 * because it is not checked against page availability for the
	 * current cpuset. The application can still be OOM'ed by the kernel
	 * if there are no free huge pages in the cpuset the task is in.
	 * Attempting to enforce strict accounting with cpusets is almost
	 * impossible (or too ugly) because cpusets are so fluid that tasks
	 * and memory nodes can be dynamically moved between them.
	 *
	 * The change of semantics for shared hugetlb mappings with cpusets is
	 * undesirable. However, in order to preserve some of the semantics,
	 * we fall back to checking against current free page availability as
	 * a best attempt, hopefully minimizing the impact of the semantics
	 * change that cpusets introduce.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(h, delta) < 0)
			goto out;

		if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
			return_unused_surplus_pages(h, delta);
			goto out;
		}
	}

	ret = 0;
	if (delta < 0)
		return_unused_surplus_pages(h, (unsigned long) -delta);

out:
	spin_unlock(&hugetlb_lock);
	return ret;
}
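
/* VMA ->open() callback: take a reference on the shared reservation map. */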
static void hugetlb_vm_op_open(struct vm_area_struct *vma)
{
	struct resv_map *reservations = vma_resv_map(vma);

	/*
	 * This new VMA should share its sibling's reservation map if present.
	 * The VMA will only ever have a valid reservation map pointer where
	 * it is being copied for another still existing VMA. As that VMA
	 * has a reference to the reservation map it cannot disappear until
	 * after this open call completes. It is therefore safe to take a
	 * new reference here without additional locking.
	 */
	if (reservations)
		kref_get(&reservations->refs);
}
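
/*
 * VMA ->close() callback: drop the reservation map reference and give back
 * any reserved huge pages (and quota) that this VMA never consumed.
 */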
static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
	struct hstate *h = hstate_vma(vma);
	struct resv_map *reservations = vma_resv_map(vma);
	unsigned long reserve;
	unsigned long start;
	unsigned long end;

	if (reservations) {
		start = vma_hugecache_offset(h, vma, vma->vm_start);
		end = vma_hugecache_offset(h, vma, vma->vm_end);

		reserve = (end - start) -
			region_count(&reservations->regions, start, end);

		kref_put(&reservations->refs, resv_map_release);

		if (reserve) {
			hugetlb_acct_memory(h, -reserve);
			hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
		}
	}
}

/*
 * We cannot handle pagefaults against hugetlb pages at all. They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
	.open = hugetlb_vm_op_open,
	.close = hugetlb_vm_op_close,
};
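
/*
 * Build the huge PTE for a page: writable mappings get a dirty, writable
 * entry, read-only mappings a write-protected one; the entry is always
 * marked young and huge.
 */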
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
	}
}
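
/*
 * Copy the huge page table entries from a parent mm to a child mm at fork
 * time, taking a page reference for each mapped huge page and
 * write-protecting the parent's entries for private (COW) mappings.
 */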
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr, sz);
		if (!dst_pte)
			goto nomem;

		/* If the pagetables are shared don't copy or take references */
		if (dst_pte == src_pte)
			continue;

		spin_lock(&dst->page_table_lock);
		spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
		if (!huge_pte_none(huge_ptep_get(src_pte))) {
			if (cow)
				huge_ptep_set_wrprotect(src, addr, src_pte);
			entry = huge_ptep_get(src_pte);
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}
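
/*
 * Tear down the huge page mappings in [start, end) for one VMA, clearing
 * the PTEs under page_table_lock and releasing the pages afterwards.
 * Callers hold the file's i_mmap_lock; if ref_page is non-NULL only that
 * specific page is unmapped.
 */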
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end, struct page *ref_page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);

	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~huge_page_mask(h));
	BUG_ON(end & ~huge_page_mask(h));

	mmu_notifier_invalidate_range_start(mm, start, end);
	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += sz) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		/*
		 * If a reference page is supplied, it is because a specific
		 * page is being unmapped, not a range. Ensure the page we
		 * are about to unmap is the actual page of interest.
		 */
		if (ref_page) {
			pte = huge_ptep_get(ptep);
			if (huge_pte_none(pte))
				continue;
			page = pte_page(pte);
			if (page != ref_page)
				continue;

			/*
			 * Mark the VMA as having unmapped its page so that
			 * future faults in this VMA will fail rather than
			 * looking like data was lost
			 */
			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
		}

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (huge_pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	mmu_notifier_invalidate_range_end(mm, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page)
{
	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	__unmap_hugepage_range(vma, start, end, ref_page);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
}

/*
 * This is called when the original mapper is failing to COW a MAP_PRIVATE
 * mapping it owns the reserve page for. The intention is to unmap the page
 * from other VMAs and let the children be SIGKILLed if they are faulting the
 * same region.
 */
static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
				struct page *page, unsigned long address)
{
	struct vm_area_struct *iter_vma;
	struct address_space *mapping;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
	 * from page cache lookup which is in HPAGE_SIZE units.
	 */
	address = address & huge_page_mask(hstate_vma(vma));
	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
		+ (vma->vm_pgoff >> PAGE_SHIFT);
	mapping = (struct address_space *)page_private(page);

	vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/* Do not unmap the current VMA */
		if (iter_vma == vma)
			continue;

		/*
		 * Unmap the page from other VMAs without their own reserves.
		 * They get marked to be SIGKILLed if they fault in these
		 * areas. This is because a future no-page fault on this VMA
		 * could insert a zeroed page instead of the data existing
		 * from the time of fork. This would look like data corruption.
		 */
		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
			unmap_hugepage_range(iter_vma,
				address, address + HPAGE_SIZE,
				page);
	}

	return 1;
}
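
/*
 * Hugetlb COW handler: if the faulting task is the only user of the page,
 * simply make the PTE writable; otherwise allocate a new huge page, copy
 * the contents, and install the new mapping. When a MAP_PRIVATE owner
 * cannot allocate outside its reserves, children mapping the same page may
 * be unmapped (and later SIGKILLed) to preserve the owner's guarantee.
 */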
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte,
			struct page *pagecache_page)
{
	struct hstate *h = hstate_vma(vma);
	struct page *old_page, *new_page;
	int avoidcopy;
	int outside_reserve = 0;

	old_page = pte_page(pte);

retry_avoidcopy:
	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	/*
	 * If the process that created a MAP_PRIVATE mapping is about to
	 * perform a COW due to a shared page count, attempt to satisfy
	 * the allocation without using the existing reserves. The pagecache
	 * page is used to determine if the reserve at this address was
	 * consumed or not. If reserves were used, a partial faulted mapping
	 * at the time of fork() could consume its reserves on COW instead
	 * of the full address range.
	 */
	if (!(vma->vm_flags & VM_SHARED) &&
			is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
			old_page != pagecache_page)
		outside_reserve = 1;

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address, outside_reserve);

	if (IS_ERR(new_page)) {
		page_cache_release(old_page);

		/*
		 * If a process owning a MAP_PRIVATE mapping fails to COW,
		 * it is due to references held by a child and an insufficient
		 * huge page pool. To guarantee the original mapper's
		 * reliability, unmap the page from child processes. The child
		 * may get SIGKILLed if it later faults.
		 */
		if (outside_reserve) {
			BUG_ON(huge_pte_none(pte));
			if (unmap_ref_private(mm, vma, old_page, address)) {
				BUG_ON(page_count(old_page) != 1);
				BUG_ON(huge_pte_none(pte));
				goto retry_avoidcopy;
			}
			WARN_ON_ONCE(1);
		}

		return -PTR_ERR(new_page);
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	__SetPageUptodate(new_page);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
		/* Break COW */
		huge_ptep_clear_flush(vma, address, ptep);
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}

/* Return the pagecache page at a given address within a VMA */
static struct page *hugetlbfs_pagecache_page(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping;
	pgoff_t idx;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	return find_lock_page(mapping, idx);
}
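
/*
 * Handle a fault on a hole in a hugetlb mapping: find the page in the page
 * cache or allocate and zero a new huge page, add it to the page cache for
 * shared mappings, record a reservation ahead of a possible private COW,
 * and install the new PTE under page_table_lock.
 */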
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	struct hstate *h = hstate_vma(vma);
	int ret = VM_FAULT_SIGBUS;
	pgoff_t idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	/*
	 * Currently, we are forced to kill the process in the event the
	 * original mapper has unmapped pages from the child due to a failed
	 * COW. Warn that such a situation has occurred as it may not be obvious.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
		printk(KERN_WARNING
			"PID %d killed due to inadequate hugepage pool\n",
			current->pid);
		return ret;
	}

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> huge_page_shift(h);
		if (idx >= size)
			goto out;
		page = alloc_huge_page(vma, address, 0);
		if (IS_ERR(page)) {
			ret = -PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, address, huge_page_size(h));
		__SetPageUptodate(page);

		if (vma->vm_flags & VM_SHARED) {
			int err;
			struct inode *inode = mapping->host;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}

			spin_lock(&inode->i_lock);
			inode->i_blocks += blocks_per_huge_page(h);
			spin_unlock(&inode->i_lock);
		} else
			lock_page(page);
	}

	/*
	 * If we are going to COW a private mapping later, we examine the
	 * pending reservations for this page now. This will ensure that
	 * any allocations necessary to record that reservation occur outside
	 * the spinlock.
	 */
	if (write_access && !(vma->vm_flags & VM_SHARED))
		if (vma_needs_reservation(h, vma, address) < 0) {
			ret = VM_FAULT_OOM;
			goto backout_unlocked;
		}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> huge_page_shift(h);
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!huge_pte_none(huge_ptep_get(ptep)))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
backout_unlocked:
	unlock_page(page);
	put_page(page);
	goto out;
}
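
/*
 * Top-level hugetlb fault handler: allocate the huge PTE if necessary,
 * dispatch missing pages to hugetlb_no_page(), and handle write faults on
 * present read-only entries via hugetlb_cow(). The instantiation mutex
 * serializes faults so that racing CPUs do not each allocate a huge page
 * for the same file offset.
 */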
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	struct page *pagecache_page = NULL;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
	struct hstate *h = hstate_vma(vma);

	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = huge_ptep_get(ptep);
	if (huge_pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		goto out_mutex;
	}

	ret = 0;

	/*
	 * If we are going to COW the mapping later, we examine the pending
	 * reservations for this page now. This will ensure that any
	 * allocations necessary to record that reservation occur outside the
	 * spinlock. For private mappings, we also lookup the pagecache
	 * page now as it is used to determine if a reservation has been
	 * consumed.
	 */
	if (write_access && !pte_write(entry)) {
		if (vma_needs_reservation(h, vma, address) < 0) {
			ret = VM_FAULT_OOM;
			goto out_mutex;
		}

		if (!(vma->vm_flags & VM_SHARED))
			pagecache_page = hugetlbfs_pagecache_page(h,
								vma, address);
	}

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
		goto out_page_table_lock;

	if (write_access) {
		if (!pte_write(entry)) {
			ret = hugetlb_cow(mm, vma, address, ptep, entry,
							pagecache_page);
			goto out_page_table_lock;
		}
		entry = pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	if (huge_ptep_set_access_flags(vma, address, ptep, entry, write_access))
		update_mmu_cache(vma, address, entry);

out_page_table_lock:
	spin_unlock(&mm->page_table_lock);

	if (pagecache_page) {
		unlock_page(pagecache_page);
		put_page(pagecache_page);
	}

out_mutex:
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

/* Can be overridden by architectures */
__attribute__((weak)) struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
	       pud_t *pud, int write)
{
	BUG();
	return NULL;
}

static int huge_zeropage_ok(pte_t *ptep, int write, int shared)
{
	if (!ptep || write || shared)
		return 0;
	else
		return huge_pte_none(huge_ptep_get(ptep));
}
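
/*
 * get_user_pages() worker for hugetlb VMAs: walk the requested range,
 * faulting in huge pages as needed, fill the pages[]/vmas[] arrays one
 * PAGE_SIZE subpage at a time (taking a reference on each returned page),
 * and return the updated index i.
 */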
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i,
			int write)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;
	struct hstate *h = hstate_vma(vma);
	int zeropage_ok = 0;
	int shared = vma->vm_flags & VM_SHARED;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage. We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
		if (huge_zeropage_ok(pte, write, shared))
			zeropage_ok = 1;

		if (!pte ||
		    (huge_pte_none(huge_ptep_get(pte)) && !zeropage_ok) ||
		    (write && !pte_write(huge_ptep_get(pte)))) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, write);
			spin_lock(&mm->page_table_lock);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
		page = pte_page(huge_ptep_get(pte));
same_page:
		if (pages) {
			if (zeropage_ok)
				pages[i] = ZERO_PAGE(0);
			else
				pages[i] = mem_map_offset(page, pfn_offset);
			get_page(pages[i]);
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < pages_per_huge_page(h)) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}
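
/*
 * mprotect() support for hugetlb VMAs: walk the range, clear each present
 * huge PTE and reinstall it with the new protection bits, then flush the
 * TLB for the range.
 */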
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;
	struct hstate *h = hstate_vma(vma);

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += huge_page_size(h)) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!huge_pte_none(huge_ptep_get(ptep))) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}
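
/*
 * Reserve huge pages for the file region [from, to), typically when the
 * region is mapped. Shared mappings (and shm, where vma is NULL) track
 * reservations per file in the inode's region list; private mappings
 * reserve the whole range in a per-VMA resv_map. The reservation is
 * charged against the filesystem quota and the global pool via
 * hugetlb_acct_memory().
 */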
int hugetlb_reserve_pages(struct inode *inode,
					long from, long to,
					struct vm_area_struct *vma)
{
	long ret, chg;
	struct hstate *h = hstate_inode(inode);

	if (vma && vma->vm_flags & VM_NORESERVE)
		return 0;

	/*
	 * Shared mappings base their reservation on the number of pages that
	 * are already allocated on behalf of the file. Private mappings need
	 * to reserve the full area even if read-only as mprotect() may be
	 * called to make the mapping read-write. Assume !vma is a shm mapping
	 */
	if (!vma || vma->vm_flags & VM_SHARED)
		chg = region_chg(&inode->i_mapping->private_list, from, to);
	else {
		struct resv_map *resv_map = resv_map_alloc();
		if (!resv_map)
			return -ENOMEM;

		chg = to - from;

		set_vma_resv_map(vma, resv_map);
		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
	}

	if (chg < 0)
		return chg;

	if (hugetlb_get_quota(inode->i_mapping, chg))
		return -ENOSPC;
	ret = hugetlb_acct_memory(h, chg);
	if (ret < 0) {
		hugetlb_put_quota(inode->i_mapping, chg);
		return ret;
	}
	if (!vma || vma->vm_flags & VM_SHARED)
		region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}
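
/*
 * Called when a hugetlbfs file region is truncated or released: drop the
 * region reservations beyond 'offset', adjust the inode block count, and
 * return quota and global pool reservations for pages that were reserved
 * but are not among the 'freed' pages already released.
 */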
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	struct hstate *h = hstate_inode(inode);
	long chg = region_truncate(&inode->i_mapping->private_list, offset);

	spin_lock(&inode->i_lock);
	inode->i_blocks -= blocks_per_huge_page(h);
	spin_unlock(&inode->i_lock);

	hugetlb_put_quota(inode->i_mapping, (chg - freed));
	hugetlb_acct_memory(h, -(chg - freed));
}