hugetlb.c

  1. /*
  2. * Generic hugetlb support.
  3. * (C) William Irwin, April 2004
  4. */
  5. #include <linux/gfp.h>
  6. #include <linux/list.h>
  7. #include <linux/init.h>
  8. #include <linux/module.h>
  9. #include <linux/mm.h>
  10. #include <linux/sysctl.h>
  11. #include <linux/highmem.h>
  12. #include <linux/nodemask.h>
  13. #include <linux/pagemap.h>
  14. #include <linux/mempolicy.h>
  15. #include <linux/cpuset.h>
  16. #include <linux/mutex.h>
  17. #include <asm/page.h>
  18. #include <asm/pgtable.h>
  19. #include <linux/hugetlb.h>
  20. #include "internal.h"
  21. const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
  22. static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
  23. static unsigned long surplus_huge_pages;
  24. static unsigned long nr_overcommit_huge_pages;
  25. unsigned long max_huge_pages;
  26. unsigned long sysctl_overcommit_huge_pages;
  27. static struct list_head hugepage_freelists[MAX_NUMNODES];
  28. static unsigned int nr_huge_pages_node[MAX_NUMNODES];
  29. static unsigned int free_huge_pages_node[MAX_NUMNODES];
  30. static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
  31. static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
  32. unsigned long hugepages_treat_as_movable;
  33. static int hugetlb_next_nid;
  34. /*
  35. * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
  36. */
  37. static DEFINE_SPINLOCK(hugetlb_lock);
  38. /*
  39. * These helpers are used to track how many pages are reserved for
  40. * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
41. * is guaranteed to have its future faults succeed.
  42. *
  43. * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
  44. * the reserve counters are updated with the hugetlb_lock held. It is safe
  45. * to reset the VMA at fork() time as it is not in use yet and there is no
46. * chance of the global counters getting corrupted as a result of this reset.
  47. */
  48. static unsigned long vma_resv_huge_pages(struct vm_area_struct *vma)
  49. {
  50. VM_BUG_ON(!is_vm_hugetlb_page(vma));
  51. if (!(vma->vm_flags & VM_SHARED))
  52. return (unsigned long)vma->vm_private_data;
  53. return 0;
  54. }
  55. static void set_vma_resv_huge_pages(struct vm_area_struct *vma,
  56. unsigned long reserve)
  57. {
  58. VM_BUG_ON(!is_vm_hugetlb_page(vma));
  59. VM_BUG_ON(vma->vm_flags & VM_SHARED);
  60. vma->vm_private_data = (void *)reserve;
  61. }
  62. /* Decrement the reserved pages in the hugepage pool by one */
  63. static void decrement_hugepage_resv_vma(struct vm_area_struct *vma)
  64. {
  65. if (vma->vm_flags & VM_SHARED) {
  66. /* Shared mappings always use reserves */
  67. resv_huge_pages--;
  68. } else {
  69. /*
  70. * Only the process that called mmap() has reserves for
  71. * private mappings.
  72. */
73. if (vma_resv_huge_pages(vma)) {
74. unsigned long reserve = (unsigned long)vma->vm_private_data - 1;
75. resv_huge_pages--;
76. vma->vm_private_data = (void *)reserve;
  77. }
  78. }
  79. }
  80. void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
  81. {
  82. VM_BUG_ON(!is_vm_hugetlb_page(vma));
  83. if (!(vma->vm_flags & VM_SHARED))
  84. vma->vm_private_data = (void *)0;
  85. }
  86. /* Returns true if the VMA has associated reserve pages */
  87. static int vma_has_private_reserves(struct vm_area_struct *vma)
  88. {
  89. if (vma->vm_flags & VM_SHARED)
  90. return 0;
  91. if (!vma_resv_huge_pages(vma))
  92. return 0;
  93. return 1;
  94. }
  95. static void clear_huge_page(struct page *page, unsigned long addr)
  96. {
  97. int i;
  98. might_sleep();
  99. for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
  100. cond_resched();
  101. clear_user_highpage(page + i, addr + i * PAGE_SIZE);
  102. }
  103. }
  104. static void copy_huge_page(struct page *dst, struct page *src,
  105. unsigned long addr, struct vm_area_struct *vma)
  106. {
  107. int i;
  108. might_sleep();
  109. for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
  110. cond_resched();
  111. copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
  112. }
  113. }
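/* Return a huge page to its node's free list; called with hugetlb_lock held. */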
  114. static void enqueue_huge_page(struct page *page)
  115. {
  116. int nid = page_to_nid(page);
  117. list_add(&page->lru, &hugepage_freelists[nid]);
  118. free_huge_pages++;
  119. free_huge_pages_node[nid]++;
  120. }
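/*
 * Take a free huge page from the first node that has one, or return NULL
 * if the free lists are empty. Called with hugetlb_lock held.
 */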
  121. static struct page *dequeue_huge_page(void)
  122. {
  123. int nid;
  124. struct page *page = NULL;
  125. for (nid = 0; nid < MAX_NUMNODES; ++nid) {
  126. if (!list_empty(&hugepage_freelists[nid])) {
  127. page = list_entry(hugepage_freelists[nid].next,
  128. struct page, lru);
  129. list_del(&page->lru);
  130. free_huge_pages--;
  131. free_huge_pages_node[nid]--;
  132. break;
  133. }
  134. }
  135. return page;
  136. }
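/*
 * Pick a free huge page for a fault in 'vma' at 'address', honouring the
 * VMA's memory policy and cpuset constraints, and charge the appropriate
 * reservation. Returns NULL if no suitable page is free. Called with
 * hugetlb_lock held.
 */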
  137. static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
  138. unsigned long address)
  139. {
  140. int nid;
  141. struct page *page = NULL;
  142. struct mempolicy *mpol;
  143. nodemask_t *nodemask;
  144. struct zonelist *zonelist = huge_zonelist(vma, address,
  145. htlb_alloc_mask, &mpol, &nodemask);
  146. struct zone *zone;
  147. struct zoneref *z;
  148. /*
149. * A child process with MAP_PRIVATE mappings created by its parent
150. * has no page reserves. This check ensures that reservations are
151. * not "stolen". The child may still get SIGKILLed.
  152. */
  153. if (!vma_has_private_reserves(vma) &&
  154. free_huge_pages - resv_huge_pages == 0)
  155. return NULL;
  156. for_each_zone_zonelist_nodemask(zone, z, zonelist,
  157. MAX_NR_ZONES - 1, nodemask) {
  158. nid = zone_to_nid(zone);
  159. if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
  160. !list_empty(&hugepage_freelists[nid])) {
  161. page = list_entry(hugepage_freelists[nid].next,
  162. struct page, lru);
  163. list_del(&page->lru);
  164. free_huge_pages--;
  165. free_huge_pages_node[nid]--;
  166. decrement_hugepage_resv_vma(vma);
  167. break;
  168. }
  169. }
  170. mpol_cond_put(mpol);
  171. return page;
  172. }
  173. static void update_and_free_page(struct page *page)
  174. {
  175. int i;
  176. nr_huge_pages--;
  177. nr_huge_pages_node[page_to_nid(page)]--;
  178. for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
  179. page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
  180. 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
181. 1 << PG_private | 1 << PG_writeback);
  182. }
  183. set_compound_page_dtor(page, NULL);
  184. set_page_refcounted(page);
  185. arch_release_hugepage(page);
  186. __free_pages(page, HUGETLB_PAGE_ORDER);
  187. }
  188. static void free_huge_page(struct page *page)
  189. {
  190. int nid = page_to_nid(page);
  191. struct address_space *mapping;
  192. mapping = (struct address_space *) page_private(page);
  193. set_page_private(page, 0);
  194. BUG_ON(page_count(page));
  195. INIT_LIST_HEAD(&page->lru);
  196. spin_lock(&hugetlb_lock);
  197. if (surplus_huge_pages_node[nid]) {
  198. update_and_free_page(page);
  199. surplus_huge_pages--;
  200. surplus_huge_pages_node[nid]--;
  201. } else {
  202. enqueue_huge_page(page);
  203. }
  204. spin_unlock(&hugetlb_lock);
  205. if (mapping)
  206. hugetlb_put_quota(mapping, 1);
  207. }
  208. /*
  209. * Increment or decrement surplus_huge_pages. Keep node-specific counters
  210. * balanced by operating on them in a round-robin fashion.
  211. * Returns 1 if an adjustment was made.
  212. */
  213. static int adjust_pool_surplus(int delta)
  214. {
  215. static int prev_nid;
  216. int nid = prev_nid;
  217. int ret = 0;
  218. VM_BUG_ON(delta != -1 && delta != 1);
  219. do {
  220. nid = next_node(nid, node_online_map);
  221. if (nid == MAX_NUMNODES)
  222. nid = first_node(node_online_map);
  223. /* To shrink on this node, there must be a surplus page */
  224. if (delta < 0 && !surplus_huge_pages_node[nid])
  225. continue;
  226. /* Surplus cannot exceed the total number of pages */
  227. if (delta > 0 && surplus_huge_pages_node[nid] >=
  228. nr_huge_pages_node[nid])
  229. continue;
  230. surplus_huge_pages += delta;
  231. surplus_huge_pages_node[nid] += delta;
  232. ret = 1;
  233. break;
  234. } while (nid != prev_nid);
  235. prev_nid = nid;
  236. return ret;
  237. }
  238. static struct page *alloc_fresh_huge_page_node(int nid)
  239. {
  240. struct page *page;
  241. page = alloc_pages_node(nid,
  242. htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
  243. __GFP_REPEAT|__GFP_NOWARN,
  244. HUGETLB_PAGE_ORDER);
  245. if (page) {
  246. if (arch_prepare_hugepage(page)) {
  247. __free_pages(page, HUGETLB_PAGE_ORDER);
  248. return NULL;
  249. }
  250. set_compound_page_dtor(page, free_huge_page);
  251. spin_lock(&hugetlb_lock);
  252. nr_huge_pages++;
  253. nr_huge_pages_node[nid]++;
  254. spin_unlock(&hugetlb_lock);
  255. put_page(page); /* free it into the hugepage allocator */
  256. }
  257. return page;
  258. }
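/*
 * Allocate one fresh huge page from the buddy allocator, trying each online
 * node in turn starting at hugetlb_next_nid. Returns 1 on success, 0 if the
 * allocation failed on every node.
 */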
  259. static int alloc_fresh_huge_page(void)
  260. {
  261. struct page *page;
  262. int start_nid;
  263. int next_nid;
  264. int ret = 0;
  265. start_nid = hugetlb_next_nid;
  266. do {
  267. page = alloc_fresh_huge_page_node(hugetlb_next_nid);
  268. if (page)
  269. ret = 1;
  270. /*
  271. * Use a helper variable to find the next node and then
  272. * copy it back to hugetlb_next_nid afterwards:
  273. * otherwise there's a window in which a racer might
  274. * pass invalid nid MAX_NUMNODES to alloc_pages_node.
  275. * But we don't need to use a spin_lock here: it really
  276. * doesn't matter if occasionally a racer chooses the
  277. * same nid as we do. Move nid forward in the mask even
  278. * if we just successfully allocated a hugepage so that
  279. * the next caller gets hugepages on the next node.
  280. */
  281. next_nid = next_node(hugetlb_next_nid, node_online_map);
  282. if (next_nid == MAX_NUMNODES)
  283. next_nid = first_node(node_online_map);
  284. hugetlb_next_nid = next_nid;
  285. } while (!page && hugetlb_next_nid != start_nid);
  286. if (ret)
  287. count_vm_event(HTLB_BUDDY_PGALLOC);
  288. else
  289. count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
  290. return ret;
  291. }
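/*
 * Allocate a surplus huge page directly from the buddy allocator, provided
 * the surplus pool has not yet reached nr_overcommit_huge_pages. The global
 * counters are bumped before the allocation and rolled back on failure.
 */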
  292. static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
  293. unsigned long address)
  294. {
  295. struct page *page;
  296. unsigned int nid;
  297. /*
  298. * Assume we will successfully allocate the surplus page to
  299. * prevent racing processes from causing the surplus to exceed
  300. * overcommit
  301. *
  302. * This however introduces a different race, where a process B
  303. * tries to grow the static hugepage pool while alloc_pages() is
  304. * called by process A. B will only examine the per-node
  305. * counters in determining if surplus huge pages can be
  306. * converted to normal huge pages in adjust_pool_surplus(). A
  307. * won't be able to increment the per-node counter, until the
  308. * lock is dropped by B, but B doesn't drop hugetlb_lock until
  309. * no more huge pages can be converted from surplus to normal
  310. * state (and doesn't try to convert again). Thus, we have a
  311. * case where a surplus huge page exists, the pool is grown, and
  312. * the surplus huge page still exists after, even though it
  313. * should just have been converted to a normal huge page. This
  314. * does not leak memory, though, as the hugepage will be freed
  315. * once it is out of use. It also does not allow the counters to
  316. * go out of whack in adjust_pool_surplus() as we don't modify
  317. * the node values until we've gotten the hugepage and only the
  318. * per-node value is checked there.
  319. */
  320. spin_lock(&hugetlb_lock);
  321. if (surplus_huge_pages >= nr_overcommit_huge_pages) {
  322. spin_unlock(&hugetlb_lock);
  323. return NULL;
  324. } else {
  325. nr_huge_pages++;
  326. surplus_huge_pages++;
  327. }
  328. spin_unlock(&hugetlb_lock);
  329. page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
  330. __GFP_REPEAT|__GFP_NOWARN,
  331. HUGETLB_PAGE_ORDER);
  332. spin_lock(&hugetlb_lock);
  333. if (page) {
  334. /*
  335. * This page is now managed by the hugetlb allocator and has
  336. * no users -- drop the buddy allocator's reference.
  337. */
  338. put_page_testzero(page);
  339. VM_BUG_ON(page_count(page));
  340. nid = page_to_nid(page);
  341. set_compound_page_dtor(page, free_huge_page);
  342. /*
  343. * We incremented the global counters already
  344. */
  345. nr_huge_pages_node[nid]++;
  346. surplus_huge_pages_node[nid]++;
  347. __count_vm_event(HTLB_BUDDY_PGALLOC);
  348. } else {
  349. nr_huge_pages--;
  350. surplus_huge_pages--;
  351. __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
  352. }
  353. spin_unlock(&hugetlb_lock);
  354. return page;
  355. }
  356. /*
357. * Increase the hugetlb pool such that it can accommodate a reservation
  358. * of size 'delta'.
  359. */
  360. static int gather_surplus_pages(int delta)
  361. {
  362. struct list_head surplus_list;
  363. struct page *page, *tmp;
  364. int ret, i;
  365. int needed, allocated;
  366. needed = (resv_huge_pages + delta) - free_huge_pages;
  367. if (needed <= 0) {
  368. resv_huge_pages += delta;
  369. return 0;
  370. }
  371. allocated = 0;
  372. INIT_LIST_HEAD(&surplus_list);
  373. ret = -ENOMEM;
  374. retry:
  375. spin_unlock(&hugetlb_lock);
  376. for (i = 0; i < needed; i++) {
  377. page = alloc_buddy_huge_page(NULL, 0);
  378. if (!page) {
  379. /*
  380. * We were not able to allocate enough pages to
  381. * satisfy the entire reservation so we free what
  382. * we've allocated so far.
  383. */
  384. spin_lock(&hugetlb_lock);
  385. needed = 0;
  386. goto free;
  387. }
  388. list_add(&page->lru, &surplus_list);
  389. }
  390. allocated += needed;
  391. /*
  392. * After retaking hugetlb_lock, we need to recalculate 'needed'
  393. * because either resv_huge_pages or free_huge_pages may have changed.
  394. */
  395. spin_lock(&hugetlb_lock);
  396. needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
  397. if (needed > 0)
  398. goto retry;
  399. /*
  400. * The surplus_list now contains _at_least_ the number of extra pages
401. * needed to accommodate the reservation. Add the appropriate number
  402. * of pages to the hugetlb pool and free the extras back to the buddy
  403. * allocator. Commit the entire reservation here to prevent another
  404. * process from stealing the pages as they are added to the pool but
  405. * before they are reserved.
  406. */
  407. needed += allocated;
  408. resv_huge_pages += delta;
  409. ret = 0;
  410. free:
  411. /* Free the needed pages to the hugetlb pool */
  412. list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
  413. if ((--needed) < 0)
  414. break;
  415. list_del(&page->lru);
  416. enqueue_huge_page(page);
  417. }
  418. /* Free unnecessary surplus pages to the buddy allocator */
  419. if (!list_empty(&surplus_list)) {
  420. spin_unlock(&hugetlb_lock);
  421. list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
  422. list_del(&page->lru);
  423. /*
  424. * The page has a reference count of zero already, so
  425. * call free_huge_page directly instead of using
  426. * put_page. This must be done with hugetlb_lock
  427. * unlocked which is safe because free_huge_page takes
  428. * hugetlb_lock before deciding how to free the page.
  429. */
  430. free_huge_page(page);
  431. }
  432. spin_lock(&hugetlb_lock);
  433. }
  434. return ret;
  435. }
  436. /*
  437. * When releasing a hugetlb pool reservation, any surplus pages that were
  438. * allocated to satisfy the reservation must be explicitly freed if they were
  439. * never used.
  440. */
  441. static void return_unused_surplus_pages(unsigned long unused_resv_pages)
  442. {
  443. static int nid = -1;
  444. struct page *page;
  445. unsigned long nr_pages;
  446. /*
  447. * We want to release as many surplus pages as possible, spread
  448. * evenly across all nodes. Iterate across all nodes until we
  449. * can no longer free unreserved surplus pages. This occurs when
  450. * the nodes with surplus pages have no free pages.
  451. */
  452. unsigned long remaining_iterations = num_online_nodes();
  453. /* Uncommit the reservation */
  454. resv_huge_pages -= unused_resv_pages;
  455. nr_pages = min(unused_resv_pages, surplus_huge_pages);
  456. while (remaining_iterations-- && nr_pages) {
  457. nid = next_node(nid, node_online_map);
  458. if (nid == MAX_NUMNODES)
  459. nid = first_node(node_online_map);
  460. if (!surplus_huge_pages_node[nid])
  461. continue;
  462. if (!list_empty(&hugepage_freelists[nid])) {
  463. page = list_entry(hugepage_freelists[nid].next,
  464. struct page, lru);
  465. list_del(&page->lru);
  466. update_and_free_page(page);
  467. free_huge_pages--;
  468. free_huge_pages_node[nid]--;
  469. surplus_huge_pages--;
  470. surplus_huge_pages_node[nid]--;
  471. nr_pages--;
  472. remaining_iterations = num_online_nodes();
  473. }
  474. }
  475. }
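/*
 * Allocate a huge page for a fault in 'vma' at 'addr': charge the hugetlbfs
 * quota if the mapping holds no reserves, then try the pre-allocated pool
 * and fall back to a surplus page from the buddy allocator. Returns an
 * ERR_PTR() value on failure.
 */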
  476. static struct page *alloc_huge_page(struct vm_area_struct *vma,
  477. unsigned long addr)
  478. {
  479. struct page *page;
  480. struct address_space *mapping = vma->vm_file->f_mapping;
  481. struct inode *inode = mapping->host;
  482. unsigned int chg = 0;
  483. /*
  484. * Processes that did not create the mapping will have no reserves and
485. * will not have been charged quota. Check that quota can be charged
486. * before satisfying the allocation.
  487. */
  488. if (!vma_has_private_reserves(vma)) {
  489. chg = 1;
  490. if (hugetlb_get_quota(inode->i_mapping, chg))
  491. return ERR_PTR(-ENOSPC);
  492. }
  493. spin_lock(&hugetlb_lock);
  494. page = dequeue_huge_page_vma(vma, addr);
  495. spin_unlock(&hugetlb_lock);
  496. if (!page) {
  497. page = alloc_buddy_huge_page(vma, addr);
  498. if (!page) {
  499. hugetlb_put_quota(inode->i_mapping, chg);
  500. return ERR_PTR(-VM_FAULT_OOM);
  501. }
  502. }
  503. set_page_refcounted(page);
  504. set_page_private(page, (unsigned long) mapping);
  505. return page;
  506. }
  507. static int __init hugetlb_init(void)
  508. {
  509. unsigned long i;
  510. if (HPAGE_SHIFT == 0)
  511. return 0;
  512. for (i = 0; i < MAX_NUMNODES; ++i)
  513. INIT_LIST_HEAD(&hugepage_freelists[i]);
  514. hugetlb_next_nid = first_node(node_online_map);
  515. for (i = 0; i < max_huge_pages; ++i) {
  516. if (!alloc_fresh_huge_page())
  517. break;
  518. }
  519. max_huge_pages = free_huge_pages = nr_huge_pages = i;
520. printk(KERN_INFO "Total HugeTLB memory allocated, %lu\n", free_huge_pages);
  521. return 0;
  522. }
  523. module_init(hugetlb_init);
  524. static int __init hugetlb_setup(char *s)
  525. {
  526. if (sscanf(s, "%lu", &max_huge_pages) <= 0)
  527. max_huge_pages = 0;
  528. return 1;
  529. }
  530. __setup("hugepages=", hugetlb_setup);
  531. static unsigned int cpuset_mems_nr(unsigned int *array)
  532. {
  533. int node;
  534. unsigned int nr = 0;
  535. for_each_node_mask(node, cpuset_current_mems_allowed)
  536. nr += array[node];
  537. return nr;
  538. }
  539. #ifdef CONFIG_SYSCTL
  540. #ifdef CONFIG_HIGHMEM
  541. static void try_to_free_low(unsigned long count)
  542. {
  543. int i;
  544. for (i = 0; i < MAX_NUMNODES; ++i) {
  545. struct page *page, *next;
  546. list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
  547. if (count >= nr_huge_pages)
  548. return;
  549. if (PageHighMem(page))
  550. continue;
  551. list_del(&page->lru);
  552. update_and_free_page(page);
  553. free_huge_pages--;
  554. free_huge_pages_node[page_to_nid(page)]--;
  555. }
  556. }
  557. }
  558. #else
  559. static inline void try_to_free_low(unsigned long count)
  560. {
  561. }
  562. #endif
  563. #define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)
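/*
 * Grow or shrink the persistent huge page pool to 'count' pages and return
 * the resulting number of persistent (non-surplus) huge pages.
 */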
  564. static unsigned long set_max_huge_pages(unsigned long count)
  565. {
  566. unsigned long min_count, ret;
  567. /*
  568. * Increase the pool size
  569. * First take pages out of surplus state. Then make up the
  570. * remaining difference by allocating fresh huge pages.
  571. *
  572. * We might race with alloc_buddy_huge_page() here and be unable
  573. * to convert a surplus huge page to a normal huge page. That is
  574. * not critical, though, it just means the overall size of the
  575. * pool might be one hugepage larger than it needs to be, but
  576. * within all the constraints specified by the sysctls.
  577. */
  578. spin_lock(&hugetlb_lock);
  579. while (surplus_huge_pages && count > persistent_huge_pages) {
  580. if (!adjust_pool_surplus(-1))
  581. break;
  582. }
  583. while (count > persistent_huge_pages) {
  584. /*
  585. * If this allocation races such that we no longer need the
  586. * page, free_huge_page will handle it by freeing the page
  587. * and reducing the surplus.
  588. */
  589. spin_unlock(&hugetlb_lock);
  590. ret = alloc_fresh_huge_page();
  591. spin_lock(&hugetlb_lock);
  592. if (!ret)
  593. goto out;
  594. }
  595. /*
  596. * Decrease the pool size
  597. * First return free pages to the buddy allocator (being careful
  598. * to keep enough around to satisfy reservations). Then place
  599. * pages into surplus state as needed so the pool will shrink
  600. * to the desired size as pages become free.
  601. *
  602. * By placing pages into the surplus state independent of the
  603. * overcommit value, we are allowing the surplus pool size to
  604. * exceed overcommit. There are few sane options here. Since
  605. * alloc_buddy_huge_page() is checking the global counter,
  606. * though, we'll note that we're not allowed to exceed surplus
  607. * and won't grow the pool anywhere else. Not until one of the
  608. * sysctls are changed, or the surplus pages go out of use.
  609. */
  610. min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
  611. min_count = max(count, min_count);
  612. try_to_free_low(min_count);
  613. while (min_count < persistent_huge_pages) {
  614. struct page *page = dequeue_huge_page();
  615. if (!page)
  616. break;
  617. update_and_free_page(page);
  618. }
  619. while (count < persistent_huge_pages) {
  620. if (!adjust_pool_surplus(1))
  621. break;
  622. }
  623. out:
  624. ret = persistent_huge_pages;
  625. spin_unlock(&hugetlb_lock);
  626. return ret;
  627. }
  628. int hugetlb_sysctl_handler(struct ctl_table *table, int write,
  629. struct file *file, void __user *buffer,
  630. size_t *length, loff_t *ppos)
  631. {
  632. proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
  633. max_huge_pages = set_max_huge_pages(max_huge_pages);
  634. return 0;
  635. }
  636. int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
  637. struct file *file, void __user *buffer,
  638. size_t *length, loff_t *ppos)
  639. {
  640. proc_dointvec(table, write, file, buffer, length, ppos);
  641. if (hugepages_treat_as_movable)
  642. htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
  643. else
  644. htlb_alloc_mask = GFP_HIGHUSER;
  645. return 0;
  646. }
  647. int hugetlb_overcommit_handler(struct ctl_table *table, int write,
  648. struct file *file, void __user *buffer,
  649. size_t *length, loff_t *ppos)
  650. {
  651. proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
  652. spin_lock(&hugetlb_lock);
  653. nr_overcommit_huge_pages = sysctl_overcommit_huge_pages;
  654. spin_unlock(&hugetlb_lock);
  655. return 0;
  656. }
  657. #endif /* CONFIG_SYSCTL */
  658. int hugetlb_report_meminfo(char *buf)
  659. {
  660. return sprintf(buf,
  661. "HugePages_Total: %5lu\n"
  662. "HugePages_Free: %5lu\n"
  663. "HugePages_Rsvd: %5lu\n"
  664. "HugePages_Surp: %5lu\n"
  665. "Hugepagesize: %5lu kB\n",
  666. nr_huge_pages,
  667. free_huge_pages,
  668. resv_huge_pages,
  669. surplus_huge_pages,
  670. HPAGE_SIZE/1024);
  671. }
  672. int hugetlb_report_node_meminfo(int nid, char *buf)
  673. {
  674. return sprintf(buf,
  675. "Node %d HugePages_Total: %5u\n"
  676. "Node %d HugePages_Free: %5u\n"
  677. "Node %d HugePages_Surp: %5u\n",
  678. nid, nr_huge_pages_node[nid],
  679. nid, free_huge_pages_node[nid],
  680. nid, surplus_huge_pages_node[nid]);
  681. }
682. /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
  683. unsigned long hugetlb_total_pages(void)
  684. {
  685. return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
  686. }
  687. static int hugetlb_acct_memory(long delta)
  688. {
  689. int ret = -ENOMEM;
  690. spin_lock(&hugetlb_lock);
  691. /*
  692. * When cpuset is configured, it breaks the strict hugetlb page
  693. * reservation as the accounting is done on a global variable. Such
  694. * reservation is completely rubbish in the presence of cpuset because
  695. * the reservation is not checked against page availability for the
696. * current cpuset. An application can still be OOM-killed by the kernel
697. * for lack of free hugetlb pages in the cpuset that the task is in.
698. * Attempting to enforce strict accounting with cpuset is almost
699. * impossible (or too ugly) because cpuset is so fluid that a
700. * task or memory node can be dynamically moved between cpusets.
701. *
702. * The change of semantics for shared hugetlb mappings with cpuset is
703. * undesirable. However, in order to preserve some of the semantics,
704. * we fall back to checking against the current free page availability
705. * as a best attempt, hoping to minimize the impact of the semantic
706. * changes that cpuset introduces.
  707. */
  708. if (delta > 0) {
  709. if (gather_surplus_pages(delta) < 0)
  710. goto out;
  711. if (delta > cpuset_mems_nr(free_huge_pages_node)) {
  712. return_unused_surplus_pages(delta);
  713. goto out;
  714. }
  715. }
  716. ret = 0;
  717. if (delta < 0)
  718. return_unused_surplus_pages((unsigned long) -delta);
  719. out:
  720. spin_unlock(&hugetlb_lock);
  721. return ret;
  722. }
  723. static void hugetlb_vm_op_close(struct vm_area_struct *vma)
  724. {
  725. unsigned long reserve = vma_resv_huge_pages(vma);
  726. if (reserve)
  727. hugetlb_acct_memory(-reserve);
  728. }
  729. /*
  730. * We cannot handle pagefaults against hugetlb pages at all. They cause
  731. * handle_mm_fault() to try to instantiate regular-sized pages in the
732. * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
  733. * this far.
  734. */
  735. static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  736. {
  737. BUG();
  738. return 0;
  739. }
  740. struct vm_operations_struct hugetlb_vm_ops = {
  741. .fault = hugetlb_vm_op_fault,
  742. .close = hugetlb_vm_op_close,
  743. };
  744. static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
  745. int writable)
  746. {
  747. pte_t entry;
  748. if (writable) {
  749. entry =
  750. pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
  751. } else {
  752. entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
  753. }
  754. entry = pte_mkyoung(entry);
  755. entry = pte_mkhuge(entry);
  756. return entry;
  757. }
  758. static void set_huge_ptep_writable(struct vm_area_struct *vma,
  759. unsigned long address, pte_t *ptep)
  760. {
  761. pte_t entry;
  762. entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
  763. if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
  764. update_mmu_cache(vma, address, entry);
  765. }
  766. }
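/*
 * Copy the huge page table entries from the parent to the child at fork(),
 * sharing the underlying pages. For private mappings both copies are
 * write-protected so that a later write triggers copy-on-write.
 */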
  767. int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
  768. struct vm_area_struct *vma)
  769. {
  770. pte_t *src_pte, *dst_pte, entry;
  771. struct page *ptepage;
  772. unsigned long addr;
  773. int cow;
  774. cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
  775. for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
  776. src_pte = huge_pte_offset(src, addr);
  777. if (!src_pte)
  778. continue;
  779. dst_pte = huge_pte_alloc(dst, addr);
  780. if (!dst_pte)
  781. goto nomem;
  782. /* If the pagetables are shared don't copy or take references */
  783. if (dst_pte == src_pte)
  784. continue;
  785. spin_lock(&dst->page_table_lock);
  786. spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
  787. if (!huge_pte_none(huge_ptep_get(src_pte))) {
  788. if (cow)
  789. huge_ptep_set_wrprotect(src, addr, src_pte);
  790. entry = huge_ptep_get(src_pte);
  791. ptepage = pte_page(entry);
  792. get_page(ptepage);
  793. set_huge_pte_at(dst, addr, dst_pte, entry);
  794. }
  795. spin_unlock(&src->page_table_lock);
  796. spin_unlock(&dst->page_table_lock);
  797. }
  798. return 0;
  799. nomem:
  800. return -ENOMEM;
  801. }
  802. void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
  803. unsigned long end)
  804. {
  805. struct mm_struct *mm = vma->vm_mm;
  806. unsigned long address;
  807. pte_t *ptep;
  808. pte_t pte;
  809. struct page *page;
  810. struct page *tmp;
  811. /*
  812. * A page gathering list, protected by per file i_mmap_lock. The
  813. * lock is used to avoid list corruption from multiple unmapping
  814. * of the same page since we are using page->lru.
  815. */
  816. LIST_HEAD(page_list);
  817. WARN_ON(!is_vm_hugetlb_page(vma));
  818. BUG_ON(start & ~HPAGE_MASK);
  819. BUG_ON(end & ~HPAGE_MASK);
  820. spin_lock(&mm->page_table_lock);
  821. for (address = start; address < end; address += HPAGE_SIZE) {
  822. ptep = huge_pte_offset(mm, address);
  823. if (!ptep)
  824. continue;
  825. if (huge_pmd_unshare(mm, &address, ptep))
  826. continue;
  827. pte = huge_ptep_get_and_clear(mm, address, ptep);
  828. if (huge_pte_none(pte))
  829. continue;
  830. page = pte_page(pte);
  831. if (pte_dirty(pte))
  832. set_page_dirty(page);
  833. list_add(&page->lru, &page_list);
  834. }
  835. spin_unlock(&mm->page_table_lock);
  836. flush_tlb_range(vma, start, end);
  837. list_for_each_entry_safe(page, tmp, &page_list, lru) {
  838. list_del(&page->lru);
  839. put_page(page);
  840. }
  841. }
  842. void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
  843. unsigned long end)
  844. {
  845. /*
  846. * It is undesirable to test vma->vm_file as it should be non-null
  847. * for valid hugetlb area. However, vm_file will be NULL in the error
  848. * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
  849. * do_mmap_pgoff() nullifies vma->vm_file before calling this function
  850. * to clean up. Since no pte has actually been setup, it is safe to
  851. * do nothing in this case.
  852. */
  853. if (vma->vm_file) {
  854. spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
  855. __unmap_hugepage_range(vma, start, end);
  856. spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
  857. }
  858. }
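/*
 * Break copy-on-write: if no one else uses the old page, simply make its PTE
 * writable; otherwise allocate a new huge page, copy the contents and install
 * it. Called with mm->page_table_lock held; the lock is dropped and retaken
 * around the allocation and copy.
 */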
  859. static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
  860. unsigned long address, pte_t *ptep, pte_t pte)
  861. {
  862. struct page *old_page, *new_page;
  863. int avoidcopy;
  864. old_page = pte_page(pte);
  865. /* If no-one else is actually using this page, avoid the copy
  866. * and just make the page writable */
  867. avoidcopy = (page_count(old_page) == 1);
  868. if (avoidcopy) {
  869. set_huge_ptep_writable(vma, address, ptep);
  870. return 0;
  871. }
  872. page_cache_get(old_page);
  873. new_page = alloc_huge_page(vma, address);
  874. if (IS_ERR(new_page)) {
  875. page_cache_release(old_page);
  876. return -PTR_ERR(new_page);
  877. }
  878. spin_unlock(&mm->page_table_lock);
  879. copy_huge_page(new_page, old_page, address, vma);
  880. __SetPageUptodate(new_page);
  881. spin_lock(&mm->page_table_lock);
  882. ptep = huge_pte_offset(mm, address & HPAGE_MASK);
  883. if (likely(pte_same(huge_ptep_get(ptep), pte))) {
  884. /* Break COW */
  885. huge_ptep_clear_flush(vma, address, ptep);
  886. set_huge_pte_at(mm, address, ptep,
  887. make_huge_pte(vma, new_page, 1));
  888. /* Make the old page be freed below */
  889. new_page = old_page;
  890. }
  891. page_cache_release(new_page);
  892. page_cache_release(old_page);
  893. return 0;
  894. }
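/*
 * Handle a fault on a huge PTE that is not yet present: look the page up in
 * the page cache or allocate a new one (adding it to the page cache for
 * shared mappings), then install the PTE under page_table_lock.
 */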
  895. static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
  896. unsigned long address, pte_t *ptep, int write_access)
  897. {
  898. int ret = VM_FAULT_SIGBUS;
  899. unsigned long idx;
  900. unsigned long size;
  901. struct page *page;
  902. struct address_space *mapping;
  903. pte_t new_pte;
  904. mapping = vma->vm_file->f_mapping;
  905. idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
  906. + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
  907. /*
  908. * Use page lock to guard against racing truncation
  909. * before we get page_table_lock.
  910. */
  911. retry:
  912. page = find_lock_page(mapping, idx);
  913. if (!page) {
  914. size = i_size_read(mapping->host) >> HPAGE_SHIFT;
  915. if (idx >= size)
  916. goto out;
  917. page = alloc_huge_page(vma, address);
  918. if (IS_ERR(page)) {
  919. ret = -PTR_ERR(page);
  920. goto out;
  921. }
  922. clear_huge_page(page, address);
  923. __SetPageUptodate(page);
  924. if (vma->vm_flags & VM_SHARED) {
  925. int err;
  926. struct inode *inode = mapping->host;
  927. err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
  928. if (err) {
  929. put_page(page);
  930. if (err == -EEXIST)
  931. goto retry;
  932. goto out;
  933. }
  934. spin_lock(&inode->i_lock);
  935. inode->i_blocks += BLOCKS_PER_HUGEPAGE;
  936. spin_unlock(&inode->i_lock);
  937. } else
  938. lock_page(page);
  939. }
  940. spin_lock(&mm->page_table_lock);
  941. size = i_size_read(mapping->host) >> HPAGE_SHIFT;
  942. if (idx >= size)
  943. goto backout;
  944. ret = 0;
  945. if (!huge_pte_none(huge_ptep_get(ptep)))
  946. goto backout;
  947. new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
  948. && (vma->vm_flags & VM_SHARED)));
  949. set_huge_pte_at(mm, address, ptep, new_pte);
  950. if (write_access && !(vma->vm_flags & VM_SHARED)) {
  951. /* Optimization, do the COW without a second fault */
  952. ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
  953. }
  954. spin_unlock(&mm->page_table_lock);
  955. unlock_page(page);
  956. out:
  957. return ret;
  958. backout:
  959. spin_unlock(&mm->page_table_lock);
  960. unlock_page(page);
  961. put_page(page);
  962. goto out;
  963. }
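/*
 * Top-level huge page fault handler: instantiate a missing page via
 * hugetlb_no_page(), or break copy-on-write for a write fault on a
 * read-only PTE.
 */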
  964. int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  965. unsigned long address, int write_access)
  966. {
  967. pte_t *ptep;
  968. pte_t entry;
  969. int ret;
  970. static DEFINE_MUTEX(hugetlb_instantiation_mutex);
  971. ptep = huge_pte_alloc(mm, address);
  972. if (!ptep)
  973. return VM_FAULT_OOM;
  974. /*
  975. * Serialize hugepage allocation and instantiation, so that we don't
  976. * get spurious allocation failures if two CPUs race to instantiate
  977. * the same page in the page cache.
  978. */
  979. mutex_lock(&hugetlb_instantiation_mutex);
  980. entry = huge_ptep_get(ptep);
  981. if (huge_pte_none(entry)) {
  982. ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
  983. mutex_unlock(&hugetlb_instantiation_mutex);
  984. return ret;
  985. }
  986. ret = 0;
  987. spin_lock(&mm->page_table_lock);
  988. /* Check for a racing update before calling hugetlb_cow */
  989. if (likely(pte_same(entry, huge_ptep_get(ptep))))
  990. if (write_access && !pte_write(entry))
  991. ret = hugetlb_cow(mm, vma, address, ptep, entry);
  992. spin_unlock(&mm->page_table_lock);
  993. mutex_unlock(&hugetlb_instantiation_mutex);
  994. return ret;
  995. }
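/*
 * get_user_pages() helper for hugetlb VMAs: walk the requested range,
 * faulting pages in as needed, and fill pages[]/vmas[] one PAGE_SIZE
 * subpage at a time. Updates *position and *length to reflect progress.
 * Returns the number of entries filled, or -EFAULT if the very first
 * page could not be faulted in.
 */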
  996. int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
  997. struct page **pages, struct vm_area_struct **vmas,
  998. unsigned long *position, int *length, int i,
  999. int write)
  1000. {
  1001. unsigned long pfn_offset;
  1002. unsigned long vaddr = *position;
  1003. int remainder = *length;
  1004. spin_lock(&mm->page_table_lock);
  1005. while (vaddr < vma->vm_end && remainder) {
  1006. pte_t *pte;
  1007. struct page *page;
  1008. /*
  1009. * Some archs (sparc64, sh*) have multiple pte_ts to
1010. * each hugepage. We have to make sure we get the
  1011. * first, for the page indexing below to work.
  1012. */
  1013. pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);
  1014. if (!pte || huge_pte_none(huge_ptep_get(pte)) ||
  1015. (write && !pte_write(huge_ptep_get(pte)))) {
  1016. int ret;
  1017. spin_unlock(&mm->page_table_lock);
  1018. ret = hugetlb_fault(mm, vma, vaddr, write);
  1019. spin_lock(&mm->page_table_lock);
  1020. if (!(ret & VM_FAULT_ERROR))
  1021. continue;
  1022. remainder = 0;
  1023. if (!i)
  1024. i = -EFAULT;
  1025. break;
  1026. }
  1027. pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
  1028. page = pte_page(huge_ptep_get(pte));
  1029. same_page:
  1030. if (pages) {
  1031. get_page(page);
  1032. pages[i] = page + pfn_offset;
  1033. }
  1034. if (vmas)
  1035. vmas[i] = vma;
  1036. vaddr += PAGE_SIZE;
  1037. ++pfn_offset;
  1038. --remainder;
  1039. ++i;
  1040. if (vaddr < vma->vm_end && remainder &&
  1041. pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
  1042. /*
  1043. * We use pfn_offset to avoid touching the pageframes
  1044. * of this compound page.
  1045. */
  1046. goto same_page;
  1047. }
  1048. }
  1049. spin_unlock(&mm->page_table_lock);
  1050. *length = remainder;
  1051. *position = vaddr;
  1052. return i;
  1053. }
  1054. void hugetlb_change_protection(struct vm_area_struct *vma,
  1055. unsigned long address, unsigned long end, pgprot_t newprot)
  1056. {
  1057. struct mm_struct *mm = vma->vm_mm;
  1058. unsigned long start = address;
  1059. pte_t *ptep;
  1060. pte_t pte;
  1061. BUG_ON(address >= end);
  1062. flush_cache_range(vma, address, end);
  1063. spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
  1064. spin_lock(&mm->page_table_lock);
  1065. for (; address < end; address += HPAGE_SIZE) {
  1066. ptep = huge_pte_offset(mm, address);
  1067. if (!ptep)
  1068. continue;
  1069. if (huge_pmd_unshare(mm, &address, ptep))
  1070. continue;
  1071. if (!huge_pte_none(huge_ptep_get(ptep))) {
  1072. pte = huge_ptep_get_and_clear(mm, address, ptep);
  1073. pte = pte_mkhuge(pte_modify(pte, newprot));
  1074. set_huge_pte_at(mm, address, ptep, pte);
  1075. }
  1076. }
  1077. spin_unlock(&mm->page_table_lock);
  1078. spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
  1079. flush_tlb_range(vma, start, end);
  1080. }
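/*
 * Reservations for shared mappings are tracked per inode as a sorted list of
 * [from, to) regions, in huge-page-sized units, hanging off
 * mapping->private_list. region_chg() reports how many additional pages a
 * proposed reservation would need, region_add() commits the reservation, and
 * region_truncate() drops everything beyond 'end' and returns the number of
 * reserved pages released.
 */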
  1081. struct file_region {
  1082. struct list_head link;
  1083. long from;
  1084. long to;
  1085. };
  1086. static long region_add(struct list_head *head, long f, long t)
  1087. {
  1088. struct file_region *rg, *nrg, *trg;
  1089. /* Locate the region we are either in or before. */
  1090. list_for_each_entry(rg, head, link)
  1091. if (f <= rg->to)
  1092. break;
  1093. /* Round our left edge to the current segment if it encloses us. */
  1094. if (f > rg->from)
  1095. f = rg->from;
  1096. /* Check for and consume any regions we now overlap with. */
  1097. nrg = rg;
  1098. list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
  1099. if (&rg->link == head)
  1100. break;
  1101. if (rg->from > t)
  1102. break;
  1103. /* If this area reaches higher then extend our area to
  1104. * include it completely. If this is not the first area
  1105. * which we intend to reuse, free it. */
  1106. if (rg->to > t)
  1107. t = rg->to;
  1108. if (rg != nrg) {
  1109. list_del(&rg->link);
  1110. kfree(rg);
  1111. }
  1112. }
  1113. nrg->from = f;
  1114. nrg->to = t;
  1115. return 0;
  1116. }
  1117. static long region_chg(struct list_head *head, long f, long t)
  1118. {
  1119. struct file_region *rg, *nrg;
  1120. long chg = 0;
  1121. /* Locate the region we are before or in. */
  1122. list_for_each_entry(rg, head, link)
  1123. if (f <= rg->to)
  1124. break;
  1125. /* If we are below the current region then a new region is required.
  1126. * Subtle, allocate a new region at the position but make it zero
  1127. * size such that we can guarantee to record the reservation. */
  1128. if (&rg->link == head || t < rg->from) {
  1129. nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
  1130. if (!nrg)
  1131. return -ENOMEM;
  1132. nrg->from = f;
  1133. nrg->to = f;
  1134. INIT_LIST_HEAD(&nrg->link);
  1135. list_add(&nrg->link, rg->link.prev);
  1136. return t - f;
  1137. }
  1138. /* Round our left edge to the current segment if it encloses us. */
  1139. if (f > rg->from)
  1140. f = rg->from;
  1141. chg = t - f;
  1142. /* Check for and consume any regions we now overlap with. */
  1143. list_for_each_entry(rg, rg->link.prev, link) {
  1144. if (&rg->link == head)
  1145. break;
  1146. if (rg->from > t)
  1147. return chg;
1148. * We overlap with this area; if it extends further than
  1149. * us then we must extend ourselves. Account for its
  1150. * existing reservation. */
  1151. if (rg->to > t) {
  1152. chg += rg->to - t;
  1153. t = rg->to;
  1154. }
  1155. chg -= rg->to - rg->from;
  1156. }
  1157. return chg;
  1158. }
  1159. static long region_truncate(struct list_head *head, long end)
  1160. {
  1161. struct file_region *rg, *trg;
  1162. long chg = 0;
  1163. /* Locate the region we are either in or before. */
  1164. list_for_each_entry(rg, head, link)
  1165. if (end <= rg->to)
  1166. break;
  1167. if (&rg->link == head)
  1168. return 0;
  1169. /* If we are in the middle of a region then adjust it. */
  1170. if (end > rg->from) {
  1171. chg = rg->to - end;
  1172. rg->to = end;
  1173. rg = list_entry(rg->link.next, typeof(*rg), link);
  1174. }
  1175. /* Drop any remaining regions. */
  1176. list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
  1177. if (&rg->link == head)
  1178. break;
  1179. chg += rg->to - rg->from;
  1180. list_del(&rg->link);
  1181. kfree(rg);
  1182. }
  1183. return chg;
  1184. }
  1185. int hugetlb_reserve_pages(struct inode *inode,
  1186. long from, long to,
  1187. struct vm_area_struct *vma)
  1188. {
  1189. long ret, chg;
  1190. /*
  1191. * Shared mappings base their reservation on the number of pages that
  1192. * are already allocated on behalf of the file. Private mappings need
  1193. * to reserve the full area even if read-only as mprotect() may be
  1194. * called to make the mapping read-write. Assume !vma is a shm mapping
  1195. */
  1196. if (!vma || vma->vm_flags & VM_SHARED)
  1197. chg = region_chg(&inode->i_mapping->private_list, from, to);
  1198. else {
  1199. chg = to - from;
  1200. set_vma_resv_huge_pages(vma, chg);
  1201. }
  1202. if (chg < 0)
  1203. return chg;
  1204. if (hugetlb_get_quota(inode->i_mapping, chg))
  1205. return -ENOSPC;
  1206. ret = hugetlb_acct_memory(chg);
  1207. if (ret < 0) {
  1208. hugetlb_put_quota(inode->i_mapping, chg);
  1209. return ret;
  1210. }
  1211. if (!vma || vma->vm_flags & VM_SHARED)
  1212. region_add(&inode->i_mapping->private_list, from, to);
  1213. return 0;
  1214. }
  1215. void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
  1216. {
  1217. long chg = region_truncate(&inode->i_mapping->private_list, offset);
  1218. spin_lock(&inode->i_lock);
  1219. inode->i_blocks -= BLOCKS_PER_HUGEPAGE * freed;
  1220. spin_unlock(&inode->i_lock);
  1221. hugetlb_put_quota(inode->i_mapping, (chg - freed));
  1222. hugetlb_acct_memory(-(chg - freed));
  1223. }