hugetlb.c

/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
static unsigned long surplus_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
unsigned long nr_overcommit_huge_pages;
static int hugetlb_next_nid;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);

	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}
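
/*
 * Take a free huge page from the first node that has one, walking the
 * zonelist selected by the vma's memory policy and honouring cpuset
 * restrictions.  Caller must hold hugetlb_lock.
 */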
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = zone_to_nid(*z);
		if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
		    !list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			if (vma && vma->vm_flags & VM_MAYSHARE)
				resv_huge_pages--;
			break;
		}
	}
	mpol_free(mpol);	/* unref if mpol !NULL */
	return page;
}

static void update_and_free_page(struct page *page)
{
	int i;

	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}
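
/*
 * Compound page destructor: called when the last reference to a huge
 * page is dropped.  Surplus pages are returned to the buddy allocator;
 * everything else goes back on the hugepage free lists.
 */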
static void free_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	struct address_space *mapping;

	mapping = (struct address_space *) page_private(page);
	BUG_ON(page_count(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages_node[nid]) {
		update_and_free_page(page);
		surplus_huge_pages--;
		surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(page);
	}
	spin_unlock(&hugetlb_lock);
	if (mapping)
		hugetlb_put_quota(mapping, 1);
	set_page_private(page, 0);
}

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(int delta)
{
	static int prev_nid;
	int nid = prev_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);
	do {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		/* To shrink on this node, there must be a surplus page */
		if (delta < 0 && !surplus_huge_pages_node[nid])
			continue;
		/* Surplus cannot exceed the total number of pages */
		if (delta > 0 && surplus_huge_pages_node[nid] >=
						nr_huge_pages_node[nid])
			continue;

		surplus_huge_pages += delta;
		surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (nid != prev_nid);

	prev_nid = nid;
	return ret;
}

static struct page *alloc_fresh_huge_page_node(int nid)
{
	struct page *page;

	page = alloc_pages_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
		HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[nid]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
	}

	return page;
}
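
/*
 * Allocate one fresh (non-surplus) huge page, interleaving attempts
 * across the online nodes via hugetlb_next_nid.  Returns 1 on success.
 */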
static int alloc_fresh_huge_page(void)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hugetlb_next_nid;

	do {
		page = alloc_fresh_huge_page_node(hugetlb_next_nid);
		if (page)
			ret = 1;
		/*
		 * Use a helper variable to find the next node and then
		 * copy it back to hugetlb_next_nid afterwards:
		 * otherwise there's a window in which a racer might
		 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
		 * But we don't need to use a spin_lock here: it really
		 * doesn't matter if occasionally a racer chooses the
		 * same nid as we do.  Move nid forward in the mask even
		 * if we just successfully allocated a hugepage so that
		 * the next caller gets hugepages on the next node.
		 */
		next_nid = next_node(hugetlb_next_nid, node_online_map);
		if (next_nid == MAX_NUMNODES)
			next_nid = first_node(node_online_map);
		hugetlb_next_nid = next_nid;
	} while (!page && hugetlb_next_nid != start_nid);

	return ret;
}
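
/*
 * Allocate a surplus huge page straight from the buddy allocator.  The
 * number of surplus pages is capped by nr_overcommit_huge_pages; the
 * counters are bumped optimistically and rolled back on failure.
 */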
static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
						unsigned long address)
{
	struct page *page;
	unsigned int nid;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages >= nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		nr_huge_pages++;
		surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);

	spin_lock(&hugetlb_lock);
	if (page) {
		nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		/*
		 * We incremented the global counters already
		 */
		nr_huge_pages_node[nid]++;
		surplus_huge_pages_node[nid]++;
	} else {
		nr_huge_pages--;
		surplus_huge_pages--;
	}
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (resv_huge_pages + delta) - free_huge_pages;
	if (needed <= 0)
		return 0;

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(NULL, 0);
		if (!page) {
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			spin_lock(&hugetlb_lock);
			needed = 0;
			goto free;
		}

		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.
	 */
	needed += allocated;
	ret = 0;
free:
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		list_del(&page->lru);
		if ((--needed) >= 0)
			enqueue_huge_page(page);
		else {
			/*
			 * Decrement the refcount and free the page using its
			 * destructor.  This must be done with hugetlb_lock
			 * unlocked which is safe because free_huge_page takes
			 * hugetlb_lock before deciding how to free the page.
			 */
			spin_unlock(&hugetlb_lock);
			put_page(page);
			spin_lock(&hugetlb_lock);
		}
	}

	return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they
 * were never used.
 */
static void return_unused_surplus_pages(unsigned long unused_resv_pages)
{
	static int nid = -1;
	struct page *page;
	unsigned long nr_pages;

	nr_pages = min(unused_resv_pages, surplus_huge_pages);

	while (nr_pages) {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		if (!surplus_huge_pages_node[nid])
			continue;

		if (!list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			surplus_huge_pages--;
			surplus_huge_pages_node[nid]--;
			nr_pages--;
		}
	}
}
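
/*
 * Shared (VM_MAYSHARE) mappings draw only on the reserved pool, while
 * private mappings are charged quota here and may fall back to a
 * surplus page from the buddy allocator.
 */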
static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page(vma, addr);
	spin_unlock(&hugetlb_lock);
	return page ? page : ERR_PTR(-VM_FAULT_OOM);
}

static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct page *page = NULL;

	if (hugetlb_get_quota(vma->vm_file->f_mapping, 1))
		return ERR_PTR(-VM_FAULT_SIGBUS);

	spin_lock(&hugetlb_lock);
	if (free_huge_pages > resv_huge_pages)
		page = dequeue_huge_page(vma, addr);
	spin_unlock(&hugetlb_lock);
	if (!page) {
		page = alloc_buddy_huge_page(vma, addr);
		if (!page) {
			hugetlb_put_quota(vma->vm_file->f_mapping, 1);
			return ERR_PTR(-VM_FAULT_OOM);
		}
	}
	return page;
}

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page;
	struct address_space *mapping = vma->vm_file->f_mapping;

	if (vma->vm_flags & VM_MAYSHARE)
		page = alloc_huge_page_shared(vma, addr);
	else
		page = alloc_huge_page_private(vma, addr);

	if (!IS_ERR(page)) {
		set_page_refcounted(page);
		set_page_private(page, (unsigned long) mapping);
	}
	return page;
}

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	hugetlb_next_nid = first_node(node_online_map);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (count >= nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif
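
/*
 * "Persistent" pages were allocated to the static pool rather than as
 * surplus overcommit pages; set_max_huge_pages() resizes the pool to
 * 'count' such persistent pages.
 */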
#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)
static unsigned long set_max_huge_pages(unsigned long count)
{
	unsigned long min_count, ret;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (surplus_huge_pages && count > persistent_huge_pages) {
		if (!adjust_pool_surplus(-1))
			break;
	}

	while (count > persistent_huge_pages) {
		int ret;
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page();
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;
	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_buddy_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls are changed, or the surplus pages go out of use.
	 */
	min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(min_count);
	while (min_count < persistent_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	while (count < persistent_huge_pages) {
		if (!adjust_pool_surplus(1))
			break;
	}
out:
	ret = persistent_huge_pages;
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free: %5lu\n"
			"HugePages_Rsvd: %5lu\n"
			"HugePages_Surp: %5lu\n"
			"Hugepagesize: %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			surplus_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free: %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
	}
}
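
/*
 * Copy the huge page table entries of a vma from the parent mm to the
 * child at fork time; private COW mappings are write-protected in both.
 */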
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;

		/* If the pagetables are shared don't copy or take references */
		if (dst_pte == src_pte)
			continue;

		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for valid hugetlb area. However, vm_file will be NULL in the error
	 * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
	 * do_mmap_pgoff() nullifies vma->vm_file before calling this function
	 * to clean up. Since no pte has actually been setup, it is safe to
	 * do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}
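
/*
 * Handle a copy-on-write fault on a huge page: if we hold the only
 * reference, just make the pte writable; otherwise copy into a newly
 * allocated huge page.  Called with mm->page_table_lock held.
 */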
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (IS_ERR(new_page)) {
		page_cache_release(old_page);
		return -PTR_ERR(new_page);
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	__SetPageUptodate(new_page);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}
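
/*
 * Fault in a huge page that has no pte yet: find it in the page cache
 * or allocate and zero a new one, then install the pte under
 * page_table_lock, re-checking i_size to guard against truncation.
 */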
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		page = alloc_huge_page(vma, address);
		if (IS_ERR(page)) {
			ret = -PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, address);
		__SetPageUptodate(page);

		if (vma->vm_flags & VM_SHARED) {
			int err;
			struct inode *inode = mapping->host;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}

			spin_lock(&inode->i_lock);
			inode->i_blocks += BLOCKS_PER_HUGEPAGE;
			spin_unlock(&inode->i_lock);
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
	put_page(page);
	goto out;
}

int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = 0;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}
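
/*
 * get_user_pages() helper for hugetlb vmas: walk the range, faulting in
 * huge pages as needed, and fill in the pages[] and vmas[] arrays one
 * PAGE_SIZE subpage at a time.
 */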
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i,
			int write)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte) || (write && !pte_write(*pte))) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, write);
			spin_lock(&mm->page_table_lock);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);
same_page:
		if (pages) {
			get_page(page);
			pages[i] = page + pfn_offset;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}

void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}
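
/*
 * Reservations for shared mappings are tracked as a list of [from, to)
 * file_region ranges hung off the inode's mapping (private_list);
 * region_chg()/region_add()/region_truncate() maintain that list.
 */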
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * a reservation is completely rubbish in the presence of cpusets
	 * because the reservation is not checked against page availability
	 * for the current cpuset. An application can still potentially be
	 * OOM'ed by the kernel for lack of free hugetlb pages in the cpuset
	 * that the task is in. Attempting to enforce strict accounting with
	 * cpusets is almost impossible (or too ugly) because cpusets are so
	 * fluid that tasks or memory nodes can be dynamically moved between
	 * them.
	 *
	 * The change of semantics for shared hugetlb mappings with cpusets
	 * is undesirable. However, in order to preserve some of the
	 * semantics, we fall back to checking against current free page
	 * availability as a best attempt and hopefully to minimize the
	 * impact of the semantic change that cpusets bring.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(delta) < 0)
			goto out;

		if (delta > cpuset_mems_nr(free_huge_pages_node))
			goto out;
	}

	ret = 0;
	resv_huge_pages += delta;
	if (delta < 0)
		return_unused_surplus_pages((unsigned long) -delta);

out:
	spin_unlock(&hugetlb_lock);
	return ret;
}
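
/*
 * Reserve huge pages for the file range [from, to): charge the hugetlbfs
 * quota, account the pages against the pool, and record the range in the
 * mapping's region list.
 */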
int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
	long ret, chg;

	chg = region_chg(&inode->i_mapping->private_list, from, to);
	if (chg < 0)
		return chg;

	if (hugetlb_get_quota(inode->i_mapping, chg))
		return -ENOSPC;
	ret = hugetlb_acct_memory(chg);
	if (ret < 0) {
		hugetlb_put_quota(inode->i_mapping, chg);
		return ret;
	}
	region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	long chg = region_truncate(&inode->i_mapping->private_list, offset);

	spin_lock(&inode->i_lock);
	inode->i_blocks -= BLOCKS_PER_HUGEPAGE * freed;
	spin_unlock(&inode->i_lock);

	hugetlb_put_quota(inode->i_mapping, (chg - freed));
	hugetlb_acct_memory(-(chg - freed));
}