hugetlb.c

/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/io.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

static int max_hstate;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;

#define for_each_hstate(h) \
        for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are protected by a combination of the mmap_sem
 * and the hugetlb_instantiation_mutex.  To access or modify a region the
 * caller must either hold the mmap_sem for write, or the mmap_sem for read
 * and the hugetlb_instantiation_mutex:
 *
 *      down_write(&mm->mmap_sem);
 * or
 *      down_read(&mm->mmap_sem);
 *      mutex_lock(&hugetlb_instantiation_mutex);
 */
struct file_region {
        struct list_head link;
        long from;
        long to;
};

static long region_add(struct list_head *head, long f, long t)
{
        struct file_region *rg, *nrg, *trg;

        /* Locate the region we are either in or before. */
        list_for_each_entry(rg, head, link)
                if (f <= rg->to)
                        break;

        /* Round our left edge to the current segment if it encloses us. */
        if (f > rg->from)
                f = rg->from;

        /* Check for and consume any regions we now overlap with. */
        nrg = rg;
        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                if (rg->from > t)
                        break;

                /* If this area reaches higher then extend our area to
                 * include it completely.  If this is not the first area
                 * which we intend to reuse, free it. */
                if (rg->to > t)
                        t = rg->to;
                if (rg != nrg) {
                        list_del(&rg->link);
                        kfree(rg);
                }
        }
        nrg->from = f;
        nrg->to = t;
        return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
        struct file_region *rg, *nrg;
        long chg = 0;

        /* Locate the region we are before or in. */
        list_for_each_entry(rg, head, link)
                if (f <= rg->to)
                        break;

        /* If we are below the current region then a new region is required.
         * Subtle, allocate a new region at the position but make it zero
         * size such that we can guarantee to record the reservation. */
        if (&rg->link == head || t < rg->from) {
                nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
                if (!nrg)
                        return -ENOMEM;
                nrg->from = f;
                nrg->to = f;
                INIT_LIST_HEAD(&nrg->link);
                list_add(&nrg->link, rg->link.prev);

                return t - f;
        }

        /* Round our left edge to the current segment if it encloses us. */
        if (f > rg->from)
                f = rg->from;
        chg = t - f;

        /* Check for and consume any regions we now overlap with. */
        list_for_each_entry(rg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                if (rg->from > t)
                        return chg;

                /* We overlap with this area, if it extends further than
                 * us then we must extend ourselves.  Account for its
                 * existing reservation. */
                if (rg->to > t) {
                        chg += rg->to - t;
                        t = rg->to;
                }
                chg -= rg->to - rg->from;
        }
        return chg;
}

static long region_truncate(struct list_head *head, long end)
{
        struct file_region *rg, *trg;
        long chg = 0;

        /* Locate the region we are either in or before. */
        list_for_each_entry(rg, head, link)
                if (end <= rg->to)
                        break;
        if (&rg->link == head)
                return 0;

        /* If we are in the middle of a region then adjust it. */
        if (end > rg->from) {
                chg = rg->to - end;
                rg->to = end;
                rg = list_entry(rg->link.next, typeof(*rg), link);
        }

        /* Drop any remaining regions. */
        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                chg += rg->to - rg->from;
                list_del(&rg->link);
                kfree(rg);
        }
        return chg;
}

static long region_count(struct list_head *head, long f, long t)
{
        struct file_region *rg;
        long chg = 0;

        /* Locate each segment we overlap with, and count that overlap. */
        list_for_each_entry(rg, head, link) {
                int seg_from;
                int seg_to;

                if (rg->to <= f)
                        continue;
                if (rg->from >= t)
                        break;

                seg_from = max(rg->from, f);
                seg_to = min(rg->to, t);

                chg += seg_to - seg_from;
        }

        return chg;
}
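
/*
 * Illustrative usage sketch (an assumption, not part of the original
 * source): the helpers above form a two-phase reserve/commit protocol.
 * region_chg() reports how many pages a range would add (pre-allocating a
 * zero-size region where needed so the later commit cannot fail for lack
 * of memory), and region_add() commits the range.  A caller typically does:
 *
 *      chg = region_chg(&map->regions, idx, idx + 1);
 *      if (chg < 0)
 *              return chg;             (-ENOMEM)
 *      ... charge quota / allocate pages for 'chg' new entries ...
 *      region_add(&map->regions, idx, idx + 1);
 *
 * vma_needs_reservation()/vma_commit_reservation() below follow exactly
 * this pattern.
 */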
/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long address)
{
        return ((address - vma->vm_start) >> huge_page_shift(h)) +
                        (vma->vm_pgoff >> huge_page_order(h));
}
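
/*
 * Worked example (illustrative, assuming 2MB huge pages, so
 * huge_page_shift() == 21 and huge_page_order() == 9 with a 4kB base
 * page): for a mapping with vm_pgoff == 0, an address 4MB past vm_start
 * yields (4MB >> 21) + 0 == 2, i.e. the third huge page in the file.
 */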
/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
        struct hstate *hstate;

        if (!is_vm_hugetlb_page(vma))
                return PAGE_SIZE;

        hstate = hstate_vma(vma);

        return 1UL << (hstate->order + PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific version of this
 * function is required.
 */
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        return vma_kernel_pagesize(vma);
}
#endif

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
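
/*
 * Illustrative sketch (an assumption, not from the original source): a
 * kmalloc()ed struct resv_map is at least word-aligned, so the two low
 * bits of its address are guaranteed zero and can carry the flags above
 * alongside the pointer in vm_private_data:
 *
 *      vma->vm_private_data =
 *              (void *)((unsigned long)map | HPAGE_RESV_OWNER);
 *      map = (struct resv_map *)((unsigned long)vma->vm_private_data &
 *                                              ~HPAGE_RESV_MASK);
 *
 * The get/set helpers below implement exactly this packing.
 */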
/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have its future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and it persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
        return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
                                                        unsigned long value)
{
        vma->vm_private_data = (void *)value;
}

struct resv_map {
        struct kref refs;
        struct list_head regions;
};

static struct resv_map *resv_map_alloc(void)
{
        struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
        if (!resv_map)
                return NULL;

        kref_init(&resv_map->refs);
        INIT_LIST_HEAD(&resv_map->regions);

        return resv_map;
}

static void resv_map_release(struct kref *ref)
{
        struct resv_map *resv_map = container_of(ref, struct resv_map, refs);

        /* Clear out any active regions before we release the map. */
        region_truncate(&resv_map->regions, 0);
        kfree(resv_map);
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
        VM_BUG_ON(!is_vm_hugetlb_page(vma));
        if (!(vma->vm_flags & VM_MAYSHARE))
                return (struct resv_map *)(get_vma_private_data(vma) &
                                                        ~HPAGE_RESV_MASK);
        return NULL;
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
        VM_BUG_ON(!is_vm_hugetlb_page(vma));
        VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

        set_vma_private_data(vma, (get_vma_private_data(vma) &
                                HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
        VM_BUG_ON(!is_vm_hugetlb_page(vma));
        VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

        set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
        VM_BUG_ON(!is_vm_hugetlb_page(vma));

        return (get_vma_private_data(vma) & flag) != 0;
}

/* Decrement the reserved pages in the hugepage pool by one */
static void decrement_hugepage_resv_vma(struct hstate *h,
                        struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_NORESERVE)
                return;

        if (vma->vm_flags & VM_MAYSHARE) {
                /* Shared mappings always use reserves */
                h->resv_huge_pages--;
        } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
                /*
                 * Only the process that called mmap() has reserves for
                 * private mappings.
                 */
                h->resv_huge_pages--;
        }
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
        VM_BUG_ON(!is_vm_hugetlb_page(vma));
        if (!(vma->vm_flags & VM_MAYSHARE))
                vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_MAYSHARE)
                return 1;
        if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
                return 1;
        return 0;
}
static void clear_gigantic_page(struct page *page,
                        unsigned long addr, unsigned long sz)
{
        int i;
        struct page *p = page;

        might_sleep();
        for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
                cond_resched();
                clear_user_highpage(p, addr + i * PAGE_SIZE);
        }
}

static void clear_huge_page(struct page *page,
                        unsigned long addr, unsigned long sz)
{
        int i;

        if (unlikely(sz > MAX_ORDER_NR_PAGES)) {
                clear_gigantic_page(page, addr, sz);
                return;
        }

        might_sleep();
        for (i = 0; i < sz/PAGE_SIZE; i++) {
                cond_resched();
                clear_user_highpage(page + i, addr + i * PAGE_SIZE);
        }
}

static void copy_gigantic_page(struct page *dst, struct page *src,
                        unsigned long addr, struct vm_area_struct *vma)
{
        int i;
        struct hstate *h = hstate_vma(vma);
        struct page *dst_base = dst;
        struct page *src_base = src;

        might_sleep();
        for (i = 0; i < pages_per_huge_page(h); ) {
                cond_resched();
                copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);

                i++;
                dst = mem_map_next(dst, dst_base, i);
                src = mem_map_next(src, src_base, i);
        }
}

static void copy_huge_page(struct page *dst, struct page *src,
                        unsigned long addr, struct vm_area_struct *vma)
{
        int i;
        struct hstate *h = hstate_vma(vma);

        if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
                copy_gigantic_page(dst, src, addr, vma);
                return;
        }

        might_sleep();
        for (i = 0; i < pages_per_huge_page(h); i++) {
                cond_resched();
                copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
        }
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
        int nid = page_to_nid(page);
        list_add(&page->lru, &h->hugepage_freelists[nid]);
        h->free_huge_pages++;
        h->free_huge_pages_node[nid]++;
}
static struct page *dequeue_huge_page_vma(struct hstate *h,
                                struct vm_area_struct *vma,
                                unsigned long address, int avoid_reserve)
{
        int nid;
        struct page *page = NULL;
        struct mempolicy *mpol;
        nodemask_t *nodemask;
        struct zonelist *zonelist = huge_zonelist(vma, address,
                                        htlb_alloc_mask, &mpol, &nodemask);
        struct zone *zone;
        struct zoneref *z;

        /*
         * A child process with MAP_PRIVATE mappings created by its parent
         * has no page reserves. This check ensures that reservations are
         * not "stolen". The child may still get SIGKILLed.
         */
        if (!vma_has_reserves(vma) &&
                        h->free_huge_pages - h->resv_huge_pages == 0)
                return NULL;

        /* If reserves cannot be used, ensure enough pages are in the pool */
        if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
                return NULL;

        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                        MAX_NR_ZONES - 1, nodemask) {
                nid = zone_to_nid(zone);
                if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
                    !list_empty(&h->hugepage_freelists[nid])) {
                        page = list_entry(h->hugepage_freelists[nid].next,
                                          struct page, lru);
                        list_del(&page->lru);
                        h->free_huge_pages--;
                        h->free_huge_pages_node[nid]--;

                        if (!avoid_reserve)
                                decrement_hugepage_resv_vma(h, vma);

                        break;
                }
        }
        mpol_cond_put(mpol);
        return page;
}
static void update_and_free_page(struct hstate *h, struct page *page)
{
        int i;

        VM_BUG_ON(h->order >= MAX_ORDER);

        h->nr_huge_pages--;
        h->nr_huge_pages_node[page_to_nid(page)]--;
        for (i = 0; i < pages_per_huge_page(h); i++) {
                page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
                                1 << PG_referenced | 1 << PG_dirty |
                                1 << PG_active | 1 << PG_reserved |
                                1 << PG_private | 1 << PG_writeback);
        }
        set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
        arch_release_hugepage(page);
        __free_pages(page, huge_page_order(h));
}

struct hstate *size_to_hstate(unsigned long size)
{
        struct hstate *h;

        for_each_hstate(h) {
                if (huge_page_size(h) == size)
                        return h;
        }
        return NULL;
}

static void free_huge_page(struct page *page)
{
        /*
         * Can't pass hstate in here because it is called from the
         * compound page destructor.
         */
        struct hstate *h = page_hstate(page);
        int nid = page_to_nid(page);
        struct address_space *mapping;

        mapping = (struct address_space *) page_private(page);
        set_page_private(page, 0);
        BUG_ON(page_count(page));
        INIT_LIST_HEAD(&page->lru);

        spin_lock(&hugetlb_lock);
        if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
                update_and_free_page(h, page);
                h->surplus_huge_pages--;
                h->surplus_huge_pages_node[nid]--;
        } else {
                enqueue_huge_page(h, page);
        }
        spin_unlock(&hugetlb_lock);
        if (mapping)
                hugetlb_put_quota(mapping, 1);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
        set_compound_page_dtor(page, free_huge_page);
        spin_lock(&hugetlb_lock);
        h->nr_huge_pages++;
        h->nr_huge_pages_node[nid]++;
        spin_unlock(&hugetlb_lock);
        put_page(page); /* free it into the hugepage allocator */
}

static void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
        int i;
        int nr_pages = 1 << order;
        struct page *p = page + 1;

        /* we rely on prep_new_huge_page to set the destructor */
        set_compound_order(page, order);
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                __SetPageTail(p);
                p->first_page = page;
        }
}

int PageHuge(struct page *page)
{
        compound_page_dtor *dtor;

        if (!PageCompound(page))
                return 0;

        page = compound_head(page);
        dtor = get_compound_page_dtor(page);

        return dtor == free_huge_page;
}
static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
        struct page *page;

        if (h->order >= MAX_ORDER)
                return NULL;

        page = alloc_pages_exact_node(nid,
                htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
                                                __GFP_REPEAT|__GFP_NOWARN,
                huge_page_order(h));
        if (page) {
                if (arch_prepare_hugepage(page)) {
                        __free_pages(page, huge_page_order(h));
                        return NULL;
                }
                prep_new_huge_page(h, page, nid);
        }

        return page;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
        nid = next_node(nid, *nodes_allowed);
        if (nid == MAX_NUMNODES)
                nid = first_node(*nodes_allowed);
        VM_BUG_ON(nid >= MAX_NUMNODES);

        return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
        if (!node_isset(nid, *nodes_allowed))
                nid = next_node_allowed(nid, nodes_allowed);
        return nid;
}
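
/*
 * Worked example (illustrative): with *nodes_allowed = {0, 2, 3} and a
 * saved nid of 1, get_valid_node_allowed() returns 2 because node 1 is
 * not in the mask, while next_node_allowed(3, ...) runs off the end of
 * the mask and wraps around to node 0.
 */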
/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
                                        nodemask_t *nodes_allowed)
{
        int nid;

        VM_BUG_ON(!nodes_allowed);

        nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
        h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

        return nid;
}

static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
        struct page *page;
        int start_nid;
        int next_nid;
        int ret = 0;

        start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
        next_nid = start_nid;

        do {
                page = alloc_fresh_huge_page_node(h, next_nid);
                if (page) {
                        ret = 1;
                        break;
                }
                next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
        } while (next_nid != start_nid);

        if (ret)
                count_vm_event(HTLB_BUDDY_PGALLOC);
        else
                count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

        return ret;
}

/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
        int nid;

        VM_BUG_ON(!nodes_allowed);

        nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
        h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

        return nid;
}

/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
                                                        bool acct_surplus)
{
        int start_nid;
        int next_nid;
        int ret = 0;

        start_nid = hstate_next_node_to_free(h, nodes_allowed);
        next_nid = start_nid;

        do {
                /*
                 * If we're returning unused surplus pages, only examine
                 * nodes with surplus pages.
                 */
                if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
                    !list_empty(&h->hugepage_freelists[next_nid])) {
                        struct page *page =
                                list_entry(h->hugepage_freelists[next_nid].next,
                                          struct page, lru);
                        list_del(&page->lru);
                        h->free_huge_pages--;
                        h->free_huge_pages_node[next_nid]--;
                        if (acct_surplus) {
                                h->surplus_huge_pages--;
                                h->surplus_huge_pages_node[next_nid]--;
                        }
                        update_and_free_page(h, page);
                        ret = 1;
                        break;
                }
                next_nid = hstate_next_node_to_free(h, nodes_allowed);
        } while (next_nid != start_nid);

        return ret;
}
static struct page *alloc_buddy_huge_page(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long address)
{
        struct page *page;
        unsigned int nid;

        if (h->order >= MAX_ORDER)
                return NULL;

        /*
         * Assume we will successfully allocate the surplus page to
         * prevent racing processes from causing the surplus to exceed
         * overcommit
         *
         * This however introduces a different race, where a process B
         * tries to grow the static hugepage pool while alloc_pages() is
         * called by process A. B will only examine the per-node
         * counters in determining if surplus huge pages can be
         * converted to normal huge pages in adjust_pool_surplus(). A
         * won't be able to increment the per-node counter, until the
         * lock is dropped by B, but B doesn't drop hugetlb_lock until
         * no more huge pages can be converted from surplus to normal
         * state (and doesn't try to convert again). Thus, we have a
         * case where a surplus huge page exists, the pool is grown, and
         * the surplus huge page still exists after, even though it
         * should just have been converted to a normal huge page. This
         * does not leak memory, though, as the hugepage will be freed
         * once it is out of use. It also does not allow the counters to
         * go out of whack in adjust_pool_surplus() as we don't modify
         * the node values until we've gotten the hugepage and only the
         * per-node value is checked there.
         */
        spin_lock(&hugetlb_lock);
        if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
                spin_unlock(&hugetlb_lock);
                return NULL;
        } else {
                h->nr_huge_pages++;
                h->surplus_huge_pages++;
        }
        spin_unlock(&hugetlb_lock);

        page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
                                        __GFP_REPEAT|__GFP_NOWARN,
                                        huge_page_order(h));

        if (page && arch_prepare_hugepage(page)) {
                __free_pages(page, huge_page_order(h));
                return NULL;
        }

        spin_lock(&hugetlb_lock);
        if (page) {
                /*
                 * This page is now managed by the hugetlb allocator and has
                 * no users -- drop the buddy allocator's reference.
                 */
                put_page_testzero(page);
                VM_BUG_ON(page_count(page));
                nid = page_to_nid(page);
                set_compound_page_dtor(page, free_huge_page);
                /*
                 * We incremented the global counters already
                 */
                h->nr_huge_pages_node[nid]++;
                h->surplus_huge_pages_node[nid]++;
                __count_vm_event(HTLB_BUDDY_PGALLOC);
        } else {
                h->nr_huge_pages--;
                h->surplus_huge_pages--;
                __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
        }
        spin_unlock(&hugetlb_lock);

        return page;
}
/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
        struct list_head surplus_list;
        struct page *page, *tmp;
        int ret, i;
        int needed, allocated;

        needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
        if (needed <= 0) {
                h->resv_huge_pages += delta;
                return 0;
        }

        allocated = 0;
        INIT_LIST_HEAD(&surplus_list);

        ret = -ENOMEM;
retry:
        spin_unlock(&hugetlb_lock);
        for (i = 0; i < needed; i++) {
                page = alloc_buddy_huge_page(h, NULL, 0);
                if (!page) {
                        /*
                         * We were not able to allocate enough pages to
                         * satisfy the entire reservation so we free what
                         * we've allocated so far.
                         */
                        spin_lock(&hugetlb_lock);
                        needed = 0;
                        goto free;
                }

                list_add(&page->lru, &surplus_list);
        }
        allocated += needed;

        /*
         * After retaking hugetlb_lock, we need to recalculate 'needed'
         * because either resv_huge_pages or free_huge_pages may have changed.
         */
        spin_lock(&hugetlb_lock);
        needed = (h->resv_huge_pages + delta) -
                        (h->free_huge_pages + allocated);
        if (needed > 0)
                goto retry;

        /*
         * The surplus_list now contains _at_least_ the number of extra pages
         * needed to accommodate the reservation.  Add the appropriate number
         * of pages to the hugetlb pool and free the extras back to the buddy
         * allocator.  Commit the entire reservation here to prevent another
         * process from stealing the pages as they are added to the pool but
         * before they are reserved.
         */
        needed += allocated;
        h->resv_huge_pages += delta;
        ret = 0;
free:
        /* Free the needed pages to the hugetlb pool */
        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
                if ((--needed) < 0)
                        break;
                list_del(&page->lru);
                enqueue_huge_page(h, page);
        }

        /* Free unnecessary surplus pages to the buddy allocator */
        if (!list_empty(&surplus_list)) {
                spin_unlock(&hugetlb_lock);
                list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
                        list_del(&page->lru);
                        /*
                         * The page has a reference count of zero already, so
                         * call free_huge_page directly instead of using
                         * put_page.  This must be done with hugetlb_lock
                         * unlocked which is safe because free_huge_page takes
                         * hugetlb_lock before deciding how to free the page.
                         */
                        free_huge_page(page);
                }
                spin_lock(&hugetlb_lock);
        }

        return ret;
}
/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 * Called with hugetlb_lock held.
 */
static void return_unused_surplus_pages(struct hstate *h,
                                        unsigned long unused_resv_pages)
{
        unsigned long nr_pages;

        /* Uncommit the reservation */
        h->resv_huge_pages -= unused_resv_pages;

        /* Cannot return gigantic pages currently */
        if (h->order >= MAX_ORDER)
                return;

        nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

        /*
         * We want to release as many surplus pages as possible, spread
         * evenly across all nodes. Iterate across all nodes until we
         * can no longer free unreserved surplus pages. This occurs when
         * the nodes with surplus pages have no free pages.
         * free_pool_huge_page() will balance the frees across the
         * on-line nodes for us and will handle the hstate accounting.
         */
        while (nr_pages--) {
                if (!free_pool_huge_page(h, &node_online_map, 1))
                        break;
        }
}
/*
 * Determine if the huge page at addr within the vma has an associated
 * reservation.  Where it does not we will need to logically increase
 * the reservation and actually increase the quota before an allocation
 * can occur.  Where any new reservation would be required the
 * reservation change is prepared, but not committed.  Once the page has
 * been quota'd, allocated and instantiated, the change should be
 * committed via vma_commit_reservation.  No action is required on
 * failure.
 */
static long vma_needs_reservation(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct inode *inode = mapping->host;

        if (vma->vm_flags & VM_MAYSHARE) {
                pgoff_t idx = vma_hugecache_offset(h, vma, addr);
                return region_chg(&inode->i_mapping->private_list,
                                                        idx, idx + 1);

        } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
                return 1;

        } else {
                long err;
                pgoff_t idx = vma_hugecache_offset(h, vma, addr);
                struct resv_map *reservations = vma_resv_map(vma);

                err = region_chg(&reservations->regions, idx, idx + 1);
                if (err < 0)
                        return err;
                return 0;
        }
}

static void vma_commit_reservation(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct inode *inode = mapping->host;

        if (vma->vm_flags & VM_MAYSHARE) {
                pgoff_t idx = vma_hugecache_offset(h, vma, addr);
                region_add(&inode->i_mapping->private_list, idx, idx + 1);

        } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
                pgoff_t idx = vma_hugecache_offset(h, vma, addr);
                struct resv_map *reservations = vma_resv_map(vma);

                /* Mark this page used in the map. */
                region_add(&reservations->regions, idx, idx + 1);
        }
}
static struct page *alloc_huge_page(struct vm_area_struct *vma,
                                    unsigned long addr, int avoid_reserve)
{
        struct hstate *h = hstate_vma(vma);
        struct page *page;
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct inode *inode = mapping->host;
        long chg;

        /*
         * Processes that did not create the mapping will have no reserves and
         * will not have accounted against quota. Check that the quota can be
         * made before satisfying the allocation
         * MAP_NORESERVE mappings may also need pages and quota allocated
         * if no reserve mapping overlaps.
         */
        chg = vma_needs_reservation(h, vma, addr);
        if (chg < 0)
                return ERR_PTR(chg);
        if (chg)
                if (hugetlb_get_quota(inode->i_mapping, chg))
                        return ERR_PTR(-ENOSPC);

        spin_lock(&hugetlb_lock);
        page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
        spin_unlock(&hugetlb_lock);

        if (!page) {
                page = alloc_buddy_huge_page(h, vma, addr);
                if (!page) {
                        hugetlb_put_quota(inode->i_mapping, chg);
                        return ERR_PTR(-VM_FAULT_OOM);
                }
        }

        set_page_refcounted(page);
        set_page_private(page, (unsigned long) mapping);

        vma_commit_reservation(h, vma, addr);

        return page;
}

int __weak alloc_bootmem_huge_page(struct hstate *h)
{
        struct huge_bootmem_page *m;
        int nr_nodes = nodes_weight(node_online_map);

        while (nr_nodes) {
                void *addr;

                addr = __alloc_bootmem_node_nopanic(
                                NODE_DATA(hstate_next_node_to_alloc(h,
                                                &node_online_map)),
                                huge_page_size(h), huge_page_size(h), 0);

                if (addr) {
                        /*
                         * Use the beginning of the huge page to store the
                         * huge_bootmem_page struct (until gather_bootmem
                         * puts them into the mem_map).
                         */
                        m = addr;
                        goto found;
                }
                nr_nodes--;
        }
        return 0;

found:
        BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
        /* Put them into a private list first because mem_map is not up yet */
        list_add(&m->list, &huge_boot_pages);
        m->hstate = h;
        return 1;
}

static void prep_compound_huge_page(struct page *page, int order)
{
        if (unlikely(order > (MAX_ORDER - 1)))
                prep_compound_gigantic_page(page, order);
        else
                prep_compound_page(page, order);
}
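
/*
 * Worked example (illustrative, assuming x86_64 where MAX_ORDER is 11):
 * a 2MB huge page (order 9) fits within MAX_ORDER - 1 and is set up by
 * prep_compound_page(), while a 1GB gigantic page (order 18) exceeds it
 * and takes the prep_compound_gigantic_page() path, which must walk the
 * tail pages with mem_map_next() because they may span mem_map sections.
 */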
/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
        struct huge_bootmem_page *m;

        list_for_each_entry(m, &huge_boot_pages, list) {
                struct page *page = virt_to_page(m);
                struct hstate *h = m->hstate;
                __ClearPageReserved(page);
                WARN_ON(page_count(page) != 1);
                prep_compound_huge_page(page, h->order);
                prep_new_huge_page(h, page, page_to_nid(page));
        }
}

static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
        unsigned long i;

        for (i = 0; i < h->max_huge_pages; ++i) {
                if (h->order >= MAX_ORDER) {
                        if (!alloc_bootmem_huge_page(h))
                                break;
                } else if (!alloc_fresh_huge_page(h, &node_online_map))
                        break;
        }
        h->max_huge_pages = i;
}

static void __init hugetlb_init_hstates(void)
{
        struct hstate *h;

        for_each_hstate(h) {
                /* oversize hugepages were init'ed in early boot */
                if (h->order < MAX_ORDER)
                        hugetlb_hstate_alloc_pages(h);
        }
}

static char * __init memfmt(char *buf, unsigned long n)
{
        if (n >= (1UL << 30))
                sprintf(buf, "%lu GB", n >> 30);
        else if (n >= (1UL << 20))
                sprintf(buf, "%lu MB", n >> 20);
        else
                sprintf(buf, "%lu KB", n >> 10);
        return buf;
}

static void __init report_hugepages(void)
{
        struct hstate *h;

        for_each_hstate(h) {
                char buf[32];
                printk(KERN_INFO "HugeTLB registered %s page size, "
                                 "pre-allocated %ld pages\n",
                        memfmt(buf, huge_page_size(h)),
                        h->free_huge_pages);
        }
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count,
                                                nodemask_t *nodes_allowed)
{
        int i;

        if (h->order >= MAX_ORDER)
                return;

        for_each_node_mask(i, *nodes_allowed) {
                struct page *page, *next;
                struct list_head *freel = &h->hugepage_freelists[i];
                list_for_each_entry_safe(page, next, freel, lru) {
                        if (count >= h->nr_huge_pages)
                                return;
                        if (PageHighMem(page))
                                continue;
                        list_del(&page->lru);
                        update_and_free_page(h, page);
                        h->free_huge_pages--;
                        h->free_huge_pages_node[page_to_nid(page)]--;
                }
        }
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count,
                                                nodemask_t *nodes_allowed)
{
}
#endif
/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
                                int delta)
{
        int start_nid, next_nid;
        int ret = 0;

        VM_BUG_ON(delta != -1 && delta != 1);

        if (delta < 0)
                start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
        else
                start_nid = hstate_next_node_to_free(h, nodes_allowed);
        next_nid = start_nid;

        do {
                int nid = next_nid;
                if (delta < 0)  {
                        /*
                         * To shrink on this node, there must be a surplus page
                         */
                        if (!h->surplus_huge_pages_node[nid]) {
                                next_nid = hstate_next_node_to_alloc(h,
                                                                nodes_allowed);
                                continue;
                        }
                }
                if (delta > 0) {
                        /*
                         * Surplus cannot exceed the total number of pages
                         */
                        if (h->surplus_huge_pages_node[nid] >=
                                                h->nr_huge_pages_node[nid]) {
                                next_nid = hstate_next_node_to_free(h,
                                                                nodes_allowed);
                                continue;
                        }
                }

                h->surplus_huge_pages += delta;
                h->surplus_huge_pages_node[nid] += delta;
                ret = 1;
                break;
        } while (next_nid != start_nid);

        return ret;
}

#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
                                                nodemask_t *nodes_allowed)
{
        unsigned long min_count, ret;

        if (h->order >= MAX_ORDER)
                return h->max_huge_pages;

        /*
         * Increase the pool size
         * First take pages out of surplus state.  Then make up the
         * remaining difference by allocating fresh huge pages.
         *
         * We might race with alloc_buddy_huge_page() here and be unable
         * to convert a surplus huge page to a normal huge page. That is
         * not critical, though, it just means the overall size of the
         * pool might be one hugepage larger than it needs to be, but
         * within all the constraints specified by the sysctls.
         */
        spin_lock(&hugetlb_lock);
        while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
                if (!adjust_pool_surplus(h, nodes_allowed, -1))
                        break;
        }

        while (count > persistent_huge_pages(h)) {
                /*
                 * If this allocation races such that we no longer need the
                 * page, free_huge_page will handle it by freeing the page
                 * and reducing the surplus.
                 */
                spin_unlock(&hugetlb_lock);
                ret = alloc_fresh_huge_page(h, nodes_allowed);
                spin_lock(&hugetlb_lock);
                if (!ret)
                        goto out;
        }

        /*
         * Decrease the pool size
         * First return free pages to the buddy allocator (being careful
         * to keep enough around to satisfy reservations).  Then place
         * pages into surplus state as needed so the pool will shrink
         * to the desired size as pages become free.
         *
         * By placing pages into the surplus state independent of the
         * overcommit value, we are allowing the surplus pool size to
         * exceed overcommit. There are few sane options here. Since
         * alloc_buddy_huge_page() is checking the global counter,
         * though, we'll note that we're not allowed to exceed surplus
         * and won't grow the pool anywhere else. Not until one of the
         * sysctls are changed, or the surplus pages go out of use.
         */
        min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
        min_count = max(count, min_count);
        try_to_free_low(h, min_count, nodes_allowed);
        while (min_count < persistent_huge_pages(h)) {
                if (!free_pool_huge_page(h, nodes_allowed, 0))
                        break;
        }
        while (count < persistent_huge_pages(h)) {
                if (!adjust_pool_surplus(h, nodes_allowed, 1))
                        break;
        }
out:
        ret = persistent_huge_pages(h);
        spin_unlock(&hugetlb_lock);
        return ret;
}

#define HSTATE_ATTR_RO(_name) \
        static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define HSTATE_ATTR(_name) \
        static struct kobj_attribute _name##_attr = \
                __ATTR(_name, 0644, _name##_show, _name##_store)
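
/*
 * Illustrative expansion (not part of the original source): with the macro
 * above, HSTATE_ATTR(nr_hugepages) defines
 *
 *      static struct kobj_attribute nr_hugepages_attr =
 *              __ATTR(nr_hugepages, 0644, nr_hugepages_show,
 *                     nr_hugepages_store);
 *
 * wiring the show/store callbacks below into a sysfs attribute.
 */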
static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];

static struct hstate *kobj_to_hstate(struct kobject *kobj)
{
        int i;

        for (i = 0; i < HUGE_MAX_HSTATE; i++)
                if (hstate_kobjs[i] == kobj)
                        return &hstates[i];
        BUG();
        return NULL;
}

static ssize_t nr_hugepages_show(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
{
        struct hstate *h = kobj_to_hstate(kobj);
        return sprintf(buf, "%lu\n", h->nr_huge_pages);
}
static ssize_t nr_hugepages_store(struct kobject *kobj,
                struct kobj_attribute *attr, const char *buf, size_t count)
{
        int err;
        unsigned long input;
        struct hstate *h = kobj_to_hstate(kobj);

        err = strict_strtoul(buf, 10, &input);
        if (err)
                return 0;

        h->max_huge_pages = set_max_huge_pages(h, input, &node_online_map);

        return count;
}
HSTATE_ATTR(nr_hugepages);

static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
{
        struct hstate *h = kobj_to_hstate(kobj);
        return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
}
static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
                struct kobj_attribute *attr, const char *buf, size_t count)
{
        int err;
        unsigned long input;
        struct hstate *h = kobj_to_hstate(kobj);

        err = strict_strtoul(buf, 10, &input);
        if (err)
                return 0;

        spin_lock(&hugetlb_lock);
        h->nr_overcommit_huge_pages = input;
        spin_unlock(&hugetlb_lock);

        return count;
}
HSTATE_ATTR(nr_overcommit_hugepages);

static ssize_t free_hugepages_show(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
{
        struct hstate *h = kobj_to_hstate(kobj);
        return sprintf(buf, "%lu\n", h->free_huge_pages);
}
HSTATE_ATTR_RO(free_hugepages);

static ssize_t resv_hugepages_show(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
{
        struct hstate *h = kobj_to_hstate(kobj);
        return sprintf(buf, "%lu\n", h->resv_huge_pages);
}
HSTATE_ATTR_RO(resv_hugepages);

static ssize_t surplus_hugepages_show(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
{
        struct hstate *h = kobj_to_hstate(kobj);
        return sprintf(buf, "%lu\n", h->surplus_huge_pages);
}
HSTATE_ATTR_RO(surplus_hugepages);

static struct attribute *hstate_attrs[] = {
        &nr_hugepages_attr.attr,
        &nr_overcommit_hugepages_attr.attr,
        &free_hugepages_attr.attr,
        &resv_hugepages_attr.attr,
        &surplus_hugepages_attr.attr,
        NULL,
};

static struct attribute_group hstate_attr_group = {
        .attrs = hstate_attrs,
};

static int __init hugetlb_sysfs_add_hstate(struct hstate *h)
{
        int retval;

        hstate_kobjs[h - hstates] = kobject_create_and_add(h->name,
                                                        hugepages_kobj);
        if (!hstate_kobjs[h - hstates])
                return -ENOMEM;

        retval = sysfs_create_group(hstate_kobjs[h - hstates],
                                                        &hstate_attr_group);
        if (retval)
                kobject_put(hstate_kobjs[h - hstates]);

        return retval;
}

static void __init hugetlb_sysfs_init(void)
{
        struct hstate *h;
        int err;

        hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
        if (!hugepages_kobj)
                return;

        for_each_hstate(h) {
                err = hugetlb_sysfs_add_hstate(h);
                if (err)
                        printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
                                                                h->name);
        }
}

static void __exit hugetlb_exit(void)
{
        struct hstate *h;

        for_each_hstate(h) {
                kobject_put(hstate_kobjs[h - hstates]);
        }

        kobject_put(hugepages_kobj);
}
module_exit(hugetlb_exit);

static int __init hugetlb_init(void)
{
        /* Some platforms decide whether they support huge pages at boot
         * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
         * there is no such support
         */
        if (HPAGE_SHIFT == 0)
                return 0;

        if (!size_to_hstate(default_hstate_size)) {
                default_hstate_size = HPAGE_SIZE;
                if (!size_to_hstate(default_hstate_size))
                        hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
        }
        default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
        if (default_hstate_max_huge_pages)
                default_hstate.max_huge_pages = default_hstate_max_huge_pages;

        hugetlb_init_hstates();

        gather_bootmem_prealloc();

        report_hugepages();

        hugetlb_sysfs_init();

        return 0;
}
module_init(hugetlb_init);
  1282. /* Should be called on processing a hugepagesz=... option */
  1283. void __init hugetlb_add_hstate(unsigned order)
  1284. {
  1285. struct hstate *h;
  1286. unsigned long i;
  1287. if (size_to_hstate(PAGE_SIZE << order)) {
  1288. printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
  1289. return;
  1290. }
  1291. BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
  1292. BUG_ON(order == 0);
  1293. h = &hstates[max_hstate++];
  1294. h->order = order;
  1295. h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
  1296. h->nr_huge_pages = 0;
  1297. h->free_huge_pages = 0;
  1298. for (i = 0; i < MAX_NUMNODES; ++i)
  1299. INIT_LIST_HEAD(&h->hugepage_freelists[i]);
  1300. h->next_nid_to_alloc = first_node(node_online_map);
  1301. h->next_nid_to_free = first_node(node_online_map);
  1302. snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
  1303. huge_page_size(h)/1024);
  1304. parsed_hstate = h;
  1305. }
static int __init hugetlb_nrpages_setup(char *s)
{
	unsigned long *mhp;
	static unsigned long *last_mhp;

	/*
	 * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
	 * so this hugepages= parameter goes to the "default hstate".
	 */
	if (!max_hstate)
		mhp = &default_hstate_max_huge_pages;
	else
		mhp = &parsed_hstate->max_huge_pages;

	if (mhp == last_mhp) {
		printk(KERN_WARNING "hugepages= specified twice without "
			"interleaving hugepagesz=, ignoring\n");
		return 1;
	}

	if (sscanf(s, "%lu", mhp) <= 0)
		*mhp = 0;

	/*
	 * Global state is always initialized later in hugetlb_init.
	 * But we need to allocate >= MAX_ORDER hstates here early to still
	 * use the bootmem allocator.
	 */
	if (max_hstate && parsed_hstate->order >= MAX_ORDER)
		hugetlb_hstate_alloc_pages(parsed_hstate);

	last_mhp = mhp;

	return 1;
}
__setup("hugepages=", hugetlb_nrpages_setup);

static int __init hugetlb_default_setup(char *s)
{
	default_hstate_size = memparse(s, &s);
	return 1;
}
__setup("default_hugepagesz=", hugetlb_default_setup);

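/*
 * Illustrative boot command line (an assumption, not taken from this file),
 * showing how the three early-parameter handlers above interact on a kernel
 * supporting both 2 MB and 1 GB pages:
 *
 *   default_hugepagesz=1G hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=512
 *
 * Each hugepagesz= sets parsed_hstate via hugetlb_add_hstate(); the
 * hugepages= value that follows is stored into that hstate's max_huge_pages
 * by hugetlb_nrpages_setup(). A hugepages= given before any hugepagesz=
 * applies to the default hstate, and default_hugepagesz= only records the
 * requested size here; the matching hstate is looked up later in
 * hugetlb_init().
 */
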
static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	struct hstate *h = &default_hstate;
	unsigned long tmp;

	if (!write)
		tmp = h->max_huge_pages;

	table->data = &tmp;
	table->maxlen = sizeof(unsigned long);
	proc_doulongvec_minmax(table, write, buffer, length, ppos);

	if (write)
		h->max_huge_pages = set_max_huge_pages(h, tmp,
							&node_online_map);

	return 0;
}

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

int hugetlb_overcommit_handler(struct ctl_table *table, int write,
			void __user *buffer,
			size_t *length, loff_t *ppos)
{
	struct hstate *h = &default_hstate;
	unsigned long tmp;

	if (!write)
		tmp = h->nr_overcommit_huge_pages;

	table->data = &tmp;
	table->maxlen = sizeof(unsigned long);
	proc_doulongvec_minmax(table, write, buffer, length, ppos);

	if (write) {
		spin_lock(&hugetlb_lock);
		h->nr_overcommit_huge_pages = tmp;
		spin_unlock(&hugetlb_lock);
	}

	return 0;
}

#endif /* CONFIG_SYSCTL */

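/*
 * Example usage of the sysctl handlers above (illustrative; the values are
 * arbitrary). They sit behind /proc/sys/vm and operate on the default
 * hstate only:
 *
 *   # echo 128 > /proc/sys/vm/nr_hugepages             -> hugetlb_sysctl_handler()
 *   # echo 64  > /proc/sys/vm/nr_overcommit_hugepages  -> hugetlb_overcommit_handler()
 *   # echo 1   > /proc/sys/vm/hugepages_treat_as_movable
 *                                                      -> hugetlb_treat_movable_handler()
 */
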
void hugetlb_report_meminfo(struct seq_file *m)
{
	struct hstate *h = &default_hstate;
	seq_printf(m,
			"HugePages_Total:   %5lu\n"
			"HugePages_Free:    %5lu\n"
			"HugePages_Rsvd:    %5lu\n"
			"HugePages_Surp:    %5lu\n"
			"Hugepagesize:   %8lu kB\n",
			h->nr_huge_pages,
			h->free_huge_pages,
			h->resv_huge_pages,
			h->surplus_huge_pages,
			1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
}

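/*
 * Sample /proc/meminfo fragment produced by the function above (the numbers
 * are made up for illustration: a 2 MB default hstate with 128 pages in the
 * pool, 96 of them free):
 *
 *   HugePages_Total:     128
 *   HugePages_Free:       96
 *   HugePages_Rsvd:        0
 *   HugePages_Surp:        0
 *   Hugepagesize:       2048 kB
 */
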
int hugetlb_report_node_meminfo(int nid, char *buf)
{
	struct hstate *h = &default_hstate;
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n"
		"Node %d HugePages_Surp:  %5u\n",
		nid, h->nr_huge_pages_node[nid],
		nid, h->free_huge_pages_node[nid],
		nid, h->surplus_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	struct hstate *h = &default_hstate;
	return h->nr_huge_pages * pages_per_huge_page(h);
}

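/*
 * Quick arithmetic check (illustrative): with a 2 MB default hstate and
 * nr_huge_pages = 10, pages_per_huge_page() is 512 (2 MB / 4 KB), so
 * hugetlb_total_pages() reports 10 * 512 = 5120 base pages.
 */
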
static int hugetlb_acct_memory(struct hstate *h, long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	/*
	 * When cpusets are configured, they break the strict hugetlb page
	 * reservation because the accounting is done on a global variable.
	 * Such a reservation is meaningless in the presence of cpusets
	 * because it is not checked against page availability for the
	 * current cpuset. An application can still be OOM-killed by the
	 * kernel if there is no free hugetlb page in the cpuset the task
	 * runs in. Enforcing strict accounting with cpusets is nearly
	 * impossible (or too ugly) because cpusets are too fluid: tasks
	 * and memory nodes can be moved between cpusets dynamically.
	 *
	 * Changing the semantics of shared hugetlb mappings under cpusets
	 * is undesirable. However, to preserve some of the semantics, we
	 * fall back to checking the current free page availability as a
	 * best effort, hopefully minimizing the impact of the semantic
	 * change cpusets introduce.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(h, delta) < 0)
			goto out;

		if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
			return_unused_surplus_pages(h, delta);
			goto out;
		}
	}

	ret = 0;
	if (delta < 0)
		return_unused_surplus_pages(h, (unsigned long) -delta);

out:
	spin_unlock(&hugetlb_lock);
	return ret;
}

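/*
 * Illustrative accounting example (values assumed): a new shared mapping
 * needing 8 more huge pages calls hugetlb_acct_memory(h, 8). If the free
 * pool cannot cover the delta, gather_surplus_pages() first tries to top it
 * up with surplus pages; should the cpuset check then find fewer than 8
 * free pages on the allowed nodes, the surplus pages are handed back and
 * -ENOMEM is propagated to the caller (e.g. hugetlb_reserve_pages()).
 */
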
static void hugetlb_vm_op_open(struct vm_area_struct *vma)
{
	struct resv_map *reservations = vma_resv_map(vma);

	/*
	 * This new VMA should share its sibling's reservation map if present.
	 * The VMA will only ever have a valid reservation map pointer where
	 * it is being copied for another still existing VMA. As that VMA
	 * has a reference to the reservation map it cannot disappear until
	 * after this open call completes. It is therefore safe to take a
	 * new reference here without additional locking.
	 */
	if (reservations)
		kref_get(&reservations->refs);
}

static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
	struct hstate *h = hstate_vma(vma);
	struct resv_map *reservations = vma_resv_map(vma);
	unsigned long reserve;
	unsigned long start;
	unsigned long end;

	if (reservations) {
		start = vma_hugecache_offset(h, vma, vma->vm_start);
		end = vma_hugecache_offset(h, vma, vma->vm_end);

		reserve = (end - start) -
			region_count(&reservations->regions, start, end);

		kref_put(&reservations->refs, resv_map_release);

		if (reserve) {
			hugetlb_acct_memory(h, -reserve);
			hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
		}
	}
}

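/*
 * Sketch of the reserve calculation above, with assumed numbers: a private
 * VMA owning a 10-page reservation that has faulted in 4 pages will have 4
 * pages recorded in its resv_map regions. On close, reserve = 10 - 4 = 6,
 * so the 6 unused reservations are returned via hugetlb_acct_memory(h, -6)
 * and the matching filesystem quota is released.
 */
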
/*
 * We cannot handle pagefaults against hugetlb pages at all. They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

const struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
	.open = hugetlb_vm_op_open,
	.close = hugetlb_vm_op_close,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
	}
}

int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr, sz);
		if (!dst_pte)
			goto nomem;

		/* If the pagetables are shared don't copy or take references */
		if (dst_pte == src_pte)
			continue;

		spin_lock(&dst->page_table_lock);
		spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
		if (!huge_pte_none(huge_ptep_get(src_pte))) {
			if (cow)
				huge_ptep_set_wrprotect(src, addr, src_pte);
			entry = huge_ptep_get(src_pte);
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end, struct page *ref_page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);

	/*
	 * A page gathering list, protected by the per-file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~huge_page_mask(h));
	BUG_ON(end & ~huge_page_mask(h));

	mmu_notifier_invalidate_range_start(mm, start, end);
	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += sz) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		/*
		 * If a reference page is supplied, it is because a specific
		 * page is being unmapped, not a range. Ensure the page we
		 * are about to unmap is the actual page of interest.
		 */
		if (ref_page) {
			pte = huge_ptep_get(ptep);
			if (huge_pte_none(pte))
				continue;
			page = pte_page(pte);
			if (page != ref_page)
				continue;

			/*
			 * Mark the VMA as having unmapped its page so that
			 * future faults in this VMA will fail rather than
			 * looking like data was lost.
			 */
			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
		}

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (huge_pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	mmu_notifier_invalidate_range_end(mm, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page)
{
	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	__unmap_hugepage_range(vma, start, end, ref_page);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
}

/*
 * This is called when the original mapper is failing to COW a MAP_PRIVATE
 * mapping it owns the reserve page for. The intention is to unmap the page
 * from other VMAs and let the children be SIGKILLed if they are faulting the
 * same region.
 */
static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
				struct page *page, unsigned long address)
{
	struct hstate *h = hstate_vma(vma);
	struct vm_area_struct *iter_vma;
	struct address_space *mapping;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
	 * from the page cache lookup, which is in HPAGE_SIZE units.
	 */
	address = address & huge_page_mask(h);
	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
		+ (vma->vm_pgoff >> PAGE_SHIFT);
	mapping = (struct address_space *)page_private(page);

	vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/* Do not unmap the current VMA */
		if (iter_vma == vma)
			continue;

		/*
		 * Unmap the page from other VMAs without their own reserves.
		 * They get marked to be SIGKILLed if they fault in these
		 * areas. This is because a future no-page fault on this VMA
		 * could insert a zeroed page instead of the data existing
		 * from the time of fork. This would look like data corruption.
		 */
		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
			unmap_hugepage_range(iter_vma,
				address, address + huge_page_size(h),
				page);
	}

	return 1;
}

static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte,
			struct page *pagecache_page)
{
	struct hstate *h = hstate_vma(vma);
	struct page *old_page, *new_page;
	int avoidcopy;
	int outside_reserve = 0;

	old_page = pte_page(pte);

retry_avoidcopy:
	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	/*
	 * If the process that created a MAP_PRIVATE mapping is about to
	 * perform a COW due to a shared page count, attempt to satisfy
	 * the allocation without using the existing reserves. The pagecache
	 * page is used to determine if the reserve at this address was
	 * consumed or not. If reserves were used, a partially faulted mapping
	 * at the time of fork() could consume its reserves on COW instead
	 * of the full address range.
	 */
	if (!(vma->vm_flags & VM_MAYSHARE) &&
			is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
			old_page != pagecache_page)
		outside_reserve = 1;

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address, outside_reserve);

	if (IS_ERR(new_page)) {
		page_cache_release(old_page);

		/*
		 * If a process owning a MAP_PRIVATE mapping fails to COW,
		 * it is due to references held by a child and an insufficient
		 * huge page pool. To guarantee the original mapper's
		 * reliability, unmap the page from child processes. The child
		 * may get SIGKILLed if it later faults.
		 */
		if (outside_reserve) {
			BUG_ON(huge_pte_none(pte));
			if (unmap_ref_private(mm, vma, old_page, address)) {
				BUG_ON(page_count(old_page) != 1);
				BUG_ON(huge_pte_none(pte));
				goto retry_avoidcopy;
			}
			WARN_ON_ONCE(1);
		}

		return -PTR_ERR(new_page);
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	__SetPageUptodate(new_page);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
		/* Break COW */
		huge_ptep_clear_flush(vma, address, ptep);
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}

/* Return the pagecache page at a given address within a VMA */
static struct page *hugetlbfs_pagecache_page(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping;
	pgoff_t idx;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	return find_lock_page(mapping, idx);
}

/*
 * Return whether there is a pagecache page to back the given address within
 * the VMA. The caller, follow_hugetlb_page(), holds page_table_lock, so we
 * cannot lock_page().
 */
static bool hugetlbfs_pagecache_present(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping;
	pgoff_t idx;
	struct page *page;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	page = find_get_page(mapping, idx);
	if (page)
		put_page(page);
	return page != NULL;
}

static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, unsigned int flags)
{
	struct hstate *h = hstate_vma(vma);
	int ret = VM_FAULT_SIGBUS;
	pgoff_t idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	/*
	 * Currently, we are forced to kill the process in the event the
	 * original mapper has unmapped pages from the child due to a failed
	 * COW. Warn that such a situation has occurred as it may not be
	 * obvious.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
		printk(KERN_WARNING
			"PID %d killed due to inadequate hugepage pool\n",
			current->pid);
		return ret;
	}

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	/*
	 * Use the page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> huge_page_shift(h);
		if (idx >= size)
			goto out;
		page = alloc_huge_page(vma, address, 0);
		if (IS_ERR(page)) {
			ret = -PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, address, huge_page_size(h));
		__SetPageUptodate(page);

		if (vma->vm_flags & VM_MAYSHARE) {
			int err;
			struct inode *inode = mapping->host;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}

			spin_lock(&inode->i_lock);
			inode->i_blocks += blocks_per_huge_page(h);
			spin_unlock(&inode->i_lock);
		} else
			lock_page(page);
	}

	/*
	 * If we are going to COW a private mapping later, we examine the
	 * pending reservations for this page now. This will ensure that
	 * any allocations necessary to record that reservation occur outside
	 * the spinlock.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
		if (vma_needs_reservation(h, vma, address) < 0) {
			ret = VM_FAULT_OOM;
			goto backout_unlocked;
		}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> huge_page_shift(h);
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!huge_pte_none(huge_ptep_get(ptep)))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
backout_unlocked:
	unlock_page(page);
	put_page(page);
	goto out;
}

int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	struct page *pagecache_page = NULL;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
	struct hstate *h = hstate_vma(vma);

	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = huge_ptep_get(ptep);
	if (huge_pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, flags);
		goto out_mutex;
	}

	ret = 0;

	/*
	 * If we are going to COW the mapping later, we examine the pending
	 * reservations for this page now. This will ensure that any
	 * allocations necessary to record that reservation occur outside the
	 * spinlock. For private mappings, we also lookup the pagecache
	 * page now as it is used to determine if a reservation has been
	 * consumed.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
		if (vma_needs_reservation(h, vma, address) < 0) {
			ret = VM_FAULT_OOM;
			goto out_mutex;
		}

		if (!(vma->vm_flags & VM_MAYSHARE))
			pagecache_page = hugetlbfs_pagecache_page(h,
								vma, address);
	}

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
		goto out_page_table_lock;

	if (flags & FAULT_FLAG_WRITE) {
		if (!pte_write(entry)) {
			ret = hugetlb_cow(mm, vma, address, ptep, entry,
							pagecache_page);
			goto out_page_table_lock;
		}
		entry = pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
						flags & FAULT_FLAG_WRITE))
		update_mmu_cache(vma, address, entry);

out_page_table_lock:
	spin_unlock(&mm->page_table_lock);

	if (pagecache_page) {
		unlock_page(pagecache_page);
		put_page(pagecache_page);
	}

out_mutex:
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

/* Can be overridden by architectures */
__attribute__((weak)) struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
	       pud_t *pud, int write)
{
	BUG();
	return NULL;
}

int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i,
			unsigned int flags)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;
	struct hstate *h = hstate_vma(vma);

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		int absent;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage. We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
		absent = !pte || huge_pte_none(huge_ptep_get(pte));

		/*
		 * When coredumping, it suits get_dump_page if we just return
		 * an error where there's an empty slot with no huge pagecache
		 * to back it. This way, we avoid allocating a hugepage, and
		 * the sparse dumpfile avoids allocating disk blocks, but its
		 * huge holes still show up with zeroes where they need to be.
		 */
		if (absent && (flags & FOLL_DUMP) &&
		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
			remainder = 0;
			break;
		}

		if (absent ||
		    ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr,
				(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
			spin_lock(&mm->page_table_lock);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			break;
		}

		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
		page = pte_page(huge_ptep_get(pte));
same_page:
		if (pages) {
			pages[i] = mem_map_offset(page, pfn_offset);
			get_page(pages[i]);
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < pages_per_huge_page(h)) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i ? i : -EFAULT;
}

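/*
 * Illustration of the inner same_page loop above (assuming 2 MB huge pages
 * and 4 KB base pages): a get_user_pages() request for 1 MB starting at a
 * huge-page-aligned address is satisfied by a single huge page; pfn_offset
 * walks from 0 to 255, filling pages[] with 256 consecutive subpage pointers
 * via mem_map_offset() without re-walking the page tables for each entry.
 */
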
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;
	struct hstate *h = hstate_vma(vma);

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += huge_page_size(h)) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!huge_pte_none(huge_ptep_get(ptep))) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}

int hugetlb_reserve_pages(struct inode *inode,
					long from, long to,
					struct vm_area_struct *vma,
					int acctflag)
{
	long ret, chg;
	struct hstate *h = hstate_inode(inode);

	/*
	 * Only apply hugepage reservation if asked. At fault time, an
	 * attempt will be made for VM_NORESERVE to allocate a page
	 * and filesystem quota without using reserves.
	 */
	if (acctflag & VM_NORESERVE)
		return 0;

	/*
	 * Shared mappings base their reservation on the number of pages that
	 * are already allocated on behalf of the file. Private mappings need
	 * to reserve the full area even if read-only, as mprotect() may be
	 * called to make the mapping read-write. Assume !vma is a shm mapping.
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE)
		chg = region_chg(&inode->i_mapping->private_list, from, to);
	else {
		struct resv_map *resv_map = resv_map_alloc();
		if (!resv_map)
			return -ENOMEM;

		chg = to - from;

		set_vma_resv_map(vma, resv_map);
		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
	}

	if (chg < 0)
		return chg;

	/* There must be enough filesystem quota for the mapping */
	if (hugetlb_get_quota(inode->i_mapping, chg))
		return -ENOSPC;

	/*
	 * Check that enough hugepages are available for the reservation.
	 * Hand back the quota if there are not.
	 */
	ret = hugetlb_acct_memory(h, chg);
	if (ret < 0) {
		hugetlb_put_quota(inode->i_mapping, chg);
		return ret;
	}

	/*
	 * Account for the reservations made. Shared mappings record regions
	 * that have reservations as they are shared by multiple VMAs.
	 * When the last VMA disappears, the region map says how much
	 * the reservation was and the page cache tells how much of
	 * the reservation was consumed. Private mappings are per-VMA and
	 * only the consumed reservations are tracked. When the VMA
	 * disappears, the original reservation is the VMA size and the
	 * consumed reservations are stored in the map. Hence, nothing
	 * else has to be done for private mappings here.
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE)
		region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}

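/*
 * Userspace view (illustrative sketch, assuming a hugetlbfs mount at
 * /dev/hugepages and a 2 MB hstate): mmap()ing 8 MB of a hugetlbfs file,
 *
 *   int fd = open("/dev/hugepages/buf", O_CREAT | O_RDWR, 0600);
 *   void *p = mmap(NULL, 8 << 20, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * reaches hugetlb_reserve_pages() via hugetlbfs with from = 0 and to = 4,
 * i.e. a shared mapping asking for 4 huge pages; region_chg()/region_add()
 * record the range so later VMAs of the same file do not reserve it twice.
 */
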
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	struct hstate *h = hstate_inode(inode);
	long chg = region_truncate(&inode->i_mapping->private_list, offset);

	spin_lock(&inode->i_lock);
	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
	spin_unlock(&inode->i_lock);

	hugetlb_put_quota(inode->i_mapping, (chg - freed));
	hugetlb_acct_memory(h, -(chg - freed));
}