hugetlb.c

  1. /*
  2. * Generic hugetlb support.
  3. * (C) William Irwin, April 2004
  4. */
  5. #include <linux/list.h>
  6. #include <linux/init.h>
  7. #include <linux/module.h>
  8. #include <linux/mm.h>
  9. #include <linux/seq_file.h>
  10. #include <linux/sysctl.h>
  11. #include <linux/highmem.h>
  12. #include <linux/mmu_notifier.h>
  13. #include <linux/nodemask.h>
  14. #include <linux/pagemap.h>
  15. #include <linux/mempolicy.h>
  16. #include <linux/cpuset.h>
  17. #include <linux/mutex.h>
  18. #include <linux/bootmem.h>
  19. #include <linux/sysfs.h>
  20. #include <linux/slab.h>
  21. #include <linux/rmap.h>
  22. #include <linux/swap.h>
  23. #include <linux/swapops.h>
  24. #include <asm/page.h>
  25. #include <asm/pgtable.h>
  26. #include <asm/tlb.h>
  27. #include <linux/io.h>
  28. #include <linux/hugetlb.h>
  29. #include <linux/hugetlb_cgroup.h>
  30. #include <linux/node.h>
  32. #include "internal.h"
  33. const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
  34. static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
  35. unsigned long hugepages_treat_as_movable;
  36. int hugetlb_max_hstate __read_mostly;
  37. unsigned int default_hstate_idx;
  38. struct hstate hstates[HUGE_MAX_HSTATE];
  39. __initdata LIST_HEAD(huge_boot_pages);
  40. /* for command line parsing */
  41. static struct hstate * __initdata parsed_hstate;
  42. static unsigned long __initdata default_hstate_max_huge_pages;
  43. static unsigned long __initdata default_hstate_size;
  44. /*
  45. * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
  46. */
  47. DEFINE_SPINLOCK(hugetlb_lock);
  48. static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
  49. {
  50. bool free = (spool->count == 0) && (spool->used_hpages == 0);
  51. spin_unlock(&spool->lock);
  52. /* If no pages are used, and no other handles to the subpool
  53. * remain, free the subpool */
  54. if (free)
  55. kfree(spool);
  56. }
  57. struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
  58. {
  59. struct hugepage_subpool *spool;
  60. spool = kmalloc(sizeof(*spool), GFP_KERNEL);
  61. if (!spool)
  62. return NULL;
  63. spin_lock_init(&spool->lock);
  64. spool->count = 1;
  65. spool->max_hpages = nr_blocks;
  66. spool->used_hpages = 0;
  67. return spool;
  68. }
  69. void hugepage_put_subpool(struct hugepage_subpool *spool)
  70. {
  71. spin_lock(&spool->lock);
  72. BUG_ON(!spool->count);
  73. spool->count--;
  74. unlock_or_release_subpool(spool);
  75. }
  76. static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
  77. long delta)
  78. {
  79. int ret = 0;
  80. if (!spool)
  81. return 0;
  82. spin_lock(&spool->lock);
  83. if ((spool->used_hpages + delta) <= spool->max_hpages) {
  84. spool->used_hpages += delta;
  85. } else {
  86. ret = -ENOMEM;
  87. }
  88. spin_unlock(&spool->lock);
  89. return ret;
  90. }
  91. static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
  92. long delta)
  93. {
  94. if (!spool)
  95. return;
  96. spin_lock(&spool->lock);
  97. spool->used_hpages -= delta;
  98. /* If hugetlbfs_put_super couldn't free spool due to
  99. * an outstanding quota reference, free it now. */
  100. unlock_or_release_subpool(spool);
  101. }
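/*
 * Rough sketch of how the subpool charge/uncharge pair above is used (this
 * mirrors alloc_huge_page() and free_huge_page() further down in this file;
 * 'chg' is whatever vma_needs_reservation() reported):
 *
 *	if (hugepage_subpool_get_pages(spool, chg))	- charge, may hit the limit
 *		return ERR_PTR(-ENOSPC);
 *	... allocate and hand out the huge page ...
 *	hugepage_subpool_put_pages(spool, chg);		- uncharge on free or error
 */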
  102. static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
  103. {
  104. return HUGETLBFS_SB(inode->i_sb)->spool;
  105. }
  106. static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
  107. {
  108. return subpool_inode(vma->vm_file->f_dentry->d_inode);
  109. }
  110. /*
  111. * Region tracking -- allows tracking of reservations and instantiated pages
  112. * across the pages in a mapping.
  113. *
  114. * The region data structures are protected by a combination of the mmap_sem
  115. * and the hugetlb_instantiation_mutex. To access or modify a region the caller
  116. * must either hold the mmap_sem for write, or the mmap_sem for read and
  117. * the hugetlb_instantiation mutex:
  118. *
  119. * down_write(&mm->mmap_sem);
  120. * or
  121. * down_read(&mm->mmap_sem);
  122. * mutex_lock(&hugetlb_instantiation_mutex);
  123. */
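/*
 * The region calls below are used in a two-phase prepare/commit pattern: a
 * rough sketch (this mirrors vma_needs_reservation()/vma_commit_reservation()
 * further down; 'head' is the region list, 'idx' the huge page offset):
 *
 *	chg = region_chg(head, idx, idx + 1);	- prepare; may kmalloc, may fail
 *	if (chg < 0)
 *		return chg;
 *	... allocate the huge page, charge the subpool ...
 *	region_add(head, idx, idx + 1);		- commit; cannot fail here
 */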
  124. struct file_region {
  125. struct list_head link;
  126. long from;
  127. long to;
  128. };
  129. static long region_add(struct list_head *head, long f, long t)
  130. {
  131. struct file_region *rg, *nrg, *trg;
  132. /* Locate the region we are either in or before. */
  133. list_for_each_entry(rg, head, link)
  134. if (f <= rg->to)
  135. break;
  136. /* Round our left edge to the current segment if it encloses us. */
  137. if (f > rg->from)
  138. f = rg->from;
  139. /* Check for and consume any regions we now overlap with. */
  140. nrg = rg;
  141. list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
  142. if (&rg->link == head)
  143. break;
  144. if (rg->from > t)
  145. break;
  146. /* If this area reaches higher, then extend our area to
  147. * include it completely. If this is not the first area
  148. * which we intend to reuse, free it. */
  149. if (rg->to > t)
  150. t = rg->to;
  151. if (rg != nrg) {
  152. list_del(&rg->link);
  153. kfree(rg);
  154. }
  155. }
  156. nrg->from = f;
  157. nrg->to = t;
  158. return 0;
  159. }
  160. static long region_chg(struct list_head *head, long f, long t)
  161. {
  162. struct file_region *rg, *nrg;
  163. long chg = 0;
  164. /* Locate the region we are before or in. */
  165. list_for_each_entry(rg, head, link)
  166. if (f <= rg->to)
  167. break;
  168. /* If we are below the current region then a new region is required.
  169. * Subtle: allocate a new region at the position, but make it zero
  170. * size such that we can guarantee to record the reservation. */
  171. if (&rg->link == head || t < rg->from) {
  172. nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
  173. if (!nrg)
  174. return -ENOMEM;
  175. nrg->from = f;
  176. nrg->to = f;
  177. INIT_LIST_HEAD(&nrg->link);
  178. list_add(&nrg->link, rg->link.prev);
  179. return t - f;
  180. }
  181. /* Round our left edge to the current segment if it encloses us. */
  182. if (f > rg->from)
  183. f = rg->from;
  184. chg = t - f;
  185. /* Check for and consume any regions we now overlap with. */
  186. list_for_each_entry(rg, rg->link.prev, link) {
  187. if (&rg->link == head)
  188. break;
  189. if (rg->from > t)
  190. return chg;
  191. /* We overlap with this area; if it extends further than
  192. * us then we must extend ourselves. Account for its
  193. * existing reservation. */
  194. if (rg->to > t) {
  195. chg += rg->to - t;
  196. t = rg->to;
  197. }
  198. chg -= rg->to - rg->from;
  199. }
  200. return chg;
  201. }
  202. static long region_truncate(struct list_head *head, long end)
  203. {
  204. struct file_region *rg, *trg;
  205. long chg = 0;
  206. /* Locate the region we are either in or before. */
  207. list_for_each_entry(rg, head, link)
  208. if (end <= rg->to)
  209. break;
  210. if (&rg->link == head)
  211. return 0;
  212. /* If we are in the middle of a region then adjust it. */
  213. if (end > rg->from) {
  214. chg = rg->to - end;
  215. rg->to = end;
  216. rg = list_entry(rg->link.next, typeof(*rg), link);
  217. }
  218. /* Drop any remaining regions. */
  219. list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
  220. if (&rg->link == head)
  221. break;
  222. chg += rg->to - rg->from;
  223. list_del(&rg->link);
  224. kfree(rg);
  225. }
  226. return chg;
  227. }
  228. static long region_count(struct list_head *head, long f, long t)
  229. {
  230. struct file_region *rg;
  231. long chg = 0;
  232. /* Locate each segment we overlap with, and count that overlap. */
  233. list_for_each_entry(rg, head, link) {
  234. long seg_from;
  235. long seg_to;
  236. if (rg->to <= f)
  237. continue;
  238. if (rg->from >= t)
  239. break;
  240. seg_from = max(rg->from, f);
  241. seg_to = min(rg->to, t);
  242. chg += seg_to - seg_from;
  243. }
  244. return chg;
  245. }
  246. /*
  247. * Convert the address within this vma to the page offset within
  248. * the mapping, in pagecache page units; huge pages here.
  249. */
  250. static pgoff_t vma_hugecache_offset(struct hstate *h,
  251. struct vm_area_struct *vma, unsigned long address)
  252. {
  253. return ((address - vma->vm_start) >> huge_page_shift(h)) +
  254. (vma->vm_pgoff >> huge_page_order(h));
  255. }
  256. pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
  257. unsigned long address)
  258. {
  259. return vma_hugecache_offset(hstate_vma(vma), vma, address);
  260. }
  261. /*
  262. * Return the size of the pages allocated when backing a VMA. In the majority
  263. * of cases this will be the same size as used by the page table entries.
  264. */
  265. unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
  266. {
  267. struct hstate *hstate;
  268. if (!is_vm_hugetlb_page(vma))
  269. return PAGE_SIZE;
  270. hstate = hstate_vma(vma);
  271. return 1UL << (hstate->order + PAGE_SHIFT);
  272. }
  273. EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
  274. /*
  275. * Return the page size being used by the MMU to back a VMA. In the majority
  276. * of cases, the page size used by the kernel matches the MMU size. On
  277. * architectures where it differs, an architecture-specific version of this
  278. * function is required.
  279. */
  280. #ifndef vma_mmu_pagesize
  281. unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
  282. {
  283. return vma_kernel_pagesize(vma);
  284. }
  285. #endif
  286. /*
  287. * Flags for MAP_PRIVATE reservations. These are stored in the bottom
  288. * bits of the reservation map pointer, which are always clear due to
  289. * alignment.
  290. */
  291. #define HPAGE_RESV_OWNER (1UL << 0)
  292. #define HPAGE_RESV_UNMAPPED (1UL << 1)
  293. #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
  294. /*
  295. * These helpers are used to track how many pages are reserved for
  296. * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
  297. * is guaranteed to have its future faults succeed.
  298. *
  299. * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
  300. * the reserve counters are updated with the hugetlb_lock held. It is safe
  301. * to reset the VMA at fork() time as it is not in use yet and there is no
  302. * chance of the global counters getting corrupted as a result of the values.
  303. *
  304. * The private mapping reservation is represented in a subtly different
  305. * manner to a shared mapping. A shared mapping has a region map associated
  306. * with the underlying file, this region map represents the backing file
  307. * pages which have ever had a reservation assigned; this persists even
  308. * after the page is instantiated. A private mapping has a region map
  309. * associated with the original mmap which is attached to all VMAs which
  310. * reference it, this region map represents those offsets which have consumed
  311. * reservation, i.e. where pages have been instantiated.
  312. */
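/*
 * Illustrative layout of vma->vm_private_data for a private mapping, as
 * implied by the helpers below (the low bits are free because the resv_map
 * is kmalloc-aligned):
 *
 *	bit 0		HPAGE_RESV_OWNER
 *	bit 1		HPAGE_RESV_UNMAPPED
 *	remaining bits	pointer to the struct resv_map
 *
 *	map = (struct resv_map *)(get_vma_private_data(vma) & ~HPAGE_RESV_MASK);
 */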
  313. static unsigned long get_vma_private_data(struct vm_area_struct *vma)
  314. {
  315. return (unsigned long)vma->vm_private_data;
  316. }
  317. static void set_vma_private_data(struct vm_area_struct *vma,
  318. unsigned long value)
  319. {
  320. vma->vm_private_data = (void *)value;
  321. }
  322. struct resv_map {
  323. struct kref refs;
  324. struct list_head regions;
  325. };
  326. static struct resv_map *resv_map_alloc(void)
  327. {
  328. struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
  329. if (!resv_map)
  330. return NULL;
  331. kref_init(&resv_map->refs);
  332. INIT_LIST_HEAD(&resv_map->regions);
  333. return resv_map;
  334. }
  335. static void resv_map_release(struct kref *ref)
  336. {
  337. struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
  338. /* Clear out any active regions before we release the map. */
  339. region_truncate(&resv_map->regions, 0);
  340. kfree(resv_map);
  341. }
  342. static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
  343. {
  344. VM_BUG_ON(!is_vm_hugetlb_page(vma));
  345. if (!(vma->vm_flags & VM_MAYSHARE))
  346. return (struct resv_map *)(get_vma_private_data(vma) &
  347. ~HPAGE_RESV_MASK);
  348. return NULL;
  349. }
  350. static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
  351. {
  352. VM_BUG_ON(!is_vm_hugetlb_page(vma));
  353. VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
  354. set_vma_private_data(vma, (get_vma_private_data(vma) &
  355. HPAGE_RESV_MASK) | (unsigned long)map);
  356. }
  357. static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
  358. {
  359. VM_BUG_ON(!is_vm_hugetlb_page(vma));
  360. VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
  361. set_vma_private_data(vma, get_vma_private_data(vma) | flags);
  362. }
  363. static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
  364. {
  365. VM_BUG_ON(!is_vm_hugetlb_page(vma));
  366. return (get_vma_private_data(vma) & flag) != 0;
  367. }
  368. /* Decrement the reserved pages in the hugepage pool by one */
  369. static void decrement_hugepage_resv_vma(struct hstate *h,
  370. struct vm_area_struct *vma)
  371. {
  372. if (vma->vm_flags & VM_NORESERVE)
  373. return;
  374. if (vma->vm_flags & VM_MAYSHARE) {
  375. /* Shared mappings always use reserves */
  376. h->resv_huge_pages--;
  377. } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
  378. /*
  379. * Only the process that called mmap() has reserves for
  380. * private mappings.
  381. */
  382. h->resv_huge_pages--;
  383. }
  384. }
  385. /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
  386. void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
  387. {
  388. VM_BUG_ON(!is_vm_hugetlb_page(vma));
  389. if (!(vma->vm_flags & VM_MAYSHARE))
  390. vma->vm_private_data = (void *)0;
  391. }
  392. /* Returns true if the VMA has associated reserve pages */
  393. static int vma_has_reserves(struct vm_area_struct *vma)
  394. {
  395. if (vma->vm_flags & VM_MAYSHARE)
  396. return 1;
  397. if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
  398. return 1;
  399. return 0;
  400. }
  401. static void copy_gigantic_page(struct page *dst, struct page *src)
  402. {
  403. int i;
  404. struct hstate *h = page_hstate(src);
  405. struct page *dst_base = dst;
  406. struct page *src_base = src;
  407. for (i = 0; i < pages_per_huge_page(h); ) {
  408. cond_resched();
  409. copy_highpage(dst, src);
  410. i++;
  411. dst = mem_map_next(dst, dst_base, i);
  412. src = mem_map_next(src, src_base, i);
  413. }
  414. }
  415. void copy_huge_page(struct page *dst, struct page *src)
  416. {
  417. int i;
  418. struct hstate *h = page_hstate(src);
  419. if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
  420. copy_gigantic_page(dst, src);
  421. return;
  422. }
  423. might_sleep();
  424. for (i = 0; i < pages_per_huge_page(h); i++) {
  425. cond_resched();
  426. copy_highpage(dst + i, src + i);
  427. }
  428. }
  429. static void enqueue_huge_page(struct hstate *h, struct page *page)
  430. {
  431. int nid = page_to_nid(page);
  432. list_move(&page->lru, &h->hugepage_freelists[nid]);
  433. h->free_huge_pages++;
  434. h->free_huge_pages_node[nid]++;
  435. }
  436. static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
  437. {
  438. struct page *page;
  439. if (list_empty(&h->hugepage_freelists[nid]))
  440. return NULL;
  441. page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
  442. list_move(&page->lru, &h->hugepage_activelist);
  443. set_page_refcounted(page);
  444. h->free_huge_pages--;
  445. h->free_huge_pages_node[nid]--;
  446. return page;
  447. }
  448. static struct page *dequeue_huge_page_vma(struct hstate *h,
  449. struct vm_area_struct *vma,
  450. unsigned long address, int avoid_reserve)
  451. {
  452. struct page *page = NULL;
  453. struct mempolicy *mpol;
  454. nodemask_t *nodemask;
  455. struct zonelist *zonelist;
  456. struct zone *zone;
  457. struct zoneref *z;
  458. unsigned int cpuset_mems_cookie;
  459. retry_cpuset:
  460. cpuset_mems_cookie = get_mems_allowed();
  461. zonelist = huge_zonelist(vma, address,
  462. htlb_alloc_mask, &mpol, &nodemask);
  463. /*
  464. * A child process with MAP_PRIVATE mappings created by its parent
  465. * has no page reserves. This check ensures that reservations are
  466. * not "stolen". The child may still get SIGKILLed.
  467. */
  468. if (!vma_has_reserves(vma) &&
  469. h->free_huge_pages - h->resv_huge_pages == 0)
  470. goto err;
  471. /* If reserves cannot be used, ensure enough pages are in the pool */
  472. if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
  473. goto err;
  474. for_each_zone_zonelist_nodemask(zone, z, zonelist,
  475. MAX_NR_ZONES - 1, nodemask) {
  476. if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
  477. page = dequeue_huge_page_node(h, zone_to_nid(zone));
  478. if (page) {
  479. if (!avoid_reserve)
  480. decrement_hugepage_resv_vma(h, vma);
  481. break;
  482. }
  483. }
  484. }
  485. mpol_cond_put(mpol);
  486. if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
  487. goto retry_cpuset;
  488. return page;
  489. err:
  490. mpol_cond_put(mpol);
  491. return NULL;
  492. }
  493. static void update_and_free_page(struct hstate *h, struct page *page)
  494. {
  495. int i;
  496. VM_BUG_ON(h->order >= MAX_ORDER);
  497. h->nr_huge_pages--;
  498. h->nr_huge_pages_node[page_to_nid(page)]--;
  499. for (i = 0; i < pages_per_huge_page(h); i++) {
  500. page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
  501. 1 << PG_referenced | 1 << PG_dirty |
  502. 1 << PG_active | 1 << PG_reserved |
  503. 1 << PG_private | 1 << PG_writeback);
  504. }
  505. VM_BUG_ON(hugetlb_cgroup_from_page(page));
  506. set_compound_page_dtor(page, NULL);
  507. set_page_refcounted(page);
  508. arch_release_hugepage(page);
  509. __free_pages(page, huge_page_order(h));
  510. }
  511. struct hstate *size_to_hstate(unsigned long size)
  512. {
  513. struct hstate *h;
  514. for_each_hstate(h) {
  515. if (huge_page_size(h) == size)
  516. return h;
  517. }
  518. return NULL;
  519. }
  520. static void free_huge_page(struct page *page)
  521. {
  522. /*
  523. * Can't pass hstate in here because it is called from the
  524. * compound page destructor.
  525. */
  526. struct hstate *h = page_hstate(page);
  527. int nid = page_to_nid(page);
  528. struct hugepage_subpool *spool =
  529. (struct hugepage_subpool *)page_private(page);
  530. set_page_private(page, 0);
  531. page->mapping = NULL;
  532. BUG_ON(page_count(page));
  533. BUG_ON(page_mapcount(page));
  534. spin_lock(&hugetlb_lock);
  535. hugetlb_cgroup_uncharge_page(hstate_index(h),
  536. pages_per_huge_page(h), page);
  537. if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
  538. /* remove the page from active list */
  539. list_del(&page->lru);
  540. update_and_free_page(h, page);
  541. h->surplus_huge_pages--;
  542. h->surplus_huge_pages_node[nid]--;
  543. } else {
  544. enqueue_huge_page(h, page);
  545. }
  546. spin_unlock(&hugetlb_lock);
  547. hugepage_subpool_put_pages(spool, 1);
  548. }
  549. static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
  550. {
  551. INIT_LIST_HEAD(&page->lru);
  552. set_compound_page_dtor(page, free_huge_page);
  553. spin_lock(&hugetlb_lock);
  554. set_hugetlb_cgroup(page, NULL);
  555. h->nr_huge_pages++;
  556. h->nr_huge_pages_node[nid]++;
  557. spin_unlock(&hugetlb_lock);
  558. put_page(page); /* free it into the hugepage allocator */
  559. }
  560. static void prep_compound_gigantic_page(struct page *page, unsigned long order)
  561. {
  562. int i;
  563. int nr_pages = 1 << order;
  564. struct page *p = page + 1;
  565. /* we rely on prep_new_huge_page to set the destructor */
  566. set_compound_order(page, order);
  567. __SetPageHead(page);
  568. for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
  569. __SetPageTail(p);
  570. set_page_count(p, 0);
  571. p->first_page = page;
  572. }
  573. }
  574. int PageHuge(struct page *page)
  575. {
  576. compound_page_dtor *dtor;
  577. if (!PageCompound(page))
  578. return 0;
  579. page = compound_head(page);
  580. dtor = get_compound_page_dtor(page);
  581. return dtor == free_huge_page;
  582. }
  583. EXPORT_SYMBOL_GPL(PageHuge);
  584. static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
  585. {
  586. struct page *page;
  587. if (h->order >= MAX_ORDER)
  588. return NULL;
  589. page = alloc_pages_exact_node(nid,
  590. htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
  591. __GFP_REPEAT|__GFP_NOWARN,
  592. huge_page_order(h));
  593. if (page) {
  594. if (arch_prepare_hugepage(page)) {
  595. __free_pages(page, huge_page_order(h));
  596. return NULL;
  597. }
  598. prep_new_huge_page(h, page, nid);
  599. }
  600. return page;
  601. }
  602. /*
  603. * common helper functions for hstate_next_node_to_{alloc|free}.
  604. * We may have allocated or freed a huge page based on a different
  605. * nodes_allowed previously, so h->next_node_to_{alloc|free} might
  606. * be outside of *nodes_allowed. Ensure that we use an allowed
  607. * node for alloc or free.
  608. */
  609. static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
  610. {
  611. nid = next_node(nid, *nodes_allowed);
  612. if (nid == MAX_NUMNODES)
  613. nid = first_node(*nodes_allowed);
  614. VM_BUG_ON(nid >= MAX_NUMNODES);
  615. return nid;
  616. }
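/*
 * Worked example: with *nodes_allowed = { 0, 2 },
 *	next_node_allowed(0, nodes_allowed) returns 2, and
 *	next_node_allowed(2, nodes_allowed) wraps around and returns 0.
 */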
  617. static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
  618. {
  619. if (!node_isset(nid, *nodes_allowed))
  620. nid = next_node_allowed(nid, nodes_allowed);
  621. return nid;
  622. }
  623. /*
  624. * returns the previously saved node ["this node"] from which to
  625. * allocate a persistent huge page for the pool and advance the
  626. * next node from which to allocate, handling wrap at end of node
  627. * mask.
  628. */
  629. static int hstate_next_node_to_alloc(struct hstate *h,
  630. nodemask_t *nodes_allowed)
  631. {
  632. int nid;
  633. VM_BUG_ON(!nodes_allowed);
  634. nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
  635. h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
  636. return nid;
  637. }
  638. static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
  639. {
  640. struct page *page;
  641. int start_nid;
  642. int next_nid;
  643. int ret = 0;
  644. start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
  645. next_nid = start_nid;
  646. do {
  647. page = alloc_fresh_huge_page_node(h, next_nid);
  648. if (page) {
  649. ret = 1;
  650. break;
  651. }
  652. next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
  653. } while (next_nid != start_nid);
  654. if (ret)
  655. count_vm_event(HTLB_BUDDY_PGALLOC);
  656. else
  657. count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
  658. return ret;
  659. }
  660. /*
  661. * helper for free_pool_huge_page() - return the previously saved
  662. * node ["this node"] from which to free a huge page. Advance the
  663. * next node id whether or not we find a free huge page to free so
  664. * that the next attempt to free addresses the next node.
  665. */
  666. static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
  667. {
  668. int nid;
  669. VM_BUG_ON(!nodes_allowed);
  670. nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
  671. h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
  672. return nid;
  673. }
  674. /*
  675. * Free huge page from pool from next node to free.
  676. * Attempt to keep persistent huge pages more or less
  677. * balanced over allowed nodes.
  678. * Called with hugetlb_lock locked.
  679. */
  680. static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
  681. bool acct_surplus)
  682. {
  683. int start_nid;
  684. int next_nid;
  685. int ret = 0;
  686. start_nid = hstate_next_node_to_free(h, nodes_allowed);
  687. next_nid = start_nid;
  688. do {
  689. /*
  690. * If we're returning unused surplus pages, only examine
  691. * nodes with surplus pages.
  692. */
  693. if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
  694. !list_empty(&h->hugepage_freelists[next_nid])) {
  695. struct page *page =
  696. list_entry(h->hugepage_freelists[next_nid].next,
  697. struct page, lru);
  698. list_del(&page->lru);
  699. h->free_huge_pages--;
  700. h->free_huge_pages_node[next_nid]--;
  701. if (acct_surplus) {
  702. h->surplus_huge_pages--;
  703. h->surplus_huge_pages_node[next_nid]--;
  704. }
  705. update_and_free_page(h, page);
  706. ret = 1;
  707. break;
  708. }
  709. next_nid = hstate_next_node_to_free(h, nodes_allowed);
  710. } while (next_nid != start_nid);
  711. return ret;
  712. }
  713. static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
  714. {
  715. struct page *page;
  716. unsigned int r_nid;
  717. if (h->order >= MAX_ORDER)
  718. return NULL;
  719. /*
  720. * Assume we will successfully allocate the surplus page to
  721. * prevent racing processes from causing the surplus to exceed
  722. * overcommit
  723. *
  724. * This however introduces a different race, where a process B
  725. * tries to grow the static hugepage pool while alloc_pages() is
  726. * called by process A. B will only examine the per-node
  727. * counters in determining if surplus huge pages can be
  728. * converted to normal huge pages in adjust_pool_surplus(). A
  729. * won't be able to increment the per-node counter, until the
  730. * lock is dropped by B, but B doesn't drop hugetlb_lock until
  731. * no more huge pages can be converted from surplus to normal
  732. * state (and doesn't try to convert again). Thus, we have a
  733. * case where a surplus huge page exists, the pool is grown, and
  734. * the surplus huge page still exists after, even though it
  735. * should just have been converted to a normal huge page. This
  736. * does not leak memory, though, as the hugepage will be freed
  737. * once it is out of use. It also does not allow the counters to
  738. * go out of whack in adjust_pool_surplus() as we don't modify
  739. * the node values until we've gotten the hugepage and only the
  740. * per-node value is checked there.
  741. */
  742. spin_lock(&hugetlb_lock);
  743. if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
  744. spin_unlock(&hugetlb_lock);
  745. return NULL;
  746. } else {
  747. h->nr_huge_pages++;
  748. h->surplus_huge_pages++;
  749. }
  750. spin_unlock(&hugetlb_lock);
  751. if (nid == NUMA_NO_NODE)
  752. page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
  753. __GFP_REPEAT|__GFP_NOWARN,
  754. huge_page_order(h));
  755. else
  756. page = alloc_pages_exact_node(nid,
  757. htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
  758. __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
  759. if (page && arch_prepare_hugepage(page)) {
  760. __free_pages(page, huge_page_order(h));
  761. page = NULL;
  762. }
  763. spin_lock(&hugetlb_lock);
  764. if (page) {
  765. INIT_LIST_HEAD(&page->lru);
  766. r_nid = page_to_nid(page);
  767. set_compound_page_dtor(page, free_huge_page);
  768. set_hugetlb_cgroup(page, NULL);
  769. /*
  770. * We incremented the global counters already
  771. */
  772. h->nr_huge_pages_node[r_nid]++;
  773. h->surplus_huge_pages_node[r_nid]++;
  774. __count_vm_event(HTLB_BUDDY_PGALLOC);
  775. } else {
  776. h->nr_huge_pages--;
  777. h->surplus_huge_pages--;
  778. __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
  779. }
  780. spin_unlock(&hugetlb_lock);
  781. return page;
  782. }
  783. /*
  784. * This allocation function is useful in the context where vma is irrelevant.
  785. * E.g. soft-offlining uses this function because it only cares about the
  786. * physical address of the error page.
  787. */
  788. struct page *alloc_huge_page_node(struct hstate *h, int nid)
  789. {
  790. struct page *page;
  791. spin_lock(&hugetlb_lock);
  792. page = dequeue_huge_page_node(h, nid);
  793. spin_unlock(&hugetlb_lock);
  794. if (!page)
  795. page = alloc_buddy_huge_page(h, nid);
  796. return page;
  797. }
  798. /*
  799. * Increase the hugetlb pool such that it can accommodate a reservation
  800. * of size 'delta'.
  801. */
  802. static int gather_surplus_pages(struct hstate *h, int delta)
  803. {
  804. struct list_head surplus_list;
  805. struct page *page, *tmp;
  806. int ret, i;
  807. int needed, allocated;
  808. bool alloc_ok = true;
  809. needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
  810. if (needed <= 0) {
  811. h->resv_huge_pages += delta;
  812. return 0;
  813. }
  814. allocated = 0;
  815. INIT_LIST_HEAD(&surplus_list);
  816. ret = -ENOMEM;
  817. retry:
  818. spin_unlock(&hugetlb_lock);
  819. for (i = 0; i < needed; i++) {
  820. page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
  821. if (!page) {
  822. alloc_ok = false;
  823. break;
  824. }
  825. list_add(&page->lru, &surplus_list);
  826. }
  827. allocated += i;
  828. /*
  829. * After retaking hugetlb_lock, we need to recalculate 'needed'
  830. * because either resv_huge_pages or free_huge_pages may have changed.
  831. */
  832. spin_lock(&hugetlb_lock);
  833. needed = (h->resv_huge_pages + delta) -
  834. (h->free_huge_pages + allocated);
  835. if (needed > 0) {
  836. if (alloc_ok)
  837. goto retry;
  838. /*
  839. * We were not able to allocate enough pages to
  840. * satisfy the entire reservation so we free what
  841. * we've allocated so far.
  842. */
  843. goto free;
  844. }
  845. /*
  846. * The surplus_list now contains _at_least_ the number of extra pages
  847. * needed to accommodate the reservation. Add the appropriate number
  848. * of pages to the hugetlb pool and free the extras back to the buddy
  849. * allocator. Commit the entire reservation here to prevent another
  850. * process from stealing the pages as they are added to the pool but
  851. * before they are reserved.
  852. */
  853. needed += allocated;
  854. h->resv_huge_pages += delta;
  855. ret = 0;
  856. /* Free the needed pages to the hugetlb pool */
  857. list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
  858. if ((--needed) < 0)
  859. break;
  860. /*
  861. * This page is now managed by the hugetlb allocator and has
  862. * no users -- drop the buddy allocator's reference.
  863. */
  864. put_page_testzero(page);
  865. VM_BUG_ON(page_count(page));
  866. enqueue_huge_page(h, page);
  867. }
  868. free:
  869. spin_unlock(&hugetlb_lock);
  870. /* Free unnecessary surplus pages to the buddy allocator */
  871. if (!list_empty(&surplus_list)) {
  872. list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
  873. put_page(page);
  874. }
  875. }
  876. spin_lock(&hugetlb_lock);
  877. return ret;
  878. }
  879. /*
  880. * When releasing a hugetlb pool reservation, any surplus pages that were
  881. * allocated to satisfy the reservation must be explicitly freed if they were
  882. * never used.
  883. * Called with hugetlb_lock held.
  884. */
  885. static void return_unused_surplus_pages(struct hstate *h,
  886. unsigned long unused_resv_pages)
  887. {
  888. unsigned long nr_pages;
  889. /* Uncommit the reservation */
  890. h->resv_huge_pages -= unused_resv_pages;
  891. /* Cannot return gigantic pages currently */
  892. if (h->order >= MAX_ORDER)
  893. return;
  894. nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
  895. /*
  896. * We want to release as many surplus pages as possible, spread
  897. * evenly across all nodes with memory. Iterate across these nodes
  898. * until we can no longer free unreserved surplus pages. This occurs
  899. * when the nodes with surplus pages have no free pages.
  900. * free_pool_huge_page() will balance the freed pages across the
  901. * on-line nodes with memory and will handle the hstate accounting.
  902. */
  903. while (nr_pages--) {
  904. if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
  905. break;
  906. }
  907. }
  908. /*
  909. * Determine if the huge page at addr within the vma has an associated
  910. * reservation. Where it does not, we will need to logically increase the
  911. * reservation and actually increase subpool usage before an allocation
  912. * can occur. Where any new reservation would be required the
  913. * reservation change is prepared, but not committed. Once the page
  914. * has been allocated from the subpool and instantiated the change should
  915. * be committed via vma_commit_reservation. No action is required on
  916. * failure.
  917. */
  918. static long vma_needs_reservation(struct hstate *h,
  919. struct vm_area_struct *vma, unsigned long addr)
  920. {
  921. struct address_space *mapping = vma->vm_file->f_mapping;
  922. struct inode *inode = mapping->host;
  923. if (vma->vm_flags & VM_MAYSHARE) {
  924. pgoff_t idx = vma_hugecache_offset(h, vma, addr);
  925. return region_chg(&inode->i_mapping->private_list,
  926. idx, idx + 1);
  927. } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
  928. return 1;
  929. } else {
  930. long err;
  931. pgoff_t idx = vma_hugecache_offset(h, vma, addr);
  932. struct resv_map *reservations = vma_resv_map(vma);
  933. err = region_chg(&reservations->regions, idx, idx + 1);
  934. if (err < 0)
  935. return err;
  936. return 0;
  937. }
  938. }
  939. static void vma_commit_reservation(struct hstate *h,
  940. struct vm_area_struct *vma, unsigned long addr)
  941. {
  942. struct address_space *mapping = vma->vm_file->f_mapping;
  943. struct inode *inode = mapping->host;
  944. if (vma->vm_flags & VM_MAYSHARE) {
  945. pgoff_t idx = vma_hugecache_offset(h, vma, addr);
  946. region_add(&inode->i_mapping->private_list, idx, idx + 1);
  947. } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
  948. pgoff_t idx = vma_hugecache_offset(h, vma, addr);
  949. struct resv_map *reservations = vma_resv_map(vma);
  950. /* Mark this page used in the map. */
  951. region_add(&reservations->regions, idx, idx + 1);
  952. }
  953. }
  954. static struct page *alloc_huge_page(struct vm_area_struct *vma,
  955. unsigned long addr, int avoid_reserve)
  956. {
  957. struct hugepage_subpool *spool = subpool_vma(vma);
  958. struct hstate *h = hstate_vma(vma);
  959. struct page *page;
  960. long chg;
  961. int ret, idx;
  962. struct hugetlb_cgroup *h_cg;
  963. idx = hstate_index(h);
  964. /*
  965. * Processes that did not create the mapping will have no
  966. * reserves and will not have accounted against subpool
  967. * limit. Check that the subpool limit can be made before
  968. * satisfying the allocation. MAP_NORESERVE mappings may also
  969. * need pages and a subpool limit allocation if no reserve
  970. * mapping overlaps.
  971. */
  972. chg = vma_needs_reservation(h, vma, addr);
  973. if (chg < 0)
  974. return ERR_PTR(-ENOMEM);
  975. if (chg)
  976. if (hugepage_subpool_get_pages(spool, chg))
  977. return ERR_PTR(-ENOSPC);
  978. ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
  979. if (ret) {
  980. hugepage_subpool_put_pages(spool, chg);
  981. return ERR_PTR(-ENOSPC);
  982. }
  983. spin_lock(&hugetlb_lock);
  984. page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
  985. if (page) {
  986. /* update page cgroup details */
  987. hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
  988. h_cg, page);
  989. spin_unlock(&hugetlb_lock);
  990. } else {
  991. spin_unlock(&hugetlb_lock);
  992. page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
  993. if (!page) {
  994. hugetlb_cgroup_uncharge_cgroup(idx,
  995. pages_per_huge_page(h),
  996. h_cg);
  997. hugepage_subpool_put_pages(spool, chg);
  998. return ERR_PTR(-ENOSPC);
  999. }
  1000. spin_lock(&hugetlb_lock);
  1001. hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
  1002. h_cg, page);
  1003. list_move(&page->lru, &h->hugepage_activelist);
  1004. spin_unlock(&hugetlb_lock);
  1005. }
  1006. set_page_private(page, (unsigned long)spool);
  1007. vma_commit_reservation(h, vma, addr);
  1008. return page;
  1009. }
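/*
 * To summarize the ordering in alloc_huge_page() above: prepare the
 * reservation, charge the subpool, charge the hugetlb cgroup, then take a
 * page from the free lists (or fall back to alloc_buddy_huge_page()); the
 * failure paths undo the subpool and cgroup charges before returning an
 * ERR_PTR(), while a prepared-but-uncommitted reservation needs no undo.
 */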
  1010. int __weak alloc_bootmem_huge_page(struct hstate *h)
  1011. {
  1012. struct huge_bootmem_page *m;
  1013. int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
  1014. while (nr_nodes) {
  1015. void *addr;
  1016. addr = __alloc_bootmem_node_nopanic(
  1017. NODE_DATA(hstate_next_node_to_alloc(h,
  1018. &node_states[N_HIGH_MEMORY])),
  1019. huge_page_size(h), huge_page_size(h), 0);
  1020. if (addr) {
  1021. /*
  1022. * Use the beginning of the huge page to store the
  1023. * huge_bootmem_page struct (until gather_bootmem
  1024. * puts them into the mem_map).
  1025. */
  1026. m = addr;
  1027. goto found;
  1028. }
  1029. nr_nodes--;
  1030. }
  1031. return 0;
  1032. found:
  1033. BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
  1034. /* Put them into a private list first because mem_map is not up yet */
  1035. list_add(&m->list, &huge_boot_pages);
  1036. m->hstate = h;
  1037. return 1;
  1038. }
  1039. static void prep_compound_huge_page(struct page *page, int order)
  1040. {
  1041. if (unlikely(order > (MAX_ORDER - 1)))
  1042. prep_compound_gigantic_page(page, order);
  1043. else
  1044. prep_compound_page(page, order);
  1045. }
  1046. /* Put bootmem huge pages into the standard lists after mem_map is up */
  1047. static void __init gather_bootmem_prealloc(void)
  1048. {
  1049. struct huge_bootmem_page *m;
  1050. list_for_each_entry(m, &huge_boot_pages, list) {
  1051. struct hstate *h = m->hstate;
  1052. struct page *page;
  1053. #ifdef CONFIG_HIGHMEM
  1054. page = pfn_to_page(m->phys >> PAGE_SHIFT);
  1055. free_bootmem_late((unsigned long)m,
  1056. sizeof(struct huge_bootmem_page));
  1057. #else
  1058. page = virt_to_page(m);
  1059. #endif
  1060. __ClearPageReserved(page);
  1061. WARN_ON(page_count(page) != 1);
  1062. prep_compound_huge_page(page, h->order);
  1063. prep_new_huge_page(h, page, page_to_nid(page));
  1064. /*
  1065. * If we had gigantic hugepages allocated at boot time, we need
  1066. * to restore the 'stolen' pages to totalram_pages in order to
  1067. * fix confusing memory reports from free(1) and other
  1068. * side-effects, like CommitLimit going negative.
  1069. */
  1070. if (h->order > (MAX_ORDER - 1))
  1071. totalram_pages += 1 << h->order;
  1072. }
  1073. }
  1074. static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
  1075. {
  1076. unsigned long i;
  1077. for (i = 0; i < h->max_huge_pages; ++i) {
  1078. if (h->order >= MAX_ORDER) {
  1079. if (!alloc_bootmem_huge_page(h))
  1080. break;
  1081. } else if (!alloc_fresh_huge_page(h,
  1082. &node_states[N_HIGH_MEMORY]))
  1083. break;
  1084. }
  1085. h->max_huge_pages = i;
  1086. }
  1087. static void __init hugetlb_init_hstates(void)
  1088. {
  1089. struct hstate *h;
  1090. for_each_hstate(h) {
  1091. /* oversize hugepages were init'ed in early boot */
  1092. if (h->order < MAX_ORDER)
  1093. hugetlb_hstate_alloc_pages(h);
  1094. }
  1095. }
  1096. static char * __init memfmt(char *buf, unsigned long n)
  1097. {
  1098. if (n >= (1UL << 30))
  1099. sprintf(buf, "%lu GB", n >> 30);
  1100. else if (n >= (1UL << 20))
  1101. sprintf(buf, "%lu MB", n >> 20);
  1102. else
  1103. sprintf(buf, "%lu KB", n >> 10);
  1104. return buf;
  1105. }
  1106. static void __init report_hugepages(void)
  1107. {
  1108. struct hstate *h;
  1109. for_each_hstate(h) {
  1110. char buf[32];
  1111. printk(KERN_INFO "HugeTLB registered %s page size, "
  1112. "pre-allocated %ld pages\n",
  1113. memfmt(buf, huge_page_size(h)),
  1114. h->free_huge_pages);
  1115. }
  1116. }
  1117. #ifdef CONFIG_HIGHMEM
  1118. static void try_to_free_low(struct hstate *h, unsigned long count,
  1119. nodemask_t *nodes_allowed)
  1120. {
  1121. int i;
  1122. if (h->order >= MAX_ORDER)
  1123. return;
  1124. for_each_node_mask(i, *nodes_allowed) {
  1125. struct page *page, *next;
  1126. struct list_head *freel = &h->hugepage_freelists[i];
  1127. list_for_each_entry_safe(page, next, freel, lru) {
  1128. if (count >= h->nr_huge_pages)
  1129. return;
  1130. if (PageHighMem(page))
  1131. continue;
  1132. list_del(&page->lru);
  1133. update_and_free_page(h, page);
  1134. h->free_huge_pages--;
  1135. h->free_huge_pages_node[page_to_nid(page)]--;
  1136. }
  1137. }
  1138. }
  1139. #else
  1140. static inline void try_to_free_low(struct hstate *h, unsigned long count,
  1141. nodemask_t *nodes_allowed)
  1142. {
  1143. }
  1144. #endif
  1145. /*
  1146. * Increment or decrement surplus_huge_pages. Keep node-specific counters
  1147. * balanced by operating on them in a round-robin fashion.
  1148. * Returns 1 if an adjustment was made.
  1149. */
  1150. static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
  1151. int delta)
  1152. {
  1153. int start_nid, next_nid;
  1154. int ret = 0;
  1155. VM_BUG_ON(delta != -1 && delta != 1);
  1156. if (delta < 0)
  1157. start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
  1158. else
  1159. start_nid = hstate_next_node_to_free(h, nodes_allowed);
  1160. next_nid = start_nid;
  1161. do {
  1162. int nid = next_nid;
  1163. if (delta < 0) {
  1164. /*
  1165. * To shrink on this node, there must be a surplus page
  1166. */
  1167. if (!h->surplus_huge_pages_node[nid]) {
  1168. next_nid = hstate_next_node_to_alloc(h,
  1169. nodes_allowed);
  1170. continue;
  1171. }
  1172. }
  1173. if (delta > 0) {
  1174. /*
  1175. * Surplus cannot exceed the total number of pages
  1176. */
  1177. if (h->surplus_huge_pages_node[nid] >=
  1178. h->nr_huge_pages_node[nid]) {
  1179. next_nid = hstate_next_node_to_free(h,
  1180. nodes_allowed);
  1181. continue;
  1182. }
  1183. }
  1184. h->surplus_huge_pages += delta;
  1185. h->surplus_huge_pages_node[nid] += delta;
  1186. ret = 1;
  1187. break;
  1188. } while (next_nid != start_nid);
  1189. return ret;
  1190. }
  1191. #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
  1192. static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
  1193. nodemask_t *nodes_allowed)
  1194. {
  1195. unsigned long min_count, ret;
  1196. if (h->order >= MAX_ORDER)
  1197. return h->max_huge_pages;
  1198. /*
  1199. * Increase the pool size
  1200. * First take pages out of surplus state. Then make up the
  1201. * remaining difference by allocating fresh huge pages.
  1202. *
  1203. * We might race with alloc_buddy_huge_page() here and be unable
  1204. * to convert a surplus huge page to a normal huge page. That is
  1205. * not critical, though, it just means the overall size of the
  1206. * pool might be one hugepage larger than it needs to be, but
  1207. * within all the constraints specified by the sysctls.
  1208. */
  1209. spin_lock(&hugetlb_lock);
  1210. while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
  1211. if (!adjust_pool_surplus(h, nodes_allowed, -1))
  1212. break;
  1213. }
  1214. while (count > persistent_huge_pages(h)) {
  1215. /*
  1216. * If this allocation races such that we no longer need the
  1217. * page, free_huge_page will handle it by freeing the page
  1218. * and reducing the surplus.
  1219. */
  1220. spin_unlock(&hugetlb_lock);
  1221. ret = alloc_fresh_huge_page(h, nodes_allowed);
  1222. spin_lock(&hugetlb_lock);
  1223. if (!ret)
  1224. goto out;
  1225. /* Bail for signals. Probably ctrl-c from user */
  1226. if (signal_pending(current))
  1227. goto out;
  1228. }
  1229. /*
  1230. * Decrease the pool size
  1231. * First return free pages to the buddy allocator (being careful
  1232. * to keep enough around to satisfy reservations). Then place
  1233. * pages into surplus state as needed so the pool will shrink
  1234. * to the desired size as pages become free.
  1235. *
  1236. * By placing pages into the surplus state independent of the
  1237. * overcommit value, we are allowing the surplus pool size to
  1238. * exceed overcommit. There are few sane options here. Since
  1239. * alloc_buddy_huge_page() is checking the global counter,
  1240. * though, we'll note that we're not allowed to exceed surplus
  1241. * and won't grow the pool anywhere else. Not until one of the
  1242. * sysctls are changed, or the surplus pages go out of use.
  1243. */
  1244. min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
  1245. min_count = max(count, min_count);
  1246. try_to_free_low(h, min_count, nodes_allowed);
  1247. while (min_count < persistent_huge_pages(h)) {
  1248. if (!free_pool_huge_page(h, nodes_allowed, 0))
  1249. break;
  1250. }
  1251. while (count < persistent_huge_pages(h)) {
  1252. if (!adjust_pool_surplus(h, nodes_allowed, 1))
  1253. break;
  1254. }
  1255. out:
  1256. ret = persistent_huge_pages(h);
  1257. spin_unlock(&hugetlb_lock);
  1258. return ret;
  1259. }
  1260. #define HSTATE_ATTR_RO(_name) \
  1261. static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
  1262. #define HSTATE_ATTR(_name) \
  1263. static struct kobj_attribute _name##_attr = \
  1264. __ATTR(_name, 0644, _name##_show, _name##_store)
  1265. static struct kobject *hugepages_kobj;
  1266. static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
  1267. static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
  1268. static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
  1269. {
  1270. int i;
  1271. for (i = 0; i < HUGE_MAX_HSTATE; i++)
  1272. if (hstate_kobjs[i] == kobj) {
  1273. if (nidp)
  1274. *nidp = NUMA_NO_NODE;
  1275. return &hstates[i];
  1276. }
  1277. return kobj_to_node_hstate(kobj, nidp);
  1278. }
  1279. static ssize_t nr_hugepages_show_common(struct kobject *kobj,
  1280. struct kobj_attribute *attr, char *buf)
  1281. {
  1282. struct hstate *h;
  1283. unsigned long nr_huge_pages;
  1284. int nid;
  1285. h = kobj_to_hstate(kobj, &nid);
  1286. if (nid == NUMA_NO_NODE)
  1287. nr_huge_pages = h->nr_huge_pages;
  1288. else
  1289. nr_huge_pages = h->nr_huge_pages_node[nid];
  1290. return sprintf(buf, "%lu\n", nr_huge_pages);
  1291. }
  1292. static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
  1293. struct kobject *kobj, struct kobj_attribute *attr,
  1294. const char *buf, size_t len)
  1295. {
  1296. int err;
  1297. int nid;
  1298. unsigned long count;
  1299. struct hstate *h;
  1300. NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
  1301. err = strict_strtoul(buf, 10, &count);
  1302. if (err)
  1303. goto out;
  1304. h = kobj_to_hstate(kobj, &nid);
  1305. if (h->order >= MAX_ORDER) {
  1306. err = -EINVAL;
  1307. goto out;
  1308. }
  1309. if (nid == NUMA_NO_NODE) {
  1310. /*
  1311. * global hstate attribute
  1312. */
  1313. if (!(obey_mempolicy &&
  1314. init_nodemask_of_mempolicy(nodes_allowed))) {
  1315. NODEMASK_FREE(nodes_allowed);
  1316. nodes_allowed = &node_states[N_HIGH_MEMORY];
  1317. }
  1318. } else if (nodes_allowed) {
  1319. /*
  1320. * per node hstate attribute: adjust count to global,
  1321. * but restrict alloc/free to the specified node.
  1322. */
  1323. count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
  1324. init_nodemask_of_node(nodes_allowed, nid);
  1325. } else
  1326. nodes_allowed = &node_states[N_HIGH_MEMORY];
  1327. h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
  1328. if (nodes_allowed != &node_states[N_HIGH_MEMORY])
  1329. NODEMASK_FREE(nodes_allowed);
  1330. return len;
  1331. out:
  1332. NODEMASK_FREE(nodes_allowed);
  1333. return err;
  1334. }
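/*
 * Worked example of the per-node adjustment above (numbers are purely
 * illustrative): with 10 huge pages globally, 4 of them on node 1, writing
 * 6 to node 1's nr_hugepages yields count = 6 + (10 - 4) = 12, so two new
 * pages are needed globally and the nodemask restricts their allocation
 * to node 1 only.
 */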
  1335. static ssize_t nr_hugepages_show(struct kobject *kobj,
  1336. struct kobj_attribute *attr, char *buf)
  1337. {
  1338. return nr_hugepages_show_common(kobj, attr, buf);
  1339. }
  1340. static ssize_t nr_hugepages_store(struct kobject *kobj,
  1341. struct kobj_attribute *attr, const char *buf, size_t len)
  1342. {
  1343. return nr_hugepages_store_common(false, kobj, attr, buf, len);
  1344. }
  1345. HSTATE_ATTR(nr_hugepages);
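/*
 * From userspace, the attribute defined above is typically driven with
 * something like (path assumes a 2048 kB hstate; adjust for other sizes):
 *
 *	echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *
 * which ends up in nr_hugepages_store() -> set_max_huge_pages().
 */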
  1346. #ifdef CONFIG_NUMA
  1347. /*
  1348. * hstate attribute for optionally mempolicy-based constraint on persistent
  1349. * huge page alloc/free.
  1350. */
  1351. static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
  1352. struct kobj_attribute *attr, char *buf)
  1353. {
  1354. return nr_hugepages_show_common(kobj, attr, buf);
  1355. }
  1356. static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
  1357. struct kobj_attribute *attr, const char *buf, size_t len)
  1358. {
  1359. return nr_hugepages_store_common(true, kobj, attr, buf, len);
  1360. }
  1361. HSTATE_ATTR(nr_hugepages_mempolicy);
  1362. #endif
  1363. static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
  1364. struct kobj_attribute *attr, char *buf)
  1365. {
  1366. struct hstate *h = kobj_to_hstate(kobj, NULL);
  1367. return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
  1368. }
  1369. static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
  1370. struct kobj_attribute *attr, const char *buf, size_t count)
  1371. {
  1372. int err;
  1373. unsigned long input;
  1374. struct hstate *h = kobj_to_hstate(kobj, NULL);
  1375. if (h->order >= MAX_ORDER)
  1376. return -EINVAL;
  1377. err = strict_strtoul(buf, 10, &input);
  1378. if (err)
  1379. return err;
  1380. spin_lock(&hugetlb_lock);
  1381. h->nr_overcommit_huge_pages = input;
  1382. spin_unlock(&hugetlb_lock);
  1383. return count;
  1384. }
  1385. HSTATE_ATTR(nr_overcommit_hugepages);
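/*
 * nr_overcommit_hugepages only sets a ceiling; the surplus pages it permits
 * are allocated from the buddy allocator on demand at fault time and are
 * returned once no longer needed, unlike the persistent pool sized via
 * nr_hugepages.
 */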
  1386. static ssize_t free_hugepages_show(struct kobject *kobj,
  1387. struct kobj_attribute *attr, char *buf)
  1388. {
  1389. struct hstate *h;
  1390. unsigned long free_huge_pages;
  1391. int nid;
  1392. h = kobj_to_hstate(kobj, &nid);
  1393. if (nid == NUMA_NO_NODE)
  1394. free_huge_pages = h->free_huge_pages;
  1395. else
  1396. free_huge_pages = h->free_huge_pages_node[nid];
  1397. return sprintf(buf, "%lu\n", free_huge_pages);
  1398. }
  1399. HSTATE_ATTR_RO(free_hugepages);
  1400. static ssize_t resv_hugepages_show(struct kobject *kobj,
  1401. struct kobj_attribute *attr, char *buf)
  1402. {
  1403. struct hstate *h = kobj_to_hstate(kobj, NULL);
  1404. return sprintf(buf, "%lu\n", h->resv_huge_pages);
  1405. }
  1406. HSTATE_ATTR_RO(resv_hugepages);
  1407. static ssize_t surplus_hugepages_show(struct kobject *kobj,
  1408. struct kobj_attribute *attr, char *buf)
  1409. {
  1410. struct hstate *h;
  1411. unsigned long surplus_huge_pages;
  1412. int nid;
  1413. h = kobj_to_hstate(kobj, &nid);
  1414. if (nid == NUMA_NO_NODE)
  1415. surplus_huge_pages = h->surplus_huge_pages;
  1416. else
  1417. surplus_huge_pages = h->surplus_huge_pages_node[nid];
  1418. return sprintf(buf, "%lu\n", surplus_huge_pages);
  1419. }
  1420. HSTATE_ATTR_RO(surplus_hugepages);
  1421. static struct attribute *hstate_attrs[] = {
  1422. &nr_hugepages_attr.attr,
  1423. &nr_overcommit_hugepages_attr.attr,
  1424. &free_hugepages_attr.attr,
  1425. &resv_hugepages_attr.attr,
  1426. &surplus_hugepages_attr.attr,
  1427. #ifdef CONFIG_NUMA
  1428. &nr_hugepages_mempolicy_attr.attr,
  1429. #endif
  1430. NULL,
  1431. };
  1432. static struct attribute_group hstate_attr_group = {
  1433. .attrs = hstate_attrs,
  1434. };
  1435. static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
  1436. struct kobject **hstate_kobjs,
  1437. struct attribute_group *hstate_attr_group)
  1438. {
  1439. int retval;
  1440. int hi = hstate_index(h);
  1441. hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
  1442. if (!hstate_kobjs[hi])
  1443. return -ENOMEM;
  1444. retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
  1445. if (retval)
  1446. kobject_put(hstate_kobjs[hi]);
  1447. return retval;
  1448. }
  1449. static void __init hugetlb_sysfs_init(void)
  1450. {
  1451. struct hstate *h;
  1452. int err;
  1453. hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
  1454. if (!hugepages_kobj)
  1455. return;
  1456. for_each_hstate(h) {
  1457. err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
  1458. hstate_kobjs, &hstate_attr_group);
  1459. if (err)
  1460. printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
  1461. h->name);
  1462. }
  1463. }
  1464. #ifdef CONFIG_NUMA
  1465. /*
  1466. * node_hstate/s - associate per node hstate attributes, via their kobjects,
  1467. * with node devices in node_devices[] using a parallel array. The array
  1468. * index of a node device or _hstate == node id.
  1469. * This is here to avoid any static dependency of the node device driver, in
  1470. * the base kernel, on the hugetlb module.
  1471. */
  1472. struct node_hstate {
  1473. struct kobject *hugepages_kobj;
  1474. struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
  1475. };
  1476. struct node_hstate node_hstates[MAX_NUMNODES];
  1477. /*
  1478. * A subset of global hstate attributes for node devices
  1479. */
  1480. static struct attribute *per_node_hstate_attrs[] = {
  1481. &nr_hugepages_attr.attr,
  1482. &free_hugepages_attr.attr,
  1483. &surplus_hugepages_attr.attr,
  1484. NULL,
  1485. };
  1486. static struct attribute_group per_node_hstate_attr_group = {
  1487. .attrs = per_node_hstate_attrs,
  1488. };
  1489. /*
  1490. * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
  1491. * Returns node id via non-NULL nidp.
  1492. */
  1493. static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
  1494. {
  1495. int nid;
  1496. for (nid = 0; nid < nr_node_ids; nid++) {
  1497. struct node_hstate *nhs = &node_hstates[nid];
  1498. int i;
  1499. for (i = 0; i < HUGE_MAX_HSTATE; i++)
  1500. if (nhs->hstate_kobjs[i] == kobj) {
  1501. if (nidp)
  1502. *nidp = nid;
  1503. return &hstates[i];
  1504. }
  1505. }
  1506. BUG();
  1507. return NULL;
  1508. }
  1509. /*
  1510. * Unregister hstate attributes from a single node device.
  1511. * No-op if no hstate attributes attached.
  1512. */
  1513. void hugetlb_unregister_node(struct node *node)
  1514. {
  1515. struct hstate *h;
  1516. struct node_hstate *nhs = &node_hstates[node->dev.id];
  1517. if (!nhs->hugepages_kobj)
  1518. return; /* no hstate attributes */
  1519. for_each_hstate(h) {
  1520. int idx = hstate_index(h);
  1521. if (nhs->hstate_kobjs[idx]) {
  1522. kobject_put(nhs->hstate_kobjs[idx]);
  1523. nhs->hstate_kobjs[idx] = NULL;
  1524. }
  1525. }
  1526. kobject_put(nhs->hugepages_kobj);
  1527. nhs->hugepages_kobj = NULL;
  1528. }
  1529. /*
  1530. * hugetlb module exit: unregister hstate attributes from node devices
  1531. * that have them.
  1532. */
  1533. static void hugetlb_unregister_all_nodes(void)
  1534. {
  1535. int nid;
  1536. /*
  1537. * disable node device registrations.
  1538. */
  1539. register_hugetlbfs_with_node(NULL, NULL);
  1540. /*
  1541. * remove hstate attributes from any nodes that have them.
  1542. */
  1543. for (nid = 0; nid < nr_node_ids; nid++)
  1544. hugetlb_unregister_node(&node_devices[nid]);
  1545. }
  1546. /*
  1547. * Register hstate attributes for a single node device.
  1548. * No-op if attributes already registered.
  1549. */
  1550. void hugetlb_register_node(struct node *node)
  1551. {
  1552. struct hstate *h;
  1553. struct node_hstate *nhs = &node_hstates[node->dev.id];
  1554. int err;
  1555. if (nhs->hugepages_kobj)
  1556. return; /* already allocated */
  1557. nhs->hugepages_kobj = kobject_create_and_add("hugepages",
  1558. &node->dev.kobj);
  1559. if (!nhs->hugepages_kobj)
  1560. return;
  1561. for_each_hstate(h) {
  1562. err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
  1563. nhs->hstate_kobjs,
  1564. &per_node_hstate_attr_group);
  1565. if (err) {
  1566. printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
  1567. " for node %d\n",
  1568. h->name, node->dev.id);
  1569. hugetlb_unregister_node(node);
  1570. break;
  1571. }
  1572. }
  1573. }
  1574. /*
  1575. * hugetlb init time: register hstate attributes for all registered node
  1576. * devices of nodes that have memory. All on-line nodes should have
  1577. * registered their associated device by this time.
  1578. */
  1579. static void hugetlb_register_all_nodes(void)
  1580. {
  1581. int nid;
  1582. for_each_node_state(nid, N_HIGH_MEMORY) {
  1583. struct node *node = &node_devices[nid];
  1584. if (node->dev.id == nid)
  1585. hugetlb_register_node(node);
  1586. }
  1587. /*
  1588. * Let the node device driver know we're here so it can
  1589. * [un]register hstate attributes on node hotplug.
  1590. */
  1591. register_hugetlbfs_with_node(hugetlb_register_node,
  1592. hugetlb_unregister_node);
  1593. }
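/*
 * The per-node kobjects registered above surface a node-local view of the
 * pool, e.g. (illustrative path, assuming node 0 and a 2048 kB hstate):
 *
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 *
 * Writes there go through nr_hugepages_store_common() with the nodemask
 * restricted to that node.
 */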
  1594. #else /* !CONFIG_NUMA */
  1595. static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
  1596. {
  1597. BUG();
  1598. if (nidp)
  1599. *nidp = -1;
  1600. return NULL;
  1601. }
  1602. static void hugetlb_unregister_all_nodes(void) { }
  1603. static void hugetlb_register_all_nodes(void) { }
  1604. #endif
  1605. static void __exit hugetlb_exit(void)
  1606. {
  1607. struct hstate *h;
  1608. hugetlb_unregister_all_nodes();
  1609. for_each_hstate(h) {
  1610. kobject_put(hstate_kobjs[hstate_index(h)]);
  1611. }
  1612. kobject_put(hugepages_kobj);
  1613. }
  1614. module_exit(hugetlb_exit);
  1615. static int __init hugetlb_init(void)
  1616. {
1617. /* Some platforms decide whether they support huge pages at boot
1618. * time. On these (such as powerpc), HPAGE_SHIFT is set to 0 when
1619. * there is no such support.
  1620. */
  1621. if (HPAGE_SHIFT == 0)
  1622. return 0;
  1623. if (!size_to_hstate(default_hstate_size)) {
  1624. default_hstate_size = HPAGE_SIZE;
  1625. if (!size_to_hstate(default_hstate_size))
  1626. hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
  1627. }
  1628. default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
  1629. if (default_hstate_max_huge_pages)
  1630. default_hstate.max_huge_pages = default_hstate_max_huge_pages;
  1631. hugetlb_init_hstates();
  1632. gather_bootmem_prealloc();
  1633. report_hugepages();
  1634. hugetlb_sysfs_init();
  1635. hugetlb_register_all_nodes();
  1636. return 0;
  1637. }
  1638. module_init(hugetlb_init);
  1639. /* Should be called on processing a hugepagesz=... option */
  1640. void __init hugetlb_add_hstate(unsigned order)
  1641. {
  1642. struct hstate *h;
  1643. unsigned long i;
  1644. if (size_to_hstate(PAGE_SIZE << order)) {
  1645. printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
  1646. return;
  1647. }
  1648. BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
  1649. BUG_ON(order == 0);
  1650. h = &hstates[hugetlb_max_hstate++];
  1651. h->order = order;
  1652. h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
  1653. h->nr_huge_pages = 0;
  1654. h->free_huge_pages = 0;
  1655. for (i = 0; i < MAX_NUMNODES; ++i)
  1656. INIT_LIST_HEAD(&h->hugepage_freelists[i]);
  1657. INIT_LIST_HEAD(&h->hugepage_activelist);
  1658. h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
  1659. h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
  1660. snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
  1661. huge_page_size(h)/1024);
  1662. /*
  1663. * Add cgroup control files only if the huge page consists
  1664. * of more than two normal pages. This is because we use
1665. * page[2].lru.next for storing cgroup details.
  1666. */
  1667. if (order >= HUGETLB_CGROUP_MIN_ORDER)
  1668. hugetlb_cgroup_file_init(hugetlb_max_hstate - 1);
  1669. parsed_hstate = h;
  1670. }
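/*
 * Example of the mask computation above, assuming 4 KiB base pages
 * (PAGE_SHIFT == 12) and order == 9 (a 2 MiB huge page):
 * mask = ~((1ULL << 21) - 1) = ~0x1fffff, i.e. "addr & h->mask" rounds an
 * address down to a 2 MiB boundary, and the hstate is named
 * "hugepages-2048kB".
 */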
  1671. static int __init hugetlb_nrpages_setup(char *s)
  1672. {
  1673. unsigned long *mhp;
  1674. static unsigned long *last_mhp;
  1675. /*
  1676. * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
  1677. * so this hugepages= parameter goes to the "default hstate".
  1678. */
  1679. if (!hugetlb_max_hstate)
  1680. mhp = &default_hstate_max_huge_pages;
  1681. else
  1682. mhp = &parsed_hstate->max_huge_pages;
  1683. if (mhp == last_mhp) {
  1684. printk(KERN_WARNING "hugepages= specified twice without "
  1685. "interleaving hugepagesz=, ignoring\n");
  1686. return 1;
  1687. }
  1688. if (sscanf(s, "%lu", mhp) <= 0)
  1689. *mhp = 0;
  1690. /*
  1691. * Global state is always initialized later in hugetlb_init.
  1692. * But we need to allocate >= MAX_ORDER hstates here early to still
  1693. * use the bootmem allocator.
  1694. */
  1695. if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
  1696. hugetlb_hstate_alloc_pages(parsed_hstate);
  1697. last_mhp = mhp;
  1698. return 1;
  1699. }
  1700. __setup("hugepages=", hugetlb_nrpages_setup);
  1701. static int __init hugetlb_default_setup(char *s)
  1702. {
  1703. default_hstate_size = memparse(s, &s);
  1704. return 1;
  1705. }
  1706. __setup("default_hugepagesz=", hugetlb_default_setup);
  1707. static unsigned int cpuset_mems_nr(unsigned int *array)
  1708. {
  1709. int node;
  1710. unsigned int nr = 0;
  1711. for_each_node_mask(node, cpuset_current_mems_allowed)
  1712. nr += array[node];
  1713. return nr;
  1714. }
  1715. #ifdef CONFIG_SYSCTL
  1716. static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
  1717. struct ctl_table *table, int write,
  1718. void __user *buffer, size_t *length, loff_t *ppos)
  1719. {
  1720. struct hstate *h = &default_hstate;
  1721. unsigned long tmp;
  1722. int ret;
  1723. tmp = h->max_huge_pages;
  1724. if (write && h->order >= MAX_ORDER)
  1725. return -EINVAL;
  1726. table->data = &tmp;
  1727. table->maxlen = sizeof(unsigned long);
  1728. ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
  1729. if (ret)
  1730. goto out;
  1731. if (write) {
  1732. NODEMASK_ALLOC(nodemask_t, nodes_allowed,
  1733. GFP_KERNEL | __GFP_NORETRY);
  1734. if (!(obey_mempolicy &&
  1735. init_nodemask_of_mempolicy(nodes_allowed))) {
  1736. NODEMASK_FREE(nodes_allowed);
  1737. nodes_allowed = &node_states[N_HIGH_MEMORY];
  1738. }
  1739. h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
  1740. if (nodes_allowed != &node_states[N_HIGH_MEMORY])
  1741. NODEMASK_FREE(nodes_allowed);
  1742. }
  1743. out:
  1744. return ret;
  1745. }
  1746. int hugetlb_sysctl_handler(struct ctl_table *table, int write,
  1747. void __user *buffer, size_t *length, loff_t *ppos)
  1748. {
  1749. return hugetlb_sysctl_handler_common(false, table, write,
  1750. buffer, length, ppos);
  1751. }
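/*
 * hugetlb_sysctl_handler() is what normally backs the vm.nr_hugepages
 * sysctl, so e.g. "sysctl -w vm.nr_hugepages=128" (or writing to
 * /proc/sys/vm/nr_hugepages) takes the same set_max_huge_pages() path as
 * the sysfs attribute.
 */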
  1752. #ifdef CONFIG_NUMA
  1753. int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
  1754. void __user *buffer, size_t *length, loff_t *ppos)
  1755. {
  1756. return hugetlb_sysctl_handler_common(true, table, write,
  1757. buffer, length, ppos);
  1758. }
  1759. #endif /* CONFIG_NUMA */
  1760. int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
  1761. void __user *buffer,
  1762. size_t *length, loff_t *ppos)
  1763. {
  1764. proc_dointvec(table, write, buffer, length, ppos);
  1765. if (hugepages_treat_as_movable)
  1766. htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
  1767. else
  1768. htlb_alloc_mask = GFP_HIGHUSER;
  1769. return 0;
  1770. }
  1771. int hugetlb_overcommit_handler(struct ctl_table *table, int write,
  1772. void __user *buffer,
  1773. size_t *length, loff_t *ppos)
  1774. {
  1775. struct hstate *h = &default_hstate;
  1776. unsigned long tmp;
  1777. int ret;
  1778. tmp = h->nr_overcommit_huge_pages;
  1779. if (write && h->order >= MAX_ORDER)
  1780. return -EINVAL;
  1781. table->data = &tmp;
  1782. table->maxlen = sizeof(unsigned long);
  1783. ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
  1784. if (ret)
  1785. goto out;
  1786. if (write) {
  1787. spin_lock(&hugetlb_lock);
  1788. h->nr_overcommit_huge_pages = tmp;
  1789. spin_unlock(&hugetlb_lock);
  1790. }
  1791. out:
  1792. return ret;
  1793. }
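/*
 * Likewise, hugetlb_overcommit_handler() is normally wired to the
 * vm.nr_overcommit_hugepages sysctl; it only updates the limit under
 * hugetlb_lock, and the surplus allocation itself happens later, at
 * fault time.
 */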
  1794. #endif /* CONFIG_SYSCTL */
  1795. void hugetlb_report_meminfo(struct seq_file *m)
  1796. {
  1797. struct hstate *h = &default_hstate;
  1798. seq_printf(m,
  1799. "HugePages_Total: %5lu\n"
  1800. "HugePages_Free: %5lu\n"
  1801. "HugePages_Rsvd: %5lu\n"
  1802. "HugePages_Surp: %5lu\n"
  1803. "Hugepagesize: %8lu kB\n",
  1804. h->nr_huge_pages,
  1805. h->free_huge_pages,
  1806. h->resv_huge_pages,
  1807. h->surplus_huge_pages,
  1808. 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
  1809. }
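/*
 * The format string above produces the hugetlb block of /proc/meminfo,
 * e.g. (values are illustrative):
 *
 *	HugePages_Total:      64
 *	HugePages_Free:       60
 *	HugePages_Rsvd:        4
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 */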
  1810. int hugetlb_report_node_meminfo(int nid, char *buf)
  1811. {
  1812. struct hstate *h = &default_hstate;
  1813. return sprintf(buf,
  1814. "Node %d HugePages_Total: %5u\n"
  1815. "Node %d HugePages_Free: %5u\n"
  1816. "Node %d HugePages_Surp: %5u\n",
  1817. nid, h->nr_huge_pages_node[nid],
  1818. nid, h->free_huge_pages_node[nid],
  1819. nid, h->surplus_huge_pages_node[nid]);
  1820. }
1821. /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
  1822. unsigned long hugetlb_total_pages(void)
  1823. {
  1824. struct hstate *h = &default_hstate;
  1825. return h->nr_huge_pages * pages_per_huge_page(h);
  1826. }
  1827. static int hugetlb_acct_memory(struct hstate *h, long delta)
  1828. {
  1829. int ret = -ENOMEM;
  1830. spin_lock(&hugetlb_lock);
  1831. /*
  1832. * When cpuset is configured, it breaks the strict hugetlb page
  1833. * reservation as the accounting is done on a global variable. Such
  1834. * reservation is completely rubbish in the presence of cpuset because
  1835. * the reservation is not checked against page availability for the
1836. * current cpuset. Applications can still potentially be OOM-killed by the
1837. * kernel for lack of free hugetlb pages in the cpuset the task is in.
1838. * Attempting to enforce strict accounting with cpusets is almost
1839. * impossible (or too ugly) because cpusets are so fluid that a
1840. * task or memory node can be dynamically moved between cpusets.
  1841. *
  1842. * The change of semantics for shared hugetlb mapping with cpuset is
  1843. * undesirable. However, in order to preserve some of the semantics,
  1844. * we fall back to check against current free page availability as
  1845. * a best attempt and hopefully to minimize the impact of changing
  1846. * semantics that cpuset has.
  1847. */
  1848. if (delta > 0) {
  1849. if (gather_surplus_pages(h, delta) < 0)
  1850. goto out;
  1851. if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
  1852. return_unused_surplus_pages(h, delta);
  1853. goto out;
  1854. }
  1855. }
  1856. ret = 0;
  1857. if (delta < 0)
  1858. return_unused_surplus_pages(h, (unsigned long) -delta);
  1859. out:
  1860. spin_unlock(&hugetlb_lock);
  1861. return ret;
  1862. }
  1863. static void hugetlb_vm_op_open(struct vm_area_struct *vma)
  1864. {
  1865. struct resv_map *reservations = vma_resv_map(vma);
  1866. /*
1867. * This new VMA should share its sibling's reservation map if present.
  1868. * The VMA will only ever have a valid reservation map pointer where
  1869. * it is being copied for another still existing VMA. As that VMA
  1870. * has a reference to the reservation map it cannot disappear until
  1871. * after this open call completes. It is therefore safe to take a
  1872. * new reference here without additional locking.
  1873. */
  1874. if (reservations)
  1875. kref_get(&reservations->refs);
  1876. }
  1877. static void resv_map_put(struct vm_area_struct *vma)
  1878. {
  1879. struct resv_map *reservations = vma_resv_map(vma);
  1880. if (!reservations)
  1881. return;
  1882. kref_put(&reservations->refs, resv_map_release);
  1883. }
  1884. static void hugetlb_vm_op_close(struct vm_area_struct *vma)
  1885. {
  1886. struct hstate *h = hstate_vma(vma);
  1887. struct resv_map *reservations = vma_resv_map(vma);
  1888. struct hugepage_subpool *spool = subpool_vma(vma);
  1889. unsigned long reserve;
  1890. unsigned long start;
  1891. unsigned long end;
  1892. if (reservations) {
  1893. start = vma_hugecache_offset(h, vma, vma->vm_start);
  1894. end = vma_hugecache_offset(h, vma, vma->vm_end);
  1895. reserve = (end - start) -
  1896. region_count(&reservations->regions, start, end);
  1897. resv_map_put(vma);
  1898. if (reserve) {
  1899. hugetlb_acct_memory(h, -reserve);
  1900. hugepage_subpool_put_pages(spool, reserve);
  1901. }
  1902. }
  1903. }
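/*
 * Illustration of the reserve calculation in hugetlb_vm_op_close() (numbers
 * are purely illustrative): for a private mapping spanning 8 huge pages of
 * which 3 were actually faulted in and recorded in the resv_map,
 * reserve = 8 - 3 = 5 unconsumed reservations are handed back to the
 * subpool and unaccounted via hugetlb_acct_memory(h, -5).
 */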
  1904. /*
  1905. * We cannot handle pagefaults against hugetlb pages at all. They cause
  1906. * handle_mm_fault() to try to instantiate regular-sized pages in the
1907. * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
  1908. * this far.
  1909. */
  1910. static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  1911. {
  1912. BUG();
  1913. return 0;
  1914. }
  1915. const struct vm_operations_struct hugetlb_vm_ops = {
  1916. .fault = hugetlb_vm_op_fault,
  1917. .open = hugetlb_vm_op_open,
  1918. .close = hugetlb_vm_op_close,
  1919. };
  1920. static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
  1921. int writable)
  1922. {
  1923. pte_t entry;
  1924. if (writable) {
  1925. entry =
  1926. pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
  1927. } else {
  1928. entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
  1929. }
  1930. entry = pte_mkyoung(entry);
  1931. entry = pte_mkhuge(entry);
  1932. entry = arch_make_huge_pte(entry, vma, page, writable);
  1933. return entry;
  1934. }
  1935. static void set_huge_ptep_writable(struct vm_area_struct *vma,
  1936. unsigned long address, pte_t *ptep)
  1937. {
  1938. pte_t entry;
  1939. entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
  1940. if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
  1941. update_mmu_cache(vma, address, ptep);
  1942. }
  1943. int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
  1944. struct vm_area_struct *vma)
  1945. {
  1946. pte_t *src_pte, *dst_pte, entry;
  1947. struct page *ptepage;
  1948. unsigned long addr;
  1949. int cow;
  1950. struct hstate *h = hstate_vma(vma);
  1951. unsigned long sz = huge_page_size(h);
  1952. cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
  1953. for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
  1954. src_pte = huge_pte_offset(src, addr);
  1955. if (!src_pte)
  1956. continue;
  1957. dst_pte = huge_pte_alloc(dst, addr, sz);
  1958. if (!dst_pte)
  1959. goto nomem;
  1960. /* If the pagetables are shared don't copy or take references */
  1961. if (dst_pte == src_pte)
  1962. continue;
  1963. spin_lock(&dst->page_table_lock);
  1964. spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
  1965. if (!huge_pte_none(huge_ptep_get(src_pte))) {
  1966. if (cow)
  1967. huge_ptep_set_wrprotect(src, addr, src_pte);
  1968. entry = huge_ptep_get(src_pte);
  1969. ptepage = pte_page(entry);
  1970. get_page(ptepage);
  1971. page_dup_rmap(ptepage);
  1972. set_huge_pte_at(dst, addr, dst_pte, entry);
  1973. }
  1974. spin_unlock(&src->page_table_lock);
  1975. spin_unlock(&dst->page_table_lock);
  1976. }
  1977. return 0;
  1978. nomem:
  1979. return -ENOMEM;
  1980. }
  1981. static int is_hugetlb_entry_migration(pte_t pte)
  1982. {
  1983. swp_entry_t swp;
  1984. if (huge_pte_none(pte) || pte_present(pte))
  1985. return 0;
  1986. swp = pte_to_swp_entry(pte);
  1987. if (non_swap_entry(swp) && is_migration_entry(swp))
  1988. return 1;
  1989. else
  1990. return 0;
  1991. }
  1992. static int is_hugetlb_entry_hwpoisoned(pte_t pte)
  1993. {
  1994. swp_entry_t swp;
  1995. if (huge_pte_none(pte) || pte_present(pte))
  1996. return 0;
  1997. swp = pte_to_swp_entry(pte);
  1998. if (non_swap_entry(swp) && is_hwpoison_entry(swp))
  1999. return 1;
  2000. else
  2001. return 0;
  2002. }
  2003. void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
  2004. unsigned long start, unsigned long end,
  2005. struct page *ref_page)
  2006. {
  2007. int force_flush = 0;
  2008. struct mm_struct *mm = vma->vm_mm;
  2009. unsigned long address;
  2010. pte_t *ptep;
  2011. pte_t pte;
  2012. struct page *page;
  2013. struct hstate *h = hstate_vma(vma);
  2014. unsigned long sz = huge_page_size(h);
  2015. WARN_ON(!is_vm_hugetlb_page(vma));
  2016. BUG_ON(start & ~huge_page_mask(h));
  2017. BUG_ON(end & ~huge_page_mask(h));
  2018. tlb_start_vma(tlb, vma);
  2019. mmu_notifier_invalidate_range_start(mm, start, end);
  2020. again:
  2021. spin_lock(&mm->page_table_lock);
  2022. for (address = start; address < end; address += sz) {
  2023. ptep = huge_pte_offset(mm, address);
  2024. if (!ptep)
  2025. continue;
  2026. if (huge_pmd_unshare(mm, &address, ptep))
  2027. continue;
  2028. pte = huge_ptep_get(ptep);
  2029. if (huge_pte_none(pte))
  2030. continue;
  2031. /*
2032. * An HWPoisoned hugepage has already been unmapped and its reference dropped
  2033. */
  2034. if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
  2035. continue;
  2036. page = pte_page(pte);
  2037. /*
  2038. * If a reference page is supplied, it is because a specific
  2039. * page is being unmapped, not a range. Ensure the page we
  2040. * are about to unmap is the actual page of interest.
  2041. */
  2042. if (ref_page) {
  2043. if (page != ref_page)
  2044. continue;
  2045. /*
  2046. * Mark the VMA as having unmapped its page so that
  2047. * future faults in this VMA will fail rather than
  2048. * looking like data was lost
  2049. */
  2050. set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
  2051. }
  2052. pte = huge_ptep_get_and_clear(mm, address, ptep);
  2053. tlb_remove_tlb_entry(tlb, ptep, address);
  2054. if (pte_dirty(pte))
  2055. set_page_dirty(page);
  2056. page_remove_rmap(page);
  2057. force_flush = !__tlb_remove_page(tlb, page);
  2058. if (force_flush)
  2059. break;
  2060. /* Bail out after unmapping reference page if supplied */
  2061. if (ref_page)
  2062. break;
  2063. }
  2064. spin_unlock(&mm->page_table_lock);
  2065. /*
  2066. * mmu_gather ran out of room to batch pages, we break out of
2067. * the PTE lock to avoid doing the potentially expensive TLB invalidate
  2068. * and page-free while holding it.
  2069. */
  2070. if (force_flush) {
  2071. force_flush = 0;
  2072. tlb_flush_mmu(tlb);
  2073. if (address < end && !ref_page)
  2074. goto again;
  2075. }
  2076. mmu_notifier_invalidate_range_end(mm, start, end);
  2077. tlb_end_vma(tlb, vma);
  2078. }
  2079. void __unmap_hugepage_range_final(struct mmu_gather *tlb,
  2080. struct vm_area_struct *vma, unsigned long start,
  2081. unsigned long end, struct page *ref_page)
  2082. {
  2083. __unmap_hugepage_range(tlb, vma, start, end, ref_page);
  2084. /*
  2085. * Clear this flag so that x86's huge_pmd_share page_table_shareable
  2086. * test will fail on a vma being torn down, and not grab a page table
  2087. * on its way out. We're lucky that the flag has such an appropriate
  2088. * name, and can in fact be safely cleared here. We could clear it
  2089. * before the __unmap_hugepage_range above, but all that's necessary
  2090. * is to clear it before releasing the i_mmap_mutex. This works
  2091. * because in the context this is called, the VMA is about to be
  2092. * destroyed and the i_mmap_mutex is held.
  2093. */
  2094. vma->vm_flags &= ~VM_MAYSHARE;
  2095. }
  2096. void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
  2097. unsigned long end, struct page *ref_page)
  2098. {
  2099. struct mm_struct *mm;
  2100. struct mmu_gather tlb;
  2101. mm = vma->vm_mm;
  2102. tlb_gather_mmu(&tlb, mm, 0);
  2103. __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
  2104. tlb_finish_mmu(&tlb, start, end);
  2105. }
  2106. /*
  2107. * This is called when the original mapper is failing to COW a MAP_PRIVATE
2108. * mapping it owns the reserve page for. The intention is to unmap the page
  2109. * from other VMAs and let the children be SIGKILLed if they are faulting the
  2110. * same region.
  2111. */
  2112. static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
  2113. struct page *page, unsigned long address)
  2114. {
  2115. struct hstate *h = hstate_vma(vma);
  2116. struct vm_area_struct *iter_vma;
  2117. struct address_space *mapping;
  2118. struct prio_tree_iter iter;
  2119. pgoff_t pgoff;
  2120. /*
  2121. * vm_pgoff is in PAGE_SIZE units, hence the different calculation
  2122. * from page cache lookup which is in HPAGE_SIZE units.
  2123. */
  2124. address = address & huge_page_mask(h);
  2125. pgoff = vma_hugecache_offset(h, vma, address);
  2126. mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
  2127. /*
  2128. * Take the mapping lock for the duration of the table walk. As
  2129. * this mapping should be shared between all the VMAs,
  2130. * __unmap_hugepage_range() is called as the lock is already held
  2131. */
  2132. mutex_lock(&mapping->i_mmap_mutex);
  2133. vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
  2134. /* Do not unmap the current VMA */
  2135. if (iter_vma == vma)
  2136. continue;
  2137. /*
  2138. * Unmap the page from other VMAs without their own reserves.
  2139. * They get marked to be SIGKILLed if they fault in these
  2140. * areas. This is because a future no-page fault on this VMA
  2141. * could insert a zeroed page instead of the data existing
  2142. * from the time of fork. This would look like data corruption
  2143. */
  2144. if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
  2145. unmap_hugepage_range(iter_vma, address,
  2146. address + huge_page_size(h), page);
  2147. }
  2148. mutex_unlock(&mapping->i_mmap_mutex);
  2149. return 1;
  2150. }
  2151. /*
2152. * hugetlb_cow() should be called with the page lock of the original hugepage held.
  2153. * Called with hugetlb_instantiation_mutex held and pte_page locked so we
  2154. * cannot race with other handlers or page migration.
  2155. * Keep the pte_same checks anyway to make transition from the mutex easier.
  2156. */
  2157. static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
  2158. unsigned long address, pte_t *ptep, pte_t pte,
  2159. struct page *pagecache_page)
  2160. {
  2161. struct hstate *h = hstate_vma(vma);
  2162. struct page *old_page, *new_page;
  2163. int avoidcopy;
  2164. int outside_reserve = 0;
  2165. old_page = pte_page(pte);
  2166. retry_avoidcopy:
  2167. /* If no-one else is actually using this page, avoid the copy
  2168. * and just make the page writable */
  2169. avoidcopy = (page_mapcount(old_page) == 1);
  2170. if (avoidcopy) {
  2171. if (PageAnon(old_page))
  2172. page_move_anon_rmap(old_page, vma, address);
  2173. set_huge_ptep_writable(vma, address, ptep);
  2174. return 0;
  2175. }
  2176. /*
  2177. * If the process that created a MAP_PRIVATE mapping is about to
  2178. * perform a COW due to a shared page count, attempt to satisfy
  2179. * the allocation without using the existing reserves. The pagecache
  2180. * page is used to determine if the reserve at this address was
  2181. * consumed or not. If reserves were used, a partial faulted mapping
  2182. * at the time of fork() could consume its reserves on COW instead
  2183. * of the full address range.
  2184. */
  2185. if (!(vma->vm_flags & VM_MAYSHARE) &&
  2186. is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
  2187. old_page != pagecache_page)
  2188. outside_reserve = 1;
  2189. page_cache_get(old_page);
  2190. /* Drop page_table_lock as buddy allocator may be called */
  2191. spin_unlock(&mm->page_table_lock);
  2192. new_page = alloc_huge_page(vma, address, outside_reserve);
  2193. if (IS_ERR(new_page)) {
  2194. long err = PTR_ERR(new_page);
  2195. page_cache_release(old_page);
  2196. /*
  2197. * If a process owning a MAP_PRIVATE mapping fails to COW,
  2198. * it is due to references held by a child and an insufficient
2199. * huge page pool. To guarantee the original mapper's
  2200. * reliability, unmap the page from child processes. The child
  2201. * may get SIGKILLed if it later faults.
  2202. */
  2203. if (outside_reserve) {
  2204. BUG_ON(huge_pte_none(pte));
  2205. if (unmap_ref_private(mm, vma, old_page, address)) {
  2206. BUG_ON(huge_pte_none(pte));
  2207. spin_lock(&mm->page_table_lock);
  2208. ptep = huge_pte_offset(mm, address & huge_page_mask(h));
  2209. if (likely(pte_same(huge_ptep_get(ptep), pte)))
  2210. goto retry_avoidcopy;
  2211. /*
  2212. * race occurs while re-acquiring page_table_lock, and
  2213. * our job is done.
  2214. */
  2215. return 0;
  2216. }
  2217. WARN_ON_ONCE(1);
  2218. }
  2219. /* Caller expects lock to be held */
  2220. spin_lock(&mm->page_table_lock);
  2221. if (err == -ENOMEM)
  2222. return VM_FAULT_OOM;
  2223. else
  2224. return VM_FAULT_SIGBUS;
  2225. }
  2226. /*
2227. * When the original hugepage is a shared one, it does not have
2228. * an anon_vma prepared.
  2229. */
  2230. if (unlikely(anon_vma_prepare(vma))) {
  2231. page_cache_release(new_page);
  2232. page_cache_release(old_page);
  2233. /* Caller expects lock to be held */
  2234. spin_lock(&mm->page_table_lock);
  2235. return VM_FAULT_OOM;
  2236. }
  2237. copy_user_huge_page(new_page, old_page, address, vma,
  2238. pages_per_huge_page(h));
  2239. __SetPageUptodate(new_page);
  2240. /*
  2241. * Retake the page_table_lock to check for racing updates
  2242. * before the page tables are altered
  2243. */
  2244. spin_lock(&mm->page_table_lock);
  2245. ptep = huge_pte_offset(mm, address & huge_page_mask(h));
  2246. if (likely(pte_same(huge_ptep_get(ptep), pte))) {
  2247. /* Break COW */
  2248. mmu_notifier_invalidate_range_start(mm,
  2249. address & huge_page_mask(h),
  2250. (address & huge_page_mask(h)) + huge_page_size(h));
  2251. huge_ptep_clear_flush(vma, address, ptep);
  2252. set_huge_pte_at(mm, address, ptep,
  2253. make_huge_pte(vma, new_page, 1));
  2254. page_remove_rmap(old_page);
  2255. hugepage_add_new_anon_rmap(new_page, vma, address);
  2256. /* Make the old page be freed below */
  2257. new_page = old_page;
  2258. mmu_notifier_invalidate_range_end(mm,
  2259. address & huge_page_mask(h),
  2260. (address & huge_page_mask(h)) + huge_page_size(h));
  2261. }
  2262. page_cache_release(new_page);
  2263. page_cache_release(old_page);
  2264. return 0;
  2265. }
  2266. /* Return the pagecache page at a given address within a VMA */
  2267. static struct page *hugetlbfs_pagecache_page(struct hstate *h,
  2268. struct vm_area_struct *vma, unsigned long address)
  2269. {
  2270. struct address_space *mapping;
  2271. pgoff_t idx;
  2272. mapping = vma->vm_file->f_mapping;
  2273. idx = vma_hugecache_offset(h, vma, address);
  2274. return find_lock_page(mapping, idx);
  2275. }
  2276. /*
  2277. * Return whether there is a pagecache page to back given address within VMA.
  2278. * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
  2279. */
  2280. static bool hugetlbfs_pagecache_present(struct hstate *h,
  2281. struct vm_area_struct *vma, unsigned long address)
  2282. {
  2283. struct address_space *mapping;
  2284. pgoff_t idx;
  2285. struct page *page;
  2286. mapping = vma->vm_file->f_mapping;
  2287. idx = vma_hugecache_offset(h, vma, address);
  2288. page = find_get_page(mapping, idx);
  2289. if (page)
  2290. put_page(page);
  2291. return page != NULL;
  2292. }
  2293. static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
  2294. unsigned long address, pte_t *ptep, unsigned int flags)
  2295. {
  2296. struct hstate *h = hstate_vma(vma);
  2297. int ret = VM_FAULT_SIGBUS;
  2298. int anon_rmap = 0;
  2299. pgoff_t idx;
  2300. unsigned long size;
  2301. struct page *page;
  2302. struct address_space *mapping;
  2303. pte_t new_pte;
  2304. /*
  2305. * Currently, we are forced to kill the process in the event the
  2306. * original mapper has unmapped pages from the child due to a failed
  2307. * COW. Warn that such a situation has occurred as it may not be obvious
  2308. */
  2309. if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
  2310. printk(KERN_WARNING
  2311. "PID %d killed due to inadequate hugepage pool\n",
  2312. current->pid);
  2313. return ret;
  2314. }
  2315. mapping = vma->vm_file->f_mapping;
  2316. idx = vma_hugecache_offset(h, vma, address);
  2317. /*
  2318. * Use page lock to guard against racing truncation
  2319. * before we get page_table_lock.
  2320. */
  2321. retry:
  2322. page = find_lock_page(mapping, idx);
  2323. if (!page) {
  2324. size = i_size_read(mapping->host) >> huge_page_shift(h);
  2325. if (idx >= size)
  2326. goto out;
  2327. page = alloc_huge_page(vma, address, 0);
  2328. if (IS_ERR(page)) {
  2329. ret = PTR_ERR(page);
  2330. if (ret == -ENOMEM)
  2331. ret = VM_FAULT_OOM;
  2332. else
  2333. ret = VM_FAULT_SIGBUS;
  2334. goto out;
  2335. }
  2336. clear_huge_page(page, address, pages_per_huge_page(h));
  2337. __SetPageUptodate(page);
  2338. if (vma->vm_flags & VM_MAYSHARE) {
  2339. int err;
  2340. struct inode *inode = mapping->host;
  2341. err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
  2342. if (err) {
  2343. put_page(page);
  2344. if (err == -EEXIST)
  2345. goto retry;
  2346. goto out;
  2347. }
  2348. spin_lock(&inode->i_lock);
  2349. inode->i_blocks += blocks_per_huge_page(h);
  2350. spin_unlock(&inode->i_lock);
  2351. } else {
  2352. lock_page(page);
  2353. if (unlikely(anon_vma_prepare(vma))) {
  2354. ret = VM_FAULT_OOM;
  2355. goto backout_unlocked;
  2356. }
  2357. anon_rmap = 1;
  2358. }
  2359. } else {
  2360. /*
2361. * If a memory error occurs between mmap() and fault, some processes
2362. * don't have a hwpoisoned swap entry for the errored virtual address.
  2363. * So we need to block hugepage fault by PG_hwpoison bit check.
  2364. */
  2365. if (unlikely(PageHWPoison(page))) {
  2366. ret = VM_FAULT_HWPOISON |
  2367. VM_FAULT_SET_HINDEX(hstate_index(h));
  2368. goto backout_unlocked;
  2369. }
  2370. }
  2371. /*
  2372. * If we are going to COW a private mapping later, we examine the
  2373. * pending reservations for this page now. This will ensure that
  2374. * any allocations necessary to record that reservation occur outside
  2375. * the spinlock.
  2376. */
  2377. if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
  2378. if (vma_needs_reservation(h, vma, address) < 0) {
  2379. ret = VM_FAULT_OOM;
  2380. goto backout_unlocked;
  2381. }
  2382. spin_lock(&mm->page_table_lock);
  2383. size = i_size_read(mapping->host) >> huge_page_shift(h);
  2384. if (idx >= size)
  2385. goto backout;
  2386. ret = 0;
  2387. if (!huge_pte_none(huge_ptep_get(ptep)))
  2388. goto backout;
  2389. if (anon_rmap)
  2390. hugepage_add_new_anon_rmap(page, vma, address);
  2391. else
  2392. page_dup_rmap(page);
  2393. new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
  2394. && (vma->vm_flags & VM_SHARED)));
  2395. set_huge_pte_at(mm, address, ptep, new_pte);
  2396. if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
  2397. /* Optimization, do the COW without a second fault */
  2398. ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
  2399. }
  2400. spin_unlock(&mm->page_table_lock);
  2401. unlock_page(page);
  2402. out:
  2403. return ret;
  2404. backout:
  2405. spin_unlock(&mm->page_table_lock);
  2406. backout_unlocked:
  2407. unlock_page(page);
  2408. put_page(page);
  2409. goto out;
  2410. }
  2411. int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  2412. unsigned long address, unsigned int flags)
  2413. {
  2414. pte_t *ptep;
  2415. pte_t entry;
  2416. int ret;
  2417. struct page *page = NULL;
  2418. struct page *pagecache_page = NULL;
  2419. static DEFINE_MUTEX(hugetlb_instantiation_mutex);
  2420. struct hstate *h = hstate_vma(vma);
  2421. address &= huge_page_mask(h);
  2422. ptep = huge_pte_offset(mm, address);
  2423. if (ptep) {
  2424. entry = huge_ptep_get(ptep);
  2425. if (unlikely(is_hugetlb_entry_migration(entry))) {
  2426. migration_entry_wait(mm, (pmd_t *)ptep, address);
  2427. return 0;
  2428. } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
  2429. return VM_FAULT_HWPOISON_LARGE |
  2430. VM_FAULT_SET_HINDEX(hstate_index(h));
  2431. }
  2432. ptep = huge_pte_alloc(mm, address, huge_page_size(h));
  2433. if (!ptep)
  2434. return VM_FAULT_OOM;
  2435. /*
  2436. * Serialize hugepage allocation and instantiation, so that we don't
  2437. * get spurious allocation failures if two CPUs race to instantiate
  2438. * the same page in the page cache.
  2439. */
  2440. mutex_lock(&hugetlb_instantiation_mutex);
  2441. entry = huge_ptep_get(ptep);
  2442. if (huge_pte_none(entry)) {
  2443. ret = hugetlb_no_page(mm, vma, address, ptep, flags);
  2444. goto out_mutex;
  2445. }
  2446. ret = 0;
  2447. /*
  2448. * If we are going to COW the mapping later, we examine the pending
  2449. * reservations for this page now. This will ensure that any
  2450. * allocations necessary to record that reservation occur outside the
  2451. * spinlock. For private mappings, we also lookup the pagecache
  2452. * page now as it is used to determine if a reservation has been
  2453. * consumed.
  2454. */
  2455. if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
  2456. if (vma_needs_reservation(h, vma, address) < 0) {
  2457. ret = VM_FAULT_OOM;
  2458. goto out_mutex;
  2459. }
  2460. if (!(vma->vm_flags & VM_MAYSHARE))
  2461. pagecache_page = hugetlbfs_pagecache_page(h,
  2462. vma, address);
  2463. }
  2464. /*
  2465. * hugetlb_cow() requires page locks of pte_page(entry) and
  2466. * pagecache_page, so here we need take the former one
  2467. * when page != pagecache_page or !pagecache_page.
  2468. * Note that locking order is always pagecache_page -> page,
  2469. * so no worry about deadlock.
  2470. */
  2471. page = pte_page(entry);
  2472. get_page(page);
  2473. if (page != pagecache_page)
  2474. lock_page(page);
  2475. spin_lock(&mm->page_table_lock);
  2476. /* Check for a racing update before calling hugetlb_cow */
  2477. if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
  2478. goto out_page_table_lock;
  2479. if (flags & FAULT_FLAG_WRITE) {
  2480. if (!pte_write(entry)) {
  2481. ret = hugetlb_cow(mm, vma, address, ptep, entry,
  2482. pagecache_page);
  2483. goto out_page_table_lock;
  2484. }
  2485. entry = pte_mkdirty(entry);
  2486. }
  2487. entry = pte_mkyoung(entry);
  2488. if (huge_ptep_set_access_flags(vma, address, ptep, entry,
  2489. flags & FAULT_FLAG_WRITE))
  2490. update_mmu_cache(vma, address, ptep);
  2491. out_page_table_lock:
  2492. spin_unlock(&mm->page_table_lock);
  2493. if (pagecache_page) {
  2494. unlock_page(pagecache_page);
  2495. put_page(pagecache_page);
  2496. }
  2497. if (page != pagecache_page)
  2498. unlock_page(page);
  2499. put_page(page);
  2500. out_mutex:
  2501. mutex_unlock(&hugetlb_instantiation_mutex);
  2502. return ret;
  2503. }
2504. /* Can be overridden by architectures */
  2505. __attribute__((weak)) struct page *
  2506. follow_huge_pud(struct mm_struct *mm, unsigned long address,
  2507. pud_t *pud, int write)
  2508. {
  2509. BUG();
  2510. return NULL;
  2511. }
  2512. int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
  2513. struct page **pages, struct vm_area_struct **vmas,
  2514. unsigned long *position, int *length, int i,
  2515. unsigned int flags)
  2516. {
  2517. unsigned long pfn_offset;
  2518. unsigned long vaddr = *position;
  2519. int remainder = *length;
  2520. struct hstate *h = hstate_vma(vma);
  2521. spin_lock(&mm->page_table_lock);
  2522. while (vaddr < vma->vm_end && remainder) {
  2523. pte_t *pte;
  2524. int absent;
  2525. struct page *page;
  2526. /*
  2527. * Some archs (sparc64, sh*) have multiple pte_ts to
  2528. * each hugepage. We have to make sure we get the
  2529. * first, for the page indexing below to work.
  2530. */
  2531. pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
  2532. absent = !pte || huge_pte_none(huge_ptep_get(pte));
  2533. /*
  2534. * When coredumping, it suits get_dump_page if we just return
  2535. * an error where there's an empty slot with no huge pagecache
  2536. * to back it. This way, we avoid allocating a hugepage, and
  2537. * the sparse dumpfile avoids allocating disk blocks, but its
  2538. * huge holes still show up with zeroes where they need to be.
  2539. */
  2540. if (absent && (flags & FOLL_DUMP) &&
  2541. !hugetlbfs_pagecache_present(h, vma, vaddr)) {
  2542. remainder = 0;
  2543. break;
  2544. }
  2545. if (absent ||
  2546. ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
  2547. int ret;
  2548. spin_unlock(&mm->page_table_lock);
  2549. ret = hugetlb_fault(mm, vma, vaddr,
  2550. (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
  2551. spin_lock(&mm->page_table_lock);
  2552. if (!(ret & VM_FAULT_ERROR))
  2553. continue;
  2554. remainder = 0;
  2555. break;
  2556. }
  2557. pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
  2558. page = pte_page(huge_ptep_get(pte));
  2559. same_page:
  2560. if (pages) {
  2561. pages[i] = mem_map_offset(page, pfn_offset);
  2562. get_page(pages[i]);
  2563. }
  2564. if (vmas)
  2565. vmas[i] = vma;
  2566. vaddr += PAGE_SIZE;
  2567. ++pfn_offset;
  2568. --remainder;
  2569. ++i;
  2570. if (vaddr < vma->vm_end && remainder &&
  2571. pfn_offset < pages_per_huge_page(h)) {
  2572. /*
  2573. * We use pfn_offset to avoid touching the pageframes
  2574. * of this compound page.
  2575. */
  2576. goto same_page;
  2577. }
  2578. }
  2579. spin_unlock(&mm->page_table_lock);
  2580. *length = remainder;
  2581. *position = vaddr;
  2582. return i ? i : -EFAULT;
  2583. }
  2584. void hugetlb_change_protection(struct vm_area_struct *vma,
  2585. unsigned long address, unsigned long end, pgprot_t newprot)
  2586. {
  2587. struct mm_struct *mm = vma->vm_mm;
  2588. unsigned long start = address;
  2589. pte_t *ptep;
  2590. pte_t pte;
  2591. struct hstate *h = hstate_vma(vma);
  2592. BUG_ON(address >= end);
  2593. flush_cache_range(vma, address, end);
  2594. mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
  2595. spin_lock(&mm->page_table_lock);
  2596. for (; address < end; address += huge_page_size(h)) {
  2597. ptep = huge_pte_offset(mm, address);
  2598. if (!ptep)
  2599. continue;
  2600. if (huge_pmd_unshare(mm, &address, ptep))
  2601. continue;
  2602. if (!huge_pte_none(huge_ptep_get(ptep))) {
  2603. pte = huge_ptep_get_and_clear(mm, address, ptep);
  2604. pte = pte_mkhuge(pte_modify(pte, newprot));
  2605. set_huge_pte_at(mm, address, ptep, pte);
  2606. }
  2607. }
  2608. spin_unlock(&mm->page_table_lock);
  2609. /*
  2610. * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
  2611. * may have cleared our pud entry and done put_page on the page table:
  2612. * once we release i_mmap_mutex, another task can do the final put_page
  2613. * and that page table be reused and filled with junk.
  2614. */
  2615. flush_tlb_range(vma, start, end);
  2616. mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
  2617. }
  2618. int hugetlb_reserve_pages(struct inode *inode,
  2619. long from, long to,
  2620. struct vm_area_struct *vma,
  2621. vm_flags_t vm_flags)
  2622. {
  2623. long ret, chg;
  2624. struct hstate *h = hstate_inode(inode);
  2625. struct hugepage_subpool *spool = subpool_inode(inode);
  2626. /*
  2627. * Only apply hugepage reservation if asked. At fault time, an
  2628. * attempt will be made for VM_NORESERVE to allocate a page
  2629. * without using reserves
  2630. */
  2631. if (vm_flags & VM_NORESERVE)
  2632. return 0;
  2633. /*
  2634. * Shared mappings base their reservation on the number of pages that
  2635. * are already allocated on behalf of the file. Private mappings need
  2636. * to reserve the full area even if read-only as mprotect() may be
  2637. * called to make the mapping read-write. Assume !vma is a shm mapping
  2638. */
  2639. if (!vma || vma->vm_flags & VM_MAYSHARE)
  2640. chg = region_chg(&inode->i_mapping->private_list, from, to);
  2641. else {
  2642. struct resv_map *resv_map = resv_map_alloc();
  2643. if (!resv_map)
  2644. return -ENOMEM;
  2645. chg = to - from;
  2646. set_vma_resv_map(vma, resv_map);
  2647. set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
  2648. }
  2649. if (chg < 0) {
  2650. ret = chg;
  2651. goto out_err;
  2652. }
  2653. /* There must be enough pages in the subpool for the mapping */
  2654. if (hugepage_subpool_get_pages(spool, chg)) {
  2655. ret = -ENOSPC;
  2656. goto out_err;
  2657. }
  2658. /*
2659. * Check that enough hugepages are available for the reservation.
2660. * Hand the pages back to the subpool if there are not.
  2661. */
  2662. ret = hugetlb_acct_memory(h, chg);
  2663. if (ret < 0) {
  2664. hugepage_subpool_put_pages(spool, chg);
  2665. goto out_err;
  2666. }
  2667. /*
  2668. * Account for the reservations made. Shared mappings record regions
  2669. * that have reservations as they are shared by multiple VMAs.
  2670. * When the last VMA disappears, the region map says how much
  2671. * the reservation was and the page cache tells how much of
  2672. * the reservation was consumed. Private mappings are per-VMA and
  2673. * only the consumed reservations are tracked. When the VMA
  2674. * disappears, the original reservation is the VMA size and the
  2675. * consumed reservations are stored in the map. Hence, nothing
  2676. * else has to be done for private mappings here
  2677. */
  2678. if (!vma || vma->vm_flags & VM_MAYSHARE)
  2679. region_add(&inode->i_mapping->private_list, from, to);
  2680. return 0;
  2681. out_err:
  2682. if (vma)
  2683. resv_map_put(vma);
  2684. return ret;
  2685. }
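/*
 * Rough example of the accounting above (illustrative numbers): a shared
 * mapping of huge pages [0, 4) on a file with no prior reservations gets
 * chg = 4 from region_chg(); a second shared mapping of the same range
 * would see chg = 0 because the regions are already recorded. A private
 * mapping always charges its full range, chg = to - from.
 */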
  2686. void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
  2687. {
  2688. struct hstate *h = hstate_inode(inode);
  2689. long chg = region_truncate(&inode->i_mapping->private_list, offset);
  2690. struct hugepage_subpool *spool = subpool_inode(inode);
  2691. spin_lock(&inode->i_lock);
  2692. inode->i_blocks -= (blocks_per_huge_page(h) * freed);
  2693. spin_unlock(&inode->i_lock);
  2694. hugepage_subpool_put_pages(spool, (chg - freed));
  2695. hugetlb_acct_memory(h, -(chg - freed));
  2696. }
  2697. #ifdef CONFIG_MEMORY_FAILURE
  2698. /* Should be called in hugetlb_lock */
  2699. static int is_hugepage_on_freelist(struct page *hpage)
  2700. {
  2701. struct page *page;
  2702. struct page *tmp;
  2703. struct hstate *h = page_hstate(hpage);
  2704. int nid = page_to_nid(hpage);
  2705. list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
  2706. if (page == hpage)
  2707. return 1;
  2708. return 0;
  2709. }
  2710. /*
  2711. * This function is called from memory failure code.
  2712. * Assume the caller holds page lock of the head page.
  2713. */
  2714. int dequeue_hwpoisoned_huge_page(struct page *hpage)
  2715. {
  2716. struct hstate *h = page_hstate(hpage);
  2717. int nid = page_to_nid(hpage);
  2718. int ret = -EBUSY;
  2719. spin_lock(&hugetlb_lock);
  2720. if (is_hugepage_on_freelist(hpage)) {
  2721. list_del(&hpage->lru);
  2722. set_page_refcounted(hpage);
  2723. h->free_huge_pages--;
  2724. h->free_huge_pages_node[nid]--;
  2725. ret = 0;
  2726. }
  2727. spin_unlock(&hugetlb_lock);
  2728. return ret;
  2729. }
  2730. #endif