hugetlb.c
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
DEFINE_SPINLOCK(hugetlb_lock);

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
	bool free = (spool->count == 0) && (spool->used_hpages == 0);

	spin_unlock(&spool->lock);
	/* If no pages are used, and no other handles to the subpool
	 * remain, free the subpool. */
	if (free)
		kfree(spool);
}

struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
{
	struct hugepage_subpool *spool;

	spool = kmalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = nr_blocks;
	spool->used_hpages = 0;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	spin_lock(&spool->lock);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool);
}

static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				      long delta)
{
	int ret = 0;

	if (!spool)
		return 0;

	spin_lock(&spool->lock);
	if ((spool->used_hpages + delta) <= spool->max_hpages) {
		spool->used_hpages += delta;
	} else {
		ret = -ENOMEM;
	}
	spin_unlock(&spool->lock);

	return ret;
}

static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	if (!spool)
		return;

	spin_lock(&spool->lock);
	spool->used_hpages -= delta;
	/* If hugetlbfs_put_super couldn't free spool due to
	 * an outstanding quota reference, free it now. */
	unlock_or_release_subpool(spool);
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are protected by a combination of the mmap_sem
 * and the hugetlb_instantiation_mutex. To access or modify a region the caller
 * must either hold the mmap_sem for write, or the mmap_sem for read and
 * the hugetlb_instantiation_mutex:
 *
 *	down_write(&mm->mmap_sem);
 * or
 *	down_read(&mm->mmap_sem);
 *	mutex_lock(&hugetlb_instantiation_mutex);
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};
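
/*
 * Each file_region describes a half-open range [from, to) of huge-page
 * offsets within the mapping.  The list is kept sorted by 'from' and the
 * regions do not overlap; region_add() and region_chg() below maintain
 * that invariant.
 */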
static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}
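
/*
 * Count the number of huge pages in [f, t) that are covered by existing
 * regions in the reservation map.
 */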
static long region_count(struct list_head *head, long f, long t)
{
	struct file_region *rg;
	long chg = 0;

	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as that used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	struct hstate *hstate;

	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	hstate = hstate_vma(vma);

	return 1UL << huge_page_shift(hstate);
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific version of this
 * function is required.
 */
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}
#endif

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file, this region map represents the backing file
 * pages which have ever had a reservation assigned; this persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it, this region map represents those offsets which have consumed
 * reservation ie. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
				unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

struct resv_map {
	struct kref refs;
	struct list_head regions;
};

static struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	if (!resv_map)
		return NULL;

	kref_init(&resv_map->refs);
	INIT_LIST_HEAD(&resv_map->regions);

	return resv_map;
}

static void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);

	/* Clear out any active regions before we release the map. */
	region_truncate(&resv_map->regions, 0);
	kfree(resv_map);
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_MAYSHARE))
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	return NULL;
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Decrement the reserved pages in the hugepage pool by one */
static void decrement_hugepage_resv_vma(struct hstate *h,
			struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NORESERVE)
		return;

	if (vma->vm_flags & VM_MAYSHARE) {
		/* Shared mappings always use reserves */
		h->resv_huge_pages--;
	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Only the process that called mmap() has reserves for
		 * private mappings.
		 */
		h->resv_huge_pages--;
	}
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NORESERVE)
		return 0;
	if (vma->vm_flags & VM_MAYSHARE)
		return 1;
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return 1;
	return 0;
}
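
/*
 * Gigantic pages (order >= MAX_ORDER) may span memory sections whose struct
 * pages are not virtually contiguous, so the copy below walks the tail pages
 * with mem_map_next() instead of plain pointer arithmetic.
 */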
static void copy_gigantic_page(struct page *dst, struct page *src)
{
	int i;
	struct hstate *h = page_hstate(src);
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < pages_per_huge_page(h); ) {
		cond_resched();
		copy_highpage(dst, src);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

void copy_huge_page(struct page *dst, struct page *src)
{
	int i;
	struct hstate *h = page_hstate(src);

	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
		copy_gigantic_page(dst, src);
		return;
	}

	might_sleep();
	for (i = 0; i < pages_per_huge_page(h); i++) {
		cond_resched();
		copy_highpage(dst + i, src + i);
	}
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_move(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	if (list_empty(&h->hugepage_freelists[nid]))
		return NULL;
	page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
	list_move(&page->lru, &h->hugepage_activelist);
	set_page_refcounted(page);
	h->free_huge_pages--;
	h->free_huge_pages_node[nid]--;
	return page;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve)
{
	struct page *page = NULL;
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	unsigned int cpuset_mems_cookie;
	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

retry_cpuset:
	cpuset_mems_cookie = get_mems_allowed();
	zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol, &nodemask);

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					MAX_NR_ZONES - 1, nodemask) {
		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
			page = dequeue_huge_page_node(h, zone_to_nid(zone));
			if (page) {
				if (!avoid_reserve)
					decrement_hugepage_resv_vma(h, vma);
				break;
			}
		}
	}

	mpol_cond_put(mpol);
	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
		goto retry_cpuset;
	return page;

err:
	return NULL;
}

static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	VM_BUG_ON(h->order >= MAX_ORDER);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	VM_BUG_ON(hugetlb_cgroup_from_page(page));
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	arch_release_hugepage(page);
	__free_pages(page, huge_page_order(h));
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

static void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct hugepage_subpool *spool =
		(struct hugepage_subpool *)page_private(page);

	set_page_private(page, 0);
	page->mapping = NULL;
	BUG_ON(page_count(page));
	BUG_ON(page_mapcount(page));

	spin_lock(&hugetlb_lock);
	hugetlb_cgroup_uncharge_page(hstate_index(h),
				     pages_per_huge_page(h), page);
	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
		/* remove the page from active list */
		list_del(&page->lru);
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		arch_clear_hugepage_flags(page);
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
	hugepage_subpool_put_pages(spool, 1);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	INIT_LIST_HEAD(&page->lru);
	set_compound_page_dtor(page, free_huge_page);
	spin_lock(&hugetlb_lock);
	set_hugetlb_cgroup(page, NULL);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
	put_page(page); /* free it into the hugepage allocator */
}

static void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		__SetPageTail(p);
		set_page_count(p, 0);
		p->first_page = page;
	}
}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	compound_page_dtor *dtor;

	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	dtor = get_compound_page_dtor(page);

	return dtor == free_huge_page;
}
EXPORT_SYMBOL_GPL(PageHuge);

pgoff_t __basepage_index(struct page *page)
{
	struct page *page_head = compound_head(page);
	pgoff_t index = page_index(page_head);
	unsigned long compound_idx;

	if (!PageHuge(page_head))
		return page_index(page);

	if (compound_order(page_head) >= MAX_ORDER)
		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
	else
		compound_idx = page - page_head;

	return (index << compound_order(page_head)) + compound_idx;
}

static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	if (h->order >= MAX_ORDER)
		return NULL;

	page = alloc_pages_exact_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
						__GFP_REPEAT|__GFP_NOWARN,
		huge_page_order(h));
	if (page) {
		if (arch_prepare_hugepage(page)) {
			__free_pages(page, huge_page_order(h));
			return NULL;
		}
		prep_new_huge_page(h, page, nid);
	}

	return page;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node(nid, *nodes_allowed);
	if (nid == MAX_NUMNODES)
		nid = first_node(*nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)
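
/*
 * The two iterators above visit each allowed node at most once per pass,
 * starting from the hstate's saved "next" node, so repeated allocations
 * and frees stay roughly balanced across the nodes in *mask.
 */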
static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
	struct page *page;
	int nr_nodes, node;
	int ret = 0;

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		page = alloc_fresh_huge_page_node(h, node);
		if (page) {
			ret = 1;
			break;
		}
	}

	if (ret)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return ret;
}

/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
							bool acct_surplus)
{
	int nr_nodes, node;
	int ret = 0;

	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
		    !list_empty(&h->hugepage_freelists[node])) {
			struct page *page =
				list_entry(h->hugepage_freelists[node].next,
					   struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[node]--;
			if (acct_surplus) {
				h->surplus_huge_pages--;
				h->surplus_huge_pages_node[node]--;
			}
			update_and_free_page(h, page);
			ret = 1;
			break;
		}
	}

	return ret;
}

static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
{
	struct page *page;
	unsigned int r_nid;

	if (h->order >= MAX_ORDER)
		return NULL;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		h->nr_huge_pages++;
		h->surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	if (nid == NUMA_NO_NODE)
		page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
				   __GFP_REPEAT|__GFP_NOWARN,
				   huge_page_order(h));
	else
		page = alloc_pages_exact_node(nid,
			htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
			__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));

	if (page && arch_prepare_hugepage(page)) {
		__free_pages(page, huge_page_order(h));
		page = NULL;
	}

	spin_lock(&hugetlb_lock);
	if (page) {
		INIT_LIST_HEAD(&page->lru);
		r_nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		set_hugetlb_cgroup(page, NULL);
		/*
		 * We incremented the global counters already
		 */
		h->nr_huge_pages_node[r_nid]++;
		h->surplus_huge_pages_node[r_nid]++;
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	} else {
		h->nr_huge_pages--;
		h->surplus_huge_pages--;
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
	}
	spin_unlock(&hugetlb_lock);

	return page;
}
/*
 * This allocation function is useful in the context where vma is irrelevant.
 * E.g. soft-offlining uses this function because it only cares about the
 * physical address of the error page.
 */
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_node(h, nid);
	spin_unlock(&hugetlb_lock);

	if (!page)
		page = alloc_buddy_huge_page(h, nid);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;
	bool alloc_ok = true;

	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
		if (!page) {
			alloc_ok = false;
			break;
		}
		list_add(&page->lru, &surplus_list);
	}
	allocated += i;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0) {
		if (alloc_ok)
			goto retry;
		/*
		 * We were not able to allocate enough pages to
		 * satisfy the entire reservation so we free what
		 * we've allocated so far.
		 */
		goto free;
	}
	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;

	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON(page_count(page));
		enqueue_huge_page(h, page);
	}
free:
	spin_unlock(&hugetlb_lock);

	/* Free unnecessary surplus pages to the buddy allocator */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
		put_page(page);
	spin_lock(&hugetlb_lock);

	return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 * Called with hugetlb_lock held.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	unsigned long nr_pages;

	/* Uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;

	/* Cannot return gigantic pages currently */
	if (h->order >= MAX_ORDER)
		return;

	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes with memory. Iterate across these nodes
	 * until we can no longer free unreserved surplus pages. This occurs
	 * when the nodes with surplus pages have no free pages.
	 * free_pool_huge_page() will balance the freed pages across the
	 * on-line nodes with memory and will handle the hstate accounting.
	 */
	while (nr_pages--) {
		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
			break;
	}
}

/*
 * Determine if the huge page at addr within the vma has an associated
 * reservation.  Where it does not we will need to logically increase
 * reservation and actually increase subpool usage before an allocation
 * can occur.  Where any new reservation would be required the
 * reservation change is prepared, but not committed.  Once the page
 * has been allocated from the subpool and instantiated the change should
 * be committed via vma_commit_reservation.  No action is required on
 * failure.
 */
static long vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_MAYSHARE) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		return region_chg(&inode->i_mapping->private_list,
							idx, idx + 1);

	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		return 1;

	} else {
		long err;
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		err = region_chg(&reservations->regions, idx, idx + 1);
		if (err < 0)
			return err;
		return 0;
	}
}

static void vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_MAYSHARE) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		region_add(&inode->i_mapping->private_list, idx, idx + 1);

	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		/* Mark this page used in the map. */
		region_add(&reservations->regions, idx, idx + 1);
	}
}

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct hugepage_subpool *spool = subpool_vma(vma);
	struct hstate *h = hstate_vma(vma);
	struct page *page;
	long chg;
	int ret, idx;
	struct hugetlb_cgroup *h_cg;

	idx = hstate_index(h);
	/*
	 * Processes that did not create the mapping will have no
	 * reserves and will not have accounted against the subpool
	 * limit. Check that the subpool limit can be met before
	 * satisfying the allocation. MAP_NORESERVE mappings may also
	 * need pages and subpool limit allocated if no reserve
	 * mapping overlaps.
	 */
	chg = vma_needs_reservation(h, vma, addr);
	if (chg < 0)
		return ERR_PTR(-ENOMEM);
	if (chg)
		if (hugepage_subpool_get_pages(spool, chg))
			return ERR_PTR(-ENOSPC);

	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
	if (ret) {
		hugepage_subpool_put_pages(spool, chg);
		return ERR_PTR(-ENOSPC);
	}
	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
	if (!page) {
		spin_unlock(&hugetlb_lock);
		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
		if (!page) {
			hugetlb_cgroup_uncharge_cgroup(idx,
						       pages_per_huge_page(h),
						       h_cg);
			hugepage_subpool_put_pages(spool, chg);
			return ERR_PTR(-ENOSPC);
		}
		spin_lock(&hugetlb_lock);
		list_move(&page->lru, &h->hugepage_activelist);
		/* Fall through */
	}
	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
	spin_unlock(&hugetlb_lock);

	set_page_private(page, (unsigned long)spool);

	vma_commit_reservation(h, vma, addr);
	return page;
}

int __weak alloc_bootmem_huge_page(struct hstate *h)
{
	struct huge_bootmem_page *m;
	int nr_nodes, node;

	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
		void *addr;

		addr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
				huge_page_size(h), huge_page_size(h), 0);
		if (addr) {
			/*
			 * Use the beginning of the huge page to store the
			 * huge_bootmem_page struct (until gather_bootmem
			 * puts them into the mem_map).
			 */
			m = addr;
			goto found;
		}
	}
	return 0;

found:
	BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
	/* Put them into a private list first because mem_map is not up yet */
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}

static void prep_compound_huge_page(struct page *page, int order)
{
	if (unlikely(order > (MAX_ORDER - 1)))
		prep_compound_gigantic_page(page, order);
	else
		prep_compound_page(page, order);
}

/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
	struct huge_bootmem_page *m;

	list_for_each_entry(m, &huge_boot_pages, list) {
		struct hstate *h = m->hstate;
		struct page *page;

#ifdef CONFIG_HIGHMEM
		page = pfn_to_page(m->phys >> PAGE_SHIFT);
		free_bootmem_late((unsigned long)m,
				  sizeof(struct huge_bootmem_page));
#else
		page = virt_to_page(m);
#endif
		__ClearPageReserved(page);
		WARN_ON(page_count(page) != 1);
		prep_compound_huge_page(page, h->order);
		prep_new_huge_page(h, page, page_to_nid(page));
		/*
		 * If we had gigantic hugepages allocated at boot time, we need
		 * to restore the 'stolen' pages to totalram_pages in order to
		 * fix confusing memory reports from free(1) and other
		 * side-effects, like CommitLimit going negative.
		 */
		if (h->order > (MAX_ORDER - 1))
			adjust_managed_page_count(page, 1 << h->order);
	}
}

static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
	unsigned long i;

	for (i = 0; i < h->max_huge_pages; ++i) {
		if (h->order >= MAX_ORDER) {
			if (!alloc_bootmem_huge_page(h))
				break;
		} else if (!alloc_fresh_huge_page(h,
					 &node_states[N_MEMORY]))
			break;
	}
	h->max_huge_pages = i;
}

static void __init hugetlb_init_hstates(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/* oversize hugepages were init'ed in early boot */
		if (h->order < MAX_ORDER)
			hugetlb_hstate_alloc_pages(h);
	}
}

static char * __init memfmt(char *buf, unsigned long n)
{
	if (n >= (1UL << 30))
		sprintf(buf, "%lu GB", n >> 30);
	else if (n >= (1UL << 20))
		sprintf(buf, "%lu MB", n >> 20);
	else
		sprintf(buf, "%lu KB", n >> 10);
	return buf;
}

static void __init report_hugepages(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		char buf[32];

		pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
			memfmt(buf, huge_page_size(h)),
			h->free_huge_pages);
	}
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	int i;

	if (h->order >= MAX_ORDER)
		return;

	for_each_node_mask(i, *nodes_allowed) {
		struct page *page, *next;
		struct list_head *freel = &h->hugepage_freelists[i];
		list_for_each_entry_safe(page, next, freel, lru) {
			if (count >= h->nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(h, page);
			h->free_huge_pages--;
			h->free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
}
#endif

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
				int delta)
{
	int nr_nodes, node;

	VM_BUG_ON(delta != -1 && delta != 1);

	if (delta < 0) {
		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
			if (h->surplus_huge_pages_node[node])
				goto found;
		}
	} else {
		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
			if (h->surplus_huge_pages_node[node] <
					h->nr_huge_pages_node[node])
				goto found;
		}
	}
	return 0;

found:
	h->surplus_huge_pages += delta;
	h->surplus_huge_pages_node[node] += delta;
	return 1;
}

#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
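
/*
 * "Persistent" huge pages are the pool pages that are not surplus, i.e.
 * the pages that should stay allocated even after overcommitted surplus
 * pages have been returned to the buddy allocator.
 */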
  1183. static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
  1184. nodemask_t *nodes_allowed)
  1185. {
  1186. unsigned long min_count, ret;
  1187. if (h->order >= MAX_ORDER)
  1188. return h->max_huge_pages;
  1189. /*
  1190. * Increase the pool size
  1191. * First take pages out of surplus state. Then make up the
  1192. * remaining difference by allocating fresh huge pages.
  1193. *
  1194. * We might race with alloc_buddy_huge_page() here and be unable
  1195. * to convert a surplus huge page to a normal huge page. That is
  1196. * not critical, though, it just means the overall size of the
  1197. * pool might be one hugepage larger than it needs to be, but
  1198. * within all the constraints specified by the sysctls.
  1199. */
  1200. spin_lock(&hugetlb_lock);
  1201. while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
  1202. if (!adjust_pool_surplus(h, nodes_allowed, -1))
  1203. break;
  1204. }
  1205. while (count > persistent_huge_pages(h)) {
  1206. /*
  1207. * If this allocation races such that we no longer need the
  1208. * page, free_huge_page will handle it by freeing the page
  1209. * and reducing the surplus.
  1210. */
  1211. spin_unlock(&hugetlb_lock);
  1212. ret = alloc_fresh_huge_page(h, nodes_allowed);
  1213. spin_lock(&hugetlb_lock);
  1214. if (!ret)
  1215. goto out;
  1216. /* Bail for signals. Probably ctrl-c from user */
  1217. if (signal_pending(current))
  1218. goto out;
  1219. }
  1220. /*
  1221. * Decrease the pool size
  1222. * First return free pages to the buddy allocator (being careful
  1223. * to keep enough around to satisfy reservations). Then place
  1224. * pages into surplus state as needed so the pool will shrink
  1225. * to the desired size as pages become free.
  1226. *
  1227. * By placing pages into the surplus state independent of the
  1228. * overcommit value, we are allowing the surplus pool size to
  1229. * exceed overcommit. There are few sane options here. Since
  1230. * alloc_buddy_huge_page() is checking the global counter,
  1231. * though, we'll note that we're not allowed to exceed surplus
  1232. * and won't grow the pool anywhere else. Not until one of the
  1233. * sysctls are changed, or the surplus pages go out of use.
  1234. */
  1235. min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
  1236. min_count = max(count, min_count);
  1237. try_to_free_low(h, min_count, nodes_allowed);
  1238. while (min_count < persistent_huge_pages(h)) {
  1239. if (!free_pool_huge_page(h, nodes_allowed, 0))
  1240. break;
  1241. }
  1242. while (count < persistent_huge_pages(h)) {
  1243. if (!adjust_pool_surplus(h, nodes_allowed, 1))
  1244. break;
  1245. }
  1246. out:
  1247. ret = persistent_huge_pages(h);
  1248. spin_unlock(&hugetlb_lock);
  1249. return ret;
  1250. }
  1251. #define HSTATE_ATTR_RO(_name) \
  1252. static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
  1253. #define HSTATE_ATTR(_name) \
  1254. static struct kobj_attribute _name##_attr = \
  1255. __ATTR(_name, 0644, _name##_show, _name##_store)
  1256. static struct kobject *hugepages_kobj;
  1257. static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
  1258. static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
  1259. static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
  1260. {
  1261. int i;
  1262. for (i = 0; i < HUGE_MAX_HSTATE; i++)
  1263. if (hstate_kobjs[i] == kobj) {
  1264. if (nidp)
  1265. *nidp = NUMA_NO_NODE;
  1266. return &hstates[i];
  1267. }
  1268. return kobj_to_node_hstate(kobj, nidp);
  1269. }
  1270. static ssize_t nr_hugepages_show_common(struct kobject *kobj,
  1271. struct kobj_attribute *attr, char *buf)
  1272. {
  1273. struct hstate *h;
  1274. unsigned long nr_huge_pages;
  1275. int nid;
  1276. h = kobj_to_hstate(kobj, &nid);
  1277. if (nid == NUMA_NO_NODE)
  1278. nr_huge_pages = h->nr_huge_pages;
  1279. else
  1280. nr_huge_pages = h->nr_huge_pages_node[nid];
  1281. return sprintf(buf, "%lu\n", nr_huge_pages);
  1282. }
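/*
 * Common write handler for nr_hugepages and nr_hugepages_mempolicy.  When
 * obey_mempolicy is true, allocation and freeing are constrained to the
 * nodes of the calling task's mempolicy.
 */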
  1283. static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
  1284. struct kobject *kobj, struct kobj_attribute *attr,
  1285. const char *buf, size_t len)
  1286. {
  1287. int err;
  1288. int nid;
  1289. unsigned long count;
  1290. struct hstate *h;
  1291. NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
  1292. err = kstrtoul(buf, 10, &count);
  1293. if (err)
  1294. goto out;
  1295. h = kobj_to_hstate(kobj, &nid);
  1296. if (h->order >= MAX_ORDER) {
  1297. err = -EINVAL;
  1298. goto out;
  1299. }
  1300. if (nid == NUMA_NO_NODE) {
  1301. /*
  1302. * global hstate attribute
  1303. */
  1304. if (!(obey_mempolicy &&
  1305. init_nodemask_of_mempolicy(nodes_allowed))) {
  1306. NODEMASK_FREE(nodes_allowed);
  1307. nodes_allowed = &node_states[N_MEMORY];
  1308. }
  1309. } else if (nodes_allowed) {
  1310. /*
  1311. * per node hstate attribute: adjust count to global,
  1312. * but restrict alloc/free to the specified node.
  1313. */
  1314. count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
  1315. init_nodemask_of_node(nodes_allowed, nid);
  1316. } else
  1317. nodes_allowed = &node_states[N_MEMORY];
  1318. h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
  1319. if (nodes_allowed != &node_states[N_MEMORY])
  1320. NODEMASK_FREE(nodes_allowed);
  1321. return len;
  1322. out:
  1323. NODEMASK_FREE(nodes_allowed);
  1324. return err;
  1325. }
  1326. static ssize_t nr_hugepages_show(struct kobject *kobj,
  1327. struct kobj_attribute *attr, char *buf)
  1328. {
  1329. return nr_hugepages_show_common(kobj, attr, buf);
  1330. }
  1331. static ssize_t nr_hugepages_store(struct kobject *kobj,
  1332. struct kobj_attribute *attr, const char *buf, size_t len)
  1333. {
  1334. return nr_hugepages_store_common(false, kobj, attr, buf, len);
  1335. }
  1336. HSTATE_ATTR(nr_hugepages);
  1337. #ifdef CONFIG_NUMA
  1338. /*
  1339. * hstate attribute for optionally mempolicy-based constraint on persistent
  1340. * huge page alloc/free.
  1341. */
  1342. static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
  1343. struct kobj_attribute *attr, char *buf)
  1344. {
  1345. return nr_hugepages_show_common(kobj, attr, buf);
  1346. }
  1347. static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
  1348. struct kobj_attribute *attr, const char *buf, size_t len)
  1349. {
  1350. return nr_hugepages_store_common(true, kobj, attr, buf, len);
  1351. }
  1352. HSTATE_ATTR(nr_hugepages_mempolicy);
  1353. #endif
  1354. static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
  1355. struct kobj_attribute *attr, char *buf)
  1356. {
  1357. struct hstate *h = kobj_to_hstate(kobj, NULL);
  1358. return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
  1359. }
  1360. static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
  1361. struct kobj_attribute *attr, const char *buf, size_t count)
  1362. {
  1363. int err;
  1364. unsigned long input;
  1365. struct hstate *h = kobj_to_hstate(kobj, NULL);
  1366. if (h->order >= MAX_ORDER)
  1367. return -EINVAL;
  1368. err = kstrtoul(buf, 10, &input);
  1369. if (err)
  1370. return err;
  1371. spin_lock(&hugetlb_lock);
  1372. h->nr_overcommit_huge_pages = input;
  1373. spin_unlock(&hugetlb_lock);
  1374. return count;
  1375. }
  1376. HSTATE_ATTR(nr_overcommit_hugepages);
  1377. static ssize_t free_hugepages_show(struct kobject *kobj,
  1378. struct kobj_attribute *attr, char *buf)
  1379. {
  1380. struct hstate *h;
  1381. unsigned long free_huge_pages;
  1382. int nid;
  1383. h = kobj_to_hstate(kobj, &nid);
  1384. if (nid == NUMA_NO_NODE)
  1385. free_huge_pages = h->free_huge_pages;
  1386. else
  1387. free_huge_pages = h->free_huge_pages_node[nid];
  1388. return sprintf(buf, "%lu\n", free_huge_pages);
  1389. }
  1390. HSTATE_ATTR_RO(free_hugepages);
  1391. static ssize_t resv_hugepages_show(struct kobject *kobj,
  1392. struct kobj_attribute *attr, char *buf)
  1393. {
  1394. struct hstate *h = kobj_to_hstate(kobj, NULL);
  1395. return sprintf(buf, "%lu\n", h->resv_huge_pages);
  1396. }
  1397. HSTATE_ATTR_RO(resv_hugepages);
  1398. static ssize_t surplus_hugepages_show(struct kobject *kobj,
  1399. struct kobj_attribute *attr, char *buf)
  1400. {
  1401. struct hstate *h;
  1402. unsigned long surplus_huge_pages;
  1403. int nid;
  1404. h = kobj_to_hstate(kobj, &nid);
  1405. if (nid == NUMA_NO_NODE)
  1406. surplus_huge_pages = h->surplus_huge_pages;
  1407. else
  1408. surplus_huge_pages = h->surplus_huge_pages_node[nid];
  1409. return sprintf(buf, "%lu\n", surplus_huge_pages);
  1410. }
  1411. HSTATE_ATTR_RO(surplus_hugepages);
  1412. static struct attribute *hstate_attrs[] = {
  1413. &nr_hugepages_attr.attr,
  1414. &nr_overcommit_hugepages_attr.attr,
  1415. &free_hugepages_attr.attr,
  1416. &resv_hugepages_attr.attr,
  1417. &surplus_hugepages_attr.attr,
  1418. #ifdef CONFIG_NUMA
  1419. &nr_hugepages_mempolicy_attr.attr,
  1420. #endif
  1421. NULL,
  1422. };
  1423. static struct attribute_group hstate_attr_group = {
  1424. .attrs = hstate_attrs,
  1425. };
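/*
 * Create the per-hstate "hugepages-<size>kB" kobject under 'parent' and
 * attach the given attribute group to it.  Used both for the global
 * hugepages directory and for the per-node directories.
 */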
  1426. static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
  1427. struct kobject **hstate_kobjs,
  1428. struct attribute_group *hstate_attr_group)
  1429. {
  1430. int retval;
  1431. int hi = hstate_index(h);
  1432. hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
  1433. if (!hstate_kobjs[hi])
  1434. return -ENOMEM;
  1435. retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
  1436. if (retval)
  1437. kobject_put(hstate_kobjs[hi]);
  1438. return retval;
  1439. }
  1440. static void __init hugetlb_sysfs_init(void)
  1441. {
  1442. struct hstate *h;
  1443. int err;
  1444. hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
  1445. if (!hugepages_kobj)
  1446. return;
  1447. for_each_hstate(h) {
  1448. err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
  1449. hstate_kobjs, &hstate_attr_group);
  1450. if (err)
  1451. pr_err("Hugetlb: Unable to add hstate %s", h->name);
  1452. }
  1453. }
  1454. #ifdef CONFIG_NUMA
  1455. /*
  1456. * node_hstate/s - associate per node hstate attributes, via their kobjects,
  1457. * with node devices in node_devices[] using a parallel array. The array
  1458. * index of a node device or _hstate == node id.
  1459. * This is here to avoid any static dependency of the node device driver, in
  1460. * the base kernel, on the hugetlb module.
  1461. */
  1462. struct node_hstate {
  1463. struct kobject *hugepages_kobj;
  1464. struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
  1465. };
  1466. struct node_hstate node_hstates[MAX_NUMNODES];
  1467. /*
  1468. * A subset of global hstate attributes for node devices
  1469. */
  1470. static struct attribute *per_node_hstate_attrs[] = {
  1471. &nr_hugepages_attr.attr,
  1472. &free_hugepages_attr.attr,
  1473. &surplus_hugepages_attr.attr,
  1474. NULL,
  1475. };
  1476. static struct attribute_group per_node_hstate_attr_group = {
  1477. .attrs = per_node_hstate_attrs,
  1478. };
  1479. /*
  1480. * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
  1481. * Returns node id via non-NULL nidp.
  1482. */
  1483. static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
  1484. {
  1485. int nid;
  1486. for (nid = 0; nid < nr_node_ids; nid++) {
  1487. struct node_hstate *nhs = &node_hstates[nid];
  1488. int i;
  1489. for (i = 0; i < HUGE_MAX_HSTATE; i++)
  1490. if (nhs->hstate_kobjs[i] == kobj) {
  1491. if (nidp)
  1492. *nidp = nid;
  1493. return &hstates[i];
  1494. }
  1495. }
  1496. BUG();
  1497. return NULL;
  1498. }
  1499. /*
  1500. * Unregister hstate attributes from a single node device.
  1501. * No-op if no hstate attributes attached.
  1502. */
  1503. static void hugetlb_unregister_node(struct node *node)
  1504. {
  1505. struct hstate *h;
  1506. struct node_hstate *nhs = &node_hstates[node->dev.id];
  1507. if (!nhs->hugepages_kobj)
  1508. return; /* no hstate attributes */
  1509. for_each_hstate(h) {
  1510. int idx = hstate_index(h);
  1511. if (nhs->hstate_kobjs[idx]) {
  1512. kobject_put(nhs->hstate_kobjs[idx]);
  1513. nhs->hstate_kobjs[idx] = NULL;
  1514. }
  1515. }
  1516. kobject_put(nhs->hugepages_kobj);
  1517. nhs->hugepages_kobj = NULL;
  1518. }
  1519. /*
  1520. * hugetlb module exit: unregister hstate attributes from node devices
  1521. * that have them.
  1522. */
  1523. static void hugetlb_unregister_all_nodes(void)
  1524. {
  1525. int nid;
  1526. /*
  1527. * disable node device registrations.
  1528. */
  1529. register_hugetlbfs_with_node(NULL, NULL);
  1530. /*
  1531. * remove hstate attributes from any nodes that have them.
  1532. */
  1533. for (nid = 0; nid < nr_node_ids; nid++)
  1534. hugetlb_unregister_node(node_devices[nid]);
  1535. }
  1536. /*
  1537. * Register hstate attributes for a single node device.
  1538. * No-op if attributes already registered.
  1539. */
  1540. static void hugetlb_register_node(struct node *node)
  1541. {
  1542. struct hstate *h;
  1543. struct node_hstate *nhs = &node_hstates[node->dev.id];
  1544. int err;
  1545. if (nhs->hugepages_kobj)
  1546. return; /* already allocated */
  1547. nhs->hugepages_kobj = kobject_create_and_add("hugepages",
  1548. &node->dev.kobj);
  1549. if (!nhs->hugepages_kobj)
  1550. return;
  1551. for_each_hstate(h) {
  1552. err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
  1553. nhs->hstate_kobjs,
  1554. &per_node_hstate_attr_group);
  1555. if (err) {
  1556. pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
  1557. h->name, node->dev.id);
  1558. hugetlb_unregister_node(node);
  1559. break;
  1560. }
  1561. }
  1562. }
  1563. /*
  1564. * hugetlb init time: register hstate attributes for all registered node
  1565. * devices of nodes that have memory. All on-line nodes should have
  1566. * registered their associated device by this time.
  1567. */
  1568. static void hugetlb_register_all_nodes(void)
  1569. {
  1570. int nid;
  1571. for_each_node_state(nid, N_MEMORY) {
  1572. struct node *node = node_devices[nid];
  1573. if (node->dev.id == nid)
  1574. hugetlb_register_node(node);
  1575. }
  1576. /*
  1577. * Let the node device driver know we're here so it can
  1578. * [un]register hstate attributes on node hotplug.
  1579. */
  1580. register_hugetlbfs_with_node(hugetlb_register_node,
  1581. hugetlb_unregister_node);
  1582. }
  1583. #else /* !CONFIG_NUMA */
  1584. static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
  1585. {
  1586. BUG();
  1587. if (nidp)
  1588. *nidp = -1;
  1589. return NULL;
  1590. }
  1591. static void hugetlb_unregister_all_nodes(void) { }
  1592. static void hugetlb_register_all_nodes(void) { }
  1593. #endif
  1594. static void __exit hugetlb_exit(void)
  1595. {
  1596. struct hstate *h;
  1597. hugetlb_unregister_all_nodes();
  1598. for_each_hstate(h) {
  1599. kobject_put(hstate_kobjs[hstate_index(h)]);
  1600. }
  1601. kobject_put(hugepages_kobj);
  1602. }
  1603. module_exit(hugetlb_exit);
  1604. static int __init hugetlb_init(void)
  1605. {
1606. /* Some platforms decide whether they support huge pages at boot
1607. * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
1608. * there is no such support.
1609. */
  1610. if (HPAGE_SHIFT == 0)
  1611. return 0;
  1612. if (!size_to_hstate(default_hstate_size)) {
  1613. default_hstate_size = HPAGE_SIZE;
  1614. if (!size_to_hstate(default_hstate_size))
  1615. hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
  1616. }
  1617. default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
  1618. if (default_hstate_max_huge_pages)
  1619. default_hstate.max_huge_pages = default_hstate_max_huge_pages;
  1620. hugetlb_init_hstates();
  1621. gather_bootmem_prealloc();
  1622. report_hugepages();
  1623. hugetlb_sysfs_init();
  1624. hugetlb_register_all_nodes();
  1625. hugetlb_cgroup_file_init();
  1626. return 0;
  1627. }
  1628. module_init(hugetlb_init);
  1629. /* Should be called on processing a hugepagesz=... option */
  1630. void __init hugetlb_add_hstate(unsigned order)
  1631. {
  1632. struct hstate *h;
  1633. unsigned long i;
  1634. if (size_to_hstate(PAGE_SIZE << order)) {
  1635. pr_warning("hugepagesz= specified twice, ignoring\n");
  1636. return;
  1637. }
  1638. BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
  1639. BUG_ON(order == 0);
  1640. h = &hstates[hugetlb_max_hstate++];
  1641. h->order = order;
  1642. h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
  1643. h->nr_huge_pages = 0;
  1644. h->free_huge_pages = 0;
  1645. for (i = 0; i < MAX_NUMNODES; ++i)
  1646. INIT_LIST_HEAD(&h->hugepage_freelists[i]);
  1647. INIT_LIST_HEAD(&h->hugepage_activelist);
  1648. h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
  1649. h->next_nid_to_free = first_node(node_states[N_MEMORY]);
  1650. snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
  1651. huge_page_size(h)/1024);
  1652. parsed_hstate = h;
  1653. }
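/*
 * Handle a "hugepages=N" boot parameter.  The count applies to the most
 * recently parsed hugepagesz= hstate, or to the default hstate if no
 * hugepagesz= has been seen yet.
 */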
  1654. static int __init hugetlb_nrpages_setup(char *s)
  1655. {
  1656. unsigned long *mhp;
  1657. static unsigned long *last_mhp;
  1658. /*
  1659. * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
  1660. * so this hugepages= parameter goes to the "default hstate".
  1661. */
  1662. if (!hugetlb_max_hstate)
  1663. mhp = &default_hstate_max_huge_pages;
  1664. else
  1665. mhp = &parsed_hstate->max_huge_pages;
  1666. if (mhp == last_mhp) {
  1667. pr_warning("hugepages= specified twice without "
  1668. "interleaving hugepagesz=, ignoring\n");
  1669. return 1;
  1670. }
  1671. if (sscanf(s, "%lu", mhp) <= 0)
  1672. *mhp = 0;
  1673. /*
  1674. * Global state is always initialized later in hugetlb_init.
  1675. * But we need to allocate >= MAX_ORDER hstates here early to still
  1676. * use the bootmem allocator.
  1677. */
  1678. if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
  1679. hugetlb_hstate_alloc_pages(parsed_hstate);
  1680. last_mhp = mhp;
  1681. return 1;
  1682. }
  1683. __setup("hugepages=", hugetlb_nrpages_setup);
  1684. static int __init hugetlb_default_setup(char *s)
  1685. {
  1686. default_hstate_size = memparse(s, &s);
  1687. return 1;
  1688. }
  1689. __setup("default_hugepagesz=", hugetlb_default_setup);
  1690. static unsigned int cpuset_mems_nr(unsigned int *array)
  1691. {
  1692. int node;
  1693. unsigned int nr = 0;
  1694. for_each_node_mask(node, cpuset_current_mems_allowed)
  1695. nr += array[node];
  1696. return nr;
  1697. }
  1698. #ifdef CONFIG_SYSCTL
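/*
 * Common handler behind the nr_hugepages and nr_hugepages_mempolicy
 * sysctls: reads report max_huge_pages of the default hstate, writes
 * resize its pool via set_max_huge_pages().
 */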
  1699. static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
  1700. struct ctl_table *table, int write,
  1701. void __user *buffer, size_t *length, loff_t *ppos)
  1702. {
  1703. struct hstate *h = &default_hstate;
  1704. unsigned long tmp;
  1705. int ret;
  1706. tmp = h->max_huge_pages;
  1707. if (write && h->order >= MAX_ORDER)
  1708. return -EINVAL;
  1709. table->data = &tmp;
  1710. table->maxlen = sizeof(unsigned long);
  1711. ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
  1712. if (ret)
  1713. goto out;
  1714. if (write) {
  1715. NODEMASK_ALLOC(nodemask_t, nodes_allowed,
  1716. GFP_KERNEL | __GFP_NORETRY);
  1717. if (!(obey_mempolicy &&
  1718. init_nodemask_of_mempolicy(nodes_allowed))) {
  1719. NODEMASK_FREE(nodes_allowed);
  1720. nodes_allowed = &node_states[N_MEMORY];
  1721. }
  1722. h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
  1723. if (nodes_allowed != &node_states[N_MEMORY])
  1724. NODEMASK_FREE(nodes_allowed);
  1725. }
  1726. out:
  1727. return ret;
  1728. }
  1729. int hugetlb_sysctl_handler(struct ctl_table *table, int write,
  1730. void __user *buffer, size_t *length, loff_t *ppos)
  1731. {
  1732. return hugetlb_sysctl_handler_common(false, table, write,
  1733. buffer, length, ppos);
  1734. }
  1735. #ifdef CONFIG_NUMA
  1736. int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
  1737. void __user *buffer, size_t *length, loff_t *ppos)
  1738. {
  1739. return hugetlb_sysctl_handler_common(true, table, write,
  1740. buffer, length, ppos);
  1741. }
  1742. #endif /* CONFIG_NUMA */
  1743. int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
  1744. void __user *buffer,
  1745. size_t *length, loff_t *ppos)
  1746. {
  1747. proc_dointvec(table, write, buffer, length, ppos);
  1748. if (hugepages_treat_as_movable)
  1749. htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
  1750. else
  1751. htlb_alloc_mask = GFP_HIGHUSER;
  1752. return 0;
  1753. }
  1754. int hugetlb_overcommit_handler(struct ctl_table *table, int write,
  1755. void __user *buffer,
  1756. size_t *length, loff_t *ppos)
  1757. {
  1758. struct hstate *h = &default_hstate;
  1759. unsigned long tmp;
  1760. int ret;
  1761. tmp = h->nr_overcommit_huge_pages;
  1762. if (write && h->order >= MAX_ORDER)
  1763. return -EINVAL;
  1764. table->data = &tmp;
  1765. table->maxlen = sizeof(unsigned long);
  1766. ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
  1767. if (ret)
  1768. goto out;
  1769. if (write) {
  1770. spin_lock(&hugetlb_lock);
  1771. h->nr_overcommit_huge_pages = tmp;
  1772. spin_unlock(&hugetlb_lock);
  1773. }
  1774. out:
  1775. return ret;
  1776. }
  1777. #endif /* CONFIG_SYSCTL */
  1778. void hugetlb_report_meminfo(struct seq_file *m)
  1779. {
  1780. struct hstate *h = &default_hstate;
  1781. seq_printf(m,
  1782. "HugePages_Total: %5lu\n"
  1783. "HugePages_Free: %5lu\n"
  1784. "HugePages_Rsvd: %5lu\n"
  1785. "HugePages_Surp: %5lu\n"
  1786. "Hugepagesize: %8lu kB\n",
  1787. h->nr_huge_pages,
  1788. h->free_huge_pages,
  1789. h->resv_huge_pages,
  1790. h->surplus_huge_pages,
  1791. 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
  1792. }
  1793. int hugetlb_report_node_meminfo(int nid, char *buf)
  1794. {
  1795. struct hstate *h = &default_hstate;
  1796. return sprintf(buf,
  1797. "Node %d HugePages_Total: %5u\n"
  1798. "Node %d HugePages_Free: %5u\n"
  1799. "Node %d HugePages_Surp: %5u\n",
  1800. nid, h->nr_huge_pages_node[nid],
  1801. nid, h->free_huge_pages_node[nid],
  1802. nid, h->surplus_huge_pages_node[nid]);
  1803. }
  1804. void hugetlb_show_meminfo(void)
  1805. {
  1806. struct hstate *h;
  1807. int nid;
  1808. for_each_node_state(nid, N_MEMORY)
  1809. for_each_hstate(h)
  1810. pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
  1811. nid,
  1812. h->nr_huge_pages_node[nid],
  1813. h->free_huge_pages_node[nid],
  1814. h->surplus_huge_pages_node[nid],
  1815. 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
  1816. }
1817. /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
  1818. unsigned long hugetlb_total_pages(void)
  1819. {
  1820. struct hstate *h;
  1821. unsigned long nr_total_pages = 0;
  1822. for_each_hstate(h)
  1823. nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
  1824. return nr_total_pages;
  1825. }
  1826. static int hugetlb_acct_memory(struct hstate *h, long delta)
  1827. {
  1828. int ret = -ENOMEM;
  1829. spin_lock(&hugetlb_lock);
  1830. /*
  1831. * When cpuset is configured, it breaks the strict hugetlb page
  1832. * reservation as the accounting is done on a global variable. Such
  1833. * reservation is completely rubbish in the presence of cpuset because
  1834. * the reservation is not checked against page availability for the
1835. * current cpuset. The application can still be OOM-killed by the
1836. * kernel for lack of free hugetlb pages in the cpuset that the task is in.
1837. * Attempting to enforce strict accounting with cpusets is almost
1838. * impossible (or too ugly) because cpusets are so fluid that a
1839. * task or memory node can be dynamically moved between cpusets.
1840. *
1841. * Changing the semantics of shared hugetlb mappings with cpusets is
1842. * undesirable. However, in order to preserve some of the semantics,
1843. * we fall back to checking against current free page availability as
1844. * a best-effort attempt, hopefully minimizing the impact of the
1845. * change in semantics that cpusets bring.
  1846. */
  1847. if (delta > 0) {
  1848. if (gather_surplus_pages(h, delta) < 0)
  1849. goto out;
  1850. if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
  1851. return_unused_surplus_pages(h, delta);
  1852. goto out;
  1853. }
  1854. }
  1855. ret = 0;
  1856. if (delta < 0)
  1857. return_unused_surplus_pages(h, (unsigned long) -delta);
  1858. out:
  1859. spin_unlock(&hugetlb_lock);
  1860. return ret;
  1861. }
  1862. static void hugetlb_vm_op_open(struct vm_area_struct *vma)
  1863. {
  1864. struct resv_map *reservations = vma_resv_map(vma);
  1865. /*
1866. * This new VMA should share its sibling's reservation map if present.
  1867. * The VMA will only ever have a valid reservation map pointer where
  1868. * it is being copied for another still existing VMA. As that VMA
  1869. * has a reference to the reservation map it cannot disappear until
  1870. * after this open call completes. It is therefore safe to take a
  1871. * new reference here without additional locking.
  1872. */
  1873. if (reservations)
  1874. kref_get(&reservations->refs);
  1875. }
  1876. static void resv_map_put(struct vm_area_struct *vma)
  1877. {
  1878. struct resv_map *reservations = vma_resv_map(vma);
  1879. if (!reservations)
  1880. return;
  1881. kref_put(&reservations->refs, resv_map_release);
  1882. }
  1883. static void hugetlb_vm_op_close(struct vm_area_struct *vma)
  1884. {
  1885. struct hstate *h = hstate_vma(vma);
  1886. struct resv_map *reservations = vma_resv_map(vma);
  1887. struct hugepage_subpool *spool = subpool_vma(vma);
  1888. unsigned long reserve;
  1889. unsigned long start;
  1890. unsigned long end;
  1891. if (reservations) {
  1892. start = vma_hugecache_offset(h, vma, vma->vm_start);
  1893. end = vma_hugecache_offset(h, vma, vma->vm_end);
  1894. reserve = (end - start) -
  1895. region_count(&reservations->regions, start, end);
  1896. resv_map_put(vma);
  1897. if (reserve) {
  1898. hugetlb_acct_memory(h, -reserve);
  1899. hugepage_subpool_put_pages(spool, reserve);
  1900. }
  1901. }
  1902. }
  1903. /*
  1904. * We cannot handle pagefaults against hugetlb pages at all. They cause
  1905. * handle_mm_fault() to try to instantiate regular-sized pages in the
1906. * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
  1907. * this far.
  1908. */
  1909. static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  1910. {
  1911. BUG();
  1912. return 0;
  1913. }
  1914. const struct vm_operations_struct hugetlb_vm_ops = {
  1915. .fault = hugetlb_vm_op_fault,
  1916. .open = hugetlb_vm_op_open,
  1917. .close = hugetlb_vm_op_close,
  1918. };
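/*
 * Build a huge PTE for 'page' with protections taken from the VMA:
 * writable mappings get a dirty, writable entry, read-only mappings a
 * write-protected one.
 */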
  1919. static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
  1920. int writable)
  1921. {
  1922. pte_t entry;
  1923. if (writable) {
  1924. entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
  1925. vma->vm_page_prot)));
  1926. } else {
  1927. entry = huge_pte_wrprotect(mk_huge_pte(page,
  1928. vma->vm_page_prot));
  1929. }
  1930. entry = pte_mkyoung(entry);
  1931. entry = pte_mkhuge(entry);
  1932. entry = arch_make_huge_pte(entry, vma, page, writable);
  1933. return entry;
  1934. }
  1935. static void set_huge_ptep_writable(struct vm_area_struct *vma,
  1936. unsigned long address, pte_t *ptep)
  1937. {
  1938. pte_t entry;
  1939. entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
  1940. if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
  1941. update_mmu_cache(vma, address, ptep);
  1942. }
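/*
 * Copy the hugetlb PTEs of 'vma' from the source mm to the destination mm
 * at fork time.  For private writable mappings the source entries are
 * write-protected so that later writes take the COW path.
 */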
  1943. int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
  1944. struct vm_area_struct *vma)
  1945. {
  1946. pte_t *src_pte, *dst_pte, entry;
  1947. struct page *ptepage;
  1948. unsigned long addr;
  1949. int cow;
  1950. struct hstate *h = hstate_vma(vma);
  1951. unsigned long sz = huge_page_size(h);
  1952. cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
  1953. for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
  1954. src_pte = huge_pte_offset(src, addr);
  1955. if (!src_pte)
  1956. continue;
  1957. dst_pte = huge_pte_alloc(dst, addr, sz);
  1958. if (!dst_pte)
  1959. goto nomem;
  1960. /* If the pagetables are shared don't copy or take references */
  1961. if (dst_pte == src_pte)
  1962. continue;
  1963. spin_lock(&dst->page_table_lock);
  1964. spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
  1965. if (!huge_pte_none(huge_ptep_get(src_pte))) {
  1966. if (cow)
  1967. huge_ptep_set_wrprotect(src, addr, src_pte);
  1968. entry = huge_ptep_get(src_pte);
  1969. ptepage = pte_page(entry);
  1970. get_page(ptepage);
  1971. page_dup_rmap(ptepage);
  1972. set_huge_pte_at(dst, addr, dst_pte, entry);
  1973. }
  1974. spin_unlock(&src->page_table_lock);
  1975. spin_unlock(&dst->page_table_lock);
  1976. }
  1977. return 0;
  1978. nomem:
  1979. return -ENOMEM;
  1980. }
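/* Return 1 if the huge PTE is a non-present migration swap entry. */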
  1981. static int is_hugetlb_entry_migration(pte_t pte)
  1982. {
  1983. swp_entry_t swp;
  1984. if (huge_pte_none(pte) || pte_present(pte))
  1985. return 0;
  1986. swp = pte_to_swp_entry(pte);
  1987. if (non_swap_entry(swp) && is_migration_entry(swp))
  1988. return 1;
  1989. else
  1990. return 0;
  1991. }
  1992. static int is_hugetlb_entry_hwpoisoned(pte_t pte)
  1993. {
  1994. swp_entry_t swp;
  1995. if (huge_pte_none(pte) || pte_present(pte))
  1996. return 0;
  1997. swp = pte_to_swp_entry(pte);
  1998. if (non_swap_entry(swp) && is_hwpoison_entry(swp))
  1999. return 1;
  2000. else
  2001. return 0;
  2002. }
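/*
 * Tear down hugetlb mappings in [start, end).  If ref_page is supplied,
 * only that page is unmapped and the VMA is flagged so that future faults
 * fail rather than looking like data was lost.
 */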
  2003. void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
  2004. unsigned long start, unsigned long end,
  2005. struct page *ref_page)
  2006. {
  2007. int force_flush = 0;
  2008. struct mm_struct *mm = vma->vm_mm;
  2009. unsigned long address;
  2010. pte_t *ptep;
  2011. pte_t pte;
  2012. struct page *page;
  2013. struct hstate *h = hstate_vma(vma);
  2014. unsigned long sz = huge_page_size(h);
  2015. const unsigned long mmun_start = start; /* For mmu_notifiers */
  2016. const unsigned long mmun_end = end; /* For mmu_notifiers */
  2017. WARN_ON(!is_vm_hugetlb_page(vma));
  2018. BUG_ON(start & ~huge_page_mask(h));
  2019. BUG_ON(end & ~huge_page_mask(h));
  2020. tlb_start_vma(tlb, vma);
  2021. mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
  2022. again:
  2023. spin_lock(&mm->page_table_lock);
  2024. for (address = start; address < end; address += sz) {
  2025. ptep = huge_pte_offset(mm, address);
  2026. if (!ptep)
  2027. continue;
  2028. if (huge_pmd_unshare(mm, &address, ptep))
  2029. continue;
  2030. pte = huge_ptep_get(ptep);
  2031. if (huge_pte_none(pte))
  2032. continue;
  2033. /*
2034. * A HWPoisoned hugepage is already unmapped and its reference dropped
  2035. */
  2036. if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
  2037. huge_pte_clear(mm, address, ptep);
  2038. continue;
  2039. }
  2040. page = pte_page(pte);
  2041. /*
  2042. * If a reference page is supplied, it is because a specific
  2043. * page is being unmapped, not a range. Ensure the page we
  2044. * are about to unmap is the actual page of interest.
  2045. */
  2046. if (ref_page) {
  2047. if (page != ref_page)
  2048. continue;
  2049. /*
  2050. * Mark the VMA as having unmapped its page so that
  2051. * future faults in this VMA will fail rather than
  2052. * looking like data was lost
  2053. */
  2054. set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
  2055. }
  2056. pte = huge_ptep_get_and_clear(mm, address, ptep);
  2057. tlb_remove_tlb_entry(tlb, ptep, address);
  2058. if (huge_pte_dirty(pte))
  2059. set_page_dirty(page);
  2060. page_remove_rmap(page);
  2061. force_flush = !__tlb_remove_page(tlb, page);
  2062. if (force_flush)
  2063. break;
  2064. /* Bail out after unmapping reference page if supplied */
  2065. if (ref_page)
  2066. break;
  2067. }
  2068. spin_unlock(&mm->page_table_lock);
  2069. /*
2070. * mmu_gather ran out of room to batch pages, so we break out of
2071. * the PTE lock to avoid doing the potentially expensive TLB invalidate
  2072. * and page-free while holding it.
  2073. */
  2074. if (force_flush) {
  2075. force_flush = 0;
  2076. tlb_flush_mmu(tlb);
  2077. if (address < end && !ref_page)
  2078. goto again;
  2079. }
  2080. mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
  2081. tlb_end_vma(tlb, vma);
  2082. }
  2083. void __unmap_hugepage_range_final(struct mmu_gather *tlb,
  2084. struct vm_area_struct *vma, unsigned long start,
  2085. unsigned long end, struct page *ref_page)
  2086. {
  2087. __unmap_hugepage_range(tlb, vma, start, end, ref_page);
  2088. /*
  2089. * Clear this flag so that x86's huge_pmd_share page_table_shareable
  2090. * test will fail on a vma being torn down, and not grab a page table
  2091. * on its way out. We're lucky that the flag has such an appropriate
  2092. * name, and can in fact be safely cleared here. We could clear it
  2093. * before the __unmap_hugepage_range above, but all that's necessary
  2094. * is to clear it before releasing the i_mmap_mutex. This works
  2095. * because in the context this is called, the VMA is about to be
  2096. * destroyed and the i_mmap_mutex is held.
  2097. */
  2098. vma->vm_flags &= ~VM_MAYSHARE;
  2099. }
  2100. void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
  2101. unsigned long end, struct page *ref_page)
  2102. {
  2103. struct mm_struct *mm;
  2104. struct mmu_gather tlb;
  2105. mm = vma->vm_mm;
  2106. tlb_gather_mmu(&tlb, mm, start, end);
  2107. __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
  2108. tlb_finish_mmu(&tlb, start, end);
  2109. }
  2110. /*
2111. * This is called when the original mapper fails to COW a MAP_PRIVATE
2112. * mapping it owns the reserve page for. The intention is to unmap the page
  2113. * from other VMAs and let the children be SIGKILLed if they are faulting the
  2114. * same region.
  2115. */
  2116. static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
  2117. struct page *page, unsigned long address)
  2118. {
  2119. struct hstate *h = hstate_vma(vma);
  2120. struct vm_area_struct *iter_vma;
  2121. struct address_space *mapping;
  2122. pgoff_t pgoff;
  2123. /*
  2124. * vm_pgoff is in PAGE_SIZE units, hence the different calculation
  2125. * from page cache lookup which is in HPAGE_SIZE units.
  2126. */
  2127. address = address & huge_page_mask(h);
  2128. pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
  2129. vma->vm_pgoff;
  2130. mapping = file_inode(vma->vm_file)->i_mapping;
  2131. /*
  2132. * Take the mapping lock for the duration of the table walk. As
  2133. * this mapping should be shared between all the VMAs,
  2134. * __unmap_hugepage_range() is called as the lock is already held
  2135. */
  2136. mutex_lock(&mapping->i_mmap_mutex);
  2137. vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
  2138. /* Do not unmap the current VMA */
  2139. if (iter_vma == vma)
  2140. continue;
  2141. /*
  2142. * Unmap the page from other VMAs without their own reserves.
  2143. * They get marked to be SIGKILLed if they fault in these
  2144. * areas. This is because a future no-page fault on this VMA
  2145. * could insert a zeroed page instead of the data existing
  2146. * from the time of fork. This would look like data corruption
  2147. */
  2148. if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
  2149. unmap_hugepage_range(iter_vma, address,
  2150. address + huge_page_size(h), page);
  2151. }
  2152. mutex_unlock(&mapping->i_mmap_mutex);
  2153. return 1;
  2154. }
  2155. /*
2156. * hugetlb_cow() should be called with the page lock of the original hugepage held.
  2157. * Called with hugetlb_instantiation_mutex held and pte_page locked so we
  2158. * cannot race with other handlers or page migration.
  2159. * Keep the pte_same checks anyway to make transition from the mutex easier.
  2160. */
  2161. static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
  2162. unsigned long address, pte_t *ptep, pte_t pte,
  2163. struct page *pagecache_page)
  2164. {
  2165. struct hstate *h = hstate_vma(vma);
  2166. struct page *old_page, *new_page;
  2167. int outside_reserve = 0;
  2168. unsigned long mmun_start; /* For mmu_notifiers */
  2169. unsigned long mmun_end; /* For mmu_notifiers */
  2170. old_page = pte_page(pte);
  2171. retry_avoidcopy:
  2172. /* If no-one else is actually using this page, avoid the copy
  2173. * and just make the page writable */
  2174. if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
  2175. page_move_anon_rmap(old_page, vma, address);
  2176. set_huge_ptep_writable(vma, address, ptep);
  2177. return 0;
  2178. }
  2179. /*
  2180. * If the process that created a MAP_PRIVATE mapping is about to
  2181. * perform a COW due to a shared page count, attempt to satisfy
  2182. * the allocation without using the existing reserves. The pagecache
  2183. * page is used to determine if the reserve at this address was
  2184. * consumed or not. If reserves were used, a partial faulted mapping
  2185. * at the time of fork() could consume its reserves on COW instead
  2186. * of the full address range.
  2187. */
  2188. if (!(vma->vm_flags & VM_MAYSHARE) &&
  2189. is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
  2190. old_page != pagecache_page)
  2191. outside_reserve = 1;
  2192. page_cache_get(old_page);
  2193. /* Drop page_table_lock as buddy allocator may be called */
  2194. spin_unlock(&mm->page_table_lock);
  2195. new_page = alloc_huge_page(vma, address, outside_reserve);
  2196. if (IS_ERR(new_page)) {
  2197. long err = PTR_ERR(new_page);
  2198. page_cache_release(old_page);
  2199. /*
  2200. * If a process owning a MAP_PRIVATE mapping fails to COW,
  2201. * it is due to references held by a child and an insufficient
2202. * huge page pool. To guarantee the original mapper's
2203. * reliability, unmap the page from child processes. The child
  2204. * may get SIGKILLed if it later faults.
  2205. */
  2206. if (outside_reserve) {
  2207. BUG_ON(huge_pte_none(pte));
  2208. if (unmap_ref_private(mm, vma, old_page, address)) {
  2209. BUG_ON(huge_pte_none(pte));
  2210. spin_lock(&mm->page_table_lock);
  2211. ptep = huge_pte_offset(mm, address & huge_page_mask(h));
  2212. if (likely(pte_same(huge_ptep_get(ptep), pte)))
  2213. goto retry_avoidcopy;
  2214. /*
2215. * A race occurred while re-acquiring the page_table_lock, and
  2216. * our job is done.
  2217. */
  2218. return 0;
  2219. }
  2220. WARN_ON_ONCE(1);
  2221. }
  2222. /* Caller expects lock to be held */
  2223. spin_lock(&mm->page_table_lock);
  2224. if (err == -ENOMEM)
  2225. return VM_FAULT_OOM;
  2226. else
  2227. return VM_FAULT_SIGBUS;
  2228. }
  2229. /*
  2230. * When the original hugepage is shared one, it does not have
  2231. * anon_vma prepared.
  2232. */
  2233. if (unlikely(anon_vma_prepare(vma))) {
  2234. page_cache_release(new_page);
  2235. page_cache_release(old_page);
  2236. /* Caller expects lock to be held */
  2237. spin_lock(&mm->page_table_lock);
  2238. return VM_FAULT_OOM;
  2239. }
  2240. copy_user_huge_page(new_page, old_page, address, vma,
  2241. pages_per_huge_page(h));
  2242. __SetPageUptodate(new_page);
  2243. mmun_start = address & huge_page_mask(h);
  2244. mmun_end = mmun_start + huge_page_size(h);
  2245. mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
  2246. /*
  2247. * Retake the page_table_lock to check for racing updates
  2248. * before the page tables are altered
  2249. */
  2250. spin_lock(&mm->page_table_lock);
  2251. ptep = huge_pte_offset(mm, address & huge_page_mask(h));
  2252. if (likely(pte_same(huge_ptep_get(ptep), pte))) {
  2253. /* Break COW */
  2254. huge_ptep_clear_flush(vma, address, ptep);
  2255. set_huge_pte_at(mm, address, ptep,
  2256. make_huge_pte(vma, new_page, 1));
  2257. page_remove_rmap(old_page);
  2258. hugepage_add_new_anon_rmap(new_page, vma, address);
  2259. /* Make the old page be freed below */
  2260. new_page = old_page;
  2261. }
  2262. spin_unlock(&mm->page_table_lock);
  2263. mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
  2264. /* Caller expects lock to be held */
  2265. spin_lock(&mm->page_table_lock);
  2266. page_cache_release(new_page);
  2267. page_cache_release(old_page);
  2268. return 0;
  2269. }
  2270. /* Return the pagecache page at a given address within a VMA */
  2271. static struct page *hugetlbfs_pagecache_page(struct hstate *h,
  2272. struct vm_area_struct *vma, unsigned long address)
  2273. {
  2274. struct address_space *mapping;
  2275. pgoff_t idx;
  2276. mapping = vma->vm_file->f_mapping;
  2277. idx = vma_hugecache_offset(h, vma, address);
  2278. return find_lock_page(mapping, idx);
  2279. }
  2280. /*
  2281. * Return whether there is a pagecache page to back given address within VMA.
  2282. * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
  2283. */
  2284. static bool hugetlbfs_pagecache_present(struct hstate *h,
  2285. struct vm_area_struct *vma, unsigned long address)
  2286. {
  2287. struct address_space *mapping;
  2288. pgoff_t idx;
  2289. struct page *page;
  2290. mapping = vma->vm_file->f_mapping;
  2291. idx = vma_hugecache_offset(h, vma, address);
  2292. page = find_get_page(mapping, idx);
  2293. if (page)
  2294. put_page(page);
  2295. return page != NULL;
  2296. }
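/*
 * Handle a fault on a huge PTE that is not yet present: find or allocate
 * the backing huge page, add it to the page cache (shared) or anon rmap
 * (private), and install the new PTE.
 */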
  2297. static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
  2298. unsigned long address, pte_t *ptep, unsigned int flags)
  2299. {
  2300. struct hstate *h = hstate_vma(vma);
  2301. int ret = VM_FAULT_SIGBUS;
  2302. int anon_rmap = 0;
  2303. pgoff_t idx;
  2304. unsigned long size;
  2305. struct page *page;
  2306. struct address_space *mapping;
  2307. pte_t new_pte;
  2308. /*
  2309. * Currently, we are forced to kill the process in the event the
  2310. * original mapper has unmapped pages from the child due to a failed
  2311. * COW. Warn that such a situation has occurred as it may not be obvious
  2312. */
  2313. if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
  2314. pr_warning("PID %d killed due to inadequate hugepage pool\n",
  2315. current->pid);
  2316. return ret;
  2317. }
  2318. mapping = vma->vm_file->f_mapping;
  2319. idx = vma_hugecache_offset(h, vma, address);
  2320. /*
  2321. * Use page lock to guard against racing truncation
  2322. * before we get page_table_lock.
  2323. */
  2324. retry:
  2325. page = find_lock_page(mapping, idx);
  2326. if (!page) {
  2327. size = i_size_read(mapping->host) >> huge_page_shift(h);
  2328. if (idx >= size)
  2329. goto out;
  2330. page = alloc_huge_page(vma, address, 0);
  2331. if (IS_ERR(page)) {
  2332. ret = PTR_ERR(page);
  2333. if (ret == -ENOMEM)
  2334. ret = VM_FAULT_OOM;
  2335. else
  2336. ret = VM_FAULT_SIGBUS;
  2337. goto out;
  2338. }
  2339. clear_huge_page(page, address, pages_per_huge_page(h));
  2340. __SetPageUptodate(page);
  2341. if (vma->vm_flags & VM_MAYSHARE) {
  2342. int err;
  2343. struct inode *inode = mapping->host;
  2344. err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
  2345. if (err) {
  2346. put_page(page);
  2347. if (err == -EEXIST)
  2348. goto retry;
  2349. goto out;
  2350. }
  2351. spin_lock(&inode->i_lock);
  2352. inode->i_blocks += blocks_per_huge_page(h);
  2353. spin_unlock(&inode->i_lock);
  2354. } else {
  2355. lock_page(page);
  2356. if (unlikely(anon_vma_prepare(vma))) {
  2357. ret = VM_FAULT_OOM;
  2358. goto backout_unlocked;
  2359. }
  2360. anon_rmap = 1;
  2361. }
  2362. } else {
  2363. /*
2364. * If a memory error occurs between mmap() and fault, some processes
2365. * don't have a hwpoisoned swap entry for the errored virtual address.
2366. * So we need to block the hugepage fault via the PG_hwpoison bit check.
  2367. */
  2368. if (unlikely(PageHWPoison(page))) {
  2369. ret = VM_FAULT_HWPOISON |
  2370. VM_FAULT_SET_HINDEX(hstate_index(h));
  2371. goto backout_unlocked;
  2372. }
  2373. }
  2374. /*
  2375. * If we are going to COW a private mapping later, we examine the
  2376. * pending reservations for this page now. This will ensure that
  2377. * any allocations necessary to record that reservation occur outside
  2378. * the spinlock.
  2379. */
  2380. if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
  2381. if (vma_needs_reservation(h, vma, address) < 0) {
  2382. ret = VM_FAULT_OOM;
  2383. goto backout_unlocked;
  2384. }
  2385. spin_lock(&mm->page_table_lock);
  2386. size = i_size_read(mapping->host) >> huge_page_shift(h);
  2387. if (idx >= size)
  2388. goto backout;
  2389. ret = 0;
  2390. if (!huge_pte_none(huge_ptep_get(ptep)))
  2391. goto backout;
  2392. if (anon_rmap)
  2393. hugepage_add_new_anon_rmap(page, vma, address);
  2394. else
  2395. page_dup_rmap(page);
  2396. new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
  2397. && (vma->vm_flags & VM_SHARED)));
  2398. set_huge_pte_at(mm, address, ptep, new_pte);
  2399. if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
  2400. /* Optimization, do the COW without a second fault */
  2401. ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
  2402. }
  2403. spin_unlock(&mm->page_table_lock);
  2404. unlock_page(page);
  2405. out:
  2406. return ret;
  2407. backout:
  2408. spin_unlock(&mm->page_table_lock);
  2409. backout_unlocked:
  2410. unlock_page(page);
  2411. put_page(page);
  2412. goto out;
  2413. }
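/*
 * Top-level hugetlb fault handler.  Faults are serialized by
 * hugetlb_instantiation_mutex; migration and hwpoison entries are handled
 * up front, missing pages go through hugetlb_no_page(), and write faults
 * on read-only entries go through hugetlb_cow().
 */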
  2414. int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  2415. unsigned long address, unsigned int flags)
  2416. {
  2417. pte_t *ptep;
  2418. pte_t entry;
  2419. int ret;
  2420. struct page *page = NULL;
  2421. struct page *pagecache_page = NULL;
  2422. static DEFINE_MUTEX(hugetlb_instantiation_mutex);
  2423. struct hstate *h = hstate_vma(vma);
  2424. address &= huge_page_mask(h);
  2425. ptep = huge_pte_offset(mm, address);
  2426. if (ptep) {
  2427. entry = huge_ptep_get(ptep);
  2428. if (unlikely(is_hugetlb_entry_migration(entry))) {
  2429. migration_entry_wait_huge(mm, ptep);
  2430. return 0;
  2431. } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
  2432. return VM_FAULT_HWPOISON_LARGE |
  2433. VM_FAULT_SET_HINDEX(hstate_index(h));
  2434. }
  2435. ptep = huge_pte_alloc(mm, address, huge_page_size(h));
  2436. if (!ptep)
  2437. return VM_FAULT_OOM;
  2438. /*
  2439. * Serialize hugepage allocation and instantiation, so that we don't
  2440. * get spurious allocation failures if two CPUs race to instantiate
  2441. * the same page in the page cache.
  2442. */
  2443. mutex_lock(&hugetlb_instantiation_mutex);
  2444. entry = huge_ptep_get(ptep);
  2445. if (huge_pte_none(entry)) {
  2446. ret = hugetlb_no_page(mm, vma, address, ptep, flags);
  2447. goto out_mutex;
  2448. }
  2449. ret = 0;
  2450. /*
  2451. * If we are going to COW the mapping later, we examine the pending
  2452. * reservations for this page now. This will ensure that any
  2453. * allocations necessary to record that reservation occur outside the
  2454. * spinlock. For private mappings, we also lookup the pagecache
  2455. * page now as it is used to determine if a reservation has been
  2456. * consumed.
  2457. */
  2458. if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
  2459. if (vma_needs_reservation(h, vma, address) < 0) {
  2460. ret = VM_FAULT_OOM;
  2461. goto out_mutex;
  2462. }
  2463. if (!(vma->vm_flags & VM_MAYSHARE))
  2464. pagecache_page = hugetlbfs_pagecache_page(h,
  2465. vma, address);
  2466. }
  2467. /*
  2468. * hugetlb_cow() requires page locks of pte_page(entry) and
2469. * pagecache_page, so here we need to take the former one
2470. * when page != pagecache_page or !pagecache_page.
  2471. * Note that locking order is always pagecache_page -> page,
  2472. * so no worry about deadlock.
  2473. */
  2474. page = pte_page(entry);
  2475. get_page(page);
  2476. if (page != pagecache_page)
  2477. lock_page(page);
  2478. spin_lock(&mm->page_table_lock);
  2479. /* Check for a racing update before calling hugetlb_cow */
  2480. if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
  2481. goto out_page_table_lock;
  2482. if (flags & FAULT_FLAG_WRITE) {
  2483. if (!huge_pte_write(entry)) {
  2484. ret = hugetlb_cow(mm, vma, address, ptep, entry,
  2485. pagecache_page);
  2486. goto out_page_table_lock;
  2487. }
  2488. entry = huge_pte_mkdirty(entry);
  2489. }
  2490. entry = pte_mkyoung(entry);
  2491. if (huge_ptep_set_access_flags(vma, address, ptep, entry,
  2492. flags & FAULT_FLAG_WRITE))
  2493. update_mmu_cache(vma, address, ptep);
  2494. out_page_table_lock:
  2495. spin_unlock(&mm->page_table_lock);
  2496. if (pagecache_page) {
  2497. unlock_page(pagecache_page);
  2498. put_page(pagecache_page);
  2499. }
  2500. if (page != pagecache_page)
  2501. unlock_page(page);
  2502. put_page(page);
  2503. out_mutex:
  2504. mutex_unlock(&hugetlb_instantiation_mutex);
  2505. return ret;
  2506. }
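/*
 * get_user_pages() helper for hugetlb VMAs: walk the requested range,
 * faulting pages in as needed, and fill 'pages'/'vmas' one PAGE_SIZE slot
 * at a time using pfn_offset within each huge page.
 */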
  2507. long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
  2508. struct page **pages, struct vm_area_struct **vmas,
  2509. unsigned long *position, unsigned long *nr_pages,
  2510. long i, unsigned int flags)
  2511. {
  2512. unsigned long pfn_offset;
  2513. unsigned long vaddr = *position;
  2514. unsigned long remainder = *nr_pages;
  2515. struct hstate *h = hstate_vma(vma);
  2516. spin_lock(&mm->page_table_lock);
  2517. while (vaddr < vma->vm_end && remainder) {
  2518. pte_t *pte;
  2519. int absent;
  2520. struct page *page;
  2521. /*
2522. * Some archs (sparc64, sh*) have multiple pte_t entries for
2523. * each hugepage. We have to make sure we get the
  2524. * first, for the page indexing below to work.
  2525. */
  2526. pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
  2527. absent = !pte || huge_pte_none(huge_ptep_get(pte));
  2528. /*
  2529. * When coredumping, it suits get_dump_page if we just return
  2530. * an error where there's an empty slot with no huge pagecache
  2531. * to back it. This way, we avoid allocating a hugepage, and
  2532. * the sparse dumpfile avoids allocating disk blocks, but its
  2533. * huge holes still show up with zeroes where they need to be.
  2534. */
  2535. if (absent && (flags & FOLL_DUMP) &&
  2536. !hugetlbfs_pagecache_present(h, vma, vaddr)) {
  2537. remainder = 0;
  2538. break;
  2539. }
  2540. /*
2541. * We need to call hugetlb_fault for both hugepages under migration
2542. * (in which case hugetlb_fault waits for the migration) and
2543. * hwpoisoned hugepages (in which case we need to prevent the
2544. * caller from accessing them). In order to do this, we use
2545. * is_swap_pte here instead of is_hugetlb_entry_migration and
2546. * is_hugetlb_entry_hwpoisoned. This is because it simply covers
2547. * both cases, and because we can't follow correct pages
2548. * directly from any kind of swap entry.
  2549. */
  2550. if (absent || is_swap_pte(huge_ptep_get(pte)) ||
  2551. ((flags & FOLL_WRITE) &&
  2552. !huge_pte_write(huge_ptep_get(pte)))) {
  2553. int ret;
  2554. spin_unlock(&mm->page_table_lock);
  2555. ret = hugetlb_fault(mm, vma, vaddr,
  2556. (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
  2557. spin_lock(&mm->page_table_lock);
  2558. if (!(ret & VM_FAULT_ERROR))
  2559. continue;
  2560. remainder = 0;
  2561. break;
  2562. }
  2563. pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
  2564. page = pte_page(huge_ptep_get(pte));
  2565. same_page:
  2566. if (pages) {
  2567. pages[i] = mem_map_offset(page, pfn_offset);
  2568. get_page(pages[i]);
  2569. }
  2570. if (vmas)
  2571. vmas[i] = vma;
  2572. vaddr += PAGE_SIZE;
  2573. ++pfn_offset;
  2574. --remainder;
  2575. ++i;
  2576. if (vaddr < vma->vm_end && remainder &&
  2577. pfn_offset < pages_per_huge_page(h)) {
  2578. /*
  2579. * We use pfn_offset to avoid touching the pageframes
  2580. * of this compound page.
  2581. */
  2582. goto same_page;
  2583. }
  2584. }
  2585. spin_unlock(&mm->page_table_lock);
  2586. *nr_pages = remainder;
  2587. *position = vaddr;
  2588. return i ? i : -EFAULT;
  2589. }
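/*
 * mprotect() for a hugetlb VMA: rewrite each present huge PTE with the new
 * protection and return the number of base pages affected.
 */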
  2590. unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
  2591. unsigned long address, unsigned long end, pgprot_t newprot)
  2592. {
  2593. struct mm_struct *mm = vma->vm_mm;
  2594. unsigned long start = address;
  2595. pte_t *ptep;
  2596. pte_t pte;
  2597. struct hstate *h = hstate_vma(vma);
  2598. unsigned long pages = 0;
  2599. BUG_ON(address >= end);
  2600. flush_cache_range(vma, address, end);
  2601. mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
  2602. spin_lock(&mm->page_table_lock);
  2603. for (; address < end; address += huge_page_size(h)) {
  2604. ptep = huge_pte_offset(mm, address);
  2605. if (!ptep)
  2606. continue;
  2607. if (huge_pmd_unshare(mm, &address, ptep)) {
  2608. pages++;
  2609. continue;
  2610. }
  2611. if (!huge_pte_none(huge_ptep_get(ptep))) {
  2612. pte = huge_ptep_get_and_clear(mm, address, ptep);
  2613. pte = pte_mkhuge(huge_pte_modify(pte, newprot));
  2614. pte = arch_make_huge_pte(pte, vma, NULL, 0);
  2615. set_huge_pte_at(mm, address, ptep, pte);
  2616. pages++;
  2617. }
  2618. }
  2619. spin_unlock(&mm->page_table_lock);
  2620. /*
  2621. * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
  2622. * may have cleared our pud entry and done put_page on the page table:
  2623. * once we release i_mmap_mutex, another task can do the final put_page
  2624. * and that page table be reused and filled with junk.
  2625. */
  2626. flush_tlb_range(vma, start, end);
  2627. mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
  2628. return pages << h->order;
  2629. }
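/*
 * Reserve huge pages for the range [from, to) of a hugetlbfs mapping:
 * charge the subpool and the global pool, and record the reservation in
 * the inode's region map (shared) or a per-VMA resv_map (private).
 */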
  2630. int hugetlb_reserve_pages(struct inode *inode,
  2631. long from, long to,
  2632. struct vm_area_struct *vma,
  2633. vm_flags_t vm_flags)
  2634. {
  2635. long ret, chg;
  2636. struct hstate *h = hstate_inode(inode);
  2637. struct hugepage_subpool *spool = subpool_inode(inode);
  2638. /*
  2639. * Only apply hugepage reservation if asked. At fault time, an
  2640. * attempt will be made for VM_NORESERVE to allocate a page
2641. * without using reserves.
  2642. */
  2643. if (vm_flags & VM_NORESERVE)
  2644. return 0;
  2645. /*
  2646. * Shared mappings base their reservation on the number of pages that
  2647. * are already allocated on behalf of the file. Private mappings need
  2648. * to reserve the full area even if read-only as mprotect() may be
  2649. * called to make the mapping read-write. Assume !vma is a shm mapping
  2650. */
  2651. if (!vma || vma->vm_flags & VM_MAYSHARE)
  2652. chg = region_chg(&inode->i_mapping->private_list, from, to);
  2653. else {
  2654. struct resv_map *resv_map = resv_map_alloc();
  2655. if (!resv_map)
  2656. return -ENOMEM;
  2657. chg = to - from;
  2658. set_vma_resv_map(vma, resv_map);
  2659. set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
  2660. }
  2661. if (chg < 0) {
  2662. ret = chg;
  2663. goto out_err;
  2664. }
  2665. /* There must be enough pages in the subpool for the mapping */
  2666. if (hugepage_subpool_get_pages(spool, chg)) {
  2667. ret = -ENOSPC;
  2668. goto out_err;
  2669. }
  2670. /*
  2671. * Check enough hugepages are available for the reservation.
  2672. * Hand the pages back to the subpool if there are not
  2673. */
  2674. ret = hugetlb_acct_memory(h, chg);
  2675. if (ret < 0) {
  2676. hugepage_subpool_put_pages(spool, chg);
  2677. goto out_err;
  2678. }
  2679. /*
  2680. * Account for the reservations made. Shared mappings record regions
  2681. * that have reservations as they are shared by multiple VMAs.
  2682. * When the last VMA disappears, the region map says how much
  2683. * the reservation was and the page cache tells how much of
  2684. * the reservation was consumed. Private mappings are per-VMA and
  2685. * only the consumed reservations are tracked. When the VMA
  2686. * disappears, the original reservation is the VMA size and the
  2687. * consumed reservations are stored in the map. Hence, nothing
  2688. * else has to be done for private mappings here
  2689. */
  2690. if (!vma || vma->vm_flags & VM_MAYSHARE)
  2691. region_add(&inode->i_mapping->private_list, from, to);
  2692. return 0;
  2693. out_err:
  2694. if (vma)
  2695. resv_map_put(vma);
  2696. return ret;
  2697. }
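/*
 * Release reservations beyond 'offset' when a hugetlbfs inode is
 * truncated, returning unused pages to the subpool and the global pool.
 */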
  2698. void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
  2699. {
  2700. struct hstate *h = hstate_inode(inode);
  2701. long chg = region_truncate(&inode->i_mapping->private_list, offset);
  2702. struct hugepage_subpool *spool = subpool_inode(inode);
  2703. spin_lock(&inode->i_lock);
  2704. inode->i_blocks -= (blocks_per_huge_page(h) * freed);
  2705. spin_unlock(&inode->i_lock);
  2706. hugepage_subpool_put_pages(spool, (chg - freed));
  2707. hugetlb_acct_memory(h, -(chg - freed));
  2708. }
  2709. #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
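/*
 * Compute the address in 'svma' that maps the same file offset as 'addr'
 * in 'vma', and return it if the two VMAs could share a PMD page table
 * for that PUD-sized region; return 0 otherwise.
 */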
  2710. static unsigned long page_table_shareable(struct vm_area_struct *svma,
  2711. struct vm_area_struct *vma,
  2712. unsigned long addr, pgoff_t idx)
  2713. {
  2714. unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
  2715. svma->vm_start;
  2716. unsigned long sbase = saddr & PUD_MASK;
  2717. unsigned long s_end = sbase + PUD_SIZE;
  2718. /* Allow segments to share if only one is marked locked */
  2719. unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
  2720. unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
  2721. /*
2722. * match the virtual addresses, permissions and the alignment of the
  2723. * page table page.
  2724. */
  2725. if (pmd_index(addr) != pmd_index(saddr) ||
  2726. vm_flags != svm_flags ||
  2727. sbase < svma->vm_start || svma->vm_end < s_end)
  2728. return 0;
  2729. return saddr;
  2730. }
  2731. static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
  2732. {
  2733. unsigned long base = addr & PUD_MASK;
  2734. unsigned long end = base + PUD_SIZE;
  2735. /*
  2736. * check on proper vm_flags and page table alignment
  2737. */
  2738. if (vma->vm_flags & VM_MAYSHARE &&
  2739. vma->vm_start <= base && end <= vma->vm_end)
  2740. return 1;
  2741. return 0;
  2742. }

/*
 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
 * and returns the corresponding pte. While this is not necessary for the
 * !shared pmd case because we can allocate the pmd later as well, it makes the
 * code much cleaner. pmd allocation is essential for the shared case because
 * pud has to be populated inside the same i_mmap_mutex section - otherwise
 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
 * bad pmd for sharing.
 */
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
	struct vm_area_struct *vma = find_vma(mm, addr);
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	struct vm_area_struct *svma;
	unsigned long saddr;
	pte_t *spte = NULL;
	pte_t *pte;

	if (!vma_shareable(vma, addr))
		return (pte_t *)pmd_alloc(mm, pud, addr);

	mutex_lock(&mapping->i_mmap_mutex);
	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
		if (svma == vma)
			continue;

		saddr = page_table_shareable(svma, vma, addr, idx);
		if (saddr) {
			spte = huge_pte_offset(svma->vm_mm, saddr);
			if (spte) {
				get_page(virt_to_page(spte));
				break;
			}
		}
	}

	if (!spte)
		goto out;

	spin_lock(&mm->page_table_lock);
	if (pud_none(*pud))
		pud_populate(mm, pud,
				(pmd_t *)((unsigned long)spte & PAGE_MASK));
	else
		put_page(virt_to_page(spte));
	spin_unlock(&mm->page_table_lock);
out:
	pte = (pte_t *)pmd_alloc(mm, pud, addr);
	mutex_unlock(&mapping->i_mmap_mutex);
	return pte;
}

/*
 * unmap huge page backed by shared pte.
 *
 * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
 * indicated by page_count > 1, unmap is achieved by clearing pud and
 * decrementing the ref count. If count == 1, the pte page is not shared.
 *
 * called with vma->vm_mm->page_table_lock held.
 *
 * returns: 1 successfully unmapped a shared pte page
 *	    0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	pgd_t *pgd = pgd_offset(mm, *addr);
	pud_t *pud = pud_offset(pgd, *addr);

	BUG_ON(page_count(virt_to_page(ptep)) == 0);
	if (page_count(virt_to_page(ptep)) == 1)
		return 0;

	pud_clear(pud);
	put_page(virt_to_page(ptep));
	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
	return 1;
}
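
/*
 * Typical caller pattern (an illustrative sketch, not a verbatim call site):
 * while walking a hugetlb range under page_table_lock, attempt to unshare
 * before operating on the pte:
 *
 *	ptep = huge_pte_offset(mm, address);
 *	if (ptep && huge_pmd_unshare(mm, &address, ptep))
 *		continue;
 *
 * On success the shared pud entry has already been cleared, so there is
 * nothing left to unmap at this address; note that huge_pmd_unshare()
 * adjusts the address (see above) before the caller's loop advances it.
 */
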
#define want_pmd_share()	(1)
#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
	return NULL;
}
#define want_pmd_share()	(0)
#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
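/*
 * Allocate (or find) the page table entry used to map a huge page at @addr
 * for huge page size @sz: the pud itself for PUD_SIZE pages, otherwise a
 * pmd, shared via huge_pmd_share() when the architecture allows it.
 */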
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		if (sz == PUD_SIZE) {
			pte = (pte_t *)pud;
		} else {
			BUG_ON(sz != PMD_SIZE);
			if (want_pmd_share() && pud_none(*pud))
				pte = huge_pmd_share(mm, addr, pud);
			else
				pte = (pte_t *)pmd_alloc(mm, pud, addr);
		}
	}
	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

	return pte;
}
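
/*
 * Look up, without allocating, the page table entry mapping a huge page at
 * @addr: returns the pud for a PUD-sized huge page, otherwise a pointer to
 * the pmd entry, or NULL if the pgd or pud is not present.
 */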
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (pud_present(*pud)) {
			if (pud_huge(*pud))
				return (pte_t *)pud;
			pmd = pmd_offset(pud, addr);
		}
	}
	return (pte_t *) pmd;
}
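
/*
 * Convert a huge pmd/pud entry into the struct page of the base page at
 * @address within the huge mapping (head page plus the offset in units of
 * PAGE_SIZE).
 */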
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
	return page;
}

struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pud);
	if (page)
		page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
	return page;
}

#else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */

/* Can be overridden by architectures */
__attribute__((weak)) struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int write)
{
	BUG();
	return NULL;
}

#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */

#ifdef CONFIG_MEMORY_FAILURE

/* Should be called with hugetlb_lock held */
static int is_hugepage_on_freelist(struct page *hpage)
{
	struct page *page;
	struct page *tmp;
	struct hstate *h = page_hstate(hpage);
	int nid = page_to_nid(hpage);

	list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
		if (page == hpage)
			return 1;
	return 0;
}

/*
 * This function is called from memory failure code.
 * Assume the caller holds page lock of the head page.
 */
int dequeue_hwpoisoned_huge_page(struct page *hpage)
{
	struct hstate *h = page_hstate(hpage);
	int nid = page_to_nid(hpage);
	int ret = -EBUSY;

	spin_lock(&hugetlb_lock);
	if (is_hugepage_on_freelist(hpage)) {
		/*
		 * Hwpoisoned hugepage isn't linked to activelist or freelist,
		 * but dangling hpage->lru can trigger list-debug warnings
		 * (this happens when we call unpoison_memory() on it),
		 * so let it point to itself with list_del_init().
		 */
		list_del_init(&hpage->lru);
		set_page_refcounted(hpage);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		ret = 0;
	}
	spin_unlock(&hugetlb_lock);
	return ret;
}
#endif