/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/kallsyms.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include "internal.h"
#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
struct page *mem_map;

EXPORT_SYMBOL(max_mapnr);
EXPORT_SYMBOL(mem_map);
#endif

unsigned long num_physpages;
/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
void * high_memory;

EXPORT_SYMBOL(num_physpages);
EXPORT_SYMBOL(high_memory);
/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);
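/*
 * Booting with "norandmaps" forces randomize_va_space to 0 (no
 * randomization); the same knob is exposed at run time as
 * /proc/sys/kernel/randomize_va_space.
 */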
unsigned long zero_pfn __read_mostly;
unsigned long highest_memmap_pfn __read_mostly;

/*
 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 */
static int __init init_zero_pfn(void)
{
	zero_pfn = page_to_pfn(ZERO_PAGE(0));
	return 0;
}
core_initcall(init_zero_pfn);
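/*
 * With SPLIT_RSS_COUNTING, RSS counters are cached per task in
 * task->rss_stat and only folded into mm->rss_stat (by
 * __sync_task_rss_stat()) about once every TASK_RSS_EVENTS_THRESH page
 * faults, avoiding an atomic update on the mm for every fault.  The
 * price is that get_mm_counter() may read a slightly stale value.
 */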
#if defined(SPLIT_RSS_COUNTING)

static void __sync_task_rss_stat(struct task_struct *task, struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		if (task->rss_stat.count[i]) {
			add_mm_counter(mm, i, task->rss_stat.count[i]);
			task->rss_stat.count[i] = 0;
		}
	}
	task->rss_stat.events = 0;
}

static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
{
	struct task_struct *task = current;

	if (likely(task->mm == mm))
		task->rss_stat.count[member] += val;
	else
		add_mm_counter(mm, member, val);
}
#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)

/* sync counter once per 64 page faults */
#define TASK_RSS_EVENTS_THRESH	(64)
static void check_sync_rss_stat(struct task_struct *task)
{
	if (unlikely(task != current))
		return;
	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
		__sync_task_rss_stat(task, task->mm);
}

unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = 0;

	/*
	 * Don't use task->mm here: the caller must guarantee that mm
	 * stays valid for the duration of the call, so there is no
	 * need to take a reference via get_task_mm().
	 */
	val = atomic_long_read(&mm->rss_stat.count[member]);
	/*
	 * The counter is updated asynchronously and may transiently go
	 * negative.  Users should never see a negative value, so clamp
	 * it to zero.
	 */
	if (val < 0)
		return 0;
	return (unsigned long)val;
}

void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
{
	__sync_task_rss_stat(task, mm);
}
#else

#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)

static void check_sync_rss_stat(struct task_struct *task)
{
}

#endif
/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	tlb->mm->nr_ptes--;
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
}

static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}
/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level?  Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests.  But what about where end is brought down
	 * by PMD_SIZE below? no, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_pud_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}
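/*
 * free_pgtables() below drives this for the common exit/unmap paths.
 * Its callers pass the boundaries of the neighbouring vmas as floor and
 * ceiling, so page tables still needed by a neighbour are kept; roughly
 * (a sketch of the mm/mmap.c call):
 *
 *	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
 *		      next ? next->vm_start : 0);
 *
 * where a ceiling of 0 means "the top of the address space".
 */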
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
		unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		/*
		 * Hide vma from rmap and truncate_pagecache before freeing
		 * pgtables
		 */
		unlink_anon_vmas(vma);
		unlink_file_vma(vma);

		if (is_vm_hugetlb_page(vma)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			       && !is_vm_hugetlb_page(next)) {
				vma = next;
				next = vma->vm_next;
				unlink_anon_vmas(vma);
				unlink_file_vma(vma);
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		}
		vma = next;
	}
}

int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		pmd_t *pmd, unsigned long address)
{
	pgtable_t new = pte_alloc_one(mm, address);
	int wait_split_huge_page;
	if (!new)
		return -ENOMEM;

	/*
	 * Ensure all pte setup (eg. pte page lock and page clearing) are
	 * visible before the pte is made visible to other CPUs by being
	 * put into page tables.
	 *
	 * The other side of the story is the pointer chasing in the page
	 * table walking code (when walking the page table without locking;
	 * ie. most of the time). Fortunately, these data accesses consist
	 * of a chain of data-dependent loads, meaning most CPUs (alpha
	 * being the notable exception) will already guarantee loads are
	 * seen in-order. See the alpha page table accessors for the
	 * smp_read_barrier_depends() barriers in page table walking code.
	 */
	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */

	spin_lock(&mm->page_table_lock);
	wait_split_huge_page = 0;
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		mm->nr_ptes++;
		pmd_populate(mm, pmd, new);
		new = NULL;
	} else if (unlikely(pmd_trans_splitting(*pmd)))
		wait_split_huge_page = 1;
	spin_unlock(&mm->page_table_lock);
	if (new)
		pte_free(mm, new);
	if (wait_split_huge_page)
		wait_split_huge_page(vma->anon_vma, pmd);
	return 0;
}

int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	} else
		VM_BUG_ON(pmd_trans_splitting(*pmd));
	spin_unlock(&init_mm.page_table_lock);
	if (new)
		pte_free_kernel(&init_mm, new);
	return 0;
}
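/*
 * Most fault paths reach __pte_alloc() through the pte_alloc_map() and
 * pte_alloc_map_lock() wrappers in <linux/mm.h>, which only drop down
 * here when the pmd entry is still empty, along the lines of:
 *
 *	pte = pte_alloc_map(mm, vma, pmd, address);
 *	if (!pte)
 *		return VM_FAULT_OOM;
 */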
static inline void init_rss_vec(int *rss)
{
	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}

static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
	int i;

	if (current->mm == mm)
		sync_mm_rss(current, mm);
	for (i = 0; i < NR_MM_COUNTERS; i++)
		if (rss[i])
			add_mm_counter(mm, i, rss[i]);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
			  pte_t pte, struct page *page)
{
	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);
	struct address_space *mapping;
	pgoff_t index;
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			return;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
				"BUG: Bad page map: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
	index = linear_page_index(vma, addr);

	printk(KERN_ALERT
		"BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
		current->comm,
		(long long)pte_val(pte), (long long)pmd_val(*pmd));
	if (page)
		dump_page(page);
	printk(KERN_ALERT
		"addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
		(void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
	/*
	 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
	 */
	if (vma->vm_ops)
		print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n",
				(unsigned long)vma->vm_ops->fault);
	if (vma->vm_file && vma->vm_file->f_op)
		print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n",
				(unsigned long)vma->vm_file->f_op->mmap);
	dump_stack();
	add_taint(TAINT_BAD_PAGE);
}
static inline int is_cow_mapping(unsigned int flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

#ifndef is_zero_pfn
static inline int is_zero_pfn(unsigned long pfn)
{
	return pfn == zero_pfn;
}
#endif

#ifndef my_zero_pfn
static inline unsigned long my_zero_pfn(unsigned long addr)
{
	return zero_pfn;
}
#endif
/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * and for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The disadvantage is that pages are refcounted
 * (which can be slower and simply not an option for some PFNMAP users). The
 * advantage is that we don't have to follow the strict linearity rule of
 * PFNMAP mappings in order to support COWable mappings.
 *
 */
#ifdef __HAVE_ARCH_PTE_SPECIAL
# define HAVE_PTE_SPECIAL 1
#else
# define HAVE_PTE_SPECIAL 0
#endif
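/*
 * Worked example of the linearity rule above: after
 * remap_pfn_range(vma, vma->vm_start, pfn, size, prot), vm_pgoff == pfn,
 * so every untouched pte in the range satisfies
 *
 *	pte_pfn(pte) == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * and is treated as special (vm_normal_page() returns NULL).  A page
 * that was later COWed into the mapping no longer satisfies the
 * identity, and is therefore treated as a normal page.
 */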
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
				pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (HAVE_PTE_SPECIAL) {
		if (likely(!pte_special(pte)))
			goto check_pfn;
		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			return NULL;
		if (!is_zero_pfn(pfn))
			print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/* !HAVE_PTE_SPECIAL case follows: */

	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (is_zero_pfn(pfn))
		return NULL;
check_pfn:
	if (unlikely(pfn > highest_memmap_pfn)) {
		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}
/*
 * Copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 */

static inline unsigned long
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
		unsigned long addr, int *rss)
{
	unsigned long vm_flags = vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;

	/* pte contains position in swap or file, so copy. */
	if (unlikely(!pte_present(pte))) {
		if (!pte_file(pte)) {
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (swap_duplicate(entry) < 0)
				return entry.val;

			/* make sure dst_mm is on swapoff's mmlist. */
			if (unlikely(list_empty(&dst_mm->mmlist))) {
				spin_lock(&mmlist_lock);
				if (list_empty(&dst_mm->mmlist))
					list_add(&dst_mm->mmlist,
						 &src_mm->mmlist);
				spin_unlock(&mmlist_lock);
			}
			if (likely(!non_swap_entry(entry)))
				rss[MM_SWAPENTS]++;
			else if (is_write_migration_entry(entry) &&
					is_cow_mapping(vm_flags)) {
				/*
				 * COW mappings require pages in both
				 * parent and child to be read-only.
				 */
				make_migration_entry_read(&entry);
				pte = swp_entry_to_pte(entry);
				set_pte_at(src_mm, addr, src_pte, pte);
			}
		}
		goto out_set_pte;
	}

	/*
	 * If it's a COW mapping, write protect it both
	 * in the parent and the child
	 */
	if (is_cow_mapping(vm_flags)) {
		ptep_set_wrprotect(src_mm, addr, src_pte);
		pte = pte_wrprotect(pte);
	}

	/*
	 * If it's a shared mapping, mark it clean in
	 * the child
	 */
	if (vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);

	page = vm_normal_page(vma, addr, pte);
	if (page) {
		get_page(page);
		page_dup_rmap(page);
		if (PageAnon(page))
			rss[MM_ANONPAGES]++;
		else
			rss[MM_FILEPAGES]++;
	}

out_set_pte:
	set_pte_at(dst_mm, addr, dst_pte, pte);
	return 0;
}
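/*
 * Copy one pte page worth of entries.  Both pte locks are held, the
 * source one nested (SINGLE_DEPTH_NESTING); "progress" forces a break
 * about every 32 entries so need_resched() and lock contention can be
 * serviced.  If copy_one_pte() returned a swap entry whose count needs
 * a continuation page, drop the locks, add it with GFP_KERNEL, and
 * restart at the same address.
 */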
static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		   pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
		   unsigned long addr, unsigned long end)
{
	pte_t *orig_src_pte, *orig_dst_pte;
	pte_t *src_pte, *dst_pte;
	spinlock_t *src_ptl, *dst_ptl;
	int progress = 0;
	int rss[NR_MM_COUNTERS];
	swp_entry_t entry = (swp_entry_t){0};

again:
	init_rss_vec(rss);

	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte)
		return -ENOMEM;
	src_pte = pte_offset_map(src_pmd, addr);
	src_ptl = pte_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
	orig_src_pte = src_pte;
	orig_dst_pte = dst_pte;
	arch_enter_lazy_mmu_mode();

	do {
		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
				break;
		}
		if (pte_none(*src_pte)) {
			progress++;
			continue;
		}
		entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
							vma, addr, rss);
		if (entry.val)
			break;
		progress += 8;
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();
	spin_unlock(src_ptl);
	pte_unmap(orig_src_pte);
	add_mm_rss_vec(dst_mm, rss);
	pte_unmap_unlock(orig_dst_pte, dst_ptl);
	cond_resched();

	if (entry.val) {
		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
			return -ENOMEM;
		progress = 0;
	}
	if (addr != end)
		goto again;
	return 0;
}
static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}

int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		struct vm_area_struct *vma)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long next;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;
	int ret;

	/*
	 * Don't copy ptes where a page fault will fill them correctly.
	 * Fork becomes much lighter when there are big shared or private
	 * readonly mappings. The tradeoff is whether the up-front copy in
	 * copy_page_range() is cheaper than taking the faults later.
	 */
	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
		if (!vma->anon_vma)
			return 0;
	}

	if (is_vm_hugetlb_page(vma))
		return copy_hugetlb_page_range(dst_mm, src_mm, vma);

	if (unlikely(is_pfn_mapping(vma))) {
		/*
		 * We do not free on error cases below as remove_vma
		 * gets called on error from higher level routine
		 */
		ret = track_pfn_vma_copy(vma);
		if (ret)
			return ret;
	}

	/*
	 * We need to invalidate the secondary MMU mappings only when
	 * there could be a permission downgrade on the ptes of the
	 * parent mm. And a permission downgrade will only happen if
	 * is_cow_mapping() returns true.
	 */
	if (is_cow_mapping(vma->vm_flags))
		mmu_notifier_invalidate_range_start(src_mm, addr, end);

	ret = 0;
	dst_pgd = pgd_offset(dst_mm, addr);
	src_pgd = pgd_offset(src_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(src_pgd))
			continue;
		if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
					    vma, addr, next))) {
			ret = -ENOMEM;
			break;
		}
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);

	if (is_cow_mapping(vma->vm_flags))
		mmu_notifier_invalidate_range_end(src_mm,
						  vma->vm_start, end);
	return ret;
}
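/*
 * Tear down one pte page worth of mappings.  *zap_work is a latency
 * budget, charged PAGE_SIZE per present pte, so callers can drop locks
 * and reschedule between blocks.  When "details" is non-NULL, only
 * matching pages are zapped (check_mapping for shared-cache
 * invalidation, the page->index range for nonlinear truncation), and
 * swap/file entries are left alone.
 */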
static unsigned long zap_pte_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	struct mm_struct *mm = tlb->mm;
	pte_t *pte;
	spinlock_t *ptl;
	int rss[NR_MM_COUNTERS];

	init_rss_vec(rss);

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		pte_t ptent = *pte;
		if (pte_none(ptent)) {
			(*zap_work)--;
			continue;
		}

		(*zap_work) -= PAGE_SIZE;

		if (pte_present(ptent)) {
			struct page *page;

			page = vm_normal_page(vma, addr, ptent);
			if (unlikely(details) && page) {
				/*
				 * unmap_shared_mapping_pages() wants to
				 * invalidate cache without truncating:
				 * unmap shared but keep private pages.
				 */
				if (details->check_mapping &&
				    details->check_mapping != page->mapping)
					continue;
				/*
				 * Each page->index must be checked when
				 * invalidating or truncating nonlinear.
				 */
				if (details->nonlinear_vma &&
				    (page->index < details->first_index ||
				     page->index > details->last_index))
					continue;
			}
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			tlb_remove_tlb_entry(tlb, pte, addr);
			if (unlikely(!page))
				continue;
			if (unlikely(details) && details->nonlinear_vma
			    && linear_page_index(details->nonlinear_vma,
						addr) != page->index)
				set_pte_at(mm, addr, pte,
					   pgoff_to_pte(page->index));
			if (PageAnon(page))
				rss[MM_ANONPAGES]--;
			else {
				if (pte_dirty(ptent))
					set_page_dirty(page);
				if (pte_young(ptent) &&
				    likely(!VM_SequentialReadHint(vma)))
					mark_page_accessed(page);
				rss[MM_FILEPAGES]--;
			}
			page_remove_rmap(page);
			if (unlikely(page_mapcount(page) < 0))
				print_bad_pte(vma, addr, ptent, page);
			tlb_remove_page(tlb, page);
			continue;
		}
		/*
		 * If details->check_mapping, we leave swap entries;
		 * if details->nonlinear_vma, we leave file entries.
		 */
		if (unlikely(details))
			continue;
		if (pte_file(ptent)) {
			if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
				print_bad_pte(vma, addr, ptent, NULL);
		} else {
			swp_entry_t entry = pte_to_swp_entry(ptent);

			if (!non_swap_entry(entry))
				rss[MM_SWAPENTS]--;
			if (unlikely(!free_swap_and_cache(entry)))
				print_bad_pte(vma, addr, ptent, NULL);
		}
		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
	} while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));

	add_mm_rss_vec(mm, rss);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return addr;
}
static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pte_range(tlb, vma, pmd, addr, next,
						zap_work, details);
	} while (pmd++, addr = next, (addr != end && *zap_work > 0));

	return addr;
}

static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pmd_range(tlb, vma, pud, addr, next,
						zap_work, details);
	} while (pud++, addr = next, (addr != end && *zap_work > 0));

	return addr;
}

static unsigned long unmap_page_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pgd_t *pgd;
	unsigned long next;

	if (details && !details->check_mapping && !details->nonlinear_vma)
		details = NULL;

	BUG_ON(addr >= end);
	mem_cgroup_uncharge_start();
	tlb_start_vma(tlb, vma);
	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pud_range(tlb, vma, pgd, addr, next,
						zap_work, details);
	} while (pgd++, addr = next, (addr != end && *zap_work > 0));
	tlb_end_vma(tlb, vma);
	mem_cgroup_uncharge_end();

	return addr;
}

#ifdef CONFIG_PREEMPT
# define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
#else
/* No preempt: go for improved straight-line efficiency */
# define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
#endif
/**
 * unmap_vmas - unmap a range of memory covered by a list of vma's
 * @tlbp: address of the caller's struct mmu_gather
 * @vma: the starting vma
 * @start_addr: virtual address at which to start unmapping
 * @end_addr: virtual address at which to end unmapping
 * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
 * @details: details of nonlinear truncation or shared cache invalidation
 *
 * Returns the end address of the unmapping (restart addr if interrupted).
 *
 * Unmap all pages in the vma list.
 *
 * We aim to not hold locks for too long (for scheduling latency reasons).
 * So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
 * return the ending mmu_gather to the caller.
 *
 * Only addresses between `start' and `end' will be unmapped.
 *
 * The VMA list must be sorted in ascending virtual address order.
 *
 * unmap_vmas() assumes that the caller will flush the whole unmapped address
 * range after unmap_vmas() returns.  So the only responsibility here is to
 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
 * drops the lock and schedules.
 */
unsigned long unmap_vmas(struct mmu_gather **tlbp,
		struct vm_area_struct *vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *details)
{
	long zap_work = ZAP_BLOCK_SIZE;
	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
	int tlb_start_valid = 0;
	unsigned long start = start_addr;
	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
	int fullmm = (*tlbp)->fullmm;
	struct mm_struct *mm = vma->vm_mm;

	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
		unsigned long end;

		start = max(vma->vm_start, start_addr);
		if (start >= vma->vm_end)
			continue;
		end = min(vma->vm_end, end_addr);
		if (end <= vma->vm_start)
			continue;

		if (vma->vm_flags & VM_ACCOUNT)
			*nr_accounted += (end - start) >> PAGE_SHIFT;

		if (unlikely(is_pfn_mapping(vma)))
			untrack_pfn_vma(vma, 0, 0);

		while (start != end) {
			if (!tlb_start_valid) {
				tlb_start = start;
				tlb_start_valid = 1;
			}

			if (unlikely(is_vm_hugetlb_page(vma))) {
				/*
				 * It is undesirable to test vma->vm_file as it
				 * should be non-null for valid hugetlb area.
				 * However, vm_file will be NULL in the error
				 * cleanup path of do_mmap_pgoff. When
				 * hugetlbfs ->mmap method fails,
				 * do_mmap_pgoff() nullifies vma->vm_file
				 * before calling this function to clean up.
				 * Since no pte has actually been setup, it is
				 * safe to do nothing in this case.
				 */
				if (vma->vm_file) {
					unmap_hugepage_range(vma, start, end, NULL);
					zap_work -= (end - start) /
					pages_per_huge_page(hstate_vma(vma));
				}

				start = end;
			} else
				start = unmap_page_range(*tlbp, vma,
						start, end, &zap_work, details);

			if (zap_work > 0) {
				BUG_ON(start != end);
				break;
			}

			tlb_finish_mmu(*tlbp, tlb_start, start);

			if (need_resched() ||
				(i_mmap_lock && spin_needbreak(i_mmap_lock))) {
				if (i_mmap_lock) {
					*tlbp = NULL;
					goto out;
				}
				cond_resched();
			}

			*tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
			tlb_start_valid = 0;
			zap_work = ZAP_BLOCK_SIZE;
		}
	}
out:
	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
	return start;	/* which is now the end (or restart) address */
}
/**
 * zap_page_range - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 * @details: details of nonlinear truncation or shared cache invalidation
 */
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *details)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather *tlb;
	unsigned long end = address + size;
	unsigned long nr_accounted = 0;

	lru_add_drain();
	tlb = tlb_gather_mmu(mm, 0);
	update_hiwater_rss(mm);
	end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
	if (tlb)
		tlb_finish_mmu(tlb, address, end);
	return end;
}

/**
 * zap_vma_ptes - remove ptes mapping the vma
 * @vma: vm_area_struct holding ptes to be zapped
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 *
 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
 *
 * The entire address range must be fully contained within the vma.
 *
 * Returns 0 if successful.
 */
int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size)
{
	if (address < vma->vm_start || address + size > vma->vm_end ||
			!(vma->vm_flags & VM_PFNMAP))
		return -1;
	zap_page_range(vma, address, size, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);
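/*
 * Sketch of a caller: a driver that established a VM_PFNMAP mapping
 * with remap_pfn_range() can revoke part of it before the vma is torn
 * down (offset and len are illustrative, and must keep the range
 * inside the vma):
 *
 *	err = zap_vma_ptes(vma, vma->vm_start + offset, len);
 */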
/**
 * follow_page - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		goto out;
	}

	page = NULL;
	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto no_page_table;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		goto no_page_table;
	if (pud_huge(*pud)) {
		BUG_ON(flags & FOLL_GET);
		page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
		goto out;
	}
	if (unlikely(pud_bad(*pud)))
		goto no_page_table;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		goto no_page_table;
	if (pmd_huge(*pmd)) {
		BUG_ON(flags & FOLL_GET);
		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
		goto out;
	}
	if (unlikely(pmd_bad(*pmd)))
		goto no_page_table;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);

	pte = *ptep;
	if (!pte_present(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !pte_write(pte))
		goto unlock;

	page = vm_normal_page(vma, address, pte);
	if (unlikely(!page)) {
		if ((flags & FOLL_DUMP) ||
		    !is_zero_pfn(pte_pfn(pte)))
			goto bad_page;
		page = pte_page(pte);
	}

	if (flags & FOLL_GET)
		get_page(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if (flags & FOLL_MLOCK) {
		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	/* push cached pages to LRU */
			/*
			 * Because we lock page here and migration is
			 * blocked by the pte's page reference, we need
			 * only check for file-cache page truncation.
			 */
			if (page->mapping)
				mlock_vma_page(page);
			unlock_page(page);
		}
	}
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return page;

bad_page:
	pte_unmap_unlock(ptep, ptl);
	return ERR_PTR(-EFAULT);

no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return page;

no_page_table:
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) &&
	    (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return page;
}
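/*
 * __get_user_pages() walks and faults in up to @nr_pages pages starting
 * at @start in @mm, returning the number of pages pinned (or -errno if
 * none could be).  The caller must hold mm->mmap_sem.  With a non-NULL
 * @nonblocking, faults are taken with FAULT_FLAG_ALLOW_RETRY and
 * *nonblocking is cleared if a fault returned VM_FAULT_RETRY after
 * dropping mmap_sem.
 */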
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long start, int nr_pages, unsigned int gup_flags,
		     struct page **pages, struct vm_area_struct **vmas,
		     int *nonblocking)
{
	int i;
	unsigned long vm_flags;

	if (nr_pages <= 0)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * Require read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (gup_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (gup_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
	i = 0;

	do {
		struct vm_area_struct *vma;

		vma = find_extend_vma(mm, start);
		if (!vma && in_gate_area(tsk, start)) {
			unsigned long pg = start & PAGE_MASK;
			struct vm_area_struct *gate_vma = get_gate_vma(tsk);
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;
			pte_t *pte;

			/* user gate pages are read-only */
			if (gup_flags & FOLL_WRITE)
				return i ? : -EFAULT;
			if (pg > TASK_SIZE)
				pgd = pgd_offset_k(pg);
			else
				pgd = pgd_offset_gate(mm, pg);
			BUG_ON(pgd_none(*pgd));
			pud = pud_offset(pgd, pg);
			BUG_ON(pud_none(*pud));
			pmd = pmd_offset(pud, pg);
			if (pmd_none(*pmd))
				return i ? : -EFAULT;
			pte = pte_offset_map(pmd, pg);
			if (pte_none(*pte)) {
				pte_unmap(pte);
				return i ? : -EFAULT;
			}
			if (pages) {
				struct page *page;

				page = vm_normal_page(gate_vma, start, *pte);
				if (!page) {
					if (!(gup_flags & FOLL_DUMP) &&
					     is_zero_pfn(pte_pfn(*pte)))
						page = pte_page(*pte);
					else {
						pte_unmap(pte);
						return i ? : -EFAULT;
					}
				}
				pages[i] = page;
				get_page(page);
			}
			pte_unmap(pte);
			if (vmas)
				vmas[i] = gate_vma;
			i++;
			start += PAGE_SIZE;
			nr_pages--;
			continue;
		}

		if (!vma ||
		    (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			return i ? : -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			i = follow_hugetlb_page(mm, vma, pages, vmas,
					&start, &nr_pages, i, gup_flags);
			continue;
		}

		do {
			struct page *page;
			unsigned int foll_flags = gup_flags;

			/*
			 * If we have a pending SIGKILL, don't keep faulting
			 * pages and potentially allocating memory.
			 */
			if (unlikely(fatal_signal_pending(current)))
				return i ? i : -ERESTARTSYS;

			cond_resched();
			while (!(page = follow_page(vma, start, foll_flags))) {
				int ret;
				unsigned int fault_flags = 0;

				if (foll_flags & FOLL_WRITE)
					fault_flags |= FAULT_FLAG_WRITE;
				if (nonblocking)
					fault_flags |= FAULT_FLAG_ALLOW_RETRY;

				ret = handle_mm_fault(mm, vma, start,
							fault_flags);

				if (ret & VM_FAULT_ERROR) {
					if (ret & VM_FAULT_OOM)
						return i ? i : -ENOMEM;
					if (ret & (VM_FAULT_HWPOISON |
						   VM_FAULT_HWPOISON_LARGE |
						   VM_FAULT_SIGBUS))
						return i ? i : -EFAULT;
					BUG();
				}

				if (ret & VM_FAULT_MAJOR)
					tsk->maj_flt++;
				else
					tsk->min_flt++;

				if (ret & VM_FAULT_RETRY) {
					*nonblocking = 0;
					return i;
				}

				/*
				 * The VM_FAULT_WRITE bit tells us that
				 * do_wp_page has broken COW when necessary,
				 * even if maybe_mkwrite decided not to set
				 * pte_write. We can thus safely do subsequent
				 * page lookups as if they were reads. But only
				 * do so when looping for pte_write is futile:
				 * in some cases userspace may also be wanting
				 * to write to the gotten user page, which a
				 * read fault here might prevent (a readonly
				 * page might get reCOWed by userspace write).
				 */
				if ((ret & VM_FAULT_WRITE) &&
				    !(vma->vm_flags & VM_WRITE))
					foll_flags &= ~FOLL_WRITE;

				cond_resched();
			}
			if (IS_ERR(page))
				return i ? i : PTR_ERR(page);
			if (pages) {
				pages[i] = page;

				flush_anon_page(vma, page, start);
				flush_dcache_page(page);
			}
			if (vmas)
				vmas[i] = vma;
			i++;
			start += PAGE_SIZE;
			nr_pages--;
		} while (nr_pages && start < vma->vm_end);
	} while (nr_pages);
	return i;
}
/**
 * get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to by the caller
 * @force:	whether to force write access even if user mapping is
 *		readonly. This will result in the page being COWed even
 *		in MAP_SHARED mappings. You do not want this.
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently refaulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If write=0, the page must not be written to. If the page is written to,
 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
 * after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, int nr_pages, int write, int force,
		struct page **pages, struct vm_area_struct **vmas)
{
	int flags = FOLL_TOUCH;

	if (pages)
		flags |= FOLL_GET;
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
				NULL);
}
EXPORT_SYMBOL(get_user_pages);
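/*
 * Illustrative sketch, not part of the original file: a typical
 * get_user_pages() caller pinning a user buffer for I/O. The function
 * name example_pin_user_buffer and its parameters are hypothetical;
 * the locking and release rules follow the documentation above.
 */
static int example_pin_user_buffer(unsigned long uaddr, int nr_pages,
				   struct page **pages)
{
	int got;

	down_read(&current->mm->mmap_sem);
	got = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
			     nr_pages, 1 /* write */, 0 /* force */,
			     pages, NULL);
	up_read(&current->mm->mmap_sem);

	/*
	 * For each page actually pinned, the caller must eventually do:
	 *	set_page_dirty_lock(pages[i]);	(only if the page was written)
	 *	put_page(pages[i]);
	 */
	return got;
}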
/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by page_cache_release() or put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */
pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			spinlock_t **ptl)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd_t *pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			return pte_alloc_map_lock(mm, pmd, addr, ptl);
	}
	return NULL;
}
/*
 * This is the old fallback for page remapping.
 *
 * For historical reasons, it only allows reserved pages. Only
 * old drivers should use this, and they needed to mark their
 * pages reserved for the old functions anyway.
 */
static int insert_page(struct vm_area_struct *vma, unsigned long addr,
			struct page *page, pgprot_t prot)
{
	struct mm_struct *mm = vma->vm_mm;
	int retval;
	pte_t *pte;
	spinlock_t *ptl;

	retval = -EINVAL;
	if (PageAnon(page))
		goto out;
	retval = -ENOMEM;
	flush_dcache_page(page);
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;
	retval = -EBUSY;
	if (!pte_none(*pte))
		goto out_unlock;

	/* Ok, finally just insert the thing.. */
	get_page(page);
	inc_mm_counter_fast(mm, MM_FILEPAGES);
	page_add_file_rmap(page);
	set_pte_at(mm, addr, pte, mk_pte(page, prot));

	retval = 0;
	pte_unmap_unlock(pte, ptl);
	return retval;
out_unlock:
	pte_unmap_unlock(pte, ptl);
out:
	return retval;
}
/**
 * vm_insert_page - insert single page into user vma
 * @vma: user vma to map to
 * @addr: target user address of this page
 * @page: source kernel page
 *
 * This allows drivers to insert individual pages they've allocated
 * into a user vma.
 *
 * The page has to be a nice clean _individual_ kernel allocation.
 * If you allocate a compound page, you need to have marked it as
 * such (__GFP_COMP), or manually just split the page up yourself
 * (see split_page()).
 *
 * NOTE! Traditionally this was done with "remap_pfn_range()" which
 * took an arbitrary page protection parameter. This doesn't allow
 * that. Your vma protection will have to be set up correctly, which
 * means that if you want a shared writable mapping, you'd better
 * ask for a shared writable mapping!
 *
 * The page does not need to be reserved.
 */
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
			struct page *page)
{
	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;
	if (!page_count(page))
		return -EINVAL;
	vma->vm_flags |= VM_INSERTPAGE;
	return insert_page(vma, addr, page, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_page);
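/*
 * Illustrative sketch, not part of the original file: a minimal driver
 * ->mmap method backing a one-page mapping with vm_insert_page(). The
 * function name example_mmap is hypothetical, and teardown (freeing the
 * page when the device is released) is omitted. Note vm_insert_page()
 * takes its own reference on the page via insert_page() above.
 */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		return -ENOMEM;
	return vm_insert_page(vma, vma->vm_start, page);
}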
static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn, pgprot_t prot)
{
	struct mm_struct *mm = vma->vm_mm;
	int retval;
	pte_t *pte, entry;
	spinlock_t *ptl;

	retval = -ENOMEM;
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;
	retval = -EBUSY;
	if (!pte_none(*pte))
		goto out_unlock;

	/* Ok, finally just insert the thing.. */
	entry = pte_mkspecial(pfn_pte(pfn, prot));
	set_pte_at(mm, addr, pte, entry);
	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */

	retval = 0;
out_unlock:
	pte_unmap_unlock(pte, ptl);
out:
	return retval;
}
/**
 * vm_insert_pfn - insert single pfn into user vma
 * @vma: user vma to map to
 * @addr: target user address of this page
 * @pfn: source kernel pfn
 *
 * Similar to vm_insert_page, this allows drivers to insert individual pages
 * they've allocated into a user vma. Same comments apply.
 *
 * This function should only be called from a vm_ops->fault handler, and
 * in that case the handler should return VM_FAULT_NOPAGE.
 *
 * vma cannot be a COW mapping.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 */
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn)
{
	int ret;
	pgprot_t pgprot = vma->vm_page_prot;
	/*
	 * Technically, architectures with pte_special can avoid all these
	 * restrictions (same for remap_pfn_range). However we would like
	 * consistency in testing and feature parity among all, so we should
	 * try to keep these invariants in place for everybody.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;
	if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
		return -EINVAL;

	ret = insert_pfn(vma, addr, pfn, pgprot);

	if (ret)
		untrack_pfn_vma(vma, pfn, PAGE_SIZE);

	return ret;
}
EXPORT_SYMBOL(vm_insert_pfn);
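/*
 * Illustrative sketch, not part of the original file: the shape of a
 * vm_ops->fault handler for a VM_PFNMAP vma, as described above. Both
 * example_fault and example_pfn_for (a stand-in for the driver's own
 * offset-to-pfn translation) are hypothetical.
 */
static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long addr = (unsigned long)vmf->virtual_address;
	unsigned long pfn = example_pfn_for(vma, vmf->pgoff);

	if (vm_insert_pfn(vma, addr, pfn))
		return VM_FAULT_SIGBUS;
	/* The pte is installed; there is no struct page for the core MM. */
	return VM_FAULT_NOPAGE;
}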
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn)
{
	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;

	/*
	 * If we don't have pte special, then we have to use the pfn_valid()
	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
	 * refcount the page if pfn_valid is true (hence insert_page rather
	 * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
	 * without pte special, it would then be refcounted as a normal page.
	 */
	if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
		struct page *page;

		page = pfn_to_page(pfn);
		return insert_page(vma, addr, page, vma->vm_page_prot);
	}
	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_mixed);
/*
 * Maps a range of physical memory into the requested pages. The old
 * mappings are removed. Any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access").
 */
static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	arch_enter_lazy_mmu_mode();
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}
static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	pfn -= addr >> PAGE_SHIFT;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (remap_pte_range(mm, pmd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	pfn -= addr >> PAGE_SHIFT;
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (remap_pmd_range(mm, pud, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}
/**
 * remap_pfn_range - remap kernel memory to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: page frame number of kernel physical memory
 * @size: size of map area
 * @prot: page protection flags for this mapping
 *
 * Note: this is only safe if the mm semaphore is held when called.
 */
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		    unsigned long pfn, unsigned long size, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + PAGE_ALIGN(size);
	struct mm_struct *mm = vma->vm_mm;
	int err;

	/*
	 * Physically remapped pages are special. Tell the
	 * rest of the world about it:
	 *   VM_IO tells people not to look at these pages
	 *	(accesses can have side effects).
	 *   VM_RESERVED is specified all over the place, because
	 *	in 2.4 it kept swapout's vma scan off this vma; but
	 *	in 2.6 the LRU scan won't even find its pages, so this
	 *	flag means no more than count its pages in reserved_vm,
	 *	and omit it from core dump, even when VM_IO is turned off.
	 *   VM_PFNMAP tells the core MM that the base pages are just
	 *	raw PFN mappings, and do not have a "struct page"
	 *	associated with them.
	 *
	 * There's a horrible special case to handle copy-on-write
	 * behaviour that some programs depend on. We mark the "original"
	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
	 */
	if (addr == vma->vm_start && end == vma->vm_end) {
		vma->vm_pgoff = pfn;
		vma->vm_flags |= VM_PFN_AT_MMAP;
	} else if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;

	err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
	if (err) {
		/*
		 * To indicate that track_pfn related cleanup is not
		 * needed from higher level routine calling unmap_vmas
		 */
		vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
		vma->vm_flags &= ~VM_PFN_AT_MMAP;
		return -EINVAL;
	}

	BUG_ON(addr >= end);
	pfn -= addr >> PAGE_SHIFT;
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_pud_range(mm, pgd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	if (err)
		untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));

	return err;
}
EXPORT_SYMBOL(remap_pfn_range);
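/*
 * Illustrative sketch, not part of the original file: the classic driver
 * ->mmap implementation, remapping a physical region whose page frame
 * number userspace passed in as the mmap offset (vma->vm_pgoff). The
 * function name example_io_mmap is hypothetical.
 */
static int example_io_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       size, vma->vm_page_prot);
}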
static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			pte_fn_t fn, void *data)
{
	pte_t *pte;
	int err;
	pgtable_t token;
	spinlock_t *uninitialized_var(ptl);

	pte = (mm == &init_mm) ?
		pte_alloc_kernel(pmd, addr) :
		pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;

	BUG_ON(pmd_huge(*pmd));

	arch_enter_lazy_mmu_mode();

	token = pmd_pgtable(*pmd);

	do {
		err = fn(pte++, token, addr, data);
		if (err)
			break;
	} while (addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();

	if (mm != &init_mm)
		pte_unmap_unlock(pte-1, ptl);
	return err;
}

static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
			unsigned long addr, unsigned long end,
			pte_fn_t fn, void *data)
{
	pmd_t *pmd;
	unsigned long next;
	int err;

	BUG_ON(pud_huge(*pud));

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);
	return err;
}

static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
			unsigned long addr, unsigned long end,
			pte_fn_t fn, void *data)
{
	pud_t *pud;
	unsigned long next;
	int err;

	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);
	return err;
}
/*
 * Scan a region of virtual memory, filling in page tables as necessary
 * and calling a provided function on each leaf page table.
 */
int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
			unsigned long size, pte_fn_t fn, void *data)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + size;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	return err;
}
EXPORT_SYMBOL_GPL(apply_to_page_range);
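/*
 * Illustrative sketch, not part of the original file: the calling
 * convention apply_to_page_range() expects from its pte_fn_t callback,
 * here counting present ptes over a kernel virtual range. The names
 * example_count_present and example_count_range are hypothetical. Note
 * that the walk fills in missing page tables, so this is best applied
 * to ranges whose tables are already populated.
 */
static int example_count_present(pte_t *pte, pgtable_t token,
				 unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (pte_present(*pte))
		(*count)++;
	return 0;	/* a non-zero return would abort the walk */
}

static unsigned long example_count_range(unsigned long addr, unsigned long size)
{
	unsigned long count = 0;

	apply_to_page_range(&init_mm, addr, size, example_count_present, &count);
	return count;
}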
/*
 * handle_pte_fault chooses page fault handler according to an entry
 * which was read non-atomically. Before making any commitment, on
 * those architectures or configurations (e.g. i386 with PAE) which
 * might give a mix of unmatched parts, do_swap_page and do_file_page
 * must check under lock before unmapping the pte and proceeding
 * (but do_wp_page is only called after already making such a check;
 * and do_anonymous_page and do_no_page can safely check later on).
 */
static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
				pte_t *page_table, pte_t orig_pte)
{
	int same = 1;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
	if (sizeof(pte_t) > sizeof(unsigned long)) {
		spinlock_t *ptl = pte_lockptr(mm, pmd);
		spin_lock(ptl);
		same = pte_same(*page_table, orig_pte);
		spin_unlock(ptl);
	}
#endif
	pte_unmap(page_table);
	return same;
}
static inline void cow_user_page(struct page *dst, struct page *src,
				 unsigned long va, struct vm_area_struct *vma)
{
	/*
	 * If the source page was a PFN mapping, we don't have
	 * a "struct page" for it. We do a best-effort copy by
	 * just copying from the original user address. If that
	 * fails, we just zero-fill it. Live with it.
	 */
	if (unlikely(!src)) {
		void *kaddr = kmap_atomic(dst, KM_USER0);
		void __user *uaddr = (void __user *)(va & PAGE_MASK);

		/*
		 * This really shouldn't fail, because the page is there
		 * in the page tables. But it might just be unreadable,
		 * in which case we just give up and fill the result with
		 * zeroes.
		 */
		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
			clear_page(kaddr);
		kunmap_atomic(kaddr, KM_USER0);
		flush_dcache_page(dst);
	} else
		copy_user_highpage(dst, src, va, vma);
}
/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * Note that this routine assumes that the protection checks have been
 * done by the caller (the low-level page fault routine in most cases).
 * Thus we can safely just mark it writable once we've done any necessary
 * COW.
 *
 * We also mark the page dirty at this point even though the page will
 * change only once the write actually happens. This avoids a few races,
 * and potentially makes it more efficient.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), with pte both mapped and locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		spinlock_t *ptl, pte_t orig_pte)
	__releases(ptl)
{
	struct page *old_page, *new_page;
	pte_t entry;
	int ret = 0;
	int page_mkwrite = 0;
	struct page *dirty_page = NULL;

	old_page = vm_normal_page(vma, address, orig_pte);
	if (!old_page) {
		/*
		 * VM_MIXEDMAP !pfn_valid() case
		 *
		 * We should not cow pages in a shared writeable mapping.
		 * Just mark the pages writable as we can't do any dirty
		 * accounting on raw pfn maps.
		 */
		if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
				     (VM_WRITE|VM_SHARED))
			goto reuse;
		goto gotten;
	}

	/*
	 * Take out anonymous pages first, anonymous shared vmas are
	 * not dirty accountable.
	 */
	if (PageAnon(old_page) && !PageKsm(old_page)) {
		if (!trylock_page(old_page)) {
			page_cache_get(old_page);
			pte_unmap_unlock(page_table, ptl);
			lock_page(old_page);
			page_table = pte_offset_map_lock(mm, pmd, address,
							 &ptl);
			if (!pte_same(*page_table, orig_pte)) {
				unlock_page(old_page);
				page_cache_release(old_page);
				goto unlock;
			}
			page_cache_release(old_page);
		}
		if (reuse_swap_page(old_page)) {
			/*
			 * The page is all ours. Move it to our anon_vma so
			 * the rmap code will not search our parent or siblings.
			 * Protected against the rmap code by the page lock.
			 */
			page_move_anon_rmap(old_page, vma, address);
			unlock_page(old_page);
			goto reuse;
		}
		unlock_page(old_page);
	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
					(VM_WRITE|VM_SHARED))) {
		/*
		 * Only catch write-faults on shared writable pages,
		 * read-only shared pages can get COWed by
		 * get_user_pages(.write=1, .force=1).
		 */
		if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
			struct vm_fault vmf;
			int tmp;

			vmf.virtual_address = (void __user *)(address &
								PAGE_MASK);
			vmf.pgoff = old_page->index;
			vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
			vmf.page = old_page;

			/*
			 * Notify the address space that the page is about to
			 * become writable so that it can prohibit this or wait
			 * for the page to get into an appropriate state.
			 *
			 * We do this without the lock held, so that it can
			 * sleep if it needs to.
			 */
			page_cache_get(old_page);
			pte_unmap_unlock(page_table, ptl);

			tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
			if (unlikely(tmp &
					(VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
				ret = tmp;
				goto unwritable_page;
			}
			if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
				lock_page(old_page);
				if (!old_page->mapping) {
					ret = 0; /* retry the fault */
					unlock_page(old_page);
					goto unwritable_page;
				}
			} else
				VM_BUG_ON(!PageLocked(old_page));

			/*
			 * Since we dropped the lock we need to revalidate
			 * the PTE as someone else may have changed it. If
			 * they did, we just return, as we can count on the
			 * MMU to tell us if they didn't also make it writable.
			 */
			page_table = pte_offset_map_lock(mm, pmd, address,
							 &ptl);
			if (!pte_same(*page_table, orig_pte)) {
				unlock_page(old_page);
				page_cache_release(old_page);
				goto unlock;
			}

			page_mkwrite = 1;
		}
		dirty_page = old_page;
		get_page(dirty_page);

reuse:
		flush_cache_page(vma, address, pte_pfn(orig_pte));
		entry = pte_mkyoung(orig_pte);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		if (ptep_set_access_flags(vma, address, page_table, entry, 1))
			update_mmu_cache(vma, address, page_table);
		pte_unmap_unlock(page_table, ptl);
		ret |= VM_FAULT_WRITE;

		if (!dirty_page)
			return ret;

		/*
		 * Yes, Virginia, this is actually required to prevent a race
		 * with clear_page_dirty_for_io() from clearing the page dirty
		 * bit after it has cleared all dirty ptes, but before a racing
		 * do_wp_page installs a dirty pte.
		 *
		 * do_no_page is protected similarly.
		 */
		if (!page_mkwrite) {
			wait_on_page_locked(dirty_page);
			set_page_dirty_balance(dirty_page, page_mkwrite);
		}
		put_page(dirty_page);
		if (page_mkwrite) {
			struct address_space *mapping = dirty_page->mapping;

			set_page_dirty(dirty_page);
			unlock_page(dirty_page);
			page_cache_release(dirty_page);
			if (mapping) {
				/*
				 * Some device drivers do not set page.mapping
				 * but still dirty their pages
				 */
				balance_dirty_pages_ratelimited(mapping);
			}
		}

		/* file_update_time outside page_lock */
		if (vma->vm_file)
			file_update_time(vma->vm_file);

		return ret;
	}

	/*
	 * Ok, we need to copy. Oh, well..
	 */
	page_cache_get(old_page);
gotten:
	pte_unmap_unlock(page_table, ptl);

	if (unlikely(anon_vma_prepare(vma)))
		goto oom;

	if (is_zero_pfn(pte_pfn(orig_pte))) {
		new_page = alloc_zeroed_user_highpage_movable(vma, address);
		if (!new_page)
			goto oom;
	} else {
		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
		if (!new_page)
			goto oom;
		cow_user_page(new_page, old_page, address, vma);
	}
	__SetPageUptodate(new_page);

	/*
	 * Don't let another task, with possibly unlocked vma,
	 * keep the mlocked page.
	 */
	if ((vma->vm_flags & VM_LOCKED) && old_page) {
		lock_page(old_page);	/* for LRU manipulation */
		clear_page_mlock(old_page);
		unlock_page(old_page);
	}

	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
		goto oom_free_new;

	/*
	 * Re-check the pte - we dropped the lock
	 */
	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (likely(pte_same(*page_table, orig_pte))) {
		if (old_page) {
			if (!PageAnon(old_page)) {
				dec_mm_counter_fast(mm, MM_FILEPAGES);
				inc_mm_counter_fast(mm, MM_ANONPAGES);
			}
		} else
			inc_mm_counter_fast(mm, MM_ANONPAGES);
		flush_cache_page(vma, address, pte_pfn(orig_pte));
		entry = mk_pte(new_page, vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		/*
		 * Clear the pte entry and flush it first, before updating the
		 * pte with the new entry. This will avoid a race condition
		 * seen in the presence of one thread doing SMC and another
		 * thread doing COW.
		 */
		ptep_clear_flush(vma, address, page_table);
		page_add_new_anon_rmap(new_page, vma, address);
		/*
		 * We call the notify macro here because, when using secondary
		 * mmu page tables (such as kvm shadow page tables), we want the
		 * new page to be mapped directly into the secondary page table.
		 */
		set_pte_at_notify(mm, address, page_table, entry);
		update_mmu_cache(vma, address, page_table);
		if (old_page) {
			/*
			 * Only after switching the pte to the new page may
			 * we remove the mapcount here. Otherwise another
			 * process may come and find the rmap count decremented
			 * before the pte is switched to the new page, and
			 * "reuse" the old page writing into it while our pte
			 * here still points into it and can be read by other
			 * threads.
			 *
			 * The critical issue is to order this
			 * page_remove_rmap with the ptep_clear_flush above.
			 * Those stores are ordered by (if nothing else,)
			 * the barrier present in the atomic_add_negative
			 * in page_remove_rmap.
			 *
			 * Then the TLB flush in ptep_clear_flush ensures that
			 * no process can access the old page before the
			 * decremented mapcount is visible. And the old page
			 * cannot be reused until after the decremented
			 * mapcount is visible. So transitively, TLBs to
			 * old page will be flushed before it can be reused.
			 */
			page_remove_rmap(old_page);
		}

		/* Free the old page.. */
		new_page = old_page;
		ret |= VM_FAULT_WRITE;
	} else
		mem_cgroup_uncharge_page(new_page);

	if (new_page)
		page_cache_release(new_page);
	if (old_page)
		page_cache_release(old_page);
unlock:
	pte_unmap_unlock(page_table, ptl);
	return ret;
oom_free_new:
	page_cache_release(new_page);
oom:
	if (old_page) {
		if (page_mkwrite) {
			unlock_page(old_page);
			page_cache_release(old_page);
		}
		page_cache_release(old_page);
	}
	return VM_FAULT_OOM;

unwritable_page:
	page_cache_release(old_page);
	return ret;
}
/*
 * Helper functions for unmap_mapping_range().
 *
 * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
 *
 * We have to restart searching the prio_tree whenever we drop the lock,
 * since the iterator is only valid while the lock is held, and anyway
 * a later vma might be split and reinserted earlier while lock dropped.
 *
 * The list of nonlinear vmas could be handled more efficiently, using
 * a placeholder, but handle it in the same way until a need is shown.
 * It is important to search the prio_tree before nonlinear list: a vma
 * may become nonlinear and be shifted from prio_tree to nonlinear list
 * while the lock is dropped; but never shifted from list to prio_tree.
 *
 * In order to make forward progress despite restarting the search,
 * vm_truncate_count is used to mark a vma as now dealt with, so we can
 * quickly skip it next time around. Since the prio_tree search only
 * shows us those vmas affected by unmapping the range in question, we
 * can't efficiently keep all vmas in step with mapping->truncate_count:
 * so instead reset them all whenever it wraps back to 0 (then go to 1).
 * mapping->truncate_count and vma->vm_truncate_count are protected by
 * i_mmap_lock.
 *
 * In order to make forward progress despite repeatedly restarting some
 * large vma, note the restart_addr from unmap_vmas when it breaks out:
 * and restart from that address when we reach that vma again. It might
 * have been split or merged, shrunk or extended, but never shifted: so
 * restart_addr remains valid so long as it remains in the vma's range.
 * unmap_mapping_range forces truncate_count to leap over page-aligned
 * values so we can save vma's restart_addr in its truncate_count field.
 */
#define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))

static void reset_vma_truncate_counts(struct address_space *mapping)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
		vma->vm_truncate_count = 0;
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_truncate_count = 0;
}
static int unmap_mapping_range_vma(struct vm_area_struct *vma,
		unsigned long start_addr, unsigned long end_addr,
		struct zap_details *details)
{
	unsigned long restart_addr;
	int need_break;

	/*
	 * files that support invalidating or truncating portions of the
	 * file from under mmaped areas must have their ->fault function
	 * return a locked page (and set VM_FAULT_LOCKED in the return).
	 * This provides synchronisation against concurrent unmapping here.
	 */

again:
	restart_addr = vma->vm_truncate_count;
	if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
		start_addr = restart_addr;
		if (start_addr >= end_addr) {
			/* Top of vma has been split off since last time */
			vma->vm_truncate_count = details->truncate_count;
			return 0;
		}
	}

	restart_addr = zap_page_range(vma, start_addr,
					end_addr - start_addr, details);
	need_break = need_resched() || spin_needbreak(details->i_mmap_lock);

	if (restart_addr >= end_addr) {
		/* We have now completed this vma: mark it so */
		vma->vm_truncate_count = details->truncate_count;
		if (!need_break)
			return 0;
	} else {
		/* Note restart_addr in vma's truncate_count field */
		vma->vm_truncate_count = restart_addr;
		if (!need_break)
			goto again;
	}

	spin_unlock(details->i_mmap_lock);
	cond_resched();
	spin_lock(details->i_mmap_lock);
	return -EINTR;
}
static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
					    struct zap_details *details)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	pgoff_t vba, vea, zba, zea;

restart:
	vma_prio_tree_foreach(vma, &iter, root,
			details->first_index, details->last_index) {
		/* Skip quickly over those we have already dealt with */
		if (vma->vm_truncate_count == details->truncate_count)
			continue;

		vba = vma->vm_pgoff;
		vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
		/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
		zba = details->first_index;
		if (zba < vba)
			zba = vba;
		zea = details->last_index;
		if (zea > vea)
			zea = vea;

		if (unmap_mapping_range_vma(vma,
			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
				details) < 0)
			goto restart;
	}
}

static inline void unmap_mapping_range_list(struct list_head *head,
					    struct zap_details *details)
{
	struct vm_area_struct *vma;

	/*
	 * In nonlinear VMAs there is no correspondence between virtual address
	 * offset and file offset. So we must perform an exhaustive search
	 * across *all* the pages in each nonlinear VMA, not just the pages
	 * whose virtual address lies outside the file truncation point.
	 */
restart:
	list_for_each_entry(vma, head, shared.vm_set.list) {
		/* Skip quickly over those we have already dealt with */
		if (vma->vm_truncate_count == details->truncate_count)
			continue;
		details->nonlinear_vma = vma;
		if (unmap_mapping_range_vma(vma, vma->vm_start,
					vma->vm_end, details) < 0)
			goto restart;
	}
}
/**
 * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
 * @mapping: the address space containing mmaps to be unmapped.
 * @holebegin: byte in first page to unmap, relative to the start of
 * the underlying file. This will be rounded down to a PAGE_SIZE
 * boundary. Note that this is different from truncate_pagecache(), which
 * must keep the partial page. In contrast, we must get rid of
 * partial pages.
 * @holelen: size of prospective hole in bytes. This will be rounded
 * up to a PAGE_SIZE boundary. A holelen of zero truncates to the
 * end of the file.
 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
 * but 0 when invalidating pagecache, don't throw away private data.
 */
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows)
{
	struct zap_details details;
	pgoff_t hba = holebegin >> PAGE_SHIFT;
	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* Check for overflow. */
	if (sizeof(holelen) > sizeof(hlen)) {
		long long holeend =
			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (holeend & ~(long long)ULONG_MAX)
			hlen = ULONG_MAX - hba + 1;
	}

	details.check_mapping = even_cows ? NULL : mapping;
	details.nonlinear_vma = NULL;
	details.first_index = hba;
	details.last_index = hba + hlen - 1;
	if (details.last_index < details.first_index)
		details.last_index = ULONG_MAX;
	details.i_mmap_lock = &mapping->i_mmap_lock;

	spin_lock(&mapping->i_mmap_lock);

	/* Protect against endless unmapping loops */
	mapping->truncate_count++;
	if (unlikely(is_restart_addr(mapping->truncate_count))) {
		if (mapping->truncate_count == 0)
			reset_vma_truncate_counts(mapping);
		mapping->truncate_count++;
	}
	details.truncate_count = mapping->truncate_count;

	if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
		unmap_mapping_range_tree(&mapping->i_mmap, &details);
	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
	spin_unlock(&mapping->i_mmap_lock);
}
EXPORT_SYMBOL(unmap_mapping_range);
int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;

	/*
	 * If the underlying filesystem is not going to provide
	 * a way to truncate a range of blocks (punch a hole) -
	 * we should return failure right now.
	 */
	if (!inode->i_op->truncate_range)
		return -ENOSYS;

	mutex_lock(&inode->i_mutex);
	down_write(&inode->i_alloc_sem);
	unmap_mapping_range(mapping, offset, (end - offset), 1);
	truncate_inode_pages_range(mapping, offset, end);
	unmap_mapping_range(mapping, offset, (end - offset), 1);
	inode->i_op->truncate_range(inode, offset, end);
	up_write(&inode->i_alloc_sem);
	mutex_unlock(&inode->i_mutex);

	return 0;
}
/*
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		unsigned int flags, pte_t orig_pte)
{
	spinlock_t *ptl;
	struct page *page, *swapcache = NULL;
	swp_entry_t entry;
	pte_t pte;
	int locked;
	struct mem_cgroup *ptr = NULL;
	int exclusive = 0;
	int ret = 0;

	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
		goto out;

	entry = pte_to_swp_entry(orig_pte);
	if (unlikely(non_swap_entry(entry))) {
		if (is_migration_entry(entry)) {
			migration_entry_wait(mm, pmd, address);
		} else if (is_hwpoison_entry(entry)) {
			ret = VM_FAULT_HWPOISON;
		} else {
			print_bad_pte(vma, address, orig_pte, NULL);
			ret = VM_FAULT_SIGBUS;
		}
		goto out;
	}
	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
	page = lookup_swap_cache(entry);
	if (!page) {
		grab_swap_token(mm); /* Contend for token _before_ read-in */
		page = swapin_readahead(entry,
					GFP_HIGHUSER_MOVABLE, vma, address);
		if (!page) {
			/*
			 * Back out if somebody else faulted in this pte
			 * while we released the pte lock.
			 */
			page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
			if (likely(pte_same(*page_table, orig_pte)))
				ret = VM_FAULT_OOM;
			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
			goto unlock;
		}

		/* Had to read the page from swap area: Major fault */
		ret = VM_FAULT_MAJOR;
		count_vm_event(PGMAJFAULT);
	} else if (PageHWPoison(page)) {
		/*
		 * hwpoisoned dirty swapcache pages are kept for killing
		 * owner processes (which may be unknown at hwpoison time)
		 */
		ret = VM_FAULT_HWPOISON;
		delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
		goto out_release;
	}

	locked = lock_page_or_retry(page, mm, flags);
	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
	if (!locked) {
		ret |= VM_FAULT_RETRY;
		goto out_release;
	}

	/*
	 * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
	 * release the swapcache from under us. The page pin, and pte_same
	 * test below, are not enough to exclude that. Even if it is still
	 * swapcache, we need to check that the page's swap has not changed.
	 */
	if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
		goto out_page;

	if (ksm_might_need_to_copy(page, vma, address)) {
		swapcache = page;
		page = ksm_does_need_to_copy(page, vma, address);

		if (unlikely(!page)) {
			ret = VM_FAULT_OOM;
			page = swapcache;
			swapcache = NULL;
			goto out_page;
		}
	}

	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
		ret = VM_FAULT_OOM;
		goto out_page;
	}

	/*
	 * Back out if somebody else already faulted in this pte.
	 */
	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (unlikely(!pte_same(*page_table, orig_pte)))
		goto out_nomap;

	if (unlikely(!PageUptodate(page))) {
		ret = VM_FAULT_SIGBUS;
		goto out_nomap;
	}

	/*
	 * The page isn't present yet, go ahead with the fault.
	 *
	 * Be careful about the sequence of operations here.
	 * To get its accounting right, reuse_swap_page() must be called
	 * while the page is counted on swap but not yet in mapcount i.e.
	 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
	 * must be called after the swap_free(), or it will never succeed.
	 * Because delete_from_swap_cache() may be called by reuse_swap_page(),
	 * mem_cgroup_commit_charge_swapin() may not be able to find swp_entry
	 * in page->private. In this case, a record in swap_cgroup is silently
	 * discarded at swap_free().
	 */

	inc_mm_counter_fast(mm, MM_ANONPAGES);
	dec_mm_counter_fast(mm, MM_SWAPENTS);
	pte = mk_pte(page, vma->vm_page_prot);
	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
		flags &= ~FAULT_FLAG_WRITE;
		ret |= VM_FAULT_WRITE;
		exclusive = 1;
	}
	flush_icache_page(vma, page);
	set_pte_at(mm, address, page_table, pte);
	do_page_add_anon_rmap(page, vma, address, exclusive);
	/* It's better to call commit-charge after rmap is established */
	mem_cgroup_commit_charge_swapin(page, ptr);

	swap_free(entry);
	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
		try_to_free_swap(page);
	unlock_page(page);
	if (swapcache) {
		/*
		 * Hold the lock to keep the swap entry from being reused
		 * until we take the PT lock for the pte_same() check
		 * (to avoid false positives from pte_same). For
		 * further safety release the lock after the swap_free
		 * so that the swap count won't change under a
		 * parallel locked swapcache.
		 */
		unlock_page(swapcache);
		page_cache_release(swapcache);
	}

	if (flags & FAULT_FLAG_WRITE) {
		ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
		if (ret & VM_FAULT_ERROR)
			ret &= VM_FAULT_ERROR;
		goto out;
	}

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, page_table);
unlock:
	pte_unmap_unlock(page_table, ptl);
out:
	return ret;
out_nomap:
	mem_cgroup_cancel_charge_swapin(ptr);
	pte_unmap_unlock(page_table, ptl);
out_page:
	unlock_page(page);
out_release:
	page_cache_release(page);
	if (swapcache) {
		unlock_page(swapcache);
		page_cache_release(swapcache);
	}
	return ret;
}
/*
 * This is like a special single-page "expand_{down|up}wards()",
 * except we must first make sure that 'address{-|+}PAGE_SIZE'
 * doesn't hit another vma.
 */
static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
{
	address &= PAGE_MASK;
	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
		struct vm_area_struct *prev = vma->vm_prev;

		/*
		 * Is there a mapping abutting this one below?
		 *
		 * That's only ok if it's the same stack mapping
		 * that has gotten split..
		 */
		if (prev && prev->vm_end == address)
			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;

		expand_stack(vma, address - PAGE_SIZE);
	}
	if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
		struct vm_area_struct *next = vma->vm_next;

		/* As VM_GROWSDOWN but s/below/above/ */
		if (next && next->vm_start == address + PAGE_SIZE)
			return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;

		expand_upwards(vma, address + PAGE_SIZE);
	}
	return 0;
}
/*
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		unsigned int flags)
{
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	pte_unmap(page_table);

	/* Check if we need to add a guard page to the stack */
	if (check_stack_guard_page(vma, address) < 0)
		return VM_FAULT_SIGBUS;

	/* Use the zero-page for reads */
	if (!(flags & FAULT_FLAG_WRITE)) {
		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
						vma->vm_page_prot));
		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
		if (!pte_none(*page_table))
			goto unlock;
		goto setpte;
	}

	/* Allocate our own private page. */
	if (unlikely(anon_vma_prepare(vma)))
		goto oom;
	page = alloc_zeroed_user_highpage_movable(vma, address);
	if (!page)
		goto oom;
	__SetPageUptodate(page);

	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
		goto oom_free_page;

	entry = mk_pte(page, vma->vm_page_prot);
	if (vma->vm_flags & VM_WRITE)
		entry = pte_mkwrite(pte_mkdirty(entry));

	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!pte_none(*page_table))
		goto release;

	inc_mm_counter_fast(mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, address);
setpte:
	set_pte_at(mm, address, page_table, entry);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, page_table);
unlock:
	pte_unmap_unlock(page_table, ptl);
	return 0;
release:
	mem_cgroup_uncharge_page(page);
	page_cache_release(page);
	goto unlock;
oom_free_page:
	page_cache_release(page);
oom:
	return VM_FAULT_OOM;
}
  2635. /*
  2636. * __do_fault() tries to create a new page mapping. It aggressively
  2637. * tries to share with existing pages, but makes a separate copy if
  2638. * the FAULT_FLAG_WRITE is set in the flags parameter in order to avoid
  2639. * the next page fault.
  2640. *
  2641. * As this is called only for pages that do not currently exist, we
  2642. * do not need to flush old virtual caches or the TLB.
  2643. *
  2644. * We enter with non-exclusive mmap_sem (to exclude vma changes,
  2645. * but allow concurrent faults), and pte neither mapped nor locked.
  2646. * We return with mmap_sem still held, but pte unmapped and unlocked.
  2647. */
static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd,
		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
{
	pte_t *page_table;
	spinlock_t *ptl;
	struct page *page;
	pte_t entry;
	int anon = 0;
	int charged = 0;
	struct page *dirty_page = NULL;
	struct vm_fault vmf;
	int ret;
	int page_mkwrite = 0;

	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
	vmf.pgoff = pgoff;
	vmf.flags = flags;
	vmf.page = NULL;

	ret = vma->vm_ops->fault(vma, &vmf);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
			    VM_FAULT_RETRY)))
		return ret;

	if (unlikely(PageHWPoison(vmf.page))) {
		if (ret & VM_FAULT_LOCKED)
			unlock_page(vmf.page);
		return VM_FAULT_HWPOISON;
	}

	/*
	 * For consistency in subsequent calls, make the faulted page always
	 * locked.
	 */
	if (unlikely(!(ret & VM_FAULT_LOCKED)))
		lock_page(vmf.page);
	else
		VM_BUG_ON(!PageLocked(vmf.page));

	/*
	 * Should we do an early C-O-W break?
	 */
	page = vmf.page;
	if (flags & FAULT_FLAG_WRITE) {
		if (!(vma->vm_flags & VM_SHARED)) {
			anon = 1;
			if (unlikely(anon_vma_prepare(vma))) {
				ret = VM_FAULT_OOM;
				goto out;
			}
			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
						vma, address);
			if (!page) {
				ret = VM_FAULT_OOM;
				goto out;
			}
			if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
				ret = VM_FAULT_OOM;
				page_cache_release(page);
				goto out;
			}
			charged = 1;
			/*
			 * Don't let another task, with possibly unlocked vma,
			 * keep the mlocked page.
			 */
			if (vma->vm_flags & VM_LOCKED)
				clear_page_mlock(vmf.page);
			copy_user_highpage(page, vmf.page, address, vma);
			__SetPageUptodate(page);
		} else {
			/*
			 * If the page will be shareable, see if the backing
			 * address space wants to know that the page is about
			 * to become writable
			 */
			if (vma->vm_ops->page_mkwrite) {
				int tmp;

				unlock_page(page);
				vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
				tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
				if (unlikely(tmp &
					  (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
					ret = tmp;
					goto unwritable_page;
				}
				if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
					lock_page(page);
					if (!page->mapping) {
						ret = 0; /* retry the fault */
						unlock_page(page);
						goto unwritable_page;
					}
				} else
					VM_BUG_ON(!PageLocked(page));
				page_mkwrite = 1;
			}
		}
	}

	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);

	/*
	 * This silly early PAGE_DIRTY setting removes a race
	 * due to the bad i386 page protection. But it's valid
	 * for other architectures too.
	 *
	 * Note that if FAULT_FLAG_WRITE is set, we either now have
	 * an exclusive copy of the page, or this is a shared mapping,
	 * so we can make it writable and dirty to avoid having to
	 * handle that later.
	 */
	/* Only go through if we didn't race with anybody else... */
	if (likely(pte_same(*page_table, orig_pte))) {
		flush_icache_page(vma, page);
		entry = mk_pte(page, vma->vm_page_prot);
		if (flags & FAULT_FLAG_WRITE)
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		if (anon) {
			inc_mm_counter_fast(mm, MM_ANONPAGES);
			page_add_new_anon_rmap(page, vma, address);
		} else {
			inc_mm_counter_fast(mm, MM_FILEPAGES);
			page_add_file_rmap(page);
			if (flags & FAULT_FLAG_WRITE) {
				dirty_page = page;
				get_page(dirty_page);
			}
		}
		set_pte_at(mm, address, page_table, entry);

		/* no need to invalidate: a not-present page won't be cached */
		update_mmu_cache(vma, address, page_table);
	} else {
		if (charged)
			mem_cgroup_uncharge_page(page);
		if (anon)
			page_cache_release(page);
		else
			anon = 1; /* no anon but release faulted_page */
	}

	pte_unmap_unlock(page_table, ptl);

out:
	if (dirty_page) {
		struct address_space *mapping = page->mapping;

		if (set_page_dirty(dirty_page))
			page_mkwrite = 1;
		unlock_page(dirty_page);
		put_page(dirty_page);
		if (page_mkwrite && mapping) {
			/*
			 * Some device drivers do not set page.mapping but still
			 * dirty their pages
			 */
			balance_dirty_pages_ratelimited(mapping);
		}

		/* file_update_time outside page_lock */
		if (vma->vm_file)
			file_update_time(vma->vm_file);
	} else {
		unlock_page(vmf.page);
		if (anon)
			page_cache_release(vmf.page);
	}

	return ret;

unwritable_page:
	page_cache_release(page);
	return ret;
}
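/*
 * Illustrative sketch (not part of memory.c): __do_fault() above asks the
 * backing object for the page via vma->vm_ops->fault(). A minimal fault
 * handler, as a driver or filesystem might implement it, could look like
 * the following; example_fault, example_vm_ops and example_lookup_page
 * are hypothetical names, not real kernel symbols.
 */
#if 0	/* example only */
static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;

	/* Look up (or allocate) the page backing file offset vmf->pgoff. */
	page = example_lookup_page(vma->vm_file->f_mapping, vmf->pgoff);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);
	lock_page(page);
	vmf->page = page;
	/* Tell __do_fault() the page is already locked. */
	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct example_vm_ops = {
	.fault	= example_fault,
};
#endif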
static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		unsigned int flags, pte_t orig_pte)
{
	pgoff_t pgoff = (((address & PAGE_MASK)
			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	pte_unmap(page_table);
	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}
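/*
 * Worked example of the pgoff computation above (illustrative numbers):
 * with 4 KB pages, a vma starting at vm_start = 0x400000 that maps the
 * file from page 0x10 (vm_pgoff = 0x10) takes a fault at 0x403000 to
 * ((0x403000 - 0x400000) >> 12) + 0x10 = 0x3 + 0x10 = file page 0x13.
 */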
/*
 * Fault of a previously existing named mapping. Repopulate the pte
 * from the encoded file_pte if possible. This enables swappable
 * nonlinear vmas.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		unsigned int flags, pte_t orig_pte)
{
	pgoff_t pgoff;

	flags |= FAULT_FLAG_NONLINEAR;

	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
		return 0;

	if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
		/*
		 * Page table corrupted: show pte and kill process.
		 */
		print_bad_pte(vma, address, orig_pte, NULL);
		return VM_FAULT_SIGBUS;
	}

	pgoff = pte_to_pgoff(orig_pte);
	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}
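/*
 * Illustrative sketch (not part of memory.c): nonlinear vmas are set up
 * from userspace with remap_file_pages(2), which rearranges which file
 * page backs which address inside an existing MAP_SHARED mapping. A
 * minimal use, assuming fd refers to a file of at least two pages:
 */
#if 0	/* example only, userspace */
#include <sys/mman.h>
#include <unistd.h>

int make_nonlinear(int fd)
{
	long psz = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 2 * psz, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return -1;
	/* Back the first page of the mapping with file page 1 instead of 0. */
	return remap_file_pages(p, psz, 0, 1, 0);
}
#endif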
/*
 * These routines also need to handle stuff like marking pages dirty
 * and/or accessed for architectures that don't do it in hardware (most
 * RISC architectures). The early dirtying is also good on the i386.
 *
 * There is also a hook called "update_mmu_cache()" that architectures
 * with external mmu caches can use to update those (ie the Sparc or
 * PowerPC hashed page tables that act as extended TLBs).
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static inline int handle_pte_fault(struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, pmd_t *pmd, unsigned int flags)
{
	pte_t entry;
	spinlock_t *ptl;

	entry = *pte;
	if (!pte_present(entry)) {
		if (pte_none(entry)) {
			if (vma->vm_ops) {
				if (likely(vma->vm_ops->fault))
					return do_linear_fault(mm, vma, address,
						pte, pmd, flags, entry);
			}
			return do_anonymous_page(mm, vma, address,
						 pte, pmd, flags);
		}
		if (pte_file(entry))
			return do_nonlinear_fault(mm, vma, address,
					pte, pmd, flags, entry);
		return do_swap_page(mm, vma, address,
					pte, pmd, flags, entry);
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (unlikely(!pte_same(*pte, entry)))
		goto unlock;
	if (flags & FAULT_FLAG_WRITE) {
		if (!pte_write(entry))
			return do_wp_page(mm, vma, address,
					pte, pmd, ptl, entry);
		entry = pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	if (ptep_set_access_flags(vma, address, pte, entry,
				  flags & FAULT_FLAG_WRITE)) {
		update_mmu_cache(vma, address, pte);
	} else {
		/*
		 * This is needed only for protection faults but the arch code
		 * is not yet telling us if this is a protection fault or not.
		 * This still avoids useless tlb flushes for .text page faults
		 * with threads.
		 */
		if (flags & FAULT_FLAG_WRITE)
			flush_tlb_fix_spurious_fault(vma, address);
	}
unlock:
	pte_unmap_unlock(pte, ptl);
	return 0;
}
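/*
 * Summary of the dispatch in handle_pte_fault() above:
 *
 *	pte state			handler
 *	----------------------------	-------------------------------
 *	none, vma has ->fault		do_linear_fault()
 *	none, anonymous vma		do_anonymous_page()
 *	not present, file pte		do_nonlinear_fault()
 *	not present, swap entry		do_swap_page()
 *	present, write to r/o pte	do_wp_page() (COW / mkwrite)
 *	present otherwise		mark young/dirty in place
 */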
/*
 * By the time we get here, we already hold the mm semaphore
 */
int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, unsigned int flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	__set_current_state(TASK_RUNNING);

	count_vm_event(PGFAULT);

	/* do counter updates before entering really critical section. */
	check_sync_rss_stat(current);

	if (unlikely(is_vm_hugetlb_page(vma)))
		return hugetlb_fault(mm, vma, address, flags);

	pgd = pgd_offset(mm, address);
	pud = pud_alloc(mm, pgd, address);
	if (!pud)
		return VM_FAULT_OOM;
	pmd = pmd_alloc(mm, pud, address);
	if (!pmd)
		return VM_FAULT_OOM;
	pte = pte_alloc_map(mm, vma, pmd, address);
	if (!pte)
		return VM_FAULT_OOM;

	return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}
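/*
 * Note on the walk in handle_mm_fault() above: the fault path allocates
 * any missing intermediate levels on the way down (pgd entry -> pud ->
 * pmd -> pte). On configurations where a level is folded (see the
 * __PAGETABLE_*_FOLDED guards below), pud_alloc()/pmd_alloc() collapse
 * to trivial inlines that hand back the entry one level up, so this one
 * code path walks two-, three- and four-level page tables alike.
 */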
#ifndef __PAGETABLE_PUD_FOLDED
/*
 * Allocate page upper directory.
 * We've already handled the fast-path in-line.
 */
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	pud_t *new = pud_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */
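	/*
	 * The barrier above (like the one in __pte_alloc) orders the
	 * initialization of the new table against its publication below:
	 * once pgd_populate() makes it visible, CPUs walking the page
	 * tables without page_table_lock must see fully initialized
	 * entries rather than uninitialized memory.
	 */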
	spin_lock(&mm->page_table_lock);
	if (pgd_present(*pgd))		/* Another has populated it */
		pud_free(mm, new);
	else
		pgd_populate(mm, pgd, new);
	spin_unlock(&mm->page_table_lock);
	return 0;
}
#endif /* __PAGETABLE_PUD_FOLDED */
#ifndef __PAGETABLE_PMD_FOLDED
/*
 * Allocate page middle directory.
 * We've already handled the fast-path in-line.
 */
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	pmd_t *new = pmd_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&mm->page_table_lock);
#ifndef __ARCH_HAS_4LEVEL_HACK
	if (pud_present(*pud))		/* Another has populated it */
		pmd_free(mm, new);
	else
		pud_populate(mm, pud, new);
#else
	if (pgd_present(*pud))		/* Another has populated it */
		pmd_free(mm, new);
	else
		pgd_populate(mm, pud, new);
#endif /* __ARCH_HAS_4LEVEL_HACK */
	spin_unlock(&mm->page_table_lock);
	return 0;
}
#endif /* __PAGETABLE_PMD_FOLDED */
int make_pages_present(unsigned long addr, unsigned long end)
{
	int ret, len, write;
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	if (!vma)
		return -ENOMEM;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	write = (vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE;
	BUG_ON(addr >= end);
	BUG_ON(end > vma->vm_end);
	len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
	ret = get_user_pages(current, current->mm, addr,
			len, write, 0, NULL, NULL);
	if (ret < 0)
		return ret;
	return ret == len ? 0 : -EFAULT;
}
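/*
 * Illustrative sketch (not part of memory.c): make_pages_present() is
 * the kind of helper mlock-style paths use to fault an entire range in
 * up front. A hypothetical caller, assuming the vma bounds were already
 * validated under mmap_sem:
 */
#if 0	/* example only */
static int example_populate_vma(struct vm_area_struct *vma)
{
	/* Faults in every page of the vma, breaking COW where needed. */
	return make_pages_present(vma->vm_start, vma->vm_end);
}
#endif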
#if !defined(__HAVE_ARCH_GATE_AREA)

#if defined(AT_SYSINFO_EHDR)
static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
	gate_vma.vm_mm = NULL;
	gate_vma.vm_start = FIXADDR_USER_START;
	gate_vma.vm_end = FIXADDR_USER_END;
	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
	gate_vma.vm_page_prot = __P101;
	/*
	 * Make sure the vDSO gets into every core dump.
	 * Dumping its contents makes post-mortem fully interpretable later
	 * without matching up the same kernel and hardware config to see
	 * what PC values meant.
	 */
	gate_vma.vm_flags |= VM_ALWAYSDUMP;
	return 0;
}
__initcall(gate_vma_init);
#endif

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef AT_SYSINFO_EHDR
	return &gate_vma;
#else
	return NULL;
#endif
}

int in_gate_area_no_task(unsigned long addr)
{
#ifdef AT_SYSINFO_EHDR
	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
		return 1;
#endif
	return 0;
}

#endif	/* __HAVE_ARCH_GATE_AREA */
static int __follow_pte(struct mm_struct *mm, unsigned long address,
		pte_t **ptepp, spinlock_t **ptlp)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto out;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		goto out;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		goto out;

	/* We cannot handle huge page PFN maps. Luckily they don't exist. */
	if (pmd_huge(*pmd))
		goto out;

	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
	if (!ptep)
		goto out;
	if (!pte_present(*ptep))
		goto unlock;
	*ptepp = ptep;
	return 0;
unlock:
	pte_unmap_unlock(ptep, *ptlp);
out:
	return -EINVAL;
}

static inline int follow_pte(struct mm_struct *mm, unsigned long address,
			     pte_t **ptepp, spinlock_t **ptlp)
{
	int res;

	/* (void) is needed to make gcc happy */
	(void) __cond_lock(*ptlp,
			   !(res = __follow_pte(mm, address, ptepp, ptlp)));
	return res;
}
/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 */
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn)
{
	int ret = -EINVAL;
	spinlock_t *ptl;
	pte_t *ptep;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return ret;

	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
	if (ret)
		return ret;
	*pfn = pte_pfn(*ptep);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(follow_pfn);
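/*
 * Illustrative sketch (not part of memory.c): follow_pfn() is exported
 * for drivers that need the physical frame behind a VM_IO/VM_PFNMAP
 * user address, e.g. to translate a userspace buffer for a device. A
 * hypothetical caller, which must hold mmap_sem across the lookup:
 */
#if 0	/* example only */
static int example_user_addr_to_pfn(struct mm_struct *mm,
				    unsigned long uaddr, unsigned long *pfn)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, uaddr);
	if (vma && uaddr >= vma->vm_start)
		ret = follow_pfn(vma, uaddr, pfn);
	up_read(&mm->mmap_sem);
	return ret;
}
#endif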
#ifdef CONFIG_HAVE_IOREMAP_PROT
int follow_phys(struct vm_area_struct *vma,
		unsigned long address, unsigned int flags,
		unsigned long *prot, resource_size_t *phys)
{
	int ret = -EINVAL;
	pte_t *ptep, pte;
	spinlock_t *ptl;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out;

	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
		goto out;
	pte = *ptep;

	if ((flags & FOLL_WRITE) && !pte_write(pte))
		goto unlock;

	*prot = pgprot_val(pte_pgprot(pte));
	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;

	ret = 0;
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return ret;
}

int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write)
{
	resource_size_t phys_addr;
	unsigned long prot = 0;
	void __iomem *maddr;
	int offset = addr & (PAGE_SIZE-1);

	if (follow_phys(vma, addr, write, &prot, &phys_addr))
		return -EINVAL;

	maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
	if (write)
		memcpy_toio(maddr + offset, buf, len);
	else
		memcpy_fromio(buf, maddr + offset, len);
	iounmap(maddr);

	return len;
}
#endif
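/*
 * Illustrative sketch (not part of memory.c): generic_access_phys() is
 * meant to be plugged into a PFN-mapping driver's vm_operations_struct,
 * so that access_process_vm() below (and thus ptrace and /proc/pid/mem)
 * can reach VM_IO memory through the ->access hook. The mmap handler
 * name here is hypothetical:
 */
#if 0	/* example only */
static const struct vm_operations_struct example_phys_vm_ops = {
	.access	= generic_access_phys,
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &example_phys_vm_ops;
	/* ... set up the PFN mapping with remap_pfn_range() etc. ... */
	return 0;
}
#endif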
/*
 * Access another process' address space.
 * Source/target buffer must be kernel space.
 * Do not walk the page table directly; use get_user_pages().
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	void *old_buf = buf;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);
	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, ret, offset;
		void *maddr;
		struct page *page = NULL;

		ret = get_user_pages(tsk, mm, addr, 1,
				write, 1, &page, &vma);
		if (ret <= 0) {
			/*
			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
			 * we can access using slightly different code.
			 */
#ifdef CONFIG_HAVE_IOREMAP_PROT
			vma = find_vma(mm, addr);
			if (!vma)
				break;
			if (vma->vm_ops && vma->vm_ops->access)
				ret = vma->vm_ops->access(vma, addr, buf,
							  len, write);
			if (ret <= 0)
#endif
				break;
			bytes = ret;
		} else {
			bytes = len;
			offset = addr & (PAGE_SIZE-1);
			if (bytes > PAGE_SIZE-offset)
				bytes = PAGE_SIZE-offset;

			maddr = kmap(page);
			if (write) {
				copy_to_user_page(vma, page, addr,
						  maddr + offset, buf, bytes);
				set_page_dirty_lock(page);
			} else {
				copy_from_user_page(vma, page, addr,
						    buf, maddr + offset, bytes);
			}
			kunmap(page);
			page_cache_release(page);
		}
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);
	mmput(mm);

	return buf - old_buf;
}
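/*
 * Illustrative sketch (not part of memory.c): access_process_vm() is the
 * workhorse behind ptrace peek/poke. A hypothetical helper reading one
 * word from another task might look like this:
 */
#if 0	/* example only */
static int example_peek_word(struct task_struct *child,
			     unsigned long addr, unsigned long *val)
{
	int copied;

	copied = access_process_vm(child, addr, val, sizeof(*val), 0);
	return copied == sizeof(*val) ? 0 : -EIO;
}
#endif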
/*
 * Print the name of a VMA.
 */
void print_vma_addr(char *prefix, unsigned long ip)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	/*
	 * Do not print if we are in atomic
	 * contexts (in exception stacks, etc.):
	 */
	if (preempt_count())
		return;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ip);
	if (vma && vma->vm_file) {
		struct file *f = vma->vm_file;
		char *buf = (char *)__get_free_page(GFP_KERNEL);
		if (buf) {
			char *p, *s;

			p = d_path(&f->f_path, buf, PAGE_SIZE);
			if (IS_ERR(p))
				p = "?";
			s = strrchr(p, '/');
			if (s)
				p = s+1;
			printk("%s%s[%lx+%lx]", prefix, p,
					vma->vm_start,
					vma->vm_end - vma->vm_start);
			free_page((unsigned long)buf);
		}
	}
	up_read(&mm->mmap_sem);
}
#ifdef CONFIG_PROVE_LOCKING
void might_fault(void)
{
	/*
	 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
	 * holding the mmap_sem, this is safe because kernel memory doesn't
	 * get paged out, therefore we'll never actually fault, and the
	 * below annotations will generate false positives.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		return;

	might_sleep();
	/*
	 * it would be nicer only to annotate paths which are not under
	 * pagefault_disable, however that requires a larger audit and
	 * providing helpers like get_user_atomic.
	 */
	if (!in_atomic() && current->mm)
		might_lock_read(&current->mm->mmap_sem);
}
EXPORT_SYMBOL(might_fault);
#endif
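/*
 * Illustrative sketch (not part of memory.c): might_fault() is called at
 * the top of sleepable user-access helpers so that lockdep sees a
 * potential mmap_sem acquisition and can report unsafe lock nesting in
 * the caller even when no fault actually occurs. A hypothetical copy
 * helper annotated this way:
 */
#if 0	/* example only */
static unsigned long example_copy_in(void *to,
		const void __user *from, unsigned long n)
{
	might_fault();	/* may sleep and take mmap_sem on a fault */
	return copy_from_user(to, from, n);
}
#endif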