memory.c 59 KB

  1. /*
  2. * linux/mm/memory.c
  3. *
  4. * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
  5. */
  6. /*
  7. * demand-loading started 01.12.91 - seems it is high on the list of
  8. * things wanted, and it should be easy to implement. - Linus
  9. */
  10. /*
  11. * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
  12. * pages started 02.12.91, seems to work. - Linus.
  13. *
  14. * Tested sharing by executing about 30 /bin/sh: under the old kernel it
  15. * would have taken more than the 6M I have free, but it worked well as
  16. * far as I could see.
  17. *
  18. * Also corrected some "invalidate()"s - I wasn't doing enough of them.
  19. */
  20. /*
  21. * Real VM (paging to/from disk) started 18.12.91. Much more work and
  22. * thought has to go into this. Oh, well..
  23. * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
  24. * Found it. Everything seems to work now.
  25. * 20.12.91 - Ok, making the swap-device changeable like the root.
  26. */
  27. /*
  28. * 05.04.94 - Multi-page memory management added for v1.1.
  29. * Idea by Alex Bligh (alex@cconcepts.co.uk)
  30. *
  31. * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
  32. * (Gerhard.Wichert@pdb.siemens.de)
  33. *
  34. * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
  35. */
  36. #include <linux/kernel_stat.h>
  37. #include <linux/mm.h>
  38. #include <linux/hugetlb.h>
  39. #include <linux/mman.h>
  40. #include <linux/swap.h>
  41. #include <linux/highmem.h>
  42. #include <linux/pagemap.h>
  43. #include <linux/rmap.h>
  44. #include <linux/module.h>
  45. #include <linux/init.h>
  46. #include <asm/pgalloc.h>
  47. #include <asm/uaccess.h>
  48. #include <asm/tlb.h>
  49. #include <asm/tlbflush.h>
  50. #include <asm/pgtable.h>
  51. #include <linux/swapops.h>
  52. #include <linux/elf.h>
  53. #ifndef CONFIG_NEED_MULTIPLE_NODES
  54. /* use the per-pgdat data instead for discontigmem - mbligh */
  55. unsigned long max_mapnr;
  56. struct page *mem_map;
  57. EXPORT_SYMBOL(max_mapnr);
  58. EXPORT_SYMBOL(mem_map);
  59. #endif
  60. unsigned long num_physpages;
  61. /*
  62. * A number of key systems in x86 including ioremap() rely on the assumption
  63. * that high_memory defines the upper bound on direct map memory, the end
  64. * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
  65. * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
  66. * and ZONE_HIGHMEM.
  67. */
  68. void * high_memory;
  69. unsigned long vmalloc_earlyreserve;
  70. EXPORT_SYMBOL(num_physpages);
  71. EXPORT_SYMBOL(high_memory);
  72. EXPORT_SYMBOL(vmalloc_earlyreserve);
  73. /*
  74. * If a p?d_bad entry is found while walking page tables, report
  75. * the error, before resetting entry to p?d_none. Usually (but
  76. * very seldom) called out from the p?d_none_or_clear_bad macros.
  77. */
  78. void pgd_clear_bad(pgd_t *pgd)
  79. {
  80. pgd_ERROR(*pgd);
  81. pgd_clear(pgd);
  82. }
  83. void pud_clear_bad(pud_t *pud)
  84. {
  85. pud_ERROR(*pud);
  86. pud_clear(pud);
  87. }
  88. void pmd_clear_bad(pmd_t *pmd)
  89. {
  90. pmd_ERROR(*pmd);
  91. pmd_clear(pmd);
  92. }
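/*
 * These three error helpers are reached through the p?d_none_or_clear_bad()
 * checks used by every page-table walker below.  As a rough sketch of how
 * such a check behaves (the real inline lives in the page-table headers,
 * not in this file; this is only an approximation for illustration):
 *
 *	static inline int pmd_none_or_clear_bad(pmd_t *pmd)
 *	{
 *		if (pmd_none(*pmd))
 *			return 1;
 *		if (unlikely(pmd_bad(*pmd))) {
 *			pmd_clear_bad(pmd);
 *			return 1;
 *		}
 *		return 0;
 *	}
 *
 * An empty entry is simply skipped; a corrupt one is reported via
 * pmd_clear_bad() and then treated as empty.
 */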
  93. /*
  94. * Note: this doesn't free the actual pages themselves. That
  95. * has been handled earlier when unmapping all the memory regions.
  96. */
  97. static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
  98. {
  99. struct page *page = pmd_page(*pmd);
  100. pmd_clear(pmd);
  101. pte_free_tlb(tlb, page);
  102. dec_page_state(nr_page_table_pages);
  103. tlb->mm->nr_ptes--;
  104. }
  105. static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
  106. unsigned long addr, unsigned long end,
  107. unsigned long floor, unsigned long ceiling)
  108. {
  109. pmd_t *pmd;
  110. unsigned long next;
  111. unsigned long start;
  112. start = addr;
  113. pmd = pmd_offset(pud, addr);
  114. do {
  115. next = pmd_addr_end(addr, end);
  116. if (pmd_none_or_clear_bad(pmd))
  117. continue;
  118. free_pte_range(tlb, pmd);
  119. } while (pmd++, addr = next, addr != end);
  120. start &= PUD_MASK;
  121. if (start < floor)
  122. return;
  123. if (ceiling) {
  124. ceiling &= PUD_MASK;
  125. if (!ceiling)
  126. return;
  127. }
  128. if (end - 1 > ceiling - 1)
  129. return;
  130. pmd = pmd_offset(pud, start);
  131. pud_clear(pud);
  132. pmd_free_tlb(tlb, pmd);
  133. }
  134. static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
  135. unsigned long addr, unsigned long end,
  136. unsigned long floor, unsigned long ceiling)
  137. {
  138. pud_t *pud;
  139. unsigned long next;
  140. unsigned long start;
  141. start = addr;
  142. pud = pud_offset(pgd, addr);
  143. do {
  144. next = pud_addr_end(addr, end);
  145. if (pud_none_or_clear_bad(pud))
  146. continue;
  147. free_pmd_range(tlb, pud, addr, next, floor, ceiling);
  148. } while (pud++, addr = next, addr != end);
  149. start &= PGDIR_MASK;
  150. if (start < floor)
  151. return;
  152. if (ceiling) {
  153. ceiling &= PGDIR_MASK;
  154. if (!ceiling)
  155. return;
  156. }
  157. if (end - 1 > ceiling - 1)
  158. return;
  159. pud = pud_offset(pgd, start);
  160. pgd_clear(pgd);
  161. pud_free_tlb(tlb, pud);
  162. }
  163. /*
  164. * This function frees user-level page tables of a process.
  165. *
  166. * Must be called with pagetable lock held.
  167. */
  168. void free_pgd_range(struct mmu_gather **tlb,
  169. unsigned long addr, unsigned long end,
  170. unsigned long floor, unsigned long ceiling)
  171. {
  172. pgd_t *pgd;
  173. unsigned long next;
  174. unsigned long start;
  175. /*
  176. * The next few lines have given us lots of grief...
  177. *
  178. * Why are we testing PMD* at this top level? Because often
  179. * there will be no work to do at all, and we'd prefer not to
  180. * go all the way down to the bottom just to discover that.
  181. *
  182. * Why all these "- 1"s? Because 0 represents both the bottom
  183. * of the address space and the top of it (using -1 for the
  184. * top wouldn't help much: the masks would do the wrong thing).
  185. * The rule is that addr 0 and floor 0 refer to the bottom of
  186. * the address space, but end 0 and ceiling 0 refer to the top.
  187. * Comparisons need to use "end - 1" and "ceiling - 1" (though
  188. * that end 0 case should be mythical).
  189. *
  190. * Wherever addr is brought up or ceiling brought down, we must
  191. * be careful to reject "the opposite 0" before it confuses the
  192. * subsequent tests. But what about where end is brought down
  193. * by PMD_SIZE below? no, end can't go down to 0 there.
  194. *
  195. * Whereas we round start (addr) and ceiling down, by different
  196. * masks at different levels, in order to test whether a table
  197. * now has no other vmas using it, so can be freed, we don't
  198. * bother to round floor or end up - the tests don't need that.
  199. */
  200. addr &= PMD_MASK;
  201. if (addr < floor) {
  202. addr += PMD_SIZE;
  203. if (!addr)
  204. return;
  205. }
  206. if (ceiling) {
  207. ceiling &= PMD_MASK;
  208. if (!ceiling)
  209. return;
  210. }
  211. if (end - 1 > ceiling - 1)
  212. end -= PMD_SIZE;
  213. if (addr > end - 1)
  214. return;
  215. start = addr;
  216. pgd = pgd_offset((*tlb)->mm, addr);
  217. do {
  218. next = pgd_addr_end(addr, end);
  219. if (pgd_none_or_clear_bad(pgd))
  220. continue;
  221. free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
  222. } while (pgd++, addr = next, addr != end);
  223. if (!(*tlb)->fullmm)
  224. flush_tlb_pgtables((*tlb)->mm, start, end);
  225. }
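/*
 * A worked example of the rounding above, assuming 4K pages and a 2MB pmd
 * span (x86-64-like numbers, purely illustrative): with addr = 0x700000 and
 * floor = 0x6ff000, "addr &= PMD_MASK" gives 0x600000, which is below floor,
 * so addr is bumped to 0x800000 - the pte page covering [0x600000, 0x800000)
 * is left in place because the neighbouring vma below floor may still map
 * through it.  A ceiling of 0 means "top of the address space": the test
 * "end - 1 > ceiling - 1" then compares against ULONG_MAX and never pulls
 * end back.
 */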
  226. void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
  227. unsigned long floor, unsigned long ceiling)
  228. {
  229. while (vma) {
  230. struct vm_area_struct *next = vma->vm_next;
  231. unsigned long addr = vma->vm_start;
  232. if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
  233. hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
  234. floor, next? next->vm_start: ceiling);
  235. } else {
  236. /*
  237. * Optimization: gather nearby vmas into one call down
  238. */
  239. while (next && next->vm_start <= vma->vm_end + PMD_SIZE
  240. && !is_hugepage_only_range(vma->vm_mm, next->vm_start,
  241. HPAGE_SIZE)) {
  242. vma = next;
  243. next = vma->vm_next;
  244. }
  245. free_pgd_range(tlb, addr, vma->vm_end,
  246. floor, next? next->vm_start: ceiling);
  247. }
  248. vma = next;
  249. }
  250. }
  251. pte_t fastcall *pte_alloc_map(struct mm_struct *mm, pmd_t *pmd,
  252. unsigned long address)
  253. {
  254. if (!pmd_present(*pmd)) {
  255. struct page *new;
  256. spin_unlock(&mm->page_table_lock);
  257. new = pte_alloc_one(mm, address);
  258. spin_lock(&mm->page_table_lock);
  259. if (!new)
  260. return NULL;
  261. /*
  262. * Because we dropped the lock, we should re-check the
  263. * entry, as somebody else could have populated it..
  264. */
  265. if (pmd_present(*pmd)) {
  266. pte_free(new);
  267. goto out;
  268. }
  269. mm->nr_ptes++;
  270. inc_page_state(nr_page_table_pages);
  271. pmd_populate(mm, pmd, new);
  272. }
  273. out:
  274. return pte_offset_map(pmd, address);
  275. }
  276. pte_t fastcall * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
  277. {
  278. if (!pmd_present(*pmd)) {
  279. pte_t *new;
  280. spin_unlock(&mm->page_table_lock);
  281. new = pte_alloc_one_kernel(mm, address);
  282. spin_lock(&mm->page_table_lock);
  283. if (!new)
  284. return NULL;
  285. /*
  286. * Because we dropped the lock, we should re-check the
  287. * entry, as somebody else could have populated it..
  288. */
  289. if (pmd_present(*pmd)) {
  290. pte_free_kernel(new);
  291. goto out;
  292. }
  293. pmd_populate_kernel(mm, pmd, new);
  294. }
  295. out:
  296. return pte_offset_kernel(pmd, address);
  297. }
  298. static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
  299. {
  300. if (file_rss)
  301. add_mm_counter(mm, file_rss, file_rss);
  302. if (anon_rss)
  303. add_mm_counter(mm, anon_rss, anon_rss);
  304. }
  305. #define NO_RSS 2 /* Increment neither file_rss nor anon_rss */
  306. /*
  307. * copy one vm_area from one task to the other. Assumes the page tables
  308. * already present in the new task to be cleared in the whole range
  309. * covered by this vma.
  310. *
  311. * dst->page_table_lock is held on entry and exit,
  312. * but may be dropped within p[mg]d_alloc() and pte_alloc_map().
  313. */
  314. static inline int
  315. copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  316. pte_t *dst_pte, pte_t *src_pte, unsigned long vm_flags,
  317. unsigned long addr)
  318. {
  319. pte_t pte = *src_pte;
  320. struct page *page;
  321. unsigned long pfn;
  322. int anon = NO_RSS;
  323. /* pte contains position in swap or file, so copy. */
  324. if (unlikely(!pte_present(pte))) {
  325. if (!pte_file(pte)) {
  326. swap_duplicate(pte_to_swp_entry(pte));
  327. /* make sure dst_mm is on swapoff's mmlist. */
  328. if (unlikely(list_empty(&dst_mm->mmlist))) {
  329. spin_lock(&mmlist_lock);
  330. list_add(&dst_mm->mmlist, &src_mm->mmlist);
  331. spin_unlock(&mmlist_lock);
  332. }
  333. }
  334. goto out_set_pte;
  335. }
  336. pfn = pte_pfn(pte);
  337. /* If the pte points outside of valid memory, the
  338. * mapping is assumed to be good, meaningful
  339. * and not mapped via rmap - duplicate the
  340. * mapping as is.
  341. */
  342. page = NULL;
  343. if (pfn_valid(pfn))
  344. page = pfn_to_page(pfn);
  345. if (!page || PageReserved(page))
  346. goto out_set_pte;
  347. /*
  348. * If it's a COW mapping, write protect it both
  349. * in the parent and the child
  350. */
  351. if ((vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE) {
  352. ptep_set_wrprotect(src_mm, addr, src_pte);
  353. pte = *src_pte;
  354. }
  355. /*
  356. * If it's a shared mapping, mark it clean in
  357. * the child
  358. */
  359. if (vm_flags & VM_SHARED)
  360. pte = pte_mkclean(pte);
  361. pte = pte_mkold(pte);
  362. get_page(page);
  363. page_dup_rmap(page);
  364. anon = !!PageAnon(page);
  365. out_set_pte:
  366. set_pte_at(dst_mm, addr, dst_pte, pte);
  367. return anon;
  368. }
  369. static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  370. pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
  371. unsigned long addr, unsigned long end)
  372. {
  373. pte_t *src_pte, *dst_pte;
  374. unsigned long vm_flags = vma->vm_flags;
  375. int progress = 0;
  376. int rss[NO_RSS+1], anon;
  377. again:
  378. rss[1] = rss[0] = 0;
  379. dst_pte = pte_alloc_map(dst_mm, dst_pmd, addr);
  380. if (!dst_pte)
  381. return -ENOMEM;
  382. src_pte = pte_offset_map_nested(src_pmd, addr);
  383. spin_lock(&src_mm->page_table_lock);
  384. do {
  385. /*
  386. * We are holding two locks at this point - either of them
  387. * could generate latencies in another task on another CPU.
  388. */
  389. if (progress >= 32) {
  390. progress = 0;
  391. if (need_resched() ||
  392. need_lockbreak(&src_mm->page_table_lock) ||
  393. need_lockbreak(&dst_mm->page_table_lock))
  394. break;
  395. }
  396. if (pte_none(*src_pte)) {
  397. progress++;
  398. continue;
  399. }
  400. anon = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
  401. vm_flags, addr);
  402. rss[anon]++;
  403. progress += 8;
  404. } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
  405. spin_unlock(&src_mm->page_table_lock);
  406. pte_unmap_nested(src_pte - 1);
  407. pte_unmap(dst_pte - 1);
  408. add_mm_rss(dst_mm, rss[0], rss[1]);
  409. cond_resched_lock(&dst_mm->page_table_lock);
  410. if (addr != end)
  411. goto again;
  412. return 0;
  413. }
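/*
 * The rss[] bookkeeping above is a batching trick: copy_one_pte() returns
 * 0 for a file-backed page, 1 for an anonymous page and NO_RSS (2) when
 * nothing was counted, so "rss[anon]++" tallies the two interesting cases
 * locally and add_mm_rss() folds rss[0]/rss[1] into the mm counters once
 * per batch rather than once per pte; rss[NO_RSS] is only a sink and is
 * never read back.
 */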
  414. static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  415. pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
  416. unsigned long addr, unsigned long end)
  417. {
  418. pmd_t *src_pmd, *dst_pmd;
  419. unsigned long next;
  420. dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
  421. if (!dst_pmd)
  422. return -ENOMEM;
  423. src_pmd = pmd_offset(src_pud, addr);
  424. do {
  425. next = pmd_addr_end(addr, end);
  426. if (pmd_none_or_clear_bad(src_pmd))
  427. continue;
  428. if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
  429. vma, addr, next))
  430. return -ENOMEM;
  431. } while (dst_pmd++, src_pmd++, addr = next, addr != end);
  432. return 0;
  433. }
  434. static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  435. pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
  436. unsigned long addr, unsigned long end)
  437. {
  438. pud_t *src_pud, *dst_pud;
  439. unsigned long next;
  440. dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
  441. if (!dst_pud)
  442. return -ENOMEM;
  443. src_pud = pud_offset(src_pgd, addr);
  444. do {
  445. next = pud_addr_end(addr, end);
  446. if (pud_none_or_clear_bad(src_pud))
  447. continue;
  448. if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
  449. vma, addr, next))
  450. return -ENOMEM;
  451. } while (dst_pud++, src_pud++, addr = next, addr != end);
  452. return 0;
  453. }
  454. int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
  455. struct vm_area_struct *vma)
  456. {
  457. pgd_t *src_pgd, *dst_pgd;
  458. unsigned long next;
  459. unsigned long addr = vma->vm_start;
  460. unsigned long end = vma->vm_end;
  461. /*
  462. * Don't copy ptes where a page fault will fill them correctly.
  463. * Fork becomes much lighter when there are big shared or private
  464. * readonly mappings. The tradeoff is that copy_page_range is more
  465. * efficient than faulting.
  466. */
  467. if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_RESERVED))) {
  468. if (!vma->anon_vma)
  469. return 0;
  470. }
  471. if (is_vm_hugetlb_page(vma))
  472. return copy_hugetlb_page_range(dst_mm, src_mm, vma);
  473. dst_pgd = pgd_offset(dst_mm, addr);
  474. src_pgd = pgd_offset(src_mm, addr);
  475. do {
  476. next = pgd_addr_end(addr, end);
  477. if (pgd_none_or_clear_bad(src_pgd))
  478. continue;
  479. if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
  480. vma, addr, next))
  481. return -ENOMEM;
  482. } while (dst_pgd++, src_pgd++, addr = next, addr != end);
  483. return 0;
  484. }
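/*
 * copy_page_range() is the fork-time entry point: dup_mmap() walks the
 * parent's vma list and copies the page tables for each freshly duplicated
 * vma.  Very roughly (a simplified, paraphrased sketch of that caller;
 * duplicate_vma() is a hypothetical stand-in for the real vma setup):
 *
 *	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
 *		tmp = duplicate_vma(mm, mpnt);
 *		if (copy_page_range(mm, oldmm, tmp))
 *			goto fail;
 *	}
 *
 * The early "return 0" above for vmas without an anon_vma is what makes
 * fork cheap for big read-only or untouched mappings: those ptes are left
 * to be filled in later by the child's own page faults.
 */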
  485. static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
  486. unsigned long addr, unsigned long end,
  487. struct zap_details *details)
  488. {
  489. pte_t *pte;
  490. int file_rss = 0;
  491. int anon_rss = 0;
  492. pte = pte_offset_map(pmd, addr);
  493. do {
  494. pte_t ptent = *pte;
  495. if (pte_none(ptent))
  496. continue;
  497. if (pte_present(ptent)) {
  498. struct page *page = NULL;
  499. unsigned long pfn = pte_pfn(ptent);
  500. if (pfn_valid(pfn)) {
  501. page = pfn_to_page(pfn);
  502. if (PageReserved(page))
  503. page = NULL;
  504. }
  505. if (unlikely(details) && page) {
  506. /*
  507. * unmap_shared_mapping_pages() wants to
  508. * invalidate cache without truncating:
  509. * unmap shared but keep private pages.
  510. */
  511. if (details->check_mapping &&
  512. details->check_mapping != page->mapping)
  513. continue;
  514. /*
  515. * Each page->index must be checked when
  516. * invalidating or truncating nonlinear.
  517. */
  518. if (details->nonlinear_vma &&
  519. (page->index < details->first_index ||
  520. page->index > details->last_index))
  521. continue;
  522. }
  523. ptent = ptep_get_and_clear_full(tlb->mm, addr, pte,
  524. tlb->fullmm);
  525. tlb_remove_tlb_entry(tlb, pte, addr);
  526. if (unlikely(!page))
  527. continue;
  528. if (unlikely(details) && details->nonlinear_vma
  529. && linear_page_index(details->nonlinear_vma,
  530. addr) != page->index)
  531. set_pte_at(tlb->mm, addr, pte,
  532. pgoff_to_pte(page->index));
  533. if (PageAnon(page))
  534. anon_rss++;
  535. else {
  536. if (pte_dirty(ptent))
  537. set_page_dirty(page);
  538. if (pte_young(ptent))
  539. mark_page_accessed(page);
  540. file_rss++;
  541. }
  542. page_remove_rmap(page);
  543. tlb_remove_page(tlb, page);
  544. continue;
  545. }
  546. /*
  547. * If details->check_mapping, we leave swap entries;
  548. * if details->nonlinear_vma, we leave file entries.
  549. */
  550. if (unlikely(details))
  551. continue;
  552. if (!pte_file(ptent))
  553. free_swap_and_cache(pte_to_swp_entry(ptent));
  554. pte_clear_full(tlb->mm, addr, pte, tlb->fullmm);
  555. } while (pte++, addr += PAGE_SIZE, addr != end);
  556. add_mm_rss(tlb->mm, -file_rss, -anon_rss);
  557. pte_unmap(pte - 1);
  558. }
  559. static inline void zap_pmd_range(struct mmu_gather *tlb, pud_t *pud,
  560. unsigned long addr, unsigned long end,
  561. struct zap_details *details)
  562. {
  563. pmd_t *pmd;
  564. unsigned long next;
  565. pmd = pmd_offset(pud, addr);
  566. do {
  567. next = pmd_addr_end(addr, end);
  568. if (pmd_none_or_clear_bad(pmd))
  569. continue;
  570. zap_pte_range(tlb, pmd, addr, next, details);
  571. } while (pmd++, addr = next, addr != end);
  572. }
  573. static inline void zap_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
  574. unsigned long addr, unsigned long end,
  575. struct zap_details *details)
  576. {
  577. pud_t *pud;
  578. unsigned long next;
  579. pud = pud_offset(pgd, addr);
  580. do {
  581. next = pud_addr_end(addr, end);
  582. if (pud_none_or_clear_bad(pud))
  583. continue;
  584. zap_pmd_range(tlb, pud, addr, next, details);
  585. } while (pud++, addr = next, addr != end);
  586. }
  587. static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
  588. unsigned long addr, unsigned long end,
  589. struct zap_details *details)
  590. {
  591. pgd_t *pgd;
  592. unsigned long next;
  593. if (details && !details->check_mapping && !details->nonlinear_vma)
  594. details = NULL;
  595. BUG_ON(addr >= end);
  596. tlb_start_vma(tlb, vma);
  597. pgd = pgd_offset(vma->vm_mm, addr);
  598. do {
  599. next = pgd_addr_end(addr, end);
  600. if (pgd_none_or_clear_bad(pgd))
  601. continue;
  602. zap_pud_range(tlb, pgd, addr, next, details);
  603. } while (pgd++, addr = next, addr != end);
  604. tlb_end_vma(tlb, vma);
  605. }
  606. #ifdef CONFIG_PREEMPT
  607. # define ZAP_BLOCK_SIZE (8 * PAGE_SIZE)
  608. #else
  609. /* No preempt: go for improved straight-line efficiency */
  610. # define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE)
  611. #endif
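/*
 * With 4K pages this works out to unmapping in 32KB batches (8 pages) when
 * CONFIG_PREEMPT is set, versus 4MB batches (1024 pages) otherwise: small
 * batches keep page_table_lock hold times short for latency, large batches
 * amortize the tlb_gather_mmu()/tlb_finish_mmu() overhead.
 */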
  612. /**
  613. * unmap_vmas - unmap a range of memory covered by a list of vma's
  614. * @tlbp: address of the caller's struct mmu_gather
  615. * @mm: the controlling mm_struct
  616. * @vma: the starting vma
  617. * @start_addr: virtual address at which to start unmapping
  618. * @end_addr: virtual address at which to end unmapping
  619. * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
  620. * @details: details of nonlinear truncation or shared cache invalidation
  621. *
  622. * Returns the end address of the unmapping (restart addr if interrupted).
  623. *
  624. * Unmap all pages in the vma list. Called under page_table_lock.
  625. *
  626. * We aim to not hold page_table_lock for too long (for scheduling latency
  627. * reasons). So zap pages in ZAP_BLOCK_SIZE-byte batches. This means we need to
  628. * return the ending mmu_gather to the caller.
  629. *
  630. * Only addresses between `start' and `end' will be unmapped.
  631. *
  632. * The VMA list must be sorted in ascending virtual address order.
  633. *
  634. * unmap_vmas() assumes that the caller will flush the whole unmapped address
  635. * range after unmap_vmas() returns. So the only responsibility here is to
  636. * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
  637. * drops the lock and schedules.
  638. */
  639. unsigned long unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
  640. struct vm_area_struct *vma, unsigned long start_addr,
  641. unsigned long end_addr, unsigned long *nr_accounted,
  642. struct zap_details *details)
  643. {
  644. unsigned long zap_bytes = ZAP_BLOCK_SIZE;
  645. unsigned long tlb_start = 0; /* For tlb_finish_mmu */
  646. int tlb_start_valid = 0;
  647. unsigned long start = start_addr;
  648. spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
  649. int fullmm = (*tlbp)->fullmm;
  650. for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
  651. unsigned long end;
  652. start = max(vma->vm_start, start_addr);
  653. if (start >= vma->vm_end)
  654. continue;
  655. end = min(vma->vm_end, end_addr);
  656. if (end <= vma->vm_start)
  657. continue;
  658. if (vma->vm_flags & VM_ACCOUNT)
  659. *nr_accounted += (end - start) >> PAGE_SHIFT;
  660. while (start != end) {
  661. unsigned long block;
  662. if (!tlb_start_valid) {
  663. tlb_start = start;
  664. tlb_start_valid = 1;
  665. }
  666. if (is_vm_hugetlb_page(vma)) {
  667. block = end - start;
  668. unmap_hugepage_range(vma, start, end);
  669. } else {
  670. block = min(zap_bytes, end - start);
  671. unmap_page_range(*tlbp, vma, start,
  672. start + block, details);
  673. }
  674. start += block;
  675. zap_bytes -= block;
  676. if ((long)zap_bytes > 0)
  677. continue;
  678. tlb_finish_mmu(*tlbp, tlb_start, start);
  679. if (need_resched() ||
  680. need_lockbreak(&mm->page_table_lock) ||
  681. (i_mmap_lock && need_lockbreak(i_mmap_lock))) {
  682. if (i_mmap_lock) {
  683. /* must reset count of rss freed */
  684. *tlbp = tlb_gather_mmu(mm, fullmm);
  685. goto out;
  686. }
  687. spin_unlock(&mm->page_table_lock);
  688. cond_resched();
  689. spin_lock(&mm->page_table_lock);
  690. }
  691. *tlbp = tlb_gather_mmu(mm, fullmm);
  692. tlb_start_valid = 0;
  693. zap_bytes = ZAP_BLOCK_SIZE;
  694. }
  695. }
  696. out:
  697. return start; /* which is now the end (or restart) address */
  698. }
  699. /**
  700. * zap_page_range - remove user pages in a given range
  701. * @vma: vm_area_struct holding the applicable pages
  702. * @address: starting address of pages to zap
  703. * @size: number of bytes to zap
  704. * @details: details of nonlinear truncation or shared cache invalidation
  705. */
  706. unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
  707. unsigned long size, struct zap_details *details)
  708. {
  709. struct mm_struct *mm = vma->vm_mm;
  710. struct mmu_gather *tlb;
  711. unsigned long end = address + size;
  712. unsigned long nr_accounted = 0;
  713. if (is_vm_hugetlb_page(vma)) {
  714. zap_hugepage_range(vma, address, size);
  715. return end;
  716. }
  717. lru_add_drain();
  718. spin_lock(&mm->page_table_lock);
  719. tlb = tlb_gather_mmu(mm, 0);
  720. end = unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
  721. tlb_finish_mmu(tlb, address, end);
  722. spin_unlock(&mm->page_table_lock);
  723. return end;
  724. }
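/*
 * A typical external caller of zap_page_range() is madvise(MADV_DONTNEED)
 * style zapping, roughly of the form (illustrative only, not the exact
 * caller):
 *
 *	zap_page_range(vma, start, end - start, NULL);
 *
 * i.e. details == NULL for the plain "just drop these pages" case; the
 * zap_details argument only matters for truncation and nonlinear
 * invalidation, as driven by unmap_mapping_range() below.
 */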
  725. /*
  726. * Do a quick page-table lookup for a single page.
  727. * mm->page_table_lock must be held.
  728. */
  729. static struct page *__follow_page(struct mm_struct *mm, unsigned long address,
  730. int read, int write, int accessed)
  731. {
  732. pgd_t *pgd;
  733. pud_t *pud;
  734. pmd_t *pmd;
  735. pte_t *ptep, pte;
  736. unsigned long pfn;
  737. struct page *page;
  738. page = follow_huge_addr(mm, address, write);
  739. if (! IS_ERR(page))
  740. return page;
  741. pgd = pgd_offset(mm, address);
  742. if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
  743. goto out;
  744. pud = pud_offset(pgd, address);
  745. if (pud_none(*pud) || unlikely(pud_bad(*pud)))
  746. goto out;
  747. pmd = pmd_offset(pud, address);
  748. if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
  749. goto out;
  750. if (pmd_huge(*pmd))
  751. return follow_huge_pmd(mm, address, pmd, write);
  752. ptep = pte_offset_map(pmd, address);
  753. if (!ptep)
  754. goto out;
  755. pte = *ptep;
  756. pte_unmap(ptep);
  757. if (pte_present(pte)) {
  758. if (write && !pte_write(pte))
  759. goto out;
  760. if (read && !pte_read(pte))
  761. goto out;
  762. pfn = pte_pfn(pte);
  763. if (pfn_valid(pfn)) {
  764. page = pfn_to_page(pfn);
  765. if (accessed) {
  766. if (write && !pte_dirty(pte) && !PageDirty(page))
  767. set_page_dirty(page);
  768. mark_page_accessed(page);
  769. }
  770. return page;
  771. }
  772. }
  773. out:
  774. return NULL;
  775. }
  776. inline struct page *
  777. follow_page(struct mm_struct *mm, unsigned long address, int write)
  778. {
  779. return __follow_page(mm, address, 0, write, 1);
  780. }
  781. /*
  782. * check_user_page_readable() can be called from interrupt context by oprofile,
  783. * so we need to avoid taking any non-irq-safe locks
  784. */
  785. int check_user_page_readable(struct mm_struct *mm, unsigned long address)
  786. {
  787. return __follow_page(mm, address, 1, 0, 0) != NULL;
  788. }
  789. EXPORT_SYMBOL(check_user_page_readable);
  790. static inline int
  791. untouched_anonymous_page(struct mm_struct* mm, struct vm_area_struct *vma,
  792. unsigned long address)
  793. {
  794. pgd_t *pgd;
  795. pud_t *pud;
  796. pmd_t *pmd;
  797. /* Check if the vma is for an anonymous mapping. */
  798. if (vma->vm_ops && vma->vm_ops->nopage)
  799. return 0;
  800. /* Check if page directory entry exists. */
  801. pgd = pgd_offset(mm, address);
  802. if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
  803. return 1;
  804. pud = pud_offset(pgd, address);
  805. if (pud_none(*pud) || unlikely(pud_bad(*pud)))
  806. return 1;
  807. /* Check if page middle directory entry exists. */
  808. pmd = pmd_offset(pud, address);
  809. if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
  810. return 1;
  811. /* There is a pte slot for 'address' in 'mm'. */
  812. return 0;
  813. }
  814. int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  815. unsigned long start, int len, int write, int force,
  816. struct page **pages, struct vm_area_struct **vmas)
  817. {
  818. int i;
  819. unsigned int flags;
  820. /*
  821. * Require read or write permissions.
  822. * If 'force' is set, we only require the "MAY" flags.
  823. */
  824. flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
  825. flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
  826. i = 0;
  827. do {
  828. struct vm_area_struct * vma;
  829. vma = find_extend_vma(mm, start);
  830. if (!vma && in_gate_area(tsk, start)) {
  831. unsigned long pg = start & PAGE_MASK;
  832. struct vm_area_struct *gate_vma = get_gate_vma(tsk);
  833. pgd_t *pgd;
  834. pud_t *pud;
  835. pmd_t *pmd;
  836. pte_t *pte;
  837. if (write) /* user gate pages are read-only */
  838. return i ? : -EFAULT;
  839. if (pg > TASK_SIZE)
  840. pgd = pgd_offset_k(pg);
  841. else
  842. pgd = pgd_offset_gate(mm, pg);
  843. BUG_ON(pgd_none(*pgd));
  844. pud = pud_offset(pgd, pg);
  845. BUG_ON(pud_none(*pud));
  846. pmd = pmd_offset(pud, pg);
  847. if (pmd_none(*pmd))
  848. return i ? : -EFAULT;
  849. pte = pte_offset_map(pmd, pg);
  850. if (pte_none(*pte)) {
  851. pte_unmap(pte);
  852. return i ? : -EFAULT;
  853. }
  854. if (pages) {
  855. pages[i] = pte_page(*pte);
  856. get_page(pages[i]);
  857. }
  858. pte_unmap(pte);
  859. if (vmas)
  860. vmas[i] = gate_vma;
  861. i++;
  862. start += PAGE_SIZE;
  863. len--;
  864. continue;
  865. }
  866. if (!vma || (vma->vm_flags & VM_IO)
  867. || !(flags & vma->vm_flags))
  868. return i ? : -EFAULT;
  869. if (is_vm_hugetlb_page(vma)) {
  870. i = follow_hugetlb_page(mm, vma, pages, vmas,
  871. &start, &len, i);
  872. continue;
  873. }
  874. spin_lock(&mm->page_table_lock);
  875. do {
  876. int write_access = write;
  877. struct page *page;
  878. cond_resched_lock(&mm->page_table_lock);
  879. while (!(page = follow_page(mm, start, write_access))) {
  880. int ret;
  881. /*
  882. * Shortcut for anonymous pages. We don't want
  883. * to force the creation of page tables for
  884. * insanely big anonymously mapped areas that
  885. * nobody touched so far. This is important
  886. * for doing a core dump for these mappings.
  887. */
  888. if (!write && untouched_anonymous_page(mm,vma,start)) {
  889. page = ZERO_PAGE(start);
  890. break;
  891. }
  892. spin_unlock(&mm->page_table_lock);
  893. ret = __handle_mm_fault(mm, vma, start, write_access);
  894. /*
  895. * The VM_FAULT_WRITE bit tells us that do_wp_page has
  896. * broken COW when necessary, even if maybe_mkwrite
  897. * decided not to set pte_write. We can thus safely do
  898. * subsequent page lookups as if they were reads.
  899. */
  900. if (ret & VM_FAULT_WRITE)
  901. write_access = 0;
  902. switch (ret & ~VM_FAULT_WRITE) {
  903. case VM_FAULT_MINOR:
  904. tsk->min_flt++;
  905. break;
  906. case VM_FAULT_MAJOR:
  907. tsk->maj_flt++;
  908. break;
  909. case VM_FAULT_SIGBUS:
  910. return i ? i : -EFAULT;
  911. case VM_FAULT_OOM:
  912. return i ? i : -ENOMEM;
  913. default:
  914. BUG();
  915. }
  916. spin_lock(&mm->page_table_lock);
  917. }
  918. if (pages) {
  919. pages[i] = page;
  920. flush_dcache_page(page);
  921. if (!PageReserved(page))
  922. page_cache_get(page);
  923. }
  924. if (vmas)
  925. vmas[i] = vma;
  926. i++;
  927. start += PAGE_SIZE;
  928. len--;
  929. } while (len && start < vma->vm_end);
  930. spin_unlock(&mm->page_table_lock);
  931. } while (len);
  932. return i;
  933. }
  934. EXPORT_SYMBOL(get_user_pages);
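/*
 * A sketch of typical in-kernel get_user_pages() usage (illustrative only;
 * uaddr, npages and pages[] are hypothetical caller-side variables): the
 * caller must hold mmap_sem across the call, and every pinned page must
 * eventually be released.
 *
 *	down_read(&current->mm->mmap_sem);
 *	ret = get_user_pages(current, current->mm, uaddr, npages,
 *			     1, 0, pages, NULL);
 *	up_read(&current->mm->mmap_sem);
 *	if (ret > 0) {
 *		... use pages[0..ret-1], here asked for with write access
 *		    (write=1, force=0) ...
 *		for (i = 0; i < ret; i++)
 *			page_cache_release(pages[i]);
 *	}
 *
 * The return value is the number of pages actually pinned, which may be
 * fewer than requested; a negative value means the very first page faulted
 * with an error.
 */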
  935. static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
  936. unsigned long addr, unsigned long end, pgprot_t prot)
  937. {
  938. pte_t *pte;
  939. pte = pte_alloc_map(mm, pmd, addr);
  940. if (!pte)
  941. return -ENOMEM;
  942. do {
  943. pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(addr), prot));
  944. BUG_ON(!pte_none(*pte));
  945. set_pte_at(mm, addr, pte, zero_pte);
  946. } while (pte++, addr += PAGE_SIZE, addr != end);
  947. pte_unmap(pte - 1);
  948. return 0;
  949. }
  950. static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
  951. unsigned long addr, unsigned long end, pgprot_t prot)
  952. {
  953. pmd_t *pmd;
  954. unsigned long next;
  955. pmd = pmd_alloc(mm, pud, addr);
  956. if (!pmd)
  957. return -ENOMEM;
  958. do {
  959. next = pmd_addr_end(addr, end);
  960. if (zeromap_pte_range(mm, pmd, addr, next, prot))
  961. return -ENOMEM;
  962. } while (pmd++, addr = next, addr != end);
  963. return 0;
  964. }
  965. static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
  966. unsigned long addr, unsigned long end, pgprot_t prot)
  967. {
  968. pud_t *pud;
  969. unsigned long next;
  970. pud = pud_alloc(mm, pgd, addr);
  971. if (!pud)
  972. return -ENOMEM;
  973. do {
  974. next = pud_addr_end(addr, end);
  975. if (zeromap_pmd_range(mm, pud, addr, next, prot))
  976. return -ENOMEM;
  977. } while (pud++, addr = next, addr != end);
  978. return 0;
  979. }
  980. int zeromap_page_range(struct vm_area_struct *vma,
  981. unsigned long addr, unsigned long size, pgprot_t prot)
  982. {
  983. pgd_t *pgd;
  984. unsigned long next;
  985. unsigned long end = addr + size;
  986. struct mm_struct *mm = vma->vm_mm;
  987. int err;
  988. BUG_ON(addr >= end);
  989. pgd = pgd_offset(mm, addr);
  990. flush_cache_range(vma, addr, end);
  991. spin_lock(&mm->page_table_lock);
  992. do {
  993. next = pgd_addr_end(addr, end);
  994. err = zeromap_pud_range(mm, pgd, addr, next, prot);
  995. if (err)
  996. break;
  997. } while (pgd++, addr = next, addr != end);
  998. spin_unlock(&mm->page_table_lock);
  999. return err;
  1000. }
  1001. /*
  1002. * maps a range of physical memory into the requested pages. the old
  1003. * mappings are removed. any references to nonexistent pages result
  1004. * in null mappings (currently treated as "copy-on-access")
  1005. */
  1006. static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
  1007. unsigned long addr, unsigned long end,
  1008. unsigned long pfn, pgprot_t prot)
  1009. {
  1010. pte_t *pte;
  1011. pte = pte_alloc_map(mm, pmd, addr);
  1012. if (!pte)
  1013. return -ENOMEM;
  1014. do {
  1015. BUG_ON(!pte_none(*pte));
  1016. if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
  1017. set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
  1018. pfn++;
  1019. } while (pte++, addr += PAGE_SIZE, addr != end);
  1020. pte_unmap(pte - 1);
  1021. return 0;
  1022. }
  1023. static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
  1024. unsigned long addr, unsigned long end,
  1025. unsigned long pfn, pgprot_t prot)
  1026. {
  1027. pmd_t *pmd;
  1028. unsigned long next;
  1029. pfn -= addr >> PAGE_SHIFT;
  1030. pmd = pmd_alloc(mm, pud, addr);
  1031. if (!pmd)
  1032. return -ENOMEM;
  1033. do {
  1034. next = pmd_addr_end(addr, end);
  1035. if (remap_pte_range(mm, pmd, addr, next,
  1036. pfn + (addr >> PAGE_SHIFT), prot))
  1037. return -ENOMEM;
  1038. } while (pmd++, addr = next, addr != end);
  1039. return 0;
  1040. }
  1041. static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
  1042. unsigned long addr, unsigned long end,
  1043. unsigned long pfn, pgprot_t prot)
  1044. {
  1045. pud_t *pud;
  1046. unsigned long next;
  1047. pfn -= addr >> PAGE_SHIFT;
  1048. pud = pud_alloc(mm, pgd, addr);
  1049. if (!pud)
  1050. return -ENOMEM;
  1051. do {
  1052. next = pud_addr_end(addr, end);
  1053. if (remap_pmd_range(mm, pud, addr, next,
  1054. pfn + (addr >> PAGE_SHIFT), prot))
  1055. return -ENOMEM;
  1056. } while (pud++, addr = next, addr != end);
  1057. return 0;
  1058. }
  1059. /* Note: this is only safe if the mm semaphore is held when called. */
  1060. int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
  1061. unsigned long pfn, unsigned long size, pgprot_t prot)
  1062. {
  1063. pgd_t *pgd;
  1064. unsigned long next;
  1065. unsigned long end = addr + PAGE_ALIGN(size);
  1066. struct mm_struct *mm = vma->vm_mm;
  1067. int err;
  1068. /*
  1069. * Physically remapped pages are special. Tell the
  1070. * rest of the world about it:
  1071. * VM_IO tells people not to look at these pages
  1072. * (accesses can have side effects).
  1073. * VM_RESERVED tells swapout not to try to touch
  1074. * this region.
  1075. */
  1076. vma->vm_flags |= VM_IO | VM_RESERVED;
  1077. BUG_ON(addr >= end);
  1078. pfn -= addr >> PAGE_SHIFT;
  1079. pgd = pgd_offset(mm, addr);
  1080. flush_cache_range(vma, addr, end);
  1081. spin_lock(&mm->page_table_lock);
  1082. do {
  1083. next = pgd_addr_end(addr, end);
  1084. err = remap_pud_range(mm, pgd, addr, next,
  1085. pfn + (addr >> PAGE_SHIFT), prot);
  1086. if (err)
  1087. break;
  1088. } while (pgd++, addr = next, addr != end);
  1089. spin_unlock(&mm->page_table_lock);
  1090. return err;
  1091. }
  1092. EXPORT_SYMBOL(remap_pfn_range);
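/*
 * The canonical user of remap_pfn_range() is a driver's mmap method mapping
 * device memory straight into a process.  A minimal sketch (mydrv_mmap is a
 * hypothetical driver method, illustrative only):
 *
 *	static int mydrv_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *
 *		if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 *				    size, vma->vm_page_prot))
 *			return -EAGAIN;
 *		return 0;
 *	}
 *
 * The whole range is mapped up front, and remap_pfn_range() itself marks
 * the vma VM_IO | VM_RESERVED as described above.
 */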
  1093. /*
  1094. * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
  1095. * servicing faults for write access. In the normal case, we always want
  1096. * pte_mkwrite. But get_user_pages can cause write faults for mappings
  1097. * that do not have writing enabled, when used by access_process_vm.
  1098. */
  1099. static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
  1100. {
  1101. if (likely(vma->vm_flags & VM_WRITE))
  1102. pte = pte_mkwrite(pte);
  1103. return pte;
  1104. }
  1105. /*
  1106. * This routine handles present pages, when users try to write
  1107. * to a shared page. It is done by copying the page to a new address
  1108. * and decrementing the shared-page counter for the old page.
  1109. *
  1110. * Note that this routine assumes that the protection checks have been
  1111. * done by the caller (the low-level page fault routine in most cases).
  1112. * Thus we can safely just mark it writable once we've done any necessary
  1113. * COW.
  1114. *
  1115. * We also mark the page dirty at this point even though the page will
  1116. * change only once the write actually happens. This avoids a few races,
  1117. * and potentially makes it more efficient.
  1118. *
  1119. * We hold the mm semaphore and the page_table_lock on entry and exit
  1120. * with the page_table_lock released.
  1121. */
  1122. static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
  1123. unsigned long address, pte_t *page_table, pmd_t *pmd,
  1124. pte_t orig_pte)
  1125. {
  1126. struct page *old_page, *new_page;
  1127. unsigned long pfn = pte_pfn(orig_pte);
  1128. pte_t entry;
  1129. int ret = VM_FAULT_MINOR;
  1130. if (unlikely(!pfn_valid(pfn))) {
  1131. /*
  1132. * Page table corrupted: show pte and kill process.
  1133. */
  1134. pte_ERROR(orig_pte);
  1135. ret = VM_FAULT_OOM;
  1136. goto unlock;
  1137. }
  1138. old_page = pfn_to_page(pfn);
  1139. if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
  1140. int reuse = can_share_swap_page(old_page);
  1141. unlock_page(old_page);
  1142. if (reuse) {
  1143. flush_cache_page(vma, address, pfn);
  1144. entry = pte_mkyoung(orig_pte);
  1145. entry = maybe_mkwrite(pte_mkdirty(entry), vma);
  1146. ptep_set_access_flags(vma, address, page_table, entry, 1);
  1147. update_mmu_cache(vma, address, entry);
  1148. lazy_mmu_prot_update(entry);
  1149. ret |= VM_FAULT_WRITE;
  1150. goto unlock;
  1151. }
  1152. }
  1153. /*
  1154. * Ok, we need to copy. Oh, well..
  1155. */
  1156. if (!PageReserved(old_page))
  1157. page_cache_get(old_page);
  1158. pte_unmap(page_table);
  1159. spin_unlock(&mm->page_table_lock);
  1160. if (unlikely(anon_vma_prepare(vma)))
  1161. goto oom;
  1162. if (old_page == ZERO_PAGE(address)) {
  1163. new_page = alloc_zeroed_user_highpage(vma, address);
  1164. if (!new_page)
  1165. goto oom;
  1166. } else {
  1167. new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
  1168. if (!new_page)
  1169. goto oom;
  1170. copy_user_highpage(new_page, old_page, address);
  1171. }
  1172. /*
  1173. * Re-check the pte - we dropped the lock
  1174. */
  1175. spin_lock(&mm->page_table_lock);
  1176. page_table = pte_offset_map(pmd, address);
  1177. if (likely(pte_same(*page_table, orig_pte))) {
  1178. if (PageReserved(old_page))
  1179. inc_mm_counter(mm, anon_rss);
  1180. else {
  1181. page_remove_rmap(old_page);
  1182. if (!PageAnon(old_page)) {
  1183. inc_mm_counter(mm, anon_rss);
  1184. dec_mm_counter(mm, file_rss);
  1185. }
  1186. }
  1187. flush_cache_page(vma, address, pfn);
  1188. entry = mk_pte(new_page, vma->vm_page_prot);
  1189. entry = maybe_mkwrite(pte_mkdirty(entry), vma);
  1190. ptep_establish(vma, address, page_table, entry);
  1191. update_mmu_cache(vma, address, entry);
  1192. lazy_mmu_prot_update(entry);
  1193. lru_cache_add_active(new_page);
  1194. page_add_anon_rmap(new_page, vma, address);
  1195. /* Free the old page.. */
  1196. new_page = old_page;
  1197. ret |= VM_FAULT_WRITE;
  1198. }
  1199. page_cache_release(new_page);
  1200. page_cache_release(old_page);
  1201. unlock:
  1202. pte_unmap(page_table);
  1203. spin_unlock(&mm->page_table_lock);
  1204. return ret;
  1205. oom:
  1206. page_cache_release(old_page);
  1207. return VM_FAULT_OOM;
  1208. }
  1209. /*
  1210. * Helper functions for unmap_mapping_range().
  1211. *
  1212. * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
  1213. *
  1214. * We have to restart searching the prio_tree whenever we drop the lock,
  1215. * since the iterator is only valid while the lock is held, and anyway
  1216. * a later vma might be split and reinserted earlier while lock dropped.
  1217. *
  1218. * The list of nonlinear vmas could be handled more efficiently, using
  1219. * a placeholder, but handle it in the same way until a need is shown.
  1220. * It is important to search the prio_tree before nonlinear list: a vma
  1221. * may become nonlinear and be shifted from prio_tree to nonlinear list
  1222. * while the lock is dropped; but never shifted from list to prio_tree.
  1223. *
  1224. * In order to make forward progress despite restarting the search,
  1225. * vm_truncate_count is used to mark a vma as now dealt with, so we can
  1226. * quickly skip it next time around. Since the prio_tree search only
  1227. * shows us those vmas affected by unmapping the range in question, we
  1228. * can't efficiently keep all vmas in step with mapping->truncate_count:
  1229. * so instead reset them all whenever it wraps back to 0 (then go to 1).
  1230. * mapping->truncate_count and vma->vm_truncate_count are protected by
  1231. * i_mmap_lock.
  1232. *
  1233. * In order to make forward progress despite repeatedly restarting some
  1234. * large vma, note the restart_addr from unmap_vmas when it breaks out:
  1235. * and restart from that address when we reach that vma again. It might
  1236. * have been split or merged, shrunk or extended, but never shifted: so
  1237. * restart_addr remains valid so long as it remains in the vma's range.
  1238. * unmap_mapping_range forces truncate_count to leap over page-aligned
  1239. * values so we can save vma's restart_addr in its truncate_count field.
  1240. */
  1241. #define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))
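/*
 * is_restart_addr() exploits that forced leap: a genuine truncate_count is
 * never page-aligned, so any value whose low PAGE_SHIFT bits are all zero
 * (e.g. 0x20000 with 4K pages) cannot be a counter and must be a saved
 * restart address, which is how unmap_mapping_range_vma() below tells the
 * two apart.
 */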
  1242. static void reset_vma_truncate_counts(struct address_space *mapping)
  1243. {
  1244. struct vm_area_struct *vma;
  1245. struct prio_tree_iter iter;
  1246. vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
  1247. vma->vm_truncate_count = 0;
  1248. list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
  1249. vma->vm_truncate_count = 0;
  1250. }
  1251. static int unmap_mapping_range_vma(struct vm_area_struct *vma,
  1252. unsigned long start_addr, unsigned long end_addr,
  1253. struct zap_details *details)
  1254. {
  1255. unsigned long restart_addr;
  1256. int need_break;
  1257. again:
  1258. restart_addr = vma->vm_truncate_count;
  1259. if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
  1260. start_addr = restart_addr;
  1261. if (start_addr >= end_addr) {
  1262. /* Top of vma has been split off since last time */
  1263. vma->vm_truncate_count = details->truncate_count;
  1264. return 0;
  1265. }
  1266. }
  1267. restart_addr = zap_page_range(vma, start_addr,
  1268. end_addr - start_addr, details);
  1269. /*
  1270. * We cannot rely on the break test in unmap_vmas:
  1271. * on the one hand, we don't want to restart our loop
  1272. * just because that broke out for the page_table_lock;
  1273. * on the other hand, it does no test when vma is small.
  1274. */
  1275. need_break = need_resched() ||
  1276. need_lockbreak(details->i_mmap_lock);
  1277. if (restart_addr >= end_addr) {
  1278. /* We have now completed this vma: mark it so */
  1279. vma->vm_truncate_count = details->truncate_count;
  1280. if (!need_break)
  1281. return 0;
  1282. } else {
  1283. /* Note restart_addr in vma's truncate_count field */
  1284. vma->vm_truncate_count = restart_addr;
  1285. if (!need_break)
  1286. goto again;
  1287. }
  1288. spin_unlock(details->i_mmap_lock);
  1289. cond_resched();
  1290. spin_lock(details->i_mmap_lock);
  1291. return -EINTR;
  1292. }
  1293. static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
  1294. struct zap_details *details)
  1295. {
  1296. struct vm_area_struct *vma;
  1297. struct prio_tree_iter iter;
  1298. pgoff_t vba, vea, zba, zea;
  1299. restart:
  1300. vma_prio_tree_foreach(vma, &iter, root,
  1301. details->first_index, details->last_index) {
  1302. /* Skip quickly over those we have already dealt with */
  1303. if (vma->vm_truncate_count == details->truncate_count)
  1304. continue;
  1305. vba = vma->vm_pgoff;
  1306. vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
  1307. /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
  1308. zba = details->first_index;
  1309. if (zba < vba)
  1310. zba = vba;
  1311. zea = details->last_index;
  1312. if (zea > vea)
  1313. zea = vea;
  1314. if (unmap_mapping_range_vma(vma,
  1315. ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
  1316. ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
  1317. details) < 0)
  1318. goto restart;
  1319. }
  1320. }
  1321. static inline void unmap_mapping_range_list(struct list_head *head,
  1322. struct zap_details *details)
  1323. {
  1324. struct vm_area_struct *vma;
  1325. /*
  1326. * In nonlinear VMAs there is no correspondence between virtual address
  1327. * offset and file offset. So we must perform an exhaustive search
  1328. * across *all* the pages in each nonlinear VMA, not just the pages
  1329. * whose virtual address lies outside the file truncation point.
  1330. */
  1331. restart:
  1332. list_for_each_entry(vma, head, shared.vm_set.list) {
  1333. /* Skip quickly over those we have already dealt with */
  1334. if (vma->vm_truncate_count == details->truncate_count)
  1335. continue;
  1336. details->nonlinear_vma = vma;
  1337. if (unmap_mapping_range_vma(vma, vma->vm_start,
  1338. vma->vm_end, details) < 0)
  1339. goto restart;
  1340. }
  1341. }
  1342. /**
  1343. * unmap_mapping_range - unmap the portion of all mmaps
  1344. * in the specified address_space corresponding to the specified
  1345. * page range in the underlying file.
  1346. * @mapping: the address space containing mmaps to be unmapped.
  1347. * @holebegin: byte in first page to unmap, relative to the start of
  1348. * the underlying file. This will be rounded down to a PAGE_SIZE
  1349. * boundary. Note that this is different from vmtruncate(), which
  1350. * must keep the partial page. In contrast, we must get rid of
  1351. * partial pages.
  1352. * @holelen: size of prospective hole in bytes. This will be rounded
  1353. * up to a PAGE_SIZE boundary. A holelen of zero truncates to the
  1354. * end of the file.
  1355. * @even_cows: 1 when truncating a file, unmap even private COWed pages;
  1356. * but 0 when invalidating pagecache, don't throw away private data.
  1357. */
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows)
{
	struct zap_details details;
	pgoff_t hba = holebegin >> PAGE_SHIFT;
	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* Check for overflow. */
	if (sizeof(holelen) > sizeof(hlen)) {
		long long holeend =
			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (holeend & ~(long long)ULONG_MAX)
			hlen = ULONG_MAX - hba + 1;
	}

	details.check_mapping = even_cows ? NULL : mapping;
	details.nonlinear_vma = NULL;
	details.first_index = hba;
	details.last_index = hba + hlen - 1;
	if (details.last_index < details.first_index)
		details.last_index = ULONG_MAX;
	details.i_mmap_lock = &mapping->i_mmap_lock;

	spin_lock(&mapping->i_mmap_lock);

	/* serialize i_size write against truncate_count write */
	smp_wmb();
	/* Protect against page faults, and endless unmapping loops */
	mapping->truncate_count++;
	/*
	 * For archs where spin_lock has inclusive semantics like ia64,
	 * this smp_mb() prevents the pagetable contents from being read
	 * before the truncate_count increment is visible to other cpus.
	 */
	smp_mb();
	if (unlikely(is_restart_addr(mapping->truncate_count))) {
		if (mapping->truncate_count == 0)
			reset_vma_truncate_counts(mapping);
		mapping->truncate_count++;
	}
	details.truncate_count = mapping->truncate_count;

	if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
		unmap_mapping_range_tree(&mapping->i_mmap, &details);
	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
	spin_unlock(&mapping->i_mmap_lock);
}
EXPORT_SYMBOL(unmap_mapping_range);
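
/*
 * Illustrative call sequence (a sketch, not code from this file): a
 * filesystem that has freed the blocks backing @holelen bytes at
 * @holebegin would typically zap any mappings of that range before
 * dropping the pagecache pages, e.g.
 *
 *	unmap_mapping_range(inode->i_mapping, holebegin, holelen, 1);
 *
 * vmtruncate() below is the in-tree example for whole-file truncation.
 */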

/*
 * Handle all mappings that got truncated by a "truncate()"
 * system call.
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
int vmtruncate(struct inode * inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned long limit;

	if (inode->i_size < offset)
		goto do_expand;
	/*
	 * truncation of in-use swapfiles is disallowed - it would cause
	 * subsequent swapout to scribble on the now-freed blocks.
	 */
	if (IS_SWAPFILE(inode))
		goto out_busy;
	i_size_write(inode, offset);
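	/*
	 * Passing offset + PAGE_SIZE - 1 means the (rounded-down) hole
	 * starts at the first page boundary at or above the new i_size,
	 * so any partial page stays mapped, and a holelen of 0 means
	 * "unmap to end of file".
	 */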
	unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
	truncate_inode_pages(mapping, offset);
	goto out_truncate;

do_expand:
	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && offset > limit)
		goto out_sig;
	if (offset > inode->i_sb->s_maxbytes)
		goto out_big;
	i_size_write(inode, offset);

out_truncate:
	if (inode->i_op && inode->i_op->truncate)
		inode->i_op->truncate(inode);
	return 0;
out_sig:
	send_sig(SIGXFSZ, current, 0);
out_big:
	return -EFBIG;
out_busy:
	return -ETXTBSY;
}
EXPORT_SYMBOL(vmtruncate);

/*
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
void swapin_readahead(swp_entry_t entry, unsigned long addr, struct vm_area_struct *vma)
{
#ifdef CONFIG_NUMA
	struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL;
#endif
	int i, num;
	struct page *new_page;
	unsigned long offset;

	/*
	 * Get the number of handles we should do readahead io to.
	 */
	num = valid_swaphandles(entry, &offset);
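	/*
	 * valid_swaphandles() points offset at the start of the
	 * (1 << page_cluster)-aligned cluster containing entry and
	 * returns how many slots from there are worth reading; the loop
	 * below kicks off an async read for each of them.
	 */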
	for (i = 0; i < num; offset++, i++) {
		/* Ok, do the async read-ahead now */
		new_page = read_swap_cache_async(swp_entry(swp_type(entry),
							offset), vma, addr);
		if (!new_page)
			break;
		page_cache_release(new_page);
#ifdef CONFIG_NUMA
		/*
		 * Find the next applicable VMA for the NUMA policy.
		 */
		addr += PAGE_SIZE;
		if (addr == 0)
			vma = NULL;
		if (vma) {
			if (addr >= vma->vm_end) {
				vma = next_vma;
				next_vma = vma ? vma->vm_next : NULL;
			}
			if (vma && addr < vma->vm_start)
				vma = NULL;
		} else {
			if (next_vma && addr >= next_vma->vm_start) {
				vma = next_vma;
				next_vma = vma->vm_next;
			}
		}
#endif
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
}

/*
 * We hold the mm semaphore and the page_table_lock on entry and
 * should release the pagetable lock on exit..
 */
static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		int write_access, pte_t orig_pte)
{
	struct page *page;
	swp_entry_t entry;
	pte_t pte;
	int ret = VM_FAULT_MINOR;

	pte_unmap(page_table);
	spin_unlock(&mm->page_table_lock);

	entry = pte_to_swp_entry(orig_pte);
	page = lookup_swap_cache(entry);
	if (!page) {
		swapin_readahead(entry, address, vma);
		page = read_swap_cache_async(entry, vma, address);
		if (!page) {
			/*
			 * Back out if somebody else faulted in this pte while
			 * we released the page table lock.
			 */
			spin_lock(&mm->page_table_lock);
			page_table = pte_offset_map(pmd, address);
			if (likely(pte_same(*page_table, orig_pte)))
				ret = VM_FAULT_OOM;
			goto unlock;
		}

		/* Had to read the page from swap area: Major fault */
		ret = VM_FAULT_MAJOR;
		inc_page_state(pgmajfault);
		grab_swap_token();
	}

	mark_page_accessed(page);
	lock_page(page);

	/*
	 * Back out if somebody else faulted in this pte while we
	 * released the page table lock.
	 */
	spin_lock(&mm->page_table_lock);
	page_table = pte_offset_map(pmd, address);
	if (unlikely(!pte_same(*page_table, orig_pte))) {
		ret = VM_FAULT_MINOR;
		goto out_nomap;
	}

	if (unlikely(!PageUptodate(page))) {
		ret = VM_FAULT_SIGBUS;
		goto out_nomap;
	}

	/* The page isn't present yet, go ahead with the fault. */

	inc_mm_counter(mm, anon_rss);
	pte = mk_pte(page, vma->vm_page_prot);
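	/*
	 * On a write fault, if we are the only user of the swap page we
	 * can map it writable and dirty right away; clearing write_access
	 * below skips the do_wp_page() COW path at the end.
	 */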
	if (write_access && can_share_swap_page(page)) {
		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
		write_access = 0;
	}

	flush_icache_page(vma, page);
	set_pte_at(mm, address, page_table, pte);
	page_add_anon_rmap(page, vma, address);

	swap_free(entry);
	if (vm_swap_full())
		remove_exclusive_swap_page(page);
	unlock_page(page);

	if (write_access) {
		if (do_wp_page(mm, vma, address,
				page_table, pmd, pte) == VM_FAULT_OOM)
			ret = VM_FAULT_OOM;
		goto out;
	}

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, pte);
	lazy_mmu_prot_update(pte);
unlock:
	pte_unmap(page_table);
	spin_unlock(&mm->page_table_lock);
out:
	return ret;
out_nomap:
	pte_unmap(page_table);
	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
	page_cache_release(page);
	return ret;
}

/*
 * We are called with the MM semaphore and page_table_lock
 * spinlock held to protect against concurrent faults in
 * multithreaded programs.
 */
static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		int write_access)
{
	pte_t entry;

	/* Mapping of ZERO_PAGE - vm_page_prot is readonly */
	entry = mk_pte(ZERO_PAGE(address), vma->vm_page_prot);

	if (write_access) {
		struct page *page;

		/* Allocate our own private page. */
		pte_unmap(page_table);
		spin_unlock(&mm->page_table_lock);

		if (unlikely(anon_vma_prepare(vma)))
			goto oom;
		page = alloc_zeroed_user_highpage(vma, address);
		if (!page)
			goto oom;

		spin_lock(&mm->page_table_lock);
		page_table = pte_offset_map(pmd, address);

		if (!pte_none(*page_table)) {
			page_cache_release(page);
			goto unlock;
		}
		inc_mm_counter(mm, anon_rss);
		entry = mk_pte(page, vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		lru_cache_add_active(page);
		SetPageReferenced(page);
		page_add_anon_rmap(page, vma, address);
	}

	set_pte_at(mm, address, page_table, entry);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, entry);
	lazy_mmu_prot_update(entry);
unlock:
	pte_unmap(page_table);
	spin_unlock(&mm->page_table_lock);
	return VM_FAULT_MINOR;
oom:
	return VM_FAULT_OOM;
}

/*
 * do_no_page() tries to create a new page mapping. It aggressively
 * tries to share with existing pages, but makes a separate copy if
 * the "write_access" parameter is true in order to avoid the next
 * page fault.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 *
 * This is called with the MM semaphore held and the page table
 * spinlock held. Exit with the spinlock released.
 */
static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		int write_access)
{
	struct page *new_page;
	struct address_space *mapping = NULL;
	pte_t entry;
	unsigned int sequence = 0;
	int ret = VM_FAULT_MINOR;
	int anon = 0;

	pte_unmap(page_table);
	spin_unlock(&mm->page_table_lock);
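
	/*
	 * Snapshot mapping->truncate_count before calling ->nopage; once
	 * the page table lock is retaken below it is compared again, and
	 * if a truncate ran in between, the page we got back may already
	 * be stale and the lookup is retried.
	 */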
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;
		sequence = mapping->truncate_count;
		smp_rmb(); /* serializes i_size against truncate_count */
	}
retry:
	new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
	/*
	 * No smp_rmb is needed here as long as there's a full
	 * spin_lock/unlock sequence inside the ->nopage callback
	 * (for the pagecache lookup) that acts as an implicit
	 * smp_mb() and prevents the i_size read from happening
	 * after the next truncate_count read.
	 */

	/* no page was available -- either SIGBUS or OOM */
	if (new_page == NOPAGE_SIGBUS)
		return VM_FAULT_SIGBUS;
	if (new_page == NOPAGE_OOM)
		return VM_FAULT_OOM;

	/*
	 * Should we do an early C-O-W break?
	 */
	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		struct page *page;

		if (unlikely(anon_vma_prepare(vma)))
			goto oom;
		page = alloc_page_vma(GFP_HIGHUSER, vma, address);
		if (!page)
			goto oom;
		copy_user_highpage(page, new_page, address);
		page_cache_release(new_page);
		new_page = page;
		anon = 1;
	}

	spin_lock(&mm->page_table_lock);
	/*
	 * For a file-backed vma, someone could have truncated or otherwise
	 * invalidated this page.  If unmap_mapping_range got called,
	 * retry getting the page.
	 */
	if (mapping && unlikely(sequence != mapping->truncate_count)) {
		spin_unlock(&mm->page_table_lock);
		page_cache_release(new_page);
		cond_resched();
		sequence = mapping->truncate_count;
		smp_rmb();
		goto retry;
	}
	page_table = pte_offset_map(pmd, address);

	/*
	 * This silly early PAGE_DIRTY setting removes a race
	 * due to the bad i386 page protection. But it's valid
	 * for other architectures too.
	 *
	 * Note that if write_access is true, we either now have
	 * an exclusive copy of the page, or this is a shared mapping,
	 * so we can make it writable and dirty to avoid having to
	 * handle that later.
	 */
	/* Only go through if we didn't race with anybody else... */
	if (pte_none(*page_table)) {
		flush_icache_page(vma, new_page);
		entry = mk_pte(new_page, vma->vm_page_prot);
		if (write_access)
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		set_pte_at(mm, address, page_table, entry);
		if (anon) {
			inc_mm_counter(mm, anon_rss);
			lru_cache_add_active(new_page);
			page_add_anon_rmap(new_page, vma, address);
		} else if (!PageReserved(new_page)) {
			inc_mm_counter(mm, file_rss);
			page_add_file_rmap(new_page);
		}
	} else {
		/* One of our sibling threads was faster, back out. */
		page_cache_release(new_page);
		goto unlock;
	}

	/* no need to invalidate: a not-present page shouldn't be cached */
	update_mmu_cache(vma, address, entry);
	lazy_mmu_prot_update(entry);
unlock:
	pte_unmap(page_table);
	spin_unlock(&mm->page_table_lock);
	return ret;
oom:
	page_cache_release(new_page);
	return VM_FAULT_OOM;
}

/*
 * Fault of a previously existing named mapping. Repopulate the pte
 * from the encoded file_pte if possible. This enables swappable
 * nonlinear vmas.
 */
static int do_file_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		int write_access, pte_t orig_pte)
{
	pgoff_t pgoff;
	int err;

	pte_unmap(page_table);
	spin_unlock(&mm->page_table_lock);

	if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
		/*
		 * Page table corrupted: show pte and kill process.
		 */
		pte_ERROR(orig_pte);
		return VM_FAULT_OOM;
	}
	/* We can then assume vma->vm_ops && vma->vm_ops->populate */

	pgoff = pte_to_pgoff(orig_pte);
	err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE,
					vma->vm_page_prot, pgoff, 0);
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err)
		return VM_FAULT_SIGBUS;
	return VM_FAULT_MAJOR;
}

/*
 * These routines also need to handle stuff like marking pages dirty
 * and/or accessed for architectures that don't do it in hardware (most
 * RISC architectures).  The early dirtying is also good on the i386.
 *
 * There is also a hook called "update_mmu_cache()" that architectures
 * with external mmu caches can use to update those (ie the Sparc or
 * PowerPC hashed page tables that act as extended TLBs).
 *
 * Note the "page_table_lock". It is to protect against kswapd removing
 * pages from under us. Note that kswapd only ever _removes_ pages, never
 * adds them. As such, once we have noticed that the page is not present,
 * we can drop the lock early.
 *
 * The adding of pages is protected by the MM semaphore (which we hold),
 * so we don't need to worry about a page suddenly being added into
 * our VM.
 *
 * We enter with the pagetable spinlock held; we are supposed to
 * release it when done.
 */
static inline int handle_pte_fault(struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, pmd_t *pmd, int write_access)
{
	pte_t entry;

	entry = *pte;
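	/*
	 * Dispatch on the pte state: not present and pte_none means a
	 * fresh fault (anonymous, or file-backed via ->nopage); not
	 * present but pte_file means a nonlinear file pte; any other
	 * non-present pte is a swap entry.  A present pte only needs
	 * write-protect and accessed/dirty handling.
	 */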
	if (!pte_present(entry)) {
		if (pte_none(entry)) {
			if (!vma->vm_ops || !vma->vm_ops->nopage)
				return do_anonymous_page(mm, vma, address,
					pte, pmd, write_access);
			return do_no_page(mm, vma, address,
					pte, pmd, write_access);
		}
		if (pte_file(entry))
			return do_file_page(mm, vma, address,
					pte, pmd, write_access, entry);
		return do_swap_page(mm, vma, address,
					pte, pmd, write_access, entry);
	}

	if (write_access) {
		if (!pte_write(entry))
			return do_wp_page(mm, vma, address, pte, pmd, entry);
		entry = pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	ptep_set_access_flags(vma, address, pte, entry, write_access);
	update_mmu_cache(vma, address, entry);
	lazy_mmu_prot_update(entry);
	pte_unmap(pte);
	spin_unlock(&mm->page_table_lock);
	return VM_FAULT_MINOR;
}

/*
 * By the time we get here, we already hold the mm semaphore
 */
int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, int write_access)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	__set_current_state(TASK_RUNNING);

	inc_page_state(pgfault);

	if (unlikely(is_vm_hugetlb_page(vma)))
		return hugetlb_fault(mm, vma, address, write_access);

	/*
	 * We need the page table lock to synchronize with kswapd
	 * and the SMP-safe atomic PTE updates.
	 */
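	/*
	 * Walk the four-level table, allocating any missing levels:
	 * pgd -> pud -> pmd -> pte, then hand the mapped pte off to
	 * handle_pte_fault(), which drops the lock.
	 */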
	pgd = pgd_offset(mm, address);
	spin_lock(&mm->page_table_lock);

	pud = pud_alloc(mm, pgd, address);
	if (!pud)
		goto oom;

	pmd = pmd_alloc(mm, pud, address);
	if (!pmd)
		goto oom;

	pte = pte_alloc_map(mm, pmd, address);
	if (!pte)
		goto oom;

	return handle_pte_fault(mm, vma, address, pte, pmd, write_access);

oom:
	spin_unlock(&mm->page_table_lock);
	return VM_FAULT_OOM;
}

#ifndef __PAGETABLE_PUD_FOLDED
/*
 * Allocate page upper directory.
 *
 * We've already handled the fast-path in-line, and we own the
 * page table lock.
 */
pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	pud_t *new;

	spin_unlock(&mm->page_table_lock);
	new = pud_alloc_one(mm, address);
	spin_lock(&mm->page_table_lock);
	if (!new)
		return NULL;

	/*
	 * Because we dropped the lock, we should re-check the
	 * entry, as somebody else could have populated it..
	 */
	if (pgd_present(*pgd)) {
		pud_free(new);
		goto out;
	}
	pgd_populate(mm, pgd, new);
out:
	return pud_offset(pgd, address);
}
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * Allocate page middle directory.
 *
 * We've already handled the fast-path in-line, and we own the
 * page table lock.
 */
pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	pmd_t *new;

	spin_unlock(&mm->page_table_lock);
	new = pmd_alloc_one(mm, address);
	spin_lock(&mm->page_table_lock);
	if (!new)
		return NULL;

	/*
	 * Because we dropped the lock, we should re-check the
	 * entry, as somebody else could have populated it..
	 */
#ifndef __ARCH_HAS_4LEVEL_HACK
	if (pud_present(*pud)) {
		pmd_free(new);
		goto out;
	}
	pud_populate(mm, pud, new);
#else
	if (pgd_present(*pud)) {
		pmd_free(new);
		goto out;
	}
	pgd_populate(mm, pud, new);
#endif /* __ARCH_HAS_4LEVEL_HACK */
out:
	return pmd_offset(pud, address);
}
#endif /* __PAGETABLE_PMD_FOLDED */

int make_pages_present(unsigned long addr, unsigned long end)
{
	int ret, len, write;
	struct vm_area_struct * vma;

	vma = find_vma(current->mm, addr);
	if (!vma)
		return -1;
	write = (vma->vm_flags & VM_WRITE) != 0;
	if (addr >= end)
		BUG();
	if (end > vma->vm_end)
		BUG();
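	/*
	 * len is the number of pages touched by [addr, end);
	 * get_user_pages() then faults each of them in, requesting write
	 * access only if the vma allows it.
	 */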
	len = (end + PAGE_SIZE - 1) / PAGE_SIZE - addr / PAGE_SIZE;
	ret = get_user_pages(current, current->mm, addr,
			len, write, 0, NULL, NULL);
	if (ret < 0)
		return ret;
	return ret == len ? 0 : -1;
}

/*
 * Map a vmalloc()-space virtual address to the physical page.
 */
struct page * vmalloc_to_page(void * vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(void * vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
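
/*
 * Illustrative use only (a sketch, not code from this file): a driver
 * that mmap()s a vmalloc() buffer can resolve faults in its ->nopage
 * handler by translating the faulting offset back to a struct page, e.g.
 *
 *	struct page *pg = vmalloc_to_page(buf + (vaddr - vma->vm_start));
 *	get_page(pg);
 *	return pg;
 *
 * Names here (buf, vaddr) are placeholders, not symbols from this file.
 */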

/*
 * update_mem_hiwater
 *	- update per process rss and vm high water data
 */
void update_mem_hiwater(struct task_struct *tsk)
{
	if (tsk->mm) {
		unsigned long rss = get_mm_rss(tsk->mm);

		if (tsk->mm->hiwater_rss < rss)
			tsk->mm->hiwater_rss = rss;
		if (tsk->mm->hiwater_vm < tsk->mm->total_vm)
			tsk->mm->hiwater_vm = tsk->mm->total_vm;
	}
}

#if !defined(__HAVE_ARCH_GATE_AREA)

#if defined(AT_SYSINFO_EHDR)
static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
	gate_vma.vm_mm = NULL;
	gate_vma.vm_start = FIXADDR_USER_START;
	gate_vma.vm_end = FIXADDR_USER_END;
	gate_vma.vm_page_prot = PAGE_READONLY;
	gate_vma.vm_flags = 0;
	return 0;
}
__initcall(gate_vma_init);
#endif

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef AT_SYSINFO_EHDR
	return &gate_vma;
#else
	return NULL;
#endif
}

int in_gate_area_no_task(unsigned long addr)
{
#ifdef AT_SYSINFO_EHDR
	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
		return 1;
#endif
	return 0;
}

#endif	/* __HAVE_ARCH_GATE_AREA */