/*
 *  linux/mm/swapfile.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/shmem_fs.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/writeback.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/mutex.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/memcontrol.h>
#include <linux/poll.h>
#include <linux/oom.h>
#include <linux/frontswap.h>
#include <linux/swapfile.h>
#include <linux/export.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <linux/swapops.h>
#include <linux/page_cgroup.h>

static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
				 unsigned char);
static void free_swap_count_continuations(struct swap_info_struct *);
static sector_t map_swap_entry(swp_entry_t, struct block_device**);
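
/*
 * Locking (roughly): swap_lock guards the swap_list / swap_info[]
 * tables and nr_swapfiles; each swap_info_struct also has its own
 * ->lock, taken inside swap_lock (see enable_swap_info() below), and
 * mmlist_lock nests inside swap_lock too (see drain_mmlist()).
 */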
DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
atomic_long_t nr_swap_pages;
/* Protected with swap_lock; reading in vm_swap_full() doesn't need the lock. */
long total_swap_pages;
static int least_priority;
static atomic_t highest_priority_index = ATOMIC_INIT(-1);

static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
static const char Bad_offset[] = "Bad swap offset entry ";
static const char Unused_offset[] = "Unused swap offset entry ";

struct swap_list_t swap_list = {-1, -1};

struct swap_info_struct *swap_info[MAX_SWAPFILES];

static DEFINE_MUTEX(swapon_mutex);

static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
/* Activity counter to indicate that a swapon or swapoff has occurred */
static atomic_t proc_poll_event = ATOMIC_INIT(0);
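
/*
 * A swap_map[] entry is a single byte: the low bits count how many
 * references a slot has, SWAP_HAS_CACHE marks a slot whose page is in
 * swap cache, and COUNT_CONTINUED means the count has overflowed into
 * a continuation page (see swap_count_continued()).
 */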
static inline unsigned char swap_count(unsigned char ent)
{
	return ent & ~SWAP_HAS_CACHE;	/* may include SWAP_HAS_CONT flag */
}

/* returns 1 if swap entry is freed */
static int
__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
{
	swp_entry_t entry = swp_entry(si->type, offset);
	struct page *page;
	int ret = 0;

	page = find_get_page(swap_address_space(entry), entry.val);
	if (!page)
		return 0;
	/*
	 * This function is called from scan_swap_map(), which is also
	 * reached from vmscan.c while it is reclaiming pages, so a page
	 * lock may already be held here.  We have to use trylock to
	 * avoid deadlock.  This is a special case; in usual operation,
	 * use try_to_free_swap() with an explicit lock_page().
	 */
	if (trylock_page(page)) {
		ret = try_to_free_swap(page);
		unlock_page(page);
	}
	page_cache_release(page);
	return ret;
}

/*
 * swapon tells the device that all the old swap contents can be
 * discarded, to allow the swap device to optimize its wear-levelling.
 */
static int discard_swap(struct swap_info_struct *si)
{
	struct swap_extent *se;
	sector_t start_block;
	sector_t nr_blocks;
	int err = 0;

	/* Do not discard the swap header page! */
	se = &si->first_swap_extent;
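
	/*
	 * Block numbers below are in 512-byte sectors: the shift by
	 * PAGE_SHIFT - 9 converts page numbers to sector numbers
	 * (with 4K pages, one page spans eight sectors).
	 */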
	start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
	nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
	if (nr_blocks) {
		err = blkdev_issue_discard(si->bdev, start_block,
				nr_blocks, GFP_KERNEL, 0);
		if (err)
			return err;
		cond_resched();
	}

	list_for_each_entry(se, &si->first_swap_extent.list, list) {
		start_block = se->start_block << (PAGE_SHIFT - 9);
		nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);

		err = blkdev_issue_discard(si->bdev, start_block,
				nr_blocks, GFP_KERNEL, 0);
		if (err)
			break;

		cond_resched();
	}
	return err;		/* That will often be -EOPNOTSUPP */
}

/*
 * swap allocation tells the device that a cluster of swap can now be
 * discarded, to allow the swap device to optimize its wear-levelling.
 */
static void discard_swap_cluster(struct swap_info_struct *si,
				 pgoff_t start_page, pgoff_t nr_pages)
{
	struct swap_extent *se = si->curr_swap_extent;
	int found_extent = 0;

	while (nr_pages) {
		struct list_head *lh;

		if (se->start_page <= start_page &&
		    start_page < se->start_page + se->nr_pages) {
			pgoff_t offset = start_page - se->start_page;
			sector_t start_block = se->start_block + offset;
			sector_t nr_blocks = se->nr_pages - offset;

			if (nr_blocks > nr_pages)
				nr_blocks = nr_pages;
			start_page += nr_blocks;
			nr_pages -= nr_blocks;

			if (!found_extent++)
				si->curr_swap_extent = se;

			start_block <<= PAGE_SHIFT - 9;
			nr_blocks <<= PAGE_SHIFT - 9;
			if (blkdev_issue_discard(si->bdev, start_block,
				    nr_blocks, GFP_NOIO, 0))
				break;
		}

		lh = se->list.next;
		se = list_entry(lh, struct swap_extent, list);
	}
}
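
/*
 * Action callback for wait_on_bit() while SWP_DISCARDING is set:
 * just yield the CPU and return 0 so the wait loop re-checks the bit.
 */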
static int wait_for_discard(void *word)
{
	schedule();
	return 0;
}

#define SWAPFILE_CLUSTER	256
#define LATENCY_LIMIT		256
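
/*
 * Find a free slot in si->swap_map, mark it with @usage and return its
 * offset; 0 means failure (the header page is never allocated, so
 * offset 0 is never a valid result).  Called with si->lock held; the
 * lock is dropped and retaken around the unlocked scans below.
 */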
static unsigned long scan_swap_map(struct swap_info_struct *si,
				   unsigned char usage)
{
	unsigned long offset;
	unsigned long scan_base;
	unsigned long last_in_cluster = 0;
	int latency_ration = LATENCY_LIMIT;
	int found_free_cluster = 0;

	/*
	 * We try to cluster swap pages by allocating them sequentially
	 * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
	 * way, however, we resort to first-free allocation, starting
	 * a new cluster.  This prevents us from scattering swap pages
	 * all over the entire swap partition, so that we reduce
	 * overall disk seek times between swap pages.  -- sct
	 * But we do now try to find an empty cluster.  -Andrea
	 * And we let swap pages go all over an SSD partition.  Hugh
	 */

	si->flags += SWP_SCANNING;
	scan_base = offset = si->cluster_next;

	if (unlikely(!si->cluster_nr--)) {
		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
			si->cluster_nr = SWAPFILE_CLUSTER - 1;
			goto checks;
		}
		if (si->flags & SWP_DISCARDABLE) {
			/*
			 * Start range check on racing allocations, in case
			 * they overlap the cluster we eventually decide on
			 * (we scan without swap_lock to allow preemption).
			 * It's hardly conceivable that cluster_nr could be
			 * wrapped during our scan, but don't depend on it.
			 */
			if (si->lowest_alloc)
				goto checks;
			si->lowest_alloc = si->max;
			si->highest_alloc = 0;
		}
		spin_unlock(&si->lock);

		/*
		 * If seek is expensive, start searching for new cluster from
		 * start of partition, to minimize the span of allocated swap.
		 * But if seek is cheap, search from our current position, so
		 * that swap is allocated from all over the partition: if the
		 * Flash Translation Layer only remaps within limited zones,
		 * we don't want to wear out the first zone too quickly.
		 */
		if (!(si->flags & SWP_SOLIDSTATE))
			scan_base = offset = si->lowest_bit;
		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;

		/* Locate the first empty (unaligned) cluster */
		for (; last_in_cluster <= si->highest_bit; offset++) {
			if (si->swap_map[offset])
				last_in_cluster = offset + SWAPFILE_CLUSTER;
			else if (offset == last_in_cluster) {
				spin_lock(&si->lock);
				offset -= SWAPFILE_CLUSTER - 1;
				si->cluster_next = offset;
				si->cluster_nr = SWAPFILE_CLUSTER - 1;
				found_free_cluster = 1;
				goto checks;
			}
			if (unlikely(--latency_ration < 0)) {
				cond_resched();
				latency_ration = LATENCY_LIMIT;
			}
		}

		offset = si->lowest_bit;
		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;

		/* Locate the first empty (unaligned) cluster */
		for (; last_in_cluster < scan_base; offset++) {
			if (si->swap_map[offset])
				last_in_cluster = offset + SWAPFILE_CLUSTER;
			else if (offset == last_in_cluster) {
				spin_lock(&si->lock);
				offset -= SWAPFILE_CLUSTER - 1;
				si->cluster_next = offset;
				si->cluster_nr = SWAPFILE_CLUSTER - 1;
				found_free_cluster = 1;
				goto checks;
			}
			if (unlikely(--latency_ration < 0)) {
				cond_resched();
				latency_ration = LATENCY_LIMIT;
			}
		}

		offset = scan_base;
		spin_lock(&si->lock);
		si->cluster_nr = SWAPFILE_CLUSTER - 1;
		si->lowest_alloc = 0;
	}

checks:
	if (!(si->flags & SWP_WRITEOK))
		goto no_page;
	if (!si->highest_bit)
		goto no_page;
	if (offset > si->highest_bit)
		scan_base = offset = si->lowest_bit;

	/* reuse swap entry of cache-only swap if not busy. */
	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
		int swap_was_freed;
		spin_unlock(&si->lock);
		swap_was_freed = __try_to_reclaim_swap(si, offset);
		spin_lock(&si->lock);
		/* entry was freed successfully, try to use this again */
		if (swap_was_freed)
			goto checks;
		goto scan; /* check next one */
	}

	if (si->swap_map[offset])
		goto scan;

	if (offset == si->lowest_bit)
		si->lowest_bit++;
	if (offset == si->highest_bit)
		si->highest_bit--;
	si->inuse_pages++;
	if (si->inuse_pages == si->pages) {
		si->lowest_bit = si->max;
		si->highest_bit = 0;
	}
	si->swap_map[offset] = usage;
	si->cluster_next = offset + 1;
	si->flags -= SWP_SCANNING;

	if (si->lowest_alloc) {
		/*
		 * Only set when SWP_DISCARDABLE, and there's a scan
		 * for a free cluster in progress or just completed.
		 */
		if (found_free_cluster) {
			/*
			 * To optimize wear-levelling, discard the
			 * old data of the cluster, taking care not to
			 * discard any of its pages that have already
			 * been allocated by racing tasks (offset has
			 * already stepped over any at the beginning).
			 */
			if (offset < si->highest_alloc &&
			    si->lowest_alloc <= last_in_cluster)
				last_in_cluster = si->lowest_alloc - 1;
			si->flags |= SWP_DISCARDING;
			spin_unlock(&si->lock);

			if (offset < last_in_cluster)
				discard_swap_cluster(si, offset,
					last_in_cluster - offset + 1);

			spin_lock(&si->lock);
			si->lowest_alloc = 0;
			si->flags &= ~SWP_DISCARDING;

			smp_mb();	/* wake_up_bit advises this */
			wake_up_bit(&si->flags, ilog2(SWP_DISCARDING));

		} else if (si->flags & SWP_DISCARDING) {
			/*
			 * Delay using pages allocated by racing tasks
			 * until the whole discard has been issued. We
			 * could defer that delay until swap_writepage,
			 * but it's easier to keep this self-contained.
			 */
			spin_unlock(&si->lock);
			wait_on_bit(&si->flags, ilog2(SWP_DISCARDING),
				wait_for_discard, TASK_UNINTERRUPTIBLE);
			spin_lock(&si->lock);
		} else {
			/*
			 * Note pages allocated by racing tasks while
			 * scan for a free cluster is in progress, so
			 * that its final discard can exclude them.
			 */
			if (offset < si->lowest_alloc)
				si->lowest_alloc = offset;
			if (offset > si->highest_alloc)
				si->highest_alloc = offset;
		}
	}
	return offset;

scan:
	spin_unlock(&si->lock);
	while (++offset <= si->highest_bit) {
		if (!si->swap_map[offset]) {
			spin_lock(&si->lock);
			goto checks;
		}
		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
			spin_lock(&si->lock);
			goto checks;
		}
		if (unlikely(--latency_ration < 0)) {
			cond_resched();
			latency_ration = LATENCY_LIMIT;
		}
	}
	offset = si->lowest_bit;
	while (++offset < scan_base) {
		if (!si->swap_map[offset]) {
			spin_lock(&si->lock);
			goto checks;
		}
		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
			spin_lock(&si->lock);
			goto checks;
		}
		if (unlikely(--latency_ration < 0)) {
			cond_resched();
			latency_ration = LATENCY_LIMIT;
		}
	}
	spin_lock(&si->lock);

no_page:
	si->flags -= SWP_SCANNING;
	return 0;
}
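
/*
 * Allocate a swap entry (marked SWAP_HAS_CACHE, for the swap cache)
 * from the highest-priority swap type with free slots, round-robining
 * among types of equal priority.  Returns entry.val == 0 when swap is
 * exhausted.
 */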
swp_entry_t get_swap_page(void)
{
	struct swap_info_struct *si;
	pgoff_t offset;
	int type, next;
	int wrapped = 0;
	int hp_index;

	spin_lock(&swap_lock);
	if (atomic_long_read(&nr_swap_pages) <= 0)
		goto noswap;
	atomic_long_dec(&nr_swap_pages);

	for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
		hp_index = atomic_xchg(&highest_priority_index, -1);
		/*
		 * highest_priority_index records current highest priority swap
		 * type which just frees swap entries. If its priority is
		 * higher than that of swap_list.next swap type, we use it. It
		 * isn't protected by swap_lock, so it can be an invalid value
		 * if the corresponding swap type is swapoff. We double check
		 * the flags here. It's even possible the swap type is swapoff
		 * and swapon again and its priority is changed. In such rare
		 * case, low priority swap type might be used, but eventually
		 * high priority swap will be used after several rounds of
		 * swap.
		 */
		if (hp_index != -1 && hp_index != type &&
		    swap_info[type]->prio < swap_info[hp_index]->prio &&
		    (swap_info[hp_index]->flags & SWP_WRITEOK)) {
			type = hp_index;
			swap_list.next = type;
		}

		si = swap_info[type];
		next = si->next;
		if (next < 0 ||
		    (!wrapped && si->prio != swap_info[next]->prio)) {
			next = swap_list.head;
			wrapped++;
		}

		spin_lock(&si->lock);
		if (!si->highest_bit) {
			spin_unlock(&si->lock);
			continue;
		}
		if (!(si->flags & SWP_WRITEOK)) {
			spin_unlock(&si->lock);
			continue;
		}

		swap_list.next = next;

		spin_unlock(&swap_lock);
		/* This is called for allocating swap entry for cache */
		offset = scan_swap_map(si, SWAP_HAS_CACHE);
		spin_unlock(&si->lock);
		if (offset)
			return swp_entry(type, offset);
		spin_lock(&swap_lock);
		next = swap_list.next;
	}

	atomic_long_inc(&nr_swap_pages);
noswap:
	spin_unlock(&swap_lock);
	return (swp_entry_t) {0};
}

/* The only caller of this function is now the suspend routine */
swp_entry_t get_swap_page_of_type(int type)
{
	struct swap_info_struct *si;
	pgoff_t offset;

	si = swap_info[type];
	spin_lock(&si->lock);
	if (si && (si->flags & SWP_WRITEOK)) {
		atomic_long_dec(&nr_swap_pages);
		/* This is called for allocating swap entry, not cache */
		offset = scan_swap_map(si, 1);
		if (offset) {
			spin_unlock(&si->lock);
			return swp_entry(type, offset);
		}
		atomic_long_inc(&nr_swap_pages);
	}
	spin_unlock(&si->lock);
	return (swp_entry_t) {0};
}
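
/*
 * Look up and validate the swap type and offset packed in @entry, and
 * return the matching swap_info_struct with its ->lock held, or NULL
 * (after logging what was wrong) for a bad or unused entry.
 */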
static struct swap_info_struct *swap_info_get(swp_entry_t entry)
{
	struct swap_info_struct *p;
	unsigned long offset, type;

	if (!entry.val)
		goto out;
	type = swp_type(entry);
	if (type >= nr_swapfiles)
		goto bad_nofile;
	p = swap_info[type];
	if (!(p->flags & SWP_USED))
		goto bad_device;
	offset = swp_offset(entry);
	if (offset >= p->max)
		goto bad_offset;
	if (!p->swap_map[offset])
		goto bad_free;
	spin_lock(&p->lock);
	return p;

bad_free:
	printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
	goto out;
bad_offset:
	printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
	goto out;
bad_device:
	printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
	goto out;
bad_nofile:
	printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
out:
	return NULL;
}

/*
 * A swap entry has just been freed from this swap type; check whether
 * it is now the highest-priority type with free entries.
 * get_swap_page() uses highest_priority_index to search for the
 * highest-priority swap type.  swap_info_struct.lock can't protect us
 * when multiple swap types are active, so use atomic_cmpxchg.
 */
static void set_highest_priority_index(int type)
{
	int old_hp_index, new_hp_index;

	do {
		old_hp_index = atomic_read(&highest_priority_index);
		if (old_hp_index != -1 &&
			swap_info[old_hp_index]->prio >= swap_info[type]->prio)
			break;
		new_hp_index = type;
	} while (atomic_cmpxchg(&highest_priority_index,
		old_hp_index, new_hp_index) != old_hp_index);
}
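
/*
 * Drop one reference of kind @usage (SWAP_HAS_CACHE or one map count)
 * from the slot for @entry, returning the remaining swap_map byte.
 * When it reaches zero the slot is freed: lowest_bit/highest_bit are
 * widened, nr_swap_pages is bumped, frontswap is invalidated and the
 * backing block device is notified.
 */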
static unsigned char swap_entry_free(struct swap_info_struct *p,
				     swp_entry_t entry, unsigned char usage)
{
	unsigned long offset = swp_offset(entry);
	unsigned char count;
	unsigned char has_cache;

	count = p->swap_map[offset];
	has_cache = count & SWAP_HAS_CACHE;
	count &= ~SWAP_HAS_CACHE;

	if (usage == SWAP_HAS_CACHE) {
		VM_BUG_ON(!has_cache);
		has_cache = 0;
	} else if (count == SWAP_MAP_SHMEM) {
		/*
		 * Or we could insist on shmem.c using a special
		 * swap_shmem_free() and free_shmem_swap_and_cache()...
		 */
		count = 0;
	} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
		if (count == COUNT_CONTINUED) {
			if (swap_count_continued(p, offset, count))
				count = SWAP_MAP_MAX | COUNT_CONTINUED;
			else
				count = SWAP_MAP_MAX;
		} else
			count--;
	}

	if (!count)
		mem_cgroup_uncharge_swap(entry);

	usage = count | has_cache;
	p->swap_map[offset] = usage;

	/* free if no reference */
	if (!usage) {
		if (offset < p->lowest_bit)
			p->lowest_bit = offset;
		if (offset > p->highest_bit)
			p->highest_bit = offset;
		set_highest_priority_index(p->type);
		atomic_long_inc(&nr_swap_pages);
		p->inuse_pages--;
		frontswap_invalidate_page(p->type, offset);
		if (p->flags & SWP_BLKDEV) {
			struct gendisk *disk = p->bdev->bd_disk;
			if (disk->fops->swap_slot_free_notify)
				disk->fops->swap_slot_free_notify(p->bdev,
								  offset);
		}
	}

	return usage;
}

/*
 * Caller has made sure that the swapdevice corresponding to entry
 * is still around or has not been recycled.
 */
void swap_free(swp_entry_t entry)
{
	struct swap_info_struct *p;

	p = swap_info_get(entry);
	if (p) {
		swap_entry_free(p, entry, 1);
		spin_unlock(&p->lock);
	}
}

/*
 * Called after dropping swapcache to decrease refcnt to swap entries.
 */
void swapcache_free(swp_entry_t entry, struct page *page)
{
	struct swap_info_struct *p;
	unsigned char count;

	p = swap_info_get(entry);
	if (p) {
		count = swap_entry_free(p, entry, SWAP_HAS_CACHE);
		if (page)
			mem_cgroup_uncharge_swapcache(page, entry, count != 0);
		spin_unlock(&p->lock);
	}
}

/*
 * How many references to page are currently swapped out?
 * This does not give an exact answer when swap count is continued,
 * but does include the high COUNT_CONTINUED flag to allow for that.
 */
int page_swapcount(struct page *page)
{
	int count = 0;
	struct swap_info_struct *p;
	swp_entry_t entry;

	entry.val = page_private(page);
	p = swap_info_get(entry);
	if (p) {
		count = swap_count(p->swap_map[swp_offset(entry)]);
		spin_unlock(&p->lock);
	}
	return count;
}

/*
 * We can write to an anon page without COW if there are no other references
 * to it.  And as a side-effect, free up its swap: because the old content
 * on disk will never be read, and seeking back there to write new content
 * later would only waste time away from clustering.
 */
int reuse_swap_page(struct page *page)
{
	int count;

	VM_BUG_ON(!PageLocked(page));
	if (unlikely(PageKsm(page)))
		return 0;
	count = page_mapcount(page);
	if (count <= 1 && PageSwapCache(page)) {
		count += page_swapcount(page);
		if (count == 1 && !PageWriteback(page)) {
			delete_from_swap_cache(page);
			SetPageDirty(page);
		}
	}
	return count <= 1;
}

/*
 * If swap is getting full, or if there are no more mappings of this page,
 * then try_to_free_swap is called to free its swap space.
 */
int try_to_free_swap(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));

	if (!PageSwapCache(page))
		return 0;
	if (PageWriteback(page))
		return 0;
	if (page_swapcount(page))
		return 0;

	/*
	 * Once hibernation has begun to create its image of memory,
	 * there's a danger that one of the calls to try_to_free_swap()
	 * - most probably a call from __try_to_reclaim_swap() while
	 * hibernation is allocating its own swap pages for the image,
	 * but conceivably even a call from memory reclaim - will free
	 * the swap from a page which has already been recorded in the
	 * image as a clean swapcache page, and then reuse its swap for
	 * another page of the image.  On waking from hibernation, the
	 * original page might be freed under memory pressure, then
	 * later read back in from swap, now with the wrong data.
	 *
	 * Hibernation suspends storage while it is writing the image
	 * to disk, so check that here.
	 */
	if (pm_suspended_storage())
		return 0;

	delete_from_swap_cache(page);
	SetPageDirty(page);
	return 1;
}

/*
 * Free the swap entry like above, but also try to
 * free the page cache entry if it is the last user.
 */
int free_swap_and_cache(swp_entry_t entry)
{
	struct swap_info_struct *p;
	struct page *page = NULL;

	if (non_swap_entry(entry))
		return 1;

	p = swap_info_get(entry);
	if (p) {
		if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
			page = find_get_page(swap_address_space(entry),
						entry.val);
			if (page && !trylock_page(page)) {
				page_cache_release(page);
				page = NULL;
			}
		}
		spin_unlock(&p->lock);
	}
	if (page) {
		/*
		 * Not mapped elsewhere, or swap space full? Free it!
		 * Also recheck PageSwapCache now page is locked (above).
		 */
		if (PageSwapCache(page) && !PageWriteback(page) &&
				(!page_mapped(page) || vm_swap_full())) {
			delete_from_swap_cache(page);
			SetPageDirty(page);
		}
		unlock_page(page);
		page_cache_release(page);
	}
	return p != NULL;
}

#ifdef CONFIG_HIBERNATION
/*
 * Find the swap type that corresponds to given device (if any).
 *
 * @offset - number of the PAGE_SIZE-sized block of the device, starting
 * from 0, in which the swap header is expected to be located.
 *
 * This is needed for the suspend to disk (aka swsusp).
 */
int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
{
	struct block_device *bdev = NULL;
	int type;

	if (device)
		bdev = bdget(device);

	spin_lock(&swap_lock);
	for (type = 0; type < nr_swapfiles; type++) {
		struct swap_info_struct *sis = swap_info[type];

		if (!(sis->flags & SWP_WRITEOK))
			continue;

		if (!bdev) {
			if (bdev_p)
				*bdev_p = bdgrab(sis->bdev);

			spin_unlock(&swap_lock);
			return type;
		}
		if (bdev == sis->bdev) {
			struct swap_extent *se = &sis->first_swap_extent;

			if (se->start_block == offset) {
				if (bdev_p)
					*bdev_p = bdgrab(sis->bdev);

				spin_unlock(&swap_lock);
				bdput(bdev);
				return type;
			}
		}
	}
	spin_unlock(&swap_lock);
	if (bdev)
		bdput(bdev);

	return -ENODEV;
}

/*
 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
 * corresponding to given index in swap_info (swap type).
 */
sector_t swapdev_block(int type, pgoff_t offset)
{
	struct block_device *bdev;

	if ((unsigned int)type >= nr_swapfiles)
		return 0;
	if (!(swap_info[type]->flags & SWP_WRITEOK))
		return 0;
	return map_swap_entry(swp_entry(type, offset), &bdev);
}

/*
 * Return either the total number of swap pages of given type, or the number
 * of free pages of that type (depending on @free)
 *
 * This is needed for software suspend
 */
unsigned int count_swap_pages(int type, int free)
{
	unsigned int n = 0;

	spin_lock(&swap_lock);
	if ((unsigned int)type < nr_swapfiles) {
		struct swap_info_struct *sis = swap_info[type];

		spin_lock(&sis->lock);
		if (sis->flags & SWP_WRITEOK) {
			n = sis->pages;
			if (free)
				n -= sis->inuse_pages;
		}
		spin_unlock(&sis->lock);
	}
	spin_unlock(&swap_lock);
	return n;
}
#endif /* CONFIG_HIBERNATION */

/*
 * No need to decide whether this PTE shares the swap entry with others,
 * just let do_wp_page work it out if a write is requested later - to
 * force COW, vm_page_prot omits write permission from any private vma.
 */
static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, swp_entry_t entry, struct page *page)
{
	struct page *swapcache;
	struct mem_cgroup *memcg;
	spinlock_t *ptl;
	pte_t *pte;
	int ret = 1;

	swapcache = page;
	page = ksm_might_need_to_copy(page, vma, addr);
	if (unlikely(!page))
		return -ENOMEM;

	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
					 GFP_KERNEL, &memcg)) {
		ret = -ENOMEM;
		goto out_nolock;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
		mem_cgroup_cancel_charge_swapin(memcg);
		ret = 0;
		goto out;
	}

	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
	get_page(page);
	set_pte_at(vma->vm_mm, addr, pte,
		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
	if (page == swapcache)
		page_add_anon_rmap(page, vma, addr);
	else /* ksm created a completely new copy */
		page_add_new_anon_rmap(page, vma, addr);
	mem_cgroup_commit_charge_swapin(page, memcg);
	swap_free(entry);
	/*
	 * Move the page to the active list so it is not
	 * immediately swapped out again after swapon.
	 */
	activate_page(page);
out:
	pte_unmap_unlock(pte, ptl);
out_nolock:
	if (page != swapcache) {
		unlock_page(page);
		put_page(page);
	}
	return ret;
}

static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				swp_entry_t entry, struct page *page)
{
	pte_t swp_pte = swp_entry_to_pte(entry);
	pte_t *pte;
	int ret = 0;

	/*
	 * We don't actually need pte lock while scanning for swp_pte: since
	 * we hold page lock and mmap_sem, swp_pte cannot be inserted into the
	 * page table while we're scanning; though it could get zapped, and on
	 * some architectures (e.g. x86_32 with PAE) we might catch a glimpse
	 * of unmatched parts which look like swp_pte, so unuse_pte must
	 * recheck under pte lock.  Scanning without pte lock lets it be
	 * preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
	 */
	pte = pte_offset_map(pmd, addr);
	do {
		/*
		 * swapoff spends a _lot_ of time in this loop!
		 * Test inline before going to call unuse_pte.
		 */
		if (unlikely(pte_same(*pte, swp_pte))) {
			pte_unmap(pte);
			ret = unuse_pte(vma, pmd, addr, entry, page);
			if (ret)
				goto out;
			pte = pte_offset_map(pmd, addr);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap(pte - 1);
out:
	return ret;
}

static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				swp_entry_t entry, struct page *page)
{
	pmd_t *pmd;
	unsigned long next;
	int ret;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			continue;
		ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
		if (ret)
			return ret;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				swp_entry_t entry, struct page *page)
{
	pud_t *pud;
	unsigned long next;
	int ret;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
		if (ret)
			return ret;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int unuse_vma(struct vm_area_struct *vma,
				swp_entry_t entry, struct page *page)
{
	pgd_t *pgd;
	unsigned long addr, end, next;
	int ret;

	if (page_anon_vma(page)) {
		addr = page_address_in_vma(page, vma);
		if (addr == -EFAULT)
			return 0;
		else
			end = addr + PAGE_SIZE;
	} else {
		addr = vma->vm_start;
		end = vma->vm_end;
	}

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
		if (ret)
			return ret;
	} while (pgd++, addr = next, addr != end);
	return 0;
}
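
/*
 * Walk every anon vma of @mm looking for ptes holding @entry, replacing
 * each with a pte mapping @page.  Called with the page locked; the page
 * lock may be dropped and retaken while waiting for mmap_sem.
 */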
static int unuse_mm(struct mm_struct *mm,
				swp_entry_t entry, struct page *page)
{
	struct vm_area_struct *vma;
	int ret = 0;

	if (!down_read_trylock(&mm->mmap_sem)) {
		/*
		 * Activate page so shrink_inactive_list is unlikely to unmap
		 * its ptes while lock is dropped, so swapoff can make progress.
		 */
		activate_page(page);
		unlock_page(page);
		down_read(&mm->mmap_sem);
		lock_page(page);
	}
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
			break;
	}
	up_read(&mm->mmap_sem);
	return (ret < 0) ? ret : 0;
}

/*
 * Scan swap_map (or frontswap_map if frontswap parameter is true)
 * from current position to next entry still in use.
 * Recycle to start on reaching the end, returning 0 when empty.
 */
static unsigned int find_next_to_unuse(struct swap_info_struct *si,
					unsigned int prev, bool frontswap)
{
	unsigned int max = si->max;
	unsigned int i = prev;
	unsigned char count;

	/*
	 * No need for swap_lock here: we're just looking
	 * for whether an entry is in use, not modifying it; false
	 * hits are okay, and sys_swapoff() has already prevented new
	 * allocations from this area (while holding swap_lock).
	 */
	for (;;) {
		if (++i >= max) {
			if (!prev) {
				i = 0;
				break;
			}
			/*
			 * No entries in use at top of swap_map,
			 * loop back to start and recheck there.
			 */
			max = prev + 1;
			prev = 0;
			i = 1;
		}
		if (frontswap) {
			if (frontswap_test(si, i))
				break;
			else
				continue;
		}
		count = si->swap_map[i];
		if (count && swap_count(count) != SWAP_MAP_BAD)
			break;
	}
	return i;
}

/*
 * We completely avoid races by reading each swap page in advance,
 * and then search for the process using it.  All the necessary
 * page table adjustments can then be made atomically.
 *
 * if the boolean frontswap is true, only unuse pages_to_unuse pages;
 * pages_to_unuse==0 means all pages; ignored if frontswap is false
 */
int try_to_unuse(unsigned int type, bool frontswap,
		 unsigned long pages_to_unuse)
{
	struct swap_info_struct *si = swap_info[type];
	struct mm_struct *start_mm;
	unsigned char *swap_map;
	unsigned char swcount;
	struct page *page;
	swp_entry_t entry;
	unsigned int i = 0;
	int retval = 0;

	/*
	 * When searching mms for an entry, a good strategy is to
	 * start at the first mm we freed the previous entry from
	 * (though actually we don't notice whether we or coincidence
	 * freed the entry).  Initialize this start_mm with a hold.
	 *
	 * A simpler strategy would be to start at the last mm we
	 * freed the previous entry from; but that would take less
	 * advantage of mmlist ordering, which clusters forked mms
	 * together, child after parent.  If we race with dup_mmap(), we
	 * prefer to resolve parent before child, lest we miss entries
	 * duplicated after we scanned child: using last mm would invert
	 * that.
	 */
	start_mm = &init_mm;
	atomic_inc(&init_mm.mm_users);

	/*
	 * Keep on scanning until all entries have gone.  Usually,
	 * one pass through swap_map is enough, but not necessarily:
	 * there are races when an instance of an entry might be missed.
	 */
	while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
		if (signal_pending(current)) {
			retval = -EINTR;
			break;
		}

		/*
		 * Get a page for the entry, using the existing swap
		 * cache page if there is one.  Otherwise, get a clean
		 * page and read the swap into it.
		 */
		swap_map = &si->swap_map[i];
		entry = swp_entry(type, i);
		page = read_swap_cache_async(entry,
					GFP_HIGHUSER_MOVABLE, NULL, 0);
		if (!page) {
			/*
			 * Either swap_duplicate() failed because entry
			 * has been freed independently, and will not be
			 * reused since sys_swapoff() already disabled
			 * allocation from here, or alloc_page() failed.
			 */
			if (!*swap_map)
				continue;
			retval = -ENOMEM;
			break;
		}

		/*
		 * Don't hold on to start_mm if it looks like exiting.
		 */
		if (atomic_read(&start_mm->mm_users) == 1) {
			mmput(start_mm);
			start_mm = &init_mm;
			atomic_inc(&init_mm.mm_users);
		}

		/*
		 * Wait for and lock page.  When do_swap_page races with
		 * try_to_unuse, do_swap_page can handle the fault much
		 * faster than try_to_unuse can locate the entry.  This
		 * apparently redundant "wait_on_page_locked" lets try_to_unuse
		 * defer to do_swap_page in such a case - in some tests,
		 * do_swap_page and try_to_unuse repeatedly compete.
		 */
		wait_on_page_locked(page);
		wait_on_page_writeback(page);
		lock_page(page);
		wait_on_page_writeback(page);

		/*
		 * Remove all references to entry.
		 */
		swcount = *swap_map;
		if (swap_count(swcount) == SWAP_MAP_SHMEM) {
			retval = shmem_unuse(entry, page);
			/* page has already been unlocked and released */
			if (retval < 0)
				break;
			continue;
		}
		if (swap_count(swcount) && start_mm != &init_mm)
			retval = unuse_mm(start_mm, entry, page);

		if (swap_count(*swap_map)) {
			int set_start_mm = (*swap_map >= swcount);
			struct list_head *p = &start_mm->mmlist;
			struct mm_struct *new_start_mm = start_mm;
			struct mm_struct *prev_mm = start_mm;
			struct mm_struct *mm;
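
			/*
			 * Walk init_mm.mmlist, pinning each mm with
			 * atomic_inc_not_zero() so it can't be freed
			 * under us, and try unuse_mm() on each until
			 * no swap count remains for this entry.
			 */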
			atomic_inc(&new_start_mm->mm_users);
			atomic_inc(&prev_mm->mm_users);
			spin_lock(&mmlist_lock);
			while (swap_count(*swap_map) && !retval &&
					(p = p->next) != &start_mm->mmlist) {
				mm = list_entry(p, struct mm_struct, mmlist);
				if (!atomic_inc_not_zero(&mm->mm_users))
					continue;
				spin_unlock(&mmlist_lock);
				mmput(prev_mm);
				prev_mm = mm;

				cond_resched();

				swcount = *swap_map;
				if (!swap_count(swcount)) /* any usage ? */
					;
				else if (mm == &init_mm)
					set_start_mm = 1;
				else
					retval = unuse_mm(mm, entry, page);

				if (set_start_mm && *swap_map < swcount) {
					mmput(new_start_mm);
					atomic_inc(&mm->mm_users);
					new_start_mm = mm;
					set_start_mm = 0;
				}
				spin_lock(&mmlist_lock);
			}
			spin_unlock(&mmlist_lock);
			mmput(prev_mm);
			mmput(start_mm);
			start_mm = new_start_mm;
		}
		if (retval) {
			unlock_page(page);
			page_cache_release(page);
			break;
		}

		/*
		 * If a reference remains (rare), we would like to leave
		 * the page in the swap cache; but try_to_unmap could
		 * then re-duplicate the entry once we drop page lock,
		 * so we might loop indefinitely; also, that page could
		 * not be swapped out to other storage meanwhile.  So:
		 * delete from cache even if there's another reference,
		 * after ensuring that the data has been saved to disk -
		 * since if the reference remains (rarer), it will be
		 * read from disk into another page.  Splitting into two
		 * pages would be incorrect if swap supported "shared
		 * private" pages, but they are handled by tmpfs files.
		 *
		 * Given how unuse_vma() targets one particular offset
		 * in an anon_vma, once the anon_vma has been determined,
		 * this splitting happens to be just what is needed to
		 * handle where KSM pages have been swapped out: re-reading
		 * is unnecessarily slow, but we can fix that later on.
		 */
		if (swap_count(*swap_map) &&
		     PageDirty(page) && PageSwapCache(page)) {
			struct writeback_control wbc = {
				.sync_mode = WB_SYNC_NONE,
			};

			swap_writepage(page, &wbc);
			lock_page(page);
			wait_on_page_writeback(page);
		}

		/*
		 * It is conceivable that a racing task removed this page from
		 * swap cache just before we acquired the page lock at the top,
		 * or while we dropped it in unuse_mm().  The page might even
		 * be back in swap cache on another swap area: that we must not
		 * delete, since it may not have been written out to swap yet.
		 */
		if (PageSwapCache(page) &&
		    likely(page_private(page) == entry.val))
			delete_from_swap_cache(page);

		/*
		 * So that we could skip searching mms once the swap count
		 * went to 1, we did not mark any present ptes as dirty: we
		 * must mark the page dirty so shrink_page_list will
		 * preserve it.
		 */
		SetPageDirty(page);
		unlock_page(page);
		page_cache_release(page);

		/*
		 * Make sure that we aren't completely killing
		 * interactive performance.
		 */
		cond_resched();
		if (frontswap && pages_to_unuse > 0) {
			if (!--pages_to_unuse)
				break;
		}
	}

	mmput(start_mm);
	return retval;
}

/*
 * After a successful try_to_unuse, if no swap is now in use, we know
 * we can empty the mmlist.  swap_lock must be held on entry and exit.
 * Note that mmlist_lock nests inside swap_lock, and an mm must be
 * added to the mmlist just after swap_duplicate - before would be racy.
 */
static void drain_mmlist(void)
{
	struct list_head *p, *next;
	unsigned int type;

	for (type = 0; type < nr_swapfiles; type++)
		if (swap_info[type]->inuse_pages)
			return;
	spin_lock(&mmlist_lock);
	list_for_each_safe(p, next, &init_mm.mmlist)
		list_del_init(p);
	spin_unlock(&mmlist_lock);
}

/*
 * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
 * corresponds to page offset for the specified swap entry.
 * Note that the return type is sector_t, but it returns the page offset
 * into the bdev, not the sector offset.
 */
static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
{
	struct swap_info_struct *sis;
	struct swap_extent *start_se;
	struct swap_extent *se;
	pgoff_t offset;

	sis = swap_info[swp_type(entry)];
	*bdev = sis->bdev;

	offset = swp_offset(entry);
	start_se = sis->curr_swap_extent;
	se = start_se;

	for ( ; ; ) {
		struct list_head *lh;

		if (se->start_page <= offset &&
				offset < (se->start_page + se->nr_pages)) {
			return se->start_block + (offset - se->start_page);
		}
		lh = se->list.next;
		se = list_entry(lh, struct swap_extent, list);
		sis->curr_swap_extent = se;
		BUG_ON(se == start_se);		/* It *must* be present */
	}
}

/*
 * Returns the page offset into bdev for the specified page's swap entry.
 */
sector_t map_swap_page(struct page *page, struct block_device **bdev)
{
	swp_entry_t entry;
	entry.val = page_private(page);
	return map_swap_entry(entry, bdev);
}

/*
 * Free all of a swapdev's extent information
 */
static void destroy_swap_extents(struct swap_info_struct *sis)
{
	while (!list_empty(&sis->first_swap_extent.list)) {
		struct swap_extent *se;

		se = list_entry(sis->first_swap_extent.list.next,
				struct swap_extent, list);
		list_del(&se->list);
		kfree(se);
	}

	if (sis->flags & SWP_FILE) {
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;

		sis->flags &= ~SWP_FILE;
		mapping->a_ops->swap_deactivate(swap_file);
	}
}

/*
 * Add a block range (and the corresponding page range) into this swapdev's
 * extent list.  The extent list is kept sorted in page order.
 *
 * This function rather assumes that it is called in ascending page order.
 */
int
add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block)
{
	struct swap_extent *se;
	struct swap_extent *new_se;
	struct list_head *lh;

	if (start_page == 0) {
		se = &sis->first_swap_extent;
		sis->curr_swap_extent = se;
		se->start_page = 0;
		se->nr_pages = nr_pages;
		se->start_block = start_block;
		return 1;
	} else {
		lh = sis->first_swap_extent.list.prev;	/* Highest extent */
		se = list_entry(lh, struct swap_extent, list);
		BUG_ON(se->start_page + se->nr_pages != start_page);
		if (se->start_block + se->nr_pages == start_block) {
			/* Merge it */
			se->nr_pages += nr_pages;
			return 0;
		}
	}

	/*
	 * No merge.  Insert a new extent, preserving ordering.
	 */
	new_se = kmalloc(sizeof(*se), GFP_KERNEL);
	if (new_se == NULL)
		return -ENOMEM;
	new_se->start_page = start_page;
	new_se->nr_pages = nr_pages;
	new_se->start_block = start_block;

	list_add_tail(&new_se->list, &sis->first_swap_extent.list);
	return 1;
}
  1304. /*
  1305. * A `swap extent' is a simple thing which maps a contiguous range of pages
  1306. * onto a contiguous range of disk blocks. An ordered list of swap extents
  1307. * is built at swapon time and is then used at swap_writepage/swap_readpage
  1308. * time for locating where on disk a page belongs.
  1309. *
  1310. * If the swapfile is an S_ISBLK block device, a single extent is installed.
  1311. * This is done so that the main operating code can treat S_ISBLK and S_ISREG
  1312. * swap files identically.
  1313. *
  1314. * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
  1315. * extent list operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK
  1316. * swapfiles are handled *identically* after swapon time.
  1317. *
  1318. * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
  1319. * and will parse them into an ordered extent list, in PAGE_SIZE chunks. If
  1320. * some stray blocks are found which do not fall within the PAGE_SIZE alignment
  1321. * requirements, they are simply tossed out - we will never use those blocks
  1322. * for swapping.
  1323. *
  1324. * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon. This
  1325. * prevents root from shooting her foot off by ftruncating an in-use swapfile,
  1326. * which will scribble on the fs.
  1327. *
  1328. * The amount of disk space which a single swap extent represents varies.
  1329. * Typically it is in the 1-4 megabyte range. So we can have hundreds of
  1330. * extents in the list. To avoid much list walking, we cache the previous
  1331. * search location in `curr_swap_extent', and start new searches from there.
  1332. * This is extremely effective. The average number of iterations in
  1333. * map_swap_page() has been measured at about 0.3 per page. - akpm.
  1334. */
static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
{
	struct file *swap_file = sis->swap_file;
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	int ret;

	if (S_ISBLK(inode->i_mode)) {
		ret = add_swap_extent(sis, 0, sis->max, 0);
		*span = sis->pages;
		return ret;
	}

	if (mapping->a_ops->swap_activate) {
		ret = mapping->a_ops->swap_activate(sis, swap_file, span);
		if (!ret) {
			sis->flags |= SWP_FILE;
			ret = add_swap_extent(sis, 0, sis->max, 0);
			*span = sis->pages;
		}
		return ret;
	}

	return generic_swapfile_activate(sis, swap_file, span);
}
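
/*
 * Insert a swap area into the priority-ordered swap_list and account its
 * pages.  Callers hold both swap_lock and p->lock; a negative prio means
 * "lower than anything so far" and is drawn from least_priority.
 */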
static void _enable_swap_info(struct swap_info_struct *p, int prio,
				unsigned char *swap_map,
				unsigned long *frontswap_map)
{
	int i, prev;

	if (prio >= 0)
		p->prio = prio;
	else
		p->prio = --least_priority;
	p->swap_map = swap_map;
	frontswap_map_set(p, frontswap_map);
	p->flags |= SWP_WRITEOK;
	atomic_long_add(p->pages, &nr_swap_pages);
	total_swap_pages += p->pages;

	/* insert swap space into swap_list: */
	prev = -1;
	for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
		if (p->prio >= swap_info[i]->prio)
			break;
		prev = i;
	}
	p->next = i;
	if (prev < 0)
		swap_list.head = swap_list.next = p->type;
	else
		swap_info[prev]->next = p->type;
}

static void enable_swap_info(struct swap_info_struct *p, int prio,
				unsigned char *swap_map,
				unsigned long *frontswap_map)
{
	spin_lock(&swap_lock);
	spin_lock(&p->lock);
	_enable_swap_info(p, prio, swap_map, frontswap_map);
	frontswap_init(p->type);
	spin_unlock(&p->lock);
	spin_unlock(&swap_lock);
}
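
/*
 * Re-insert a swap area at its old priority, with its old swap_map and
 * frontswap map: used when swapoff's try_to_unuse() fails part-way.
 */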
static void reinsert_swap_info(struct swap_info_struct *p)
{
	spin_lock(&swap_lock);
	spin_lock(&p->lock);
	_enable_swap_info(p, p->prio, p->swap_map, frontswap_map_get(p));
	spin_unlock(&p->lock);
	spin_unlock(&swap_lock);
}
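
/*
 * Tear down an active swap area: pull it off the swap list, force all of
 * its pages back into memory with try_to_unuse(), then release the swap
 * map, the extents and the backing file or block device.
 */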
SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
{
	struct swap_info_struct *p = NULL;
	unsigned char *swap_map;
	struct file *swap_file, *victim;
	struct address_space *mapping;
	struct inode *inode;
	struct filename *pathname;
	int i, type, prev;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(!current->mm);

	pathname = getname(specialfile);
	if (IS_ERR(pathname))
		return PTR_ERR(pathname);

	victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
	err = PTR_ERR(victim);
	if (IS_ERR(victim))
		goto out;

	mapping = victim->f_mapping;
	prev = -1;
	spin_lock(&swap_lock);
	for (type = swap_list.head; type >= 0; type = swap_info[type]->next) {
		p = swap_info[type];
		if (p->flags & SWP_WRITEOK) {
			if (p->swap_file->f_mapping == mapping)
				break;
		}
		prev = type;
	}
	if (type < 0) {
		err = -EINVAL;
		spin_unlock(&swap_lock);
		goto out_dput;
	}
	if (!security_vm_enough_memory_mm(current->mm, p->pages))
		vm_unacct_memory(p->pages);
	else {
		err = -ENOMEM;
		spin_unlock(&swap_lock);
		goto out_dput;
	}
	if (prev < 0)
		swap_list.head = p->next;
	else
		swap_info[prev]->next = p->next;
	if (type == swap_list.next) {
		/* just pick something that's safe... */
		swap_list.next = swap_list.head;
	}
	spin_lock(&p->lock);
	if (p->prio < 0) {
		for (i = p->next; i >= 0; i = swap_info[i]->next)
			swap_info[i]->prio = p->prio--;
		least_priority++;
	}
	atomic_long_sub(p->pages, &nr_swap_pages);
	total_swap_pages -= p->pages;
	p->flags &= ~SWP_WRITEOK;
	spin_unlock(&p->lock);
	spin_unlock(&swap_lock);

	set_current_oom_origin();
	err = try_to_unuse(type, false, 0); /* force all pages to be unused */
	clear_current_oom_origin();

	if (err) {
		/* re-insert swap space back into swap_list */
		reinsert_swap_info(p);
		goto out_dput;
	}

	destroy_swap_extents(p);
	if (p->flags & SWP_CONTINUED)
		free_swap_count_continuations(p);

	mutex_lock(&swapon_mutex);
	spin_lock(&swap_lock);
	spin_lock(&p->lock);
	drain_mmlist();

	/* wait for anyone still in scan_swap_map */
	p->highest_bit = 0;		/* cuts scans short */
	while (p->flags >= SWP_SCANNING) {
		spin_unlock(&p->lock);
		spin_unlock(&swap_lock);
		schedule_timeout_uninterruptible(1);
		spin_lock(&swap_lock);
		spin_lock(&p->lock);
	}

	swap_file = p->swap_file;
	p->swap_file = NULL;
	p->max = 0;
	swap_map = p->swap_map;
	p->swap_map = NULL;
	p->flags = 0;
	frontswap_invalidate_area(type);
	spin_unlock(&p->lock);
	spin_unlock(&swap_lock);
	mutex_unlock(&swapon_mutex);
	vfree(swap_map);
	vfree(frontswap_map_get(p));
	/* Destroy swap account information */
	swap_cgroup_swapoff(type);

	inode = mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		struct block_device *bdev = I_BDEV(inode);
		set_blocksize(bdev, p->old_block_size);
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	} else {
		mutex_lock(&inode->i_mutex);
		inode->i_flags &= ~S_SWAPFILE;
		mutex_unlock(&inode->i_mutex);
	}
	filp_close(swap_file, NULL);
	err = 0;
	atomic_inc(&proc_poll_event);
	wake_up_interruptible(&proc_poll_wait);

out_dput:
	filp_close(victim, NULL);
out:
	putname(pathname);
	return err;
}

#ifdef CONFIG_PROC_FS
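/*
 * Wake /proc/swaps pollers whenever a swap area comes or goes:
 * proc_poll_event is bumped by swapon/swapoff and compared against the
 * event count cached in the seq_file at open time.
 */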
static unsigned swaps_poll(struct file *file, poll_table *wait)
{
	struct seq_file *seq = file->private_data;

	poll_wait(file, &proc_poll_wait, wait);

	if (seq->poll_event != atomic_read(&proc_poll_event)) {
		seq->poll_event = atomic_read(&proc_poll_event);
		return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
	}

	return POLLIN | POLLRDNORM;
}

/* iterator */
static void *swap_start(struct seq_file *swap, loff_t *pos)
{
	struct swap_info_struct *si;
	int type;
	loff_t l = *pos;

	mutex_lock(&swapon_mutex);

	if (!l)
		return SEQ_START_TOKEN;

	for (type = 0; type < nr_swapfiles; type++) {
		smp_rmb();	/* read nr_swapfiles before swap_info[type] */
		si = swap_info[type];
		if (!(si->flags & SWP_USED) || !si->swap_map)
			continue;
		if (!--l)
			return si;
	}

	return NULL;
}

static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
{
	struct swap_info_struct *si = v;
	int type;

	if (v == SEQ_START_TOKEN)
		type = 0;
	else
		type = si->type + 1;

	for (; type < nr_swapfiles; type++) {
		smp_rmb();	/* read nr_swapfiles before swap_info[type] */
		si = swap_info[type];
		if (!(si->flags & SWP_USED) || !si->swap_map)
			continue;
		++*pos;
		return si;
	}

	return NULL;
}

static void swap_stop(struct seq_file *swap, void *v)
{
	mutex_unlock(&swapon_mutex);
}

static int swap_show(struct seq_file *swap, void *v)
{
	struct swap_info_struct *si = v;
	struct file *file;
	int len;

	if (si == SEQ_START_TOKEN) {
		seq_puts(swap, "Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
		return 0;
	}

	file = si->swap_file;
	len = seq_path(swap, &file->f_path, " \t\n\\");
	seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
			len < 40 ? 40 - len : 1, " ",
			S_ISBLK(file_inode(file)->i_mode) ?
				"partition" : "file\t",
			si->pages << (PAGE_SHIFT - 10),
			si->inuse_pages << (PAGE_SHIFT - 10),
			si->prio);
	return 0;
}

static const struct seq_operations swaps_op = {
	.start =	swap_start,
	.next =		swap_next,
	.stop =		swap_stop,
	.show =		swap_show
};

static int swaps_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int ret;

	ret = seq_open(file, &swaps_op);
	if (ret)
		return ret;

	seq = file->private_data;
	seq->poll_event = atomic_read(&proc_poll_event);
	return 0;
}

static const struct file_operations proc_swaps_operations = {
	.open		= swaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.poll		= swaps_poll,
};

static int __init procswaps_init(void)
{
	proc_create("swaps", 0, NULL, &proc_swaps_operations);
	return 0;
}
__initcall(procswaps_init);
#endif /* CONFIG_PROC_FS */

#ifdef MAX_SWAPFILES_CHECK
static int __init max_swapfiles_check(void)
{
	MAX_SWAPFILES_CHECK();
	return 0;
}
late_initcall(max_swapfiles_check);
#endif
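
/*
 * Allocate a swap_info_struct and claim the first unused slot in
 * swap_info[], reusing a slot left over from an earlier swapoff when one
 * exists.  Returns ERR_PTR(-ENOMEM) on allocation failure and
 * ERR_PTR(-EPERM) once all MAX_SWAPFILES slots are in use.
 */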
static struct swap_info_struct *alloc_swap_info(void)
{
	struct swap_info_struct *p;
	unsigned int type;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&swap_lock);
	for (type = 0; type < nr_swapfiles; type++) {
		if (!(swap_info[type]->flags & SWP_USED))
			break;
	}
	if (type >= MAX_SWAPFILES) {
		spin_unlock(&swap_lock);
		kfree(p);
		return ERR_PTR(-EPERM);
	}
	if (type >= nr_swapfiles) {
		p->type = type;
		swap_info[type] = p;
		/*
		 * Write swap_info[type] before nr_swapfiles, in case a
		 * racing procfs swap_start() or swap_next() is reading them.
		 * (We never shrink nr_swapfiles, we never free this entry.)
		 */
		smp_wmb();
		nr_swapfiles++;
	} else {
		kfree(p);
		p = swap_info[type];
		/*
		 * Do not memset this entry: a racing procfs swap_next()
		 * would be relying on p->type to remain valid.
		 */
	}
	INIT_LIST_HEAD(&p->first_swap_extent.list);
	p->flags = SWP_USED;
	p->next = -1;
	spin_unlock(&swap_lock);
	spin_lock_init(&p->lock);

	return p;
}
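
/*
 * Take exclusive ownership of the backing store: for a block device,
 * claim it with blkdev_get() and switch it to PAGE_SIZE blocks; for a
 * regular file, take i_mutex (left held on success - swapon drops it)
 * and refuse files already in use as swap.
 */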
static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
{
	int error;

	if (S_ISBLK(inode->i_mode)) {
		p->bdev = bdgrab(I_BDEV(inode));
		error = blkdev_get(p->bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				   sys_swapon);
		if (error < 0) {
			p->bdev = NULL;
			return -EINVAL;
		}
		p->old_block_size = block_size(p->bdev);
		error = set_blocksize(p->bdev, PAGE_SIZE);
		if (error < 0)
			return error;
		p->flags |= SWP_BLKDEV;
	} else if (S_ISREG(inode->i_mode)) {
		p->bdev = inode->i_sb->s_bdev;
		mutex_lock(&inode->i_mutex);
		if (IS_SWAPFILE(inode))
			return -EBUSY;
	} else
		return -EINVAL;
	return 0;
}
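
/*
 * Validate the on-disk swap header (magic, version, byte order) and work
 * out how many pages this swap area may address.  Returns the usable size
 * in pages, or 0 if the header is unusable.
 */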
static unsigned long read_swap_header(struct swap_info_struct *p,
					union swap_header *swap_header,
					struct inode *inode)
{
	int i;
	unsigned long maxpages;
	unsigned long swapfilepages;

	if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
		printk(KERN_ERR "Unable to find swap-space signature\n");
		return 0;
	}
	/* swap partition endianness hack... */
	if (swab32(swap_header->info.version) == 1) {
		swab32s(&swap_header->info.version);
		swab32s(&swap_header->info.last_page);
		swab32s(&swap_header->info.nr_badpages);
		for (i = 0; i < swap_header->info.nr_badpages; i++)
			swab32s(&swap_header->info.badpages[i]);
	}

	/* Check the swap header's sub-version */
	if (swap_header->info.version != 1) {
		printk(KERN_WARNING
		       "Unable to handle swap header version %d\n",
		       swap_header->info.version);
		return 0;
	}

	p->lowest_bit = 1;
	p->cluster_next = 1;
	p->cluster_nr = 0;

	/*
	 * Find out how many pages are allowed for a single swap
	 * device. There are two limiting factors: 1) the number
	 * of bits for the swap offset in the swp_entry_t type, and
	 * 2) the number of bits in the swap pte as defined by the
	 * different architectures. In order to find the
	 * largest possible bit mask, a swap entry with swap type 0
	 * and swap offset ~0UL is created, encoded to a swap pte,
	 * decoded to a swp_entry_t again, and finally the swap
	 * offset is extracted. This will mask all the bits from
	 * the initial ~0UL mask that can't be encoded in either
	 * the swp_entry_t or the architecture definition of a
	 * swap pte.
	 */
	maxpages = swp_offset(pte_to_swp_entry(
			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
	if (maxpages > swap_header->info.last_page) {
		maxpages = swap_header->info.last_page + 1;
		/* p->max is an unsigned int: don't overflow it */
		if ((unsigned int)maxpages == 0)
			maxpages = UINT_MAX;
	}
	p->highest_bit = maxpages - 1;

	if (!maxpages)
		return 0;
	swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
	if (swapfilepages && maxpages > swapfilepages) {
		printk(KERN_WARNING
		       "Swap area shorter than signature indicates\n");
		return 0;
	}
	if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
		return 0;
	if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
		return 0;

	return maxpages;
}
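
/*
 * Apply the header's bad-page list to the swap map, reserve the header
 * page itself, and build the extent list.  Returns the number of extents
 * or a negative errno.
 */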
static int setup_swap_map_and_extents(struct swap_info_struct *p,
					union swap_header *swap_header,
					unsigned char *swap_map,
					unsigned long maxpages,
					sector_t *span)
{
	int i;
	unsigned int nr_good_pages;
	int nr_extents;

	nr_good_pages = maxpages - 1;	/* omit header page */

	for (i = 0; i < swap_header->info.nr_badpages; i++) {
		unsigned int page_nr = swap_header->info.badpages[i];
		if (page_nr == 0 || page_nr > swap_header->info.last_page)
			return -EINVAL;
		if (page_nr < maxpages) {
			swap_map[page_nr] = SWAP_MAP_BAD;
			nr_good_pages--;
		}
	}

	if (nr_good_pages) {
		swap_map[0] = SWAP_MAP_BAD;
		p->max = maxpages;
		p->pages = nr_good_pages;
		nr_extents = setup_swap_extents(p, span);
		if (nr_extents < 0)
			return nr_extents;
		nr_good_pages = p->pages;
	}
	if (!nr_good_pages) {
		printk(KERN_WARNING "Empty swap-file\n");
		return -EINVAL;
	}

	return nr_extents;
}

SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
{
	struct swap_info_struct *p;
	struct filename *name;
	struct file *swap_file = NULL;
	struct address_space *mapping;
	int i;
	int prio;
	int error;
	union swap_header *swap_header;
	int nr_extents;
	sector_t span;
	unsigned long maxpages;
	unsigned char *swap_map = NULL;
	unsigned long *frontswap_map = NULL;
	struct page *page = NULL;
	struct inode *inode = NULL;

	if (swap_flags & ~SWAP_FLAGS_VALID)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	p = alloc_swap_info();
	if (IS_ERR(p))
		return PTR_ERR(p);

	name = getname(specialfile);
	if (IS_ERR(name)) {
		error = PTR_ERR(name);
		name = NULL;
		goto bad_swap;
	}
	swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0);
	if (IS_ERR(swap_file)) {
		error = PTR_ERR(swap_file);
		swap_file = NULL;
		goto bad_swap;
	}

	p->swap_file = swap_file;
	mapping = swap_file->f_mapping;

	for (i = 0; i < nr_swapfiles; i++) {
		struct swap_info_struct *q = swap_info[i];

		if (q == p || !q->swap_file)
			continue;
		if (mapping == q->swap_file->f_mapping) {
			error = -EBUSY;
			goto bad_swap;
		}
	}

	inode = mapping->host;
	/* If S_ISREG(inode->i_mode), claim_swapfile() will take i_mutex */
	error = claim_swapfile(p, inode);
	if (unlikely(error))
		goto bad_swap;

	/*
	 * Read the swap header.
	 */
	if (!mapping->a_ops->readpage) {
		error = -EINVAL;
		goto bad_swap;
	}
	page = read_mapping_page(mapping, 0, swap_file);
	if (IS_ERR(page)) {
		error = PTR_ERR(page);
		goto bad_swap;
	}
	swap_header = kmap(page);

	maxpages = read_swap_header(p, swap_header, inode);
	if (unlikely(!maxpages)) {
		error = -EINVAL;
		goto bad_swap;
	}

	/* OK, set up the swap map and apply the bad block list */
	swap_map = vzalloc(maxpages);
	if (!swap_map) {
		error = -ENOMEM;
		goto bad_swap;
	}

	error = swap_cgroup_swapon(p->type, maxpages);
	if (error)
		goto bad_swap;

	nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
		maxpages, &span);
	if (unlikely(nr_extents < 0)) {
		error = nr_extents;
		goto bad_swap;
	}
	/* frontswap enabled? set up bit-per-page map for frontswap */
	if (frontswap_enabled)
		frontswap_map = vzalloc(maxpages / sizeof(long));

	if (p->bdev) {
		if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
			p->flags |= SWP_SOLIDSTATE;
			p->cluster_next = 1 + (random32() % p->highest_bit);
		}
		if ((swap_flags & SWAP_FLAG_DISCARD) && discard_swap(p) == 0)
			p->flags |= SWP_DISCARDABLE;
	}

	mutex_lock(&swapon_mutex);
	prio = -1;
	if (swap_flags & SWAP_FLAG_PREFER)
		prio =
		  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
	enable_swap_info(p, prio, swap_map, frontswap_map);

	printk(KERN_INFO "Adding %uk swap on %s. "
			"Priority:%d extents:%d across:%lluk %s%s%s\n",
		p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
		nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
		(p->flags & SWP_SOLIDSTATE) ? "SS" : "",
		(p->flags & SWP_DISCARDABLE) ? "D" : "",
		(frontswap_map) ? "FS" : "");
	mutex_unlock(&swapon_mutex);

	atomic_inc(&proc_poll_event);
	wake_up_interruptible(&proc_poll_wait);

	if (S_ISREG(inode->i_mode))
		inode->i_flags |= S_SWAPFILE;
	error = 0;
	goto out;
bad_swap:
	if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
		set_blocksize(p->bdev, p->old_block_size);
		blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	}
	destroy_swap_extents(p);
	swap_cgroup_swapoff(p->type);
	spin_lock(&swap_lock);
	p->swap_file = NULL;
	p->flags = 0;
	spin_unlock(&swap_lock);
	vfree(swap_map);
	if (swap_file) {
		if (inode && S_ISREG(inode->i_mode)) {
			mutex_unlock(&inode->i_mutex);
			inode = NULL;
		}
		filp_close(swap_file, NULL);
	}
out:
	if (page && !IS_ERR(page)) {
		kunmap(page);
		page_cache_release(page);
	}
	if (name)
		putname(name);
	if (inode && S_ISREG(inode->i_mode))
		mutex_unlock(&inode->i_mutex);
	return error;
}
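
/*
 * Fill in the swap totals for sysinfo(2).  Pages of an area that is being
 * swapped off (SWP_USED but not SWP_WRITEOK) are still counted, since
 * they remain in use until swapoff completes.
 */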
void si_swapinfo(struct sysinfo *val)
{
	unsigned int type;
	unsigned long nr_to_be_unused = 0;

	spin_lock(&swap_lock);
	for (type = 0; type < nr_swapfiles; type++) {
		struct swap_info_struct *si = swap_info[type];

		if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
			nr_to_be_unused += si->inuse_pages;
	}
	val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
	val->totalswap = total_swap_pages + nr_to_be_unused;
	spin_unlock(&swap_lock);
}

/*
 * Verify that a swap entry is valid and increment its swap map count.
 *
 * Returns 0 on success, or an error code in the following cases:
 * - swp_entry is invalid -> EINVAL
 * - swp_entry is a migration entry -> EINVAL
 * - a swap-cache reference is requested but one already exists -> EEXIST
 * - a swap-cache reference is requested but the entry is unused -> ENOENT
 * - a swap-mapped reference is requested but the count needs a
 *   continuation which cannot be provided -> ENOMEM
 */
static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
{
	struct swap_info_struct *p;
	unsigned long offset, type;
	unsigned char count;
	unsigned char has_cache;
	int err = -EINVAL;

	if (non_swap_entry(entry))
		goto out;

	type = swp_type(entry);
	if (type >= nr_swapfiles)
		goto bad_file;
	p = swap_info[type];
	offset = swp_offset(entry);

	spin_lock(&p->lock);
	if (unlikely(offset >= p->max))
		goto unlock_out;

	count = p->swap_map[offset];
	has_cache = count & SWAP_HAS_CACHE;
	count &= ~SWAP_HAS_CACHE;
	err = 0;

	if (usage == SWAP_HAS_CACHE) {
		/* set SWAP_HAS_CACHE if there is no cache and entry is used */
		if (!has_cache && count)
			has_cache = SWAP_HAS_CACHE;
		else if (has_cache)		/* someone else added cache */
			err = -EEXIST;
		else				/* no users remaining */
			err = -ENOENT;
	} else if (count || has_cache) {
		if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
			count += usage;
		else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
			err = -EINVAL;
		else if (swap_count_continued(p, offset, count))
			count = COUNT_CONTINUED;
		else
			err = -ENOMEM;
	} else
		err = -ENOENT;			/* unused swap entry */

	p->swap_map[offset] = count | has_cache;

unlock_out:
	spin_unlock(&p->lock);
out:
	return err;

bad_file:
	printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
	goto out;
}

/*
 * Help swapoff by noting that swap entry belongs to shmem/tmpfs
 * (in which case its reference count is never incremented).
 */
void swap_shmem_alloc(swp_entry_t entry)
{
	__swap_duplicate(entry, SWAP_MAP_SHMEM);
}

/*
 * Increase reference count of swap entry by 1.
 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
 * but could not be atomically allocated.  Returns 0, just as if it succeeded,
 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
 * might occur if a page table entry has got corrupted.
 */
int swap_duplicate(swp_entry_t entry)
{
	int err = 0;

	while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
		err = add_swap_count_continuation(entry, GFP_ATOMIC);
	return err;
}

/*
 * @entry: swap entry for which we allocate swap cache.
 *
 * Called when allocating swap cache for an existing swap entry.
 * This can return error codes.  Returns 0 on success.
 * -EEXIST means there is already a swap cache for this entry.
 * Note: the return codes differ from swap_duplicate()'s.
 */
int swapcache_prepare(swp_entry_t entry)
{
	return __swap_duplicate(entry, SWAP_HAS_CACHE);
}
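
/*
 * Map a swap-cache page back to the swap_info_struct of the area that
 * its swap entry (stashed in page_private) belongs to.
 */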
struct swap_info_struct *page_swap_info(struct page *page)
{
	swp_entry_t swap = { .val = page_private(page) };
	BUG_ON(!PageSwapCache(page));
	return swap_info[swp_type(swap)];
}

/*
 * out-of-line __page_file_ methods to avoid include hell.
 */
struct address_space *__page_file_mapping(struct page *page)
{
	VM_BUG_ON(!PageSwapCache(page));
	return page_swap_info(page)->swap_file->f_mapping;
}
EXPORT_SYMBOL_GPL(__page_file_mapping);

pgoff_t __page_file_index(struct page *page)
{
	swp_entry_t swap = { .val = page_private(page) };
	VM_BUG_ON(!PageSwapCache(page));
	return swp_offset(swap);
}
EXPORT_SYMBOL_GPL(__page_file_index);

/*
 * add_swap_count_continuation - called when a swap count is duplicated
 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
 * page of the original vmalloc'ed swap_map, to hold the continuation count
 * (for that entry and for its neighbouring PAGE_SIZE swap entries).  Called
 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
 *
 * These continuation pages are seldom referenced: the common paths all work
 * on the original swap_map, only referring to a continuation page when the
 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
 *
 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
 * can be called after dropping locks.
 */
int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
{
	struct swap_info_struct *si;
	struct page *head;
	struct page *page;
	struct page *list_page;
	pgoff_t offset;
	unsigned char count;

	/*
	 * When debugging, it's easier to use __GFP_ZERO here; but it's better
	 * for latency not to zero a page while GFP_ATOMIC and holding locks.
	 */
	page = alloc_page(gfp_mask | __GFP_HIGHMEM);

	si = swap_info_get(entry);
	if (!si) {
		/*
		 * An acceptable race has occurred since the failing
		 * __swap_duplicate(): the swap entry has been freed,
		 * perhaps even the whole swap_map cleared for swapoff.
		 */
		goto outer;
	}

	offset = swp_offset(entry);
	count = si->swap_map[offset] & ~SWAP_HAS_CACHE;

	if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
		/*
		 * The higher the swap count, the more likely it is that tasks
		 * will race to add swap count continuation: we need to avoid
		 * over-provisioning.
		 */
		goto out;
	}

	if (!page) {
		spin_unlock(&si->lock);
		return -ENOMEM;
	}

	/*
	 * We are fortunate that although vmalloc_to_page uses pte_offset_map,
	 * no architecture is using highmem pages for kernel pagetables: so it
	 * will not corrupt the GFP_ATOMIC caller's atomic pagetable kmaps.
	 */
	head = vmalloc_to_page(si->swap_map + offset);
	offset &= ~PAGE_MASK;

	/*
	 * Page allocation does not initialize the page's lru field,
	 * but it does always reset its private field.
	 */
	if (!page_private(head)) {
		BUG_ON(count & COUNT_CONTINUED);
		INIT_LIST_HEAD(&head->lru);
		set_page_private(head, SWP_CONTINUED);
		si->flags |= SWP_CONTINUED;
	}

	list_for_each_entry(list_page, &head->lru, lru) {
		unsigned char *map;

		/*
		 * If the previous map said no continuation, but we've found
		 * a continuation page, free our allocation and use this one.
		 */
		if (!(count & COUNT_CONTINUED))
			goto out;

		map = kmap_atomic(list_page) + offset;
		count = *map;
		kunmap_atomic(map);

		/*
		 * If this continuation count now has some space in it,
		 * free our allocation and use this one.
		 */
		if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
			goto out;
	}

	list_add_tail(&page->lru, &head->lru);
	page = NULL;			/* now it's attached, don't free it */
out:
	spin_unlock(&si->lock);
outer:
	if (page)
		__free_page(page);
	return 0;
}
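
/*
 * An illustrative caller pattern (a sketch only, not a call site in this
 * file; "ptl" stands for whatever page table lock the caller holds).
 * swap_duplicate() returns -ENOMEM when a continuation page could not be
 * allocated atomically, so the caller drops its locks and retries the
 * allocation with GFP_KERNEL:
 *
 *	if (swap_duplicate(entry) < 0) {
 *		spin_unlock(ptl);
 *		add_swap_count_continuation(entry, GFP_KERNEL);
 *		spin_lock(ptl);
 *	}
 */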

/*
 * swap_count_continued - when the original swap_map count is incremented
 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
 * into, carry if so, or else fail until a new continuation page is allocated;
 * when the original swap_map count is decremented from 0 with continuation,
 * borrow from the continuation and report whether it still holds more.
 * Called while __swap_duplicate() or swap_entry_free() holds swap_lock.
 */
static bool swap_count_continued(struct swap_info_struct *si,
				 pgoff_t offset, unsigned char count)
{
	struct page *head;
	struct page *page;
	unsigned char *map;

	head = vmalloc_to_page(si->swap_map + offset);
	if (page_private(head) != SWP_CONTINUED) {
		BUG_ON(count & COUNT_CONTINUED);
		return false;		/* need to add count continuation */
	}

	offset &= ~PAGE_MASK;
	page = list_entry(head->lru.next, struct page, lru);
	map = kmap_atomic(page) + offset;

	if (count == SWAP_MAP_MAX)	/* initial increment from swap_map */
		goto init_map;		/* jump over SWAP_CONT_MAX checks */

	if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
		/*
		 * Think of how you add 1 to 999
		 */
		while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
			kunmap_atomic(map);
			page = list_entry(page->lru.next, struct page, lru);
			BUG_ON(page == head);
			map = kmap_atomic(page) + offset;
		}
		if (*map == SWAP_CONT_MAX) {
			kunmap_atomic(map);
			page = list_entry(page->lru.next, struct page, lru);
			if (page == head)
				return false;	/* add count continuation */
			map = kmap_atomic(page) + offset;
init_map:		*map = 0;		/* we didn't zero the page */
		}
		*map += 1;
		kunmap_atomic(map);
		page = list_entry(page->lru.prev, struct page, lru);
		while (page != head) {
			map = kmap_atomic(page) + offset;
			*map = COUNT_CONTINUED;
			kunmap_atomic(map);
			page = list_entry(page->lru.prev, struct page, lru);
		}
		return true;			/* incremented */

	} else {				/* decrementing */
		/*
		 * Think of how you subtract 1 from 1000
		 */
		BUG_ON(count != COUNT_CONTINUED);
		while (*map == COUNT_CONTINUED) {
			kunmap_atomic(map);
			page = list_entry(page->lru.next, struct page, lru);
			BUG_ON(page == head);
			map = kmap_atomic(page) + offset;
		}
		BUG_ON(*map == 0);
		*map -= 1;
		if (*map == 0)
			count = 0;
		kunmap_atomic(map);
		page = list_entry(page->lru.prev, struct page, lru);
		while (page != head) {
			map = kmap_atomic(page) + offset;
			*map = SWAP_CONT_MAX | count;
			count = COUNT_CONTINUED;
			kunmap_atomic(map);
			page = list_entry(page->lru.prev, struct page, lru);
		}
		return count == COUNT_CONTINUED;
	}
}

/*
 * free_swap_count_continuations - swapoff free all the continuation pages
 * appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
 */
static void free_swap_count_continuations(struct swap_info_struct *si)
{
	pgoff_t offset;

	for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
		struct page *head;
		head = vmalloc_to_page(si->swap_map + offset);
		if (page_private(head)) {
			struct list_head *this, *next;
			list_for_each_safe(this, next, &head->lru) {
				struct page *page;
				page = list_entry(this, struct page, lru);
				list_del(this);
				__free_page(page);
			}
		}
	}
}