swapfile.c

  1. /*
  2. * linux/mm/swapfile.c
  3. *
  4. * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
  5. * Swap reorganised 29.12.95, Stephen Tweedie
  6. */
  7. #include <linux/mm.h>
  8. #include <linux/hugetlb.h>
  9. #include <linux/mman.h>
  10. #include <linux/slab.h>
  11. #include <linux/kernel_stat.h>
  12. #include <linux/swap.h>
  13. #include <linux/vmalloc.h>
  14. #include <linux/pagemap.h>
  15. #include <linux/namei.h>
  16. #include <linux/shmem_fs.h>
  17. #include <linux/blkdev.h>
  18. #include <linux/random.h>
  19. #include <linux/writeback.h>
  20. #include <linux/proc_fs.h>
  21. #include <linux/seq_file.h>
  22. #include <linux/init.h>
  23. #include <linux/ksm.h>
  24. #include <linux/rmap.h>
  25. #include <linux/security.h>
  26. #include <linux/backing-dev.h>
  27. #include <linux/mutex.h>
  28. #include <linux/capability.h>
  29. #include <linux/syscalls.h>
  30. #include <linux/memcontrol.h>
  31. #include <linux/poll.h>
  32. #include <linux/oom.h>
  33. #include <linux/frontswap.h>
  34. #include <linux/swapfile.h>
  35. #include <linux/export.h>
  36. #include <asm/pgtable.h>
  37. #include <asm/tlbflush.h>
  38. #include <linux/swapops.h>
  39. #include <linux/page_cgroup.h>
  40. static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
  41. unsigned char);
  42. static void free_swap_count_continuations(struct swap_info_struct *);
  43. static sector_t map_swap_entry(swp_entry_t, struct block_device**);
  44. DEFINE_SPINLOCK(swap_lock);
  45. static unsigned int nr_swapfiles;
  46. atomic_long_t nr_swap_pages;
  47. /* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
  48. long total_swap_pages;
  49. static int least_priority;
  50. static atomic_t highest_priority_index = ATOMIC_INIT(-1);
  51. static const char Bad_file[] = "Bad swap file entry ";
  52. static const char Unused_file[] = "Unused swap file entry ";
  53. static const char Bad_offset[] = "Bad swap offset entry ";
  54. static const char Unused_offset[] = "Unused swap offset entry ";
  55. struct swap_list_t swap_list = {-1, -1};
  56. struct swap_info_struct *swap_info[MAX_SWAPFILES];
  57. static DEFINE_MUTEX(swapon_mutex);
  58. static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
  59. /* Activity counter to indicate that a swapon or swapoff has occurred */
  60. static atomic_t proc_poll_event = ATOMIC_INIT(0);
  61. static inline unsigned char swap_count(unsigned char ent)
  62. {
  63. return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */
  64. }
  65. /* returns 1 if swap entry is freed */
  66. static int
  67. __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
  68. {
  69. swp_entry_t entry = swp_entry(si->type, offset);
  70. struct page *page;
  71. int ret = 0;
  72. page = find_get_page(swap_address_space(entry), entry.val);
  73. if (!page)
  74. return 0;
  75. /*
  76. * This function is called from scan_swap_map() and it's called
  77. * by vmscan.c at reclaiming pages. So, we hold a lock on a page, here.
  78. * We have to use trylock for avoiding deadlock. This is a special
  79. * case and you should use try_to_free_swap() with explicit lock_page()
  80. * in usual operations.
  81. */
  82. if (trylock_page(page)) {
  83. ret = try_to_free_swap(page);
  84. unlock_page(page);
  85. }
  86. page_cache_release(page);
  87. return ret;
  88. }
  89. /*
  90. * swapon tells the device that all the old swap contents can be discarded,
  91. * to allow the swap device to optimize its wear-levelling.
  92. */
  93. static int discard_swap(struct swap_info_struct *si)
  94. {
  95. struct swap_extent *se;
  96. sector_t start_block;
  97. sector_t nr_blocks;
  98. int err = 0;
  99. /* Do not discard the swap header page! */
  100. se = &si->first_swap_extent;
  101. start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
  102. nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
  103. if (nr_blocks) {
  104. err = blkdev_issue_discard(si->bdev, start_block,
  105. nr_blocks, GFP_KERNEL, 0);
  106. if (err)
  107. return err;
  108. cond_resched();
  109. }
  110. list_for_each_entry(se, &si->first_swap_extent.list, list) {
  111. start_block = se->start_block << (PAGE_SHIFT - 9);
  112. nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
  113. err = blkdev_issue_discard(si->bdev, start_block,
  114. nr_blocks, GFP_KERNEL, 0);
  115. if (err)
  116. break;
  117. cond_resched();
  118. }
  119. return err; /* That will often be -EOPNOTSUPP */
  120. }
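/*
 * A minimal standalone sketch (plain userspace C, not kernel code) of the
 * page-to-sector arithmetic used by discard_swap() above: block device
 * discards are expressed in 512-byte sectors, so page numbers and page
 * counts are shifted left by (PAGE_SHIFT - 9). The 4 KiB page size below
 * is an assumption chosen purely for illustration.
 */
#include <stdio.h>

#define EX_PAGE_SHIFT 12    /* hypothetical 4 KiB pages */

int main(void)
{
    unsigned long long start_page = 1;      /* skip the swap header page */
    unsigned long long nr_pages = 1024;

    unsigned long long start_sector = start_page << (EX_PAGE_SHIFT - 9);
    unsigned long long nr_sectors = nr_pages << (EX_PAGE_SHIFT - 9);

    /* one 4 KiB page maps to eight 512-byte sectors */
    printf("discard %llu sectors starting at sector %llu\n",
           nr_sectors, start_sector);
    return 0;
}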
  121. /*
  122. * swap allocation tells the device that a cluster of swap can now be discarded,
  123. * to allow the swap device to optimize its wear-levelling.
  124. */
  125. static void discard_swap_cluster(struct swap_info_struct *si,
  126. pgoff_t start_page, pgoff_t nr_pages)
  127. {
  128. struct swap_extent *se = si->curr_swap_extent;
  129. int found_extent = 0;
  130. while (nr_pages) {
  131. struct list_head *lh;
  132. if (se->start_page <= start_page &&
  133. start_page < se->start_page + se->nr_pages) {
  134. pgoff_t offset = start_page - se->start_page;
  135. sector_t start_block = se->start_block + offset;
  136. sector_t nr_blocks = se->nr_pages - offset;
  137. if (nr_blocks > nr_pages)
  138. nr_blocks = nr_pages;
  139. start_page += nr_blocks;
  140. nr_pages -= nr_blocks;
  141. if (!found_extent++)
  142. si->curr_swap_extent = se;
  143. start_block <<= PAGE_SHIFT - 9;
  144. nr_blocks <<= PAGE_SHIFT - 9;
  145. if (blkdev_issue_discard(si->bdev, start_block,
  146. nr_blocks, GFP_NOIO, 0))
  147. break;
  148. }
  149. lh = se->list.next;
  150. se = list_entry(lh, struct swap_extent, list);
  151. }
  152. }
  153. static int wait_for_discard(void *word)
  154. {
  155. schedule();
  156. return 0;
  157. }
  158. #define SWAPFILE_CLUSTER 256
  159. #define LATENCY_LIMIT 256
  160. static unsigned long scan_swap_map(struct swap_info_struct *si,
  161. unsigned char usage)
  162. {
  163. unsigned long offset;
  164. unsigned long scan_base;
  165. unsigned long last_in_cluster = 0;
  166. int latency_ration = LATENCY_LIMIT;
  167. int found_free_cluster = 0;
  168. /*
  169. * We try to cluster swap pages by allocating them sequentially
  170. * in swap. Once we've allocated SWAPFILE_CLUSTER pages this
  171. * way, however, we resort to first-free allocation, starting
  172. * a new cluster. This prevents us from scattering swap pages
  173. * all over the entire swap partition, so that we reduce
  174. * overall disk seek times between swap pages. -- sct
  175. * But we do now try to find an empty cluster. -Andrea
  176. * And we let swap pages go all over an SSD partition. Hugh
  177. */
  178. si->flags += SWP_SCANNING;
  179. scan_base = offset = si->cluster_next;
  180. if (unlikely(!si->cluster_nr--)) {
  181. if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
  182. si->cluster_nr = SWAPFILE_CLUSTER - 1;
  183. goto checks;
  184. }
  185. if (si->flags & SWP_PAGE_DISCARD) {
  186. /*
  187. * Start range check on racing allocations, in case
  188. * they overlap the cluster we eventually decide on
  189. * (we scan without swap_lock to allow preemption).
  190. * It's hardly conceivable that cluster_nr could be
  191. * wrapped during our scan, but don't depend on it.
  192. */
  193. if (si->lowest_alloc)
  194. goto checks;
  195. si->lowest_alloc = si->max;
  196. si->highest_alloc = 0;
  197. }
  198. spin_unlock(&si->lock);
  199. /*
  200. * If seek is expensive, start searching for new cluster from
  201. * start of partition, to minimize the span of allocated swap.
  202. * But if seek is cheap, search from our current position, so
  203. * that swap is allocated from all over the partition: if the
  204. * Flash Translation Layer only remaps within limited zones,
  205. * we don't want to wear out the first zone too quickly.
  206. */
  207. if (!(si->flags & SWP_SOLIDSTATE))
  208. scan_base = offset = si->lowest_bit;
  209. last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
  210. /* Locate the first empty (unaligned) cluster */
  211. for (; last_in_cluster <= si->highest_bit; offset++) {
  212. if (si->swap_map[offset])
  213. last_in_cluster = offset + SWAPFILE_CLUSTER;
  214. else if (offset == last_in_cluster) {
  215. spin_lock(&si->lock);
  216. offset -= SWAPFILE_CLUSTER - 1;
  217. si->cluster_next = offset;
  218. si->cluster_nr = SWAPFILE_CLUSTER - 1;
  219. found_free_cluster = 1;
  220. goto checks;
  221. }
  222. if (unlikely(--latency_ration < 0)) {
  223. cond_resched();
  224. latency_ration = LATENCY_LIMIT;
  225. }
  226. }
  227. offset = si->lowest_bit;
  228. last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
  229. /* Locate the first empty (unaligned) cluster */
  230. for (; last_in_cluster < scan_base; offset++) {
  231. if (si->swap_map[offset])
  232. last_in_cluster = offset + SWAPFILE_CLUSTER;
  233. else if (offset == last_in_cluster) {
  234. spin_lock(&si->lock);
  235. offset -= SWAPFILE_CLUSTER - 1;
  236. si->cluster_next = offset;
  237. si->cluster_nr = SWAPFILE_CLUSTER - 1;
  238. found_free_cluster = 1;
  239. goto checks;
  240. }
  241. if (unlikely(--latency_ration < 0)) {
  242. cond_resched();
  243. latency_ration = LATENCY_LIMIT;
  244. }
  245. }
  246. offset = scan_base;
  247. spin_lock(&si->lock);
  248. si->cluster_nr = SWAPFILE_CLUSTER - 1;
  249. si->lowest_alloc = 0;
  250. }
  251. checks:
  252. if (!(si->flags & SWP_WRITEOK))
  253. goto no_page;
  254. if (!si->highest_bit)
  255. goto no_page;
  256. if (offset > si->highest_bit)
  257. scan_base = offset = si->lowest_bit;
  258. /* reuse swap entry of cache-only swap if not busy. */
  259. if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
  260. int swap_was_freed;
  261. spin_unlock(&si->lock);
  262. swap_was_freed = __try_to_reclaim_swap(si, offset);
  263. spin_lock(&si->lock);
  264. /* entry was freed successfully, try to use this again */
  265. if (swap_was_freed)
  266. goto checks;
  267. goto scan; /* check next one */
  268. }
  269. if (si->swap_map[offset])
  270. goto scan;
  271. if (offset == si->lowest_bit)
  272. si->lowest_bit++;
  273. if (offset == si->highest_bit)
  274. si->highest_bit--;
  275. si->inuse_pages++;
  276. if (si->inuse_pages == si->pages) {
  277. si->lowest_bit = si->max;
  278. si->highest_bit = 0;
  279. }
  280. si->swap_map[offset] = usage;
  281. si->cluster_next = offset + 1;
  282. si->flags -= SWP_SCANNING;
  283. if (si->lowest_alloc) {
  284. /*
  285. * Only set when SWP_PAGE_DISCARD, and there's a scan
  286. * for a free cluster in progress or just completed.
  287. */
  288. if (found_free_cluster) {
  289. /*
  290. * To optimize wear-levelling, discard the
  291. * old data of the cluster, taking care not to
  292. * discard any of its pages that have already
  293. * been allocated by racing tasks (offset has
  294. * already stepped over any at the beginning).
  295. */
  296. if (offset < si->highest_alloc &&
  297. si->lowest_alloc <= last_in_cluster)
  298. last_in_cluster = si->lowest_alloc - 1;
  299. si->flags |= SWP_DISCARDING;
  300. spin_unlock(&si->lock);
  301. if (offset < last_in_cluster)
  302. discard_swap_cluster(si, offset,
  303. last_in_cluster - offset + 1);
  304. spin_lock(&si->lock);
  305. si->lowest_alloc = 0;
  306. si->flags &= ~SWP_DISCARDING;
  307. smp_mb(); /* wake_up_bit advises this */
  308. wake_up_bit(&si->flags, ilog2(SWP_DISCARDING));
  309. } else if (si->flags & SWP_DISCARDING) {
  310. /*
  311. * Delay using pages allocated by racing tasks
  312. * until the whole discard has been issued. We
  313. * could defer that delay until swap_writepage,
  314. * but it's easier to keep this self-contained.
  315. */
  316. spin_unlock(&si->lock);
  317. wait_on_bit(&si->flags, ilog2(SWP_DISCARDING),
  318. wait_for_discard, TASK_UNINTERRUPTIBLE);
  319. spin_lock(&si->lock);
  320. } else {
  321. /*
  322. * Note pages allocated by racing tasks while
  323. * scan for a free cluster is in progress, so
  324. * that its final discard can exclude them.
  325. */
  326. if (offset < si->lowest_alloc)
  327. si->lowest_alloc = offset;
  328. if (offset > si->highest_alloc)
  329. si->highest_alloc = offset;
  330. }
  331. }
  332. return offset;
  333. scan:
  334. spin_unlock(&si->lock);
  335. while (++offset <= si->highest_bit) {
  336. if (!si->swap_map[offset]) {
  337. spin_lock(&si->lock);
  338. goto checks;
  339. }
  340. if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
  341. spin_lock(&si->lock);
  342. goto checks;
  343. }
  344. if (unlikely(--latency_ration < 0)) {
  345. cond_resched();
  346. latency_ration = LATENCY_LIMIT;
  347. }
  348. }
  349. offset = si->lowest_bit;
  350. while (++offset < scan_base) {
  351. if (!si->swap_map[offset]) {
  352. spin_lock(&si->lock);
  353. goto checks;
  354. }
  355. if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
  356. spin_lock(&si->lock);
  357. goto checks;
  358. }
  359. if (unlikely(--latency_ration < 0)) {
  360. cond_resched();
  361. latency_ration = LATENCY_LIMIT;
  362. }
  363. }
  364. spin_lock(&si->lock);
  365. no_page:
  366. si->flags -= SWP_SCANNING;
  367. return 0;
  368. }
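/*
 * A toy sketch of the cluster-search idea in scan_swap_map() above: walk a
 * free-slot map looking for a run of CLUSTER consecutive free entries, so
 * that swap pages land next to each other on a rotating disk. This is a
 * simplification with invented names; the real function also handles
 * locking, latency limiting, discard and the SSD policy.
 */
#include <stdio.h>
#include <string.h>

#define EX_CLUSTER 4    /* stand-in for SWAPFILE_CLUSTER (256 in the kernel) */

static long ex_find_cluster(const unsigned char *map, unsigned long max)
{
    unsigned long offset, last_in_cluster = EX_CLUSTER - 1;

    for (offset = 0; last_in_cluster < max; offset++) {
        if (map[offset])                    /* slot in use: restart the run */
            last_in_cluster = offset + EX_CLUSTER;
        else if (offset == last_in_cluster) /* a full free run was found */
            return offset - (EX_CLUSTER - 1);
    }
    return -1;
}

int main(void)
{
    unsigned char map[16];

    memset(map, 0, sizeof(map));
    map[1] = map[5] = 1;                    /* a few slots already used */
    printf("first free cluster starts at %ld\n",
           ex_find_cluster(map, sizeof(map)));   /* expect 6 */
    return 0;
}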
  369. swp_entry_t get_swap_page(void)
  370. {
  371. struct swap_info_struct *si;
  372. pgoff_t offset;
  373. int type, next;
  374. int wrapped = 0;
  375. int hp_index;
  376. spin_lock(&swap_lock);
  377. if (atomic_long_read(&nr_swap_pages) <= 0)
  378. goto noswap;
  379. atomic_long_dec(&nr_swap_pages);
  380. for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
  381. hp_index = atomic_xchg(&highest_priority_index, -1);
  382. /*
  383. * highest_priority_index records current highest priority swap
  384. * type which just frees swap entries. If its priority is
  385. * higher than that of swap_list.next swap type, we use it. It
  386. * isn't protected by swap_lock, so it can be an invalid value
  387. * if the corresponding swap type is swapoff. We double check
  388. * the flags here. It's even possible the swap type is swapoff
  389. * and swapon again and its priority is changed. In such a rare
  390. * case, a low priority swap type might be used, but eventually
  391. * high priority swap will be used after several rounds of
  392. * swap.
  393. */
  394. if (hp_index != -1 && hp_index != type &&
  395. swap_info[type]->prio < swap_info[hp_index]->prio &&
  396. (swap_info[hp_index]->flags & SWP_WRITEOK)) {
  397. type = hp_index;
  398. swap_list.next = type;
  399. }
  400. si = swap_info[type];
  401. next = si->next;
  402. if (next < 0 ||
  403. (!wrapped && si->prio != swap_info[next]->prio)) {
  404. next = swap_list.head;
  405. wrapped++;
  406. }
  407. spin_lock(&si->lock);
  408. if (!si->highest_bit) {
  409. spin_unlock(&si->lock);
  410. continue;
  411. }
  412. if (!(si->flags & SWP_WRITEOK)) {
  413. spin_unlock(&si->lock);
  414. continue;
  415. }
  416. swap_list.next = next;
  417. spin_unlock(&swap_lock);
  418. /* This is called for allocating swap entry for cache */
  419. offset = scan_swap_map(si, SWAP_HAS_CACHE);
  420. spin_unlock(&si->lock);
  421. if (offset)
  422. return swp_entry(type, offset);
  423. spin_lock(&swap_lock);
  424. next = swap_list.next;
  425. }
  426. atomic_long_inc(&nr_swap_pages);
  427. noswap:
  428. spin_unlock(&swap_lock);
  429. return (swp_entry_t) {0};
  430. }
  431. /* The only caller of this function is now the suspend routine */
  432. swp_entry_t get_swap_page_of_type(int type)
  433. {
  434. struct swap_info_struct *si;
  435. pgoff_t offset;
  436. si = swap_info[type];
  437. spin_lock(&si->lock);
  438. if (si && (si->flags & SWP_WRITEOK)) {
  439. atomic_long_dec(&nr_swap_pages);
  440. /* This is called for allocating swap entry, not cache */
  441. offset = scan_swap_map(si, 1);
  442. if (offset) {
  443. spin_unlock(&si->lock);
  444. return swp_entry(type, offset);
  445. }
  446. atomic_long_inc(&nr_swap_pages);
  447. }
  448. spin_unlock(&si->lock);
  449. return (swp_entry_t) {0};
  450. }
  451. static struct swap_info_struct *swap_info_get(swp_entry_t entry)
  452. {
  453. struct swap_info_struct *p;
  454. unsigned long offset, type;
  455. if (!entry.val)
  456. goto out;
  457. type = swp_type(entry);
  458. if (type >= nr_swapfiles)
  459. goto bad_nofile;
  460. p = swap_info[type];
  461. if (!(p->flags & SWP_USED))
  462. goto bad_device;
  463. offset = swp_offset(entry);
  464. if (offset >= p->max)
  465. goto bad_offset;
  466. if (!p->swap_map[offset])
  467. goto bad_free;
  468. spin_lock(&p->lock);
  469. return p;
  470. bad_free:
  471. pr_err("swap_free: %s%08lx\n", Unused_offset, entry.val);
  472. goto out;
  473. bad_offset:
  474. pr_err("swap_free: %s%08lx\n", Bad_offset, entry.val);
  475. goto out;
  476. bad_device:
  477. pr_err("swap_free: %s%08lx\n", Unused_file, entry.val);
  478. goto out;
  479. bad_nofile:
  480. pr_err("swap_free: %s%08lx\n", Bad_file, entry.val);
  481. out:
  482. return NULL;
  483. }
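/*
 * A simplified sketch (standalone C, not the kernel's arch-specific code) of
 * how a swp_entry_t packs a swap "type" (index into swap_info[]) together
 * with a page offset, which swap_info_get() above decodes via swp_type() and
 * swp_offset(). The 5-bit type field here is only an assumption for
 * illustration; the real bit layout depends on the architecture.
 */
#include <assert.h>

#define EX_TYPE_BITS 5              /* hypothetical width, enough for MAX_SWAPFILES */
#define EX_OFFSET_BITS (64 - EX_TYPE_BITS)

static unsigned long long ex_mk_entry(unsigned type, unsigned long long offset)
{
    return ((unsigned long long)type << EX_OFFSET_BITS) | offset;
}

static unsigned ex_type(unsigned long long val)
{
    return val >> EX_OFFSET_BITS;
}

static unsigned long long ex_offset(unsigned long long val)
{
    return val & ((1ULL << EX_OFFSET_BITS) - 1);
}

int main(void)
{
    unsigned long long e = ex_mk_entry(2, 12345);

    assert(ex_type(e) == 2);
    assert(ex_offset(e) == 12345);
    return 0;
}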
  484. /*
  485. * This swap type has just freed a swap entry; check whether it is now the
  486. * highest priority swap type that recently freed entries. get_swap_page()
  487. * uses highest_priority_index to find the highest priority swap type. The
  488. * swap_info_struct.lock can't protect us if there are multiple swap types
  489. * active, so we use atomic_cmpxchg.
  490. */
  491. static void set_highest_priority_index(int type)
  492. {
  493. int old_hp_index, new_hp_index;
  494. do {
  495. old_hp_index = atomic_read(&highest_priority_index);
  496. if (old_hp_index != -1 &&
  497. swap_info[old_hp_index]->prio >= swap_info[type]->prio)
  498. break;
  499. new_hp_index = type;
  500. } while (atomic_cmpxchg(&highest_priority_index,
  501. old_hp_index, new_hp_index) != old_hp_index);
  502. }
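/*
 * A self-contained C11 sketch of the lock-free "remember the best value"
 * pattern used by set_highest_priority_index() above: retry a compare-and-
 * swap until either our value is published or another thread has already
 * stored an equal or better one. Plain integer priorities are used here
 * purely for illustration; the kernel compares swap_info[]->prio.
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic int ex_highest = -1;

static void ex_note_priority(int prio)
{
    int old = atomic_load(&ex_highest);

    do {
        if (old >= prio)    /* an equal or better value is already published */
            return;
    } while (!atomic_compare_exchange_weak(&ex_highest, &old, prio));
}

int main(void)
{
    ex_note_priority(3);
    ex_note_priority(1);    /* loses: 3 stays published */
    printf("highest priority seen: %d\n", atomic_load(&ex_highest));
    return 0;
}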
  503. static unsigned char swap_entry_free(struct swap_info_struct *p,
  504. swp_entry_t entry, unsigned char usage)
  505. {
  506. unsigned long offset = swp_offset(entry);
  507. unsigned char count;
  508. unsigned char has_cache;
  509. count = p->swap_map[offset];
  510. has_cache = count & SWAP_HAS_CACHE;
  511. count &= ~SWAP_HAS_CACHE;
  512. if (usage == SWAP_HAS_CACHE) {
  513. VM_BUG_ON(!has_cache);
  514. has_cache = 0;
  515. } else if (count == SWAP_MAP_SHMEM) {
  516. /*
  517. * Or we could insist on shmem.c using a special
  518. * swap_shmem_free() and free_shmem_swap_and_cache()...
  519. */
  520. count = 0;
  521. } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
  522. if (count == COUNT_CONTINUED) {
  523. if (swap_count_continued(p, offset, count))
  524. count = SWAP_MAP_MAX | COUNT_CONTINUED;
  525. else
  526. count = SWAP_MAP_MAX;
  527. } else
  528. count--;
  529. }
  530. if (!count)
  531. mem_cgroup_uncharge_swap(entry);
  532. usage = count | has_cache;
  533. p->swap_map[offset] = usage;
  534. /* free if no reference */
  535. if (!usage) {
  536. if (offset < p->lowest_bit)
  537. p->lowest_bit = offset;
  538. if (offset > p->highest_bit)
  539. p->highest_bit = offset;
  540. set_highest_priority_index(p->type);
  541. atomic_long_inc(&nr_swap_pages);
  542. p->inuse_pages--;
  543. frontswap_invalidate_page(p->type, offset);
  544. if (p->flags & SWP_BLKDEV) {
  545. struct gendisk *disk = p->bdev->bd_disk;
  546. if (disk->fops->swap_slot_free_notify)
  547. disk->fops->swap_slot_free_notify(p->bdev,
  548. offset);
  549. }
  550. }
  551. return usage;
  552. }
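/*
 * A standalone sketch of the swap_map bookkeeping that swap_entry_free()
 * above manipulates: each slot is one byte whose low bits hold the map count
 * and whose 0x40 bit (the kernel's SWAP_HAS_CACHE) records that a swapcache
 * page exists. Continuation counts and SWAP_MAP_SHMEM are ignored here; this
 * is illustration only, not the kernel's code.
 */
#include <stdio.h>

#define EX_SWAP_HAS_CACHE 0x40

static unsigned char ex_drop_ref(unsigned char slot)
{
    unsigned char has_cache = slot & EX_SWAP_HAS_CACHE;
    unsigned char count = slot & ~EX_SWAP_HAS_CACHE;

    if (count)
        count--;                /* one fewer reference to this entry */
    return count | has_cache;   /* slot is free only when both parts are 0 */
}

int main(void)
{
    unsigned char slot = 2 | EX_SWAP_HAS_CACHE; /* two references + swapcache */

    slot = ex_drop_ref(slot);
    slot = ex_drop_ref(slot);
    printf("remaining slot value: 0x%02x\n", slot);  /* 0x40: cache only */
    return 0;
}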
  553. /*
  554. * Caller has made sure that the swapdevice corresponding to entry
  555. * is still around or has not been recycled.
  556. */
  557. void swap_free(swp_entry_t entry)
  558. {
  559. struct swap_info_struct *p;
  560. p = swap_info_get(entry);
  561. if (p) {
  562. swap_entry_free(p, entry, 1);
  563. spin_unlock(&p->lock);
  564. }
  565. }
  566. /*
  567. * Called after dropping swapcache to decrease refcnt to swap entries.
  568. */
  569. void swapcache_free(swp_entry_t entry, struct page *page)
  570. {
  571. struct swap_info_struct *p;
  572. unsigned char count;
  573. p = swap_info_get(entry);
  574. if (p) {
  575. count = swap_entry_free(p, entry, SWAP_HAS_CACHE);
  576. if (page)
  577. mem_cgroup_uncharge_swapcache(page, entry, count != 0);
  578. spin_unlock(&p->lock);
  579. }
  580. }
  581. /*
  582. * How many references to page are currently swapped out?
  583. * This does not give an exact answer when swap count is continued,
  584. * but does include the high COUNT_CONTINUED flag to allow for that.
  585. */
  586. int page_swapcount(struct page *page)
  587. {
  588. int count = 0;
  589. struct swap_info_struct *p;
  590. swp_entry_t entry;
  591. entry.val = page_private(page);
  592. p = swap_info_get(entry);
  593. if (p) {
  594. count = swap_count(p->swap_map[swp_offset(entry)]);
  595. spin_unlock(&p->lock);
  596. }
  597. return count;
  598. }
  599. /*
  600. * We can write to an anon page without COW if there are no other references
  601. * to it. And as a side-effect, free up its swap: because the old content
  602. * on disk will never be read, and seeking back there to write new content
  603. * later would only waste time away from clustering.
  604. */
  605. int reuse_swap_page(struct page *page)
  606. {
  607. int count;
  608. VM_BUG_ON(!PageLocked(page));
  609. if (unlikely(PageKsm(page)))
  610. return 0;
  611. count = page_mapcount(page);
  612. if (count <= 1 && PageSwapCache(page)) {
  613. count += page_swapcount(page);
  614. if (count == 1 && !PageWriteback(page)) {
  615. delete_from_swap_cache(page);
  616. SetPageDirty(page);
  617. }
  618. }
  619. return count <= 1;
  620. }
  621. /*
  622. * If swap is getting full, or if there are no more mappings of this page,
  623. * then try_to_free_swap is called to free its swap space.
  624. */
  625. int try_to_free_swap(struct page *page)
  626. {
  627. VM_BUG_ON(!PageLocked(page));
  628. if (!PageSwapCache(page))
  629. return 0;
  630. if (PageWriteback(page))
  631. return 0;
  632. if (page_swapcount(page))
  633. return 0;
  634. /*
  635. * Once hibernation has begun to create its image of memory,
  636. * there's a danger that one of the calls to try_to_free_swap()
  637. * - most probably a call from __try_to_reclaim_swap() while
  638. * hibernation is allocating its own swap pages for the image,
  639. * but conceivably even a call from memory reclaim - will free
  640. * the swap from a page which has already been recorded in the
  641. * image as a clean swapcache page, and then reuse its swap for
  642. * another page of the image. On waking from hibernation, the
  643. * original page might be freed under memory pressure, then
  644. * later read back in from swap, now with the wrong data.
  645. *
  646. * Hibernation suspends storage while it is writing the image
  647. * to disk, so check that here.
  648. */
  649. if (pm_suspended_storage())
  650. return 0;
  651. delete_from_swap_cache(page);
  652. SetPageDirty(page);
  653. return 1;
  654. }
  655. /*
  656. * Free the swap entry like above, but also try to
  657. * free the page cache entry if it is the last user.
  658. */
  659. int free_swap_and_cache(swp_entry_t entry)
  660. {
  661. struct swap_info_struct *p;
  662. struct page *page = NULL;
  663. if (non_swap_entry(entry))
  664. return 1;
  665. p = swap_info_get(entry);
  666. if (p) {
  667. if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
  668. page = find_get_page(swap_address_space(entry),
  669. entry.val);
  670. if (page && !trylock_page(page)) {
  671. page_cache_release(page);
  672. page = NULL;
  673. }
  674. }
  675. spin_unlock(&p->lock);
  676. }
  677. if (page) {
  678. /*
  679. * Not mapped elsewhere, or swap space full? Free it!
  680. * Also recheck PageSwapCache now page is locked (above).
  681. */
  682. if (PageSwapCache(page) && !PageWriteback(page) &&
  683. (!page_mapped(page) || vm_swap_full())) {
  684. delete_from_swap_cache(page);
  685. SetPageDirty(page);
  686. }
  687. unlock_page(page);
  688. page_cache_release(page);
  689. }
  690. return p != NULL;
  691. }
  692. #ifdef CONFIG_HIBERNATION
  693. /*
  694. * Find the swap type that corresponds to given device (if any).
  695. *
  696. * @offset - number of the PAGE_SIZE-sized block of the device, starting
  697. * from 0, in which the swap header is expected to be located.
  698. *
  699. * This is needed for the suspend to disk (aka swsusp).
  700. */
  701. int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
  702. {
  703. struct block_device *bdev = NULL;
  704. int type;
  705. if (device)
  706. bdev = bdget(device);
  707. spin_lock(&swap_lock);
  708. for (type = 0; type < nr_swapfiles; type++) {
  709. struct swap_info_struct *sis = swap_info[type];
  710. if (!(sis->flags & SWP_WRITEOK))
  711. continue;
  712. if (!bdev) {
  713. if (bdev_p)
  714. *bdev_p = bdgrab(sis->bdev);
  715. spin_unlock(&swap_lock);
  716. return type;
  717. }
  718. if (bdev == sis->bdev) {
  719. struct swap_extent *se = &sis->first_swap_extent;
  720. if (se->start_block == offset) {
  721. if (bdev_p)
  722. *bdev_p = bdgrab(sis->bdev);
  723. spin_unlock(&swap_lock);
  724. bdput(bdev);
  725. return type;
  726. }
  727. }
  728. }
  729. spin_unlock(&swap_lock);
  730. if (bdev)
  731. bdput(bdev);
  732. return -ENODEV;
  733. }
  734. /*
  735. * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
  736. * corresponding to given index in swap_info (swap type).
  737. */
  738. sector_t swapdev_block(int type, pgoff_t offset)
  739. {
  740. struct block_device *bdev;
  741. if ((unsigned int)type >= nr_swapfiles)
  742. return 0;
  743. if (!(swap_info[type]->flags & SWP_WRITEOK))
  744. return 0;
  745. return map_swap_entry(swp_entry(type, offset), &bdev);
  746. }
  747. /*
  748. * Return either the total number of swap pages of given type, or the number
  749. * of free pages of that type (depending on @free)
  750. *
  751. * This is needed for software suspend
  752. */
  753. unsigned int count_swap_pages(int type, int free)
  754. {
  755. unsigned int n = 0;
  756. spin_lock(&swap_lock);
  757. if ((unsigned int)type < nr_swapfiles) {
  758. struct swap_info_struct *sis = swap_info[type];
  759. spin_lock(&sis->lock);
  760. if (sis->flags & SWP_WRITEOK) {
  761. n = sis->pages;
  762. if (free)
  763. n -= sis->inuse_pages;
  764. }
  765. spin_unlock(&sis->lock);
  766. }
  767. spin_unlock(&swap_lock);
  768. return n;
  769. }
  770. #endif /* CONFIG_HIBERNATION */
  771. static inline int maybe_same_pte(pte_t pte, pte_t swp_pte)
  772. {
  773. #ifdef CONFIG_MEM_SOFT_DIRTY
  774. /*
  775. * When the pte keeps the soft dirty bit, the pte generated
  776. * from the swap entry does not have it; still, it is the same
  777. * pte from a logical point of view.
  778. */
  779. pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte);
  780. return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty);
  781. #else
  782. return pte_same(pte, swp_pte);
  783. #endif
  784. }
  785. /*
  786. * No need to decide whether this PTE shares the swap entry with others,
  787. * just let do_wp_page work it out if a write is requested later - to
  788. * force COW, vm_page_prot omits write permission from any private vma.
  789. */
  790. static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
  791. unsigned long addr, swp_entry_t entry, struct page *page)
  792. {
  793. struct page *swapcache;
  794. struct mem_cgroup *memcg;
  795. spinlock_t *ptl;
  796. pte_t *pte;
  797. int ret = 1;
  798. swapcache = page;
  799. page = ksm_might_need_to_copy(page, vma, addr);
  800. if (unlikely(!page))
  801. return -ENOMEM;
  802. if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
  803. GFP_KERNEL, &memcg)) {
  804. ret = -ENOMEM;
  805. goto out_nolock;
  806. }
  807. pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
  808. if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) {
  809. mem_cgroup_cancel_charge_swapin(memcg);
  810. ret = 0;
  811. goto out;
  812. }
  813. dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
  814. inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
  815. get_page(page);
  816. set_pte_at(vma->vm_mm, addr, pte,
  817. pte_mkold(mk_pte(page, vma->vm_page_prot)));
  818. if (page == swapcache)
  819. page_add_anon_rmap(page, vma, addr);
  820. else /* ksm created a completely new copy */
  821. page_add_new_anon_rmap(page, vma, addr);
  822. mem_cgroup_commit_charge_swapin(page, memcg);
  823. swap_free(entry);
  824. /*
  825. * Move the page to the active list so it is not
  826. * immediately swapped out again after swapon.
  827. */
  828. activate_page(page);
  829. out:
  830. pte_unmap_unlock(pte, ptl);
  831. out_nolock:
  832. if (page != swapcache) {
  833. unlock_page(page);
  834. put_page(page);
  835. }
  836. return ret;
  837. }
  838. static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
  839. unsigned long addr, unsigned long end,
  840. swp_entry_t entry, struct page *page)
  841. {
  842. pte_t swp_pte = swp_entry_to_pte(entry);
  843. pte_t *pte;
  844. int ret = 0;
  845. /*
  846. * We don't actually need pte lock while scanning for swp_pte: since
  847. * we hold page lock and mmap_sem, swp_pte cannot be inserted into the
  848. * page table while we're scanning; though it could get zapped, and on
  849. * some architectures (e.g. x86_32 with PAE) we might catch a glimpse
  850. * of unmatched parts which look like swp_pte, so unuse_pte must
  851. * recheck under pte lock. Scanning without pte lock lets it be
  852. * preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
  853. */
  854. pte = pte_offset_map(pmd, addr);
  855. do {
  856. /*
  857. * swapoff spends a _lot_ of time in this loop!
  858. * Test inline before going to call unuse_pte.
  859. */
  860. if (unlikely(maybe_same_pte(*pte, swp_pte))) {
  861. pte_unmap(pte);
  862. ret = unuse_pte(vma, pmd, addr, entry, page);
  863. if (ret)
  864. goto out;
  865. pte = pte_offset_map(pmd, addr);
  866. }
  867. } while (pte++, addr += PAGE_SIZE, addr != end);
  868. pte_unmap(pte - 1);
  869. out:
  870. return ret;
  871. }
  872. static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
  873. unsigned long addr, unsigned long end,
  874. swp_entry_t entry, struct page *page)
  875. {
  876. pmd_t *pmd;
  877. unsigned long next;
  878. int ret;
  879. pmd = pmd_offset(pud, addr);
  880. do {
  881. next = pmd_addr_end(addr, end);
  882. if (pmd_none_or_trans_huge_or_clear_bad(pmd))
  883. continue;
  884. ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
  885. if (ret)
  886. return ret;
  887. } while (pmd++, addr = next, addr != end);
  888. return 0;
  889. }
  890. static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
  891. unsigned long addr, unsigned long end,
  892. swp_entry_t entry, struct page *page)
  893. {
  894. pud_t *pud;
  895. unsigned long next;
  896. int ret;
  897. pud = pud_offset(pgd, addr);
  898. do {
  899. next = pud_addr_end(addr, end);
  900. if (pud_none_or_clear_bad(pud))
  901. continue;
  902. ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
  903. if (ret)
  904. return ret;
  905. } while (pud++, addr = next, addr != end);
  906. return 0;
  907. }
  908. static int unuse_vma(struct vm_area_struct *vma,
  909. swp_entry_t entry, struct page *page)
  910. {
  911. pgd_t *pgd;
  912. unsigned long addr, end, next;
  913. int ret;
  914. if (page_anon_vma(page)) {
  915. addr = page_address_in_vma(page, vma);
  916. if (addr == -EFAULT)
  917. return 0;
  918. else
  919. end = addr + PAGE_SIZE;
  920. } else {
  921. addr = vma->vm_start;
  922. end = vma->vm_end;
  923. }
  924. pgd = pgd_offset(vma->vm_mm, addr);
  925. do {
  926. next = pgd_addr_end(addr, end);
  927. if (pgd_none_or_clear_bad(pgd))
  928. continue;
  929. ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
  930. if (ret)
  931. return ret;
  932. } while (pgd++, addr = next, addr != end);
  933. return 0;
  934. }
  935. static int unuse_mm(struct mm_struct *mm,
  936. swp_entry_t entry, struct page *page)
  937. {
  938. struct vm_area_struct *vma;
  939. int ret = 0;
  940. if (!down_read_trylock(&mm->mmap_sem)) {
  941. /*
  942. * Activate page so shrink_inactive_list is unlikely to unmap
  943. * its ptes while lock is dropped, so swapoff can make progress.
  944. */
  945. activate_page(page);
  946. unlock_page(page);
  947. down_read(&mm->mmap_sem);
  948. lock_page(page);
  949. }
  950. for (vma = mm->mmap; vma; vma = vma->vm_next) {
  951. if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
  952. break;
  953. }
  954. up_read(&mm->mmap_sem);
  955. return (ret < 0)? ret: 0;
  956. }
  957. /*
  958. * Scan swap_map (or frontswap_map if frontswap parameter is true)
  959. * from current position to next entry still in use.
  960. * Recycle to start on reaching the end, returning 0 when empty.
  961. */
  962. static unsigned int find_next_to_unuse(struct swap_info_struct *si,
  963. unsigned int prev, bool frontswap)
  964. {
  965. unsigned int max = si->max;
  966. unsigned int i = prev;
  967. unsigned char count;
  968. /*
  969. * No need for swap_lock here: we're just looking
  970. * for whether an entry is in use, not modifying it; false
  971. * hits are okay, and sys_swapoff() has already prevented new
  972. * allocations from this area (while holding swap_lock).
  973. */
  974. for (;;) {
  975. if (++i >= max) {
  976. if (!prev) {
  977. i = 0;
  978. break;
  979. }
  980. /*
  981. * No entries in use at top of swap_map,
  982. * loop back to start and recheck there.
  983. */
  984. max = prev + 1;
  985. prev = 0;
  986. i = 1;
  987. }
  988. if (frontswap) {
  989. if (frontswap_test(si, i))
  990. break;
  991. else
  992. continue;
  993. }
  994. count = si->swap_map[i];
  995. if (count && swap_count(count) != SWAP_MAP_BAD)
  996. break;
  997. }
  998. return i;
  999. }
  1000. /*
  1001. * We completely avoid races by reading each swap page in advance,
  1002. * and then search for the process using it. All the necessary
  1003. * page table adjustments can then be made atomically.
  1004. *
  1005. * if the boolean frontswap is true, only unuse pages_to_unuse pages;
  1006. * pages_to_unuse==0 means all pages; ignored if frontswap is false
  1007. */
  1008. int try_to_unuse(unsigned int type, bool frontswap,
  1009. unsigned long pages_to_unuse)
  1010. {
  1011. struct swap_info_struct *si = swap_info[type];
  1012. struct mm_struct *start_mm;
  1013. unsigned char *swap_map;
  1014. unsigned char swcount;
  1015. struct page *page;
  1016. swp_entry_t entry;
  1017. unsigned int i = 0;
  1018. int retval = 0;
  1019. /*
  1020. * When searching mms for an entry, a good strategy is to
  1021. * start at the first mm we freed the previous entry from
  1022. * (though actually we don't notice whether we or coincidence
  1023. * freed the entry). Initialize this start_mm with a hold.
  1024. *
  1025. * A simpler strategy would be to start at the last mm we
  1026. * freed the previous entry from; but that would take less
  1027. * advantage of mmlist ordering, which clusters forked mms
  1028. * together, child after parent. If we race with dup_mmap(), we
  1029. * prefer to resolve parent before child, lest we miss entries
  1030. * duplicated after we scanned child: using last mm would invert
  1031. * that.
  1032. */
  1033. start_mm = &init_mm;
  1034. atomic_inc(&init_mm.mm_users);
  1035. /*
  1036. * Keep on scanning until all entries have gone. Usually,
  1037. * one pass through swap_map is enough, but not necessarily:
  1038. * there are races when an instance of an entry might be missed.
  1039. */
  1040. while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
  1041. if (signal_pending(current)) {
  1042. retval = -EINTR;
  1043. break;
  1044. }
  1045. /*
  1046. * Get a page for the entry, using the existing swap
  1047. * cache page if there is one. Otherwise, get a clean
  1048. * page and read the swap into it.
  1049. */
  1050. swap_map = &si->swap_map[i];
  1051. entry = swp_entry(type, i);
  1052. page = read_swap_cache_async(entry,
  1053. GFP_HIGHUSER_MOVABLE, NULL, 0);
  1054. if (!page) {
  1055. /*
  1056. * Either swap_duplicate() failed because entry
  1057. * has been freed independently, and will not be
  1058. * reused since sys_swapoff() already disabled
  1059. * allocation from here, or alloc_page() failed.
  1060. */
  1061. if (!*swap_map)
  1062. continue;
  1063. retval = -ENOMEM;
  1064. break;
  1065. }
  1066. /*
  1067. * Don't hold on to start_mm if it looks like exiting.
  1068. */
  1069. if (atomic_read(&start_mm->mm_users) == 1) {
  1070. mmput(start_mm);
  1071. start_mm = &init_mm;
  1072. atomic_inc(&init_mm.mm_users);
  1073. }
  1074. /*
  1075. * Wait for and lock page. When do_swap_page races with
  1076. * try_to_unuse, do_swap_page can handle the fault much
  1077. * faster than try_to_unuse can locate the entry. This
  1078. * apparently redundant "wait_on_page_locked" lets try_to_unuse
  1079. * defer to do_swap_page in such a case - in some tests,
  1080. * do_swap_page and try_to_unuse repeatedly compete.
  1081. */
  1082. wait_on_page_locked(page);
  1083. wait_on_page_writeback(page);
  1084. lock_page(page);
  1085. wait_on_page_writeback(page);
  1086. /*
  1087. * Remove all references to entry.
  1088. */
  1089. swcount = *swap_map;
  1090. if (swap_count(swcount) == SWAP_MAP_SHMEM) {
  1091. retval = shmem_unuse(entry, page);
  1092. /* page has already been unlocked and released */
  1093. if (retval < 0)
  1094. break;
  1095. continue;
  1096. }
  1097. if (swap_count(swcount) && start_mm != &init_mm)
  1098. retval = unuse_mm(start_mm, entry, page);
  1099. if (swap_count(*swap_map)) {
  1100. int set_start_mm = (*swap_map >= swcount);
  1101. struct list_head *p = &start_mm->mmlist;
  1102. struct mm_struct *new_start_mm = start_mm;
  1103. struct mm_struct *prev_mm = start_mm;
  1104. struct mm_struct *mm;
  1105. atomic_inc(&new_start_mm->mm_users);
  1106. atomic_inc(&prev_mm->mm_users);
  1107. spin_lock(&mmlist_lock);
  1108. while (swap_count(*swap_map) && !retval &&
  1109. (p = p->next) != &start_mm->mmlist) {
  1110. mm = list_entry(p, struct mm_struct, mmlist);
  1111. if (!atomic_inc_not_zero(&mm->mm_users))
  1112. continue;
  1113. spin_unlock(&mmlist_lock);
  1114. mmput(prev_mm);
  1115. prev_mm = mm;
  1116. cond_resched();
  1117. swcount = *swap_map;
  1118. if (!swap_count(swcount)) /* any usage ? */
  1119. ;
  1120. else if (mm == &init_mm)
  1121. set_start_mm = 1;
  1122. else
  1123. retval = unuse_mm(mm, entry, page);
  1124. if (set_start_mm && *swap_map < swcount) {
  1125. mmput(new_start_mm);
  1126. atomic_inc(&mm->mm_users);
  1127. new_start_mm = mm;
  1128. set_start_mm = 0;
  1129. }
  1130. spin_lock(&mmlist_lock);
  1131. }
  1132. spin_unlock(&mmlist_lock);
  1133. mmput(prev_mm);
  1134. mmput(start_mm);
  1135. start_mm = new_start_mm;
  1136. }
  1137. if (retval) {
  1138. unlock_page(page);
  1139. page_cache_release(page);
  1140. break;
  1141. }
  1142. /*
  1143. * If a reference remains (rare), we would like to leave
  1144. * the page in the swap cache; but try_to_unmap could
  1145. * then re-duplicate the entry once we drop page lock,
  1146. * so we might loop indefinitely; also, that page could
  1147. * not be swapped out to other storage meanwhile. So:
  1148. * delete from cache even if there's another reference,
  1149. * after ensuring that the data has been saved to disk -
  1150. * since if the reference remains (rarer), it will be
  1151. * read from disk into another page. Splitting into two
  1152. * pages would be incorrect if swap supported "shared
  1153. * private" pages, but they are handled by tmpfs files.
  1154. *
  1155. * Given how unuse_vma() targets one particular offset
  1156. * in an anon_vma, once the anon_vma has been determined,
  1157. * this splitting happens to be just what is needed to
  1158. * handle where KSM pages have been swapped out: re-reading
  1159. * is unnecessarily slow, but we can fix that later on.
  1160. */
  1161. if (swap_count(*swap_map) &&
  1162. PageDirty(page) && PageSwapCache(page)) {
  1163. struct writeback_control wbc = {
  1164. .sync_mode = WB_SYNC_NONE,
  1165. };
  1166. swap_writepage(page, &wbc);
  1167. lock_page(page);
  1168. wait_on_page_writeback(page);
  1169. }
  1170. /*
  1171. * It is conceivable that a racing task removed this page from
  1172. * swap cache just before we acquired the page lock at the top,
  1173. * or while we dropped it in unuse_mm(). The page might even
  1174. * be back in swap cache on another swap area: that we must not
  1175. * delete, since it may not have been written out to swap yet.
  1176. */
  1177. if (PageSwapCache(page) &&
  1178. likely(page_private(page) == entry.val))
  1179. delete_from_swap_cache(page);
  1180. /*
  1181. * Since we could skip searching mms once the swap count went
  1182. * to 1, we did not mark any present ptes as dirty: we must
  1183. * mark the page dirty so shrink_page_list will preserve it.
  1184. */
  1185. SetPageDirty(page);
  1186. unlock_page(page);
  1187. page_cache_release(page);
  1188. /*
  1189. * Make sure that we aren't completely killing
  1190. * interactive performance.
  1191. */
  1192. cond_resched();
  1193. if (frontswap && pages_to_unuse > 0) {
  1194. if (!--pages_to_unuse)
  1195. break;
  1196. }
  1197. }
  1198. mmput(start_mm);
  1199. return retval;
  1200. }
  1201. /*
  1202. * After a successful try_to_unuse, if no swap is now in use, we know
  1203. * we can empty the mmlist. swap_lock must be held on entry and exit.
  1204. * Note that mmlist_lock nests inside swap_lock, and an mm must be
  1205. * added to the mmlist just after page_duplicate - before would be racy.
  1206. */
  1207. static void drain_mmlist(void)
  1208. {
  1209. struct list_head *p, *next;
  1210. unsigned int type;
  1211. for (type = 0; type < nr_swapfiles; type++)
  1212. if (swap_info[type]->inuse_pages)
  1213. return;
  1214. spin_lock(&mmlist_lock);
  1215. list_for_each_safe(p, next, &init_mm.mmlist)
  1216. list_del_init(p);
  1217. spin_unlock(&mmlist_lock);
  1218. }
  1219. /*
  1220. * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
  1221. * corresponds to page offset for the specified swap entry.
  1222. * Note that the type of this function is sector_t, but it returns page offset
  1223. * into the bdev, not sector offset.
  1224. */
  1225. static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
  1226. {
  1227. struct swap_info_struct *sis;
  1228. struct swap_extent *start_se;
  1229. struct swap_extent *se;
  1230. pgoff_t offset;
  1231. sis = swap_info[swp_type(entry)];
  1232. *bdev = sis->bdev;
  1233. offset = swp_offset(entry);
  1234. start_se = sis->curr_swap_extent;
  1235. se = start_se;
  1236. for ( ; ; ) {
  1237. struct list_head *lh;
  1238. if (se->start_page <= offset &&
  1239. offset < (se->start_page + se->nr_pages)) {
  1240. return se->start_block + (offset - se->start_page);
  1241. }
  1242. lh = se->list.next;
  1243. se = list_entry(lh, struct swap_extent, list);
  1244. sis->curr_swap_extent = se;
  1245. BUG_ON(se == start_se); /* It *must* be present */
  1246. }
  1247. }
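/*
 * A minimal sketch of the extent-lookup arithmetic in map_swap_entry()
 * above: within an extent covering [start_page, start_page + nr_pages), the
 * device block for a swap offset is start_block plus the distance from the
 * extent's first page. The struct, names and the fixed array are invented
 * for illustration; the kernel walks a cached circular list instead.
 */
#include <stdio.h>

struct ex_extent {
    unsigned long start_page;
    unsigned long nr_pages;
    unsigned long long start_block;
};

static long long ex_lookup(const struct ex_extent *ext, int n, unsigned long offset)
{
    for (int i = 0; i < n; i++) {
        if (ext[i].start_page <= offset &&
            offset < ext[i].start_page + ext[i].nr_pages)
            return ext[i].start_block + (offset - ext[i].start_page);
    }
    return -1;  /* the kernel BUG()s instead: the extent must exist */
}

int main(void)
{
    struct ex_extent map[] = {
        { 0,   256, 1000 },
        { 256, 256, 5000 },   /* a hole in the file moved this extent */
    };

    printf("offset 300 -> block %lld\n", ex_lookup(map, 2, 300)); /* 5044 */
    return 0;
}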
  1248. /*
  1249. * Returns the page offset into bdev for the specified page's swap entry.
  1250. */
  1251. sector_t map_swap_page(struct page *page, struct block_device **bdev)
  1252. {
  1253. swp_entry_t entry;
  1254. entry.val = page_private(page);
  1255. return map_swap_entry(entry, bdev);
  1256. }
  1257. /*
  1258. * Free all of a swapdev's extent information
  1259. */
  1260. static void destroy_swap_extents(struct swap_info_struct *sis)
  1261. {
  1262. while (!list_empty(&sis->first_swap_extent.list)) {
  1263. struct swap_extent *se;
  1264. se = list_entry(sis->first_swap_extent.list.next,
  1265. struct swap_extent, list);
  1266. list_del(&se->list);
  1267. kfree(se);
  1268. }
  1269. if (sis->flags & SWP_FILE) {
  1270. struct file *swap_file = sis->swap_file;
  1271. struct address_space *mapping = swap_file->f_mapping;
  1272. sis->flags &= ~SWP_FILE;
  1273. mapping->a_ops->swap_deactivate(swap_file);
  1274. }
  1275. }
  1276. /*
  1277. * Add a block range (and the corresponding page range) into this swapdev's
  1278. * extent list. The extent list is kept sorted in page order.
  1279. *
  1280. * This function rather assumes that it is called in ascending page order.
  1281. */
  1282. int
  1283. add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
  1284. unsigned long nr_pages, sector_t start_block)
  1285. {
  1286. struct swap_extent *se;
  1287. struct swap_extent *new_se;
  1288. struct list_head *lh;
  1289. if (start_page == 0) {
  1290. se = &sis->first_swap_extent;
  1291. sis->curr_swap_extent = se;
  1292. se->start_page = 0;
  1293. se->nr_pages = nr_pages;
  1294. se->start_block = start_block;
  1295. return 1;
  1296. } else {
  1297. lh = sis->first_swap_extent.list.prev; /* Highest extent */
  1298. se = list_entry(lh, struct swap_extent, list);
  1299. BUG_ON(se->start_page + se->nr_pages != start_page);
  1300. if (se->start_block + se->nr_pages == start_block) {
  1301. /* Merge it */
  1302. se->nr_pages += nr_pages;
  1303. return 0;
  1304. }
  1305. }
  1306. /*
  1307. * No merge. Insert a new extent, preserving ordering.
  1308. */
  1309. new_se = kmalloc(sizeof(*se), GFP_KERNEL);
  1310. if (new_se == NULL)
  1311. return -ENOMEM;
  1312. new_se->start_page = start_page;
  1313. new_se->nr_pages = nr_pages;
  1314. new_se->start_block = start_block;
  1315. list_add_tail(&new_se->list, &sis->first_swap_extent.list);
  1316. return 1;
  1317. }
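/*
 * A toy illustration of the merge test in add_swap_extent() above: a new
 * range is folded into the last extent only when it is contiguous in both
 * page space and block space; otherwise a new extent must be appended.
 * Standalone C with invented names, assuming ascending page order as the
 * real function does.
 */
#include <stdio.h>

struct ex_ext {
    unsigned long start_page, nr_pages;
    unsigned long long start_block;
};

/* returns 1 if the new range was merged into *last, 0 if a new extent is needed */
static int ex_try_merge(struct ex_ext *last, unsigned long start_page,
                        unsigned long nr_pages, unsigned long long start_block)
{
    if (last->start_page + last->nr_pages == start_page &&
        last->start_block + last->nr_pages == start_block) {
        last->nr_pages += nr_pages;
        return 1;
    }
    return 0;
}

int main(void)
{
    struct ex_ext last = { 0, 256, 1000 };

    printf("contiguous: %d\n", ex_try_merge(&last, 256, 64, 1256)); /* 1 */
    printf("hole:       %d\n", ex_try_merge(&last, 512, 64, 9000)); /* 0 */
    return 0;
}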
  1318. /*
  1319. * A `swap extent' is a simple thing which maps a contiguous range of pages
  1320. * onto a contiguous range of disk blocks. An ordered list of swap extents
  1321. * is built at swapon time and is then used at swap_writepage/swap_readpage
  1322. * time for locating where on disk a page belongs.
  1323. *
  1324. * If the swapfile is an S_ISBLK block device, a single extent is installed.
  1325. * This is done so that the main operating code can treat S_ISBLK and S_ISREG
  1326. * swap files identically.
  1327. *
  1328. * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
  1329. * extent list operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK
  1330. * swapfiles are handled *identically* after swapon time.
  1331. *
  1332. * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
  1333. * and will parse them into an ordered extent list, in PAGE_SIZE chunks. If
  1334. * some stray blocks are found which do not fall within the PAGE_SIZE alignment
  1335. * requirements, they are simply tossed out - we will never use those blocks
  1336. * for swapping.
  1337. *
  1338. * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon. This
  1339. * prevents root from shooting her foot off by ftruncating an in-use swapfile,
  1340. * which will scribble on the fs.
  1341. *
  1342. * The amount of disk space which a single swap extent represents varies.
  1343. * Typically it is in the 1-4 megabyte range. So we can have hundreds of
  1344. * extents in the list. To avoid much list walking, we cache the previous
  1345. * search location in `curr_swap_extent', and start new searches from there.
  1346. * This is extremely effective. The average number of iterations in
  1347. * map_swap_page() has been measured at about 0.3 per page. - akpm.
  1348. */
  1349. static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
  1350. {
  1351. struct file *swap_file = sis->swap_file;
  1352. struct address_space *mapping = swap_file->f_mapping;
  1353. struct inode *inode = mapping->host;
  1354. int ret;
  1355. if (S_ISBLK(inode->i_mode)) {
  1356. ret = add_swap_extent(sis, 0, sis->max, 0);
  1357. *span = sis->pages;
  1358. return ret;
  1359. }
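/*
 * A filesystem that provides ->swap_activate() sets up the swap file
 * itself: on success the area is marked SWP_FILE, which is what makes
 * destroy_swap_extents() above call ->swap_deactivate() at swapoff time,
 * and a single extent covering the whole area is installed.
 */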
  1360. if (mapping->a_ops->swap_activate) {
  1361. ret = mapping->a_ops->swap_activate(sis, swap_file, span);
  1362. if (!ret) {
  1363. sis->flags |= SWP_FILE;
  1364. ret = add_swap_extent(sis, 0, sis->max, 0);
  1365. *span = sis->pages;
  1366. }
  1367. return ret;
  1368. }
  1369. return generic_swapfile_activate(sis, swap_file, span);
  1370. }
  1371. static void _enable_swap_info(struct swap_info_struct *p, int prio,
  1372. unsigned char *swap_map)
  1373. {
  1374. int i, prev;
  1375. if (prio >= 0)
  1376. p->prio = prio;
  1377. else
  1378. p->prio = --least_priority;
  1379. p->swap_map = swap_map;
  1380. p->flags |= SWP_WRITEOK;
  1381. atomic_long_add(p->pages, &nr_swap_pages);
  1382. total_swap_pages += p->pages;
  1383. /* insert swap space into swap_list: */
  1384. prev = -1;
  1385. for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
  1386. if (p->prio >= swap_info[i]->prio)
  1387. break;
  1388. prev = i;
  1389. }
  1390. p->next = i;
  1391. if (prev < 0)
  1392. swap_list.head = swap_list.next = p->type;
  1393. else
  1394. swap_info[prev]->next = p->type;
  1395. }
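/*
 * The loop above keeps swap_list ordered by descending priority: a new
 * area is linked in before the first existing area whose priority it
 * meets or exceeds. For example (illustrative), with areas of priority
 * 5 and -1 on the list, an area enabled with prio 10 becomes the new
 * head, while one enabled without an explicit priority takes the next
 * value from least_priority and goes to the tail.
 */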
  1396. static void enable_swap_info(struct swap_info_struct *p, int prio,
  1397. unsigned char *swap_map,
  1398. unsigned long *frontswap_map)
  1399. {
  1400. frontswap_init(p->type, frontswap_map);
  1401. spin_lock(&swap_lock);
  1402. spin_lock(&p->lock);
  1403. _enable_swap_info(p, prio, swap_map);
  1404. spin_unlock(&p->lock);
  1405. spin_unlock(&swap_lock);
  1406. }
  1407. static void reinsert_swap_info(struct swap_info_struct *p)
  1408. {
  1409. spin_lock(&swap_lock);
  1410. spin_lock(&p->lock);
  1411. _enable_swap_info(p, p->prio, p->swap_map);
  1412. spin_unlock(&p->lock);
  1413. spin_unlock(&swap_lock);
  1414. }
  1415. SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
  1416. {
  1417. struct swap_info_struct *p = NULL;
  1418. unsigned char *swap_map;
  1419. unsigned long *frontswap_map;
  1420. struct file *swap_file, *victim;
  1421. struct address_space *mapping;
  1422. struct inode *inode;
  1423. struct filename *pathname;
  1424. int i, type, prev;
  1425. int err;
  1426. if (!capable(CAP_SYS_ADMIN))
  1427. return -EPERM;
  1428. BUG_ON(!current->mm);
  1429. pathname = getname(specialfile);
  1430. if (IS_ERR(pathname))
  1431. return PTR_ERR(pathname);
  1432. victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
  1433. err = PTR_ERR(victim);
  1434. if (IS_ERR(victim))
  1435. goto out;
  1436. mapping = victim->f_mapping;
  1437. prev = -1;
  1438. spin_lock(&swap_lock);
  1439. for (type = swap_list.head; type >= 0; type = swap_info[type]->next) {
  1440. p = swap_info[type];
  1441. if (p->flags & SWP_WRITEOK) {
  1442. if (p->swap_file->f_mapping == mapping)
  1443. break;
  1444. }
  1445. prev = type;
  1446. }
  1447. if (type < 0) {
  1448. err = -EINVAL;
  1449. spin_unlock(&swap_lock);
  1450. goto out_dput;
  1451. }
  1452. if (!security_vm_enough_memory_mm(current->mm, p->pages))
  1453. vm_unacct_memory(p->pages);
  1454. else {
  1455. err = -ENOMEM;
  1456. spin_unlock(&swap_lock);
  1457. goto out_dput;
  1458. }
  1459. if (prev < 0)
  1460. swap_list.head = p->next;
  1461. else
  1462. swap_info[prev]->next = p->next;
  1463. if (type == swap_list.next) {
  1464. /* just pick something that's safe... */
  1465. swap_list.next = swap_list.head;
  1466. }
  1467. spin_lock(&p->lock);
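/*
 * If this area had an auto-assigned (negative) priority, pull the
 * remaining negative priorities up by one so they stay densely packed,
 * and hand the freed slot back to least_priority.
 */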
  1468. if (p->prio < 0) {
  1469. for (i = p->next; i >= 0; i = swap_info[i]->next)
  1470. swap_info[i]->prio = p->prio--;
  1471. least_priority++;
  1472. }
  1473. atomic_long_sub(p->pages, &nr_swap_pages);
  1474. total_swap_pages -= p->pages;
  1475. p->flags &= ~SWP_WRITEOK;
  1476. spin_unlock(&p->lock);
  1477. spin_unlock(&swap_lock);
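/*
 * try_to_unuse() brings everything in this area back into memory, which
 * can itself consume a lot of memory; mark this task as the preferred
 * OOM victim for the duration so that an OOM triggered by swapoff picks
 * the swapoff task rather than an innocent one.
 */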
  1478. set_current_oom_origin();
  1479. err = try_to_unuse(type, false, 0); /* force all pages to be unused */
  1480. clear_current_oom_origin();
  1481. if (err) {
  1482. /* re-insert swap space back into swap_list */
  1483. reinsert_swap_info(p);
  1484. goto out_dput;
  1485. }
  1486. destroy_swap_extents(p);
  1487. if (p->flags & SWP_CONTINUED)
  1488. free_swap_count_continuations(p);
  1489. mutex_lock(&swapon_mutex);
  1490. spin_lock(&swap_lock);
  1491. spin_lock(&p->lock);
  1492. drain_mmlist();
  1493. /* wait for anyone still in scan_swap_map */
  1494. p->highest_bit = 0; /* cuts scans short */
  1495. while (p->flags >= SWP_SCANNING) {
  1496. spin_unlock(&p->lock);
  1497. spin_unlock(&swap_lock);
  1498. schedule_timeout_uninterruptible(1);
  1499. spin_lock(&swap_lock);
  1500. spin_lock(&p->lock);
  1501. }
  1502. swap_file = p->swap_file;
  1503. p->swap_file = NULL;
  1504. p->max = 0;
  1505. swap_map = p->swap_map;
  1506. p->swap_map = NULL;
  1507. p->flags = 0;
  1508. frontswap_map = frontswap_map_get(p);
  1509. frontswap_map_set(p, NULL);
  1510. spin_unlock(&p->lock);
  1511. spin_unlock(&swap_lock);
  1512. frontswap_invalidate_area(type);
  1513. mutex_unlock(&swapon_mutex);
  1514. vfree(swap_map);
  1515. vfree(frontswap_map);
1516. /* Destroy swap account information */
  1517. swap_cgroup_swapoff(type);
  1518. inode = mapping->host;
  1519. if (S_ISBLK(inode->i_mode)) {
  1520. struct block_device *bdev = I_BDEV(inode);
  1521. set_blocksize(bdev, p->old_block_size);
  1522. blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
  1523. } else {
  1524. mutex_lock(&inode->i_mutex);
  1525. inode->i_flags &= ~S_SWAPFILE;
  1526. mutex_unlock(&inode->i_mutex);
  1527. }
  1528. filp_close(swap_file, NULL);
  1529. err = 0;
  1530. atomic_inc(&proc_poll_event);
  1531. wake_up_interruptible(&proc_poll_wait);
  1532. out_dput:
  1533. filp_close(victim, NULL);
  1534. out:
  1535. putname(pathname);
  1536. return err;
  1537. }
  1538. #ifdef CONFIG_PROC_FS
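/*
 * /proc/swaps poll support: swapon and swapoff bump proc_poll_event and
 * wake proc_poll_wait (see above), so pollers of /proc/swaps are told
 * whenever a swap area is added or removed.
 */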
  1539. static unsigned swaps_poll(struct file *file, poll_table *wait)
  1540. {
  1541. struct seq_file *seq = file->private_data;
  1542. poll_wait(file, &proc_poll_wait, wait);
  1543. if (seq->poll_event != atomic_read(&proc_poll_event)) {
  1544. seq->poll_event = atomic_read(&proc_poll_event);
  1545. return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
  1546. }
  1547. return POLLIN | POLLRDNORM;
  1548. }
  1549. /* iterator */
  1550. static void *swap_start(struct seq_file *swap, loff_t *pos)
  1551. {
  1552. struct swap_info_struct *si;
  1553. int type;
  1554. loff_t l = *pos;
  1555. mutex_lock(&swapon_mutex);
  1556. if (!l)
  1557. return SEQ_START_TOKEN;
  1558. for (type = 0; type < nr_swapfiles; type++) {
  1559. smp_rmb(); /* read nr_swapfiles before swap_info[type] */
  1560. si = swap_info[type];
  1561. if (!(si->flags & SWP_USED) || !si->swap_map)
  1562. continue;
  1563. if (!--l)
  1564. return si;
  1565. }
  1566. return NULL;
  1567. }
  1568. static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
  1569. {
  1570. struct swap_info_struct *si = v;
  1571. int type;
  1572. if (v == SEQ_START_TOKEN)
  1573. type = 0;
  1574. else
  1575. type = si->type + 1;
  1576. for (; type < nr_swapfiles; type++) {
  1577. smp_rmb(); /* read nr_swapfiles before swap_info[type] */
  1578. si = swap_info[type];
  1579. if (!(si->flags & SWP_USED) || !si->swap_map)
  1580. continue;
  1581. ++*pos;
  1582. return si;
  1583. }
  1584. return NULL;
  1585. }
  1586. static void swap_stop(struct seq_file *swap, void *v)
  1587. {
  1588. mutex_unlock(&swapon_mutex);
  1589. }
  1590. static int swap_show(struct seq_file *swap, void *v)
  1591. {
  1592. struct swap_info_struct *si = v;
  1593. struct file *file;
  1594. int len;
  1595. if (si == SEQ_START_TOKEN) {
  1596. seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
  1597. return 0;
  1598. }
  1599. file = si->swap_file;
  1600. len = seq_path(swap, &file->f_path, " \t\n\\");
  1601. seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
  1602. len < 40 ? 40 - len : 1, " ",
  1603. S_ISBLK(file_inode(file)->i_mode) ?
  1604. "partition" : "file\t",
  1605. si->pages << (PAGE_SHIFT - 10),
  1606. si->inuse_pages << (PAGE_SHIFT - 10),
  1607. si->prio);
  1608. return 0;
  1609. }
  1610. static const struct seq_operations swaps_op = {
  1611. .start = swap_start,
  1612. .next = swap_next,
  1613. .stop = swap_stop,
  1614. .show = swap_show
  1615. };
  1616. static int swaps_open(struct inode *inode, struct file *file)
  1617. {
  1618. struct seq_file *seq;
  1619. int ret;
  1620. ret = seq_open(file, &swaps_op);
  1621. if (ret)
  1622. return ret;
  1623. seq = file->private_data;
  1624. seq->poll_event = atomic_read(&proc_poll_event);
  1625. return 0;
  1626. }
  1627. static const struct file_operations proc_swaps_operations = {
  1628. .open = swaps_open,
  1629. .read = seq_read,
  1630. .llseek = seq_lseek,
  1631. .release = seq_release,
  1632. .poll = swaps_poll,
  1633. };
  1634. static int __init procswaps_init(void)
  1635. {
  1636. proc_create("swaps", 0, NULL, &proc_swaps_operations);
  1637. return 0;
  1638. }
  1639. __initcall(procswaps_init);
  1640. #endif /* CONFIG_PROC_FS */
  1641. #ifdef MAX_SWAPFILES_CHECK
  1642. static int __init max_swapfiles_check(void)
  1643. {
  1644. MAX_SWAPFILES_CHECK();
  1645. return 0;
  1646. }
  1647. late_initcall(max_swapfiles_check);
  1648. #endif
  1649. static struct swap_info_struct *alloc_swap_info(void)
  1650. {
  1651. struct swap_info_struct *p;
  1652. unsigned int type;
  1653. p = kzalloc(sizeof(*p), GFP_KERNEL);
  1654. if (!p)
  1655. return ERR_PTR(-ENOMEM);
  1656. spin_lock(&swap_lock);
  1657. for (type = 0; type < nr_swapfiles; type++) {
  1658. if (!(swap_info[type]->flags & SWP_USED))
  1659. break;
  1660. }
  1661. if (type >= MAX_SWAPFILES) {
  1662. spin_unlock(&swap_lock);
  1663. kfree(p);
  1664. return ERR_PTR(-EPERM);
  1665. }
  1666. if (type >= nr_swapfiles) {
  1667. p->type = type;
  1668. swap_info[type] = p;
  1669. /*
  1670. * Write swap_info[type] before nr_swapfiles, in case a
  1671. * racing procfs swap_start() or swap_next() is reading them.
  1672. * (We never shrink nr_swapfiles, we never free this entry.)
  1673. */
  1674. smp_wmb();
  1675. nr_swapfiles++;
  1676. } else {
  1677. kfree(p);
  1678. p = swap_info[type];
  1679. /*
  1680. * Do not memset this entry: a racing procfs swap_next()
  1681. * would be relying on p->type to remain valid.
  1682. */
  1683. }
  1684. INIT_LIST_HEAD(&p->first_swap_extent.list);
  1685. p->flags = SWP_USED;
  1686. p->next = -1;
  1687. spin_unlock(&swap_lock);
  1688. spin_lock_init(&p->lock);
  1689. return p;
  1690. }
  1691. static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
  1692. {
  1693. int error;
  1694. if (S_ISBLK(inode->i_mode)) {
  1695. p->bdev = bdgrab(I_BDEV(inode));
  1696. error = blkdev_get(p->bdev,
  1697. FMODE_READ | FMODE_WRITE | FMODE_EXCL,
  1698. sys_swapon);
  1699. if (error < 0) {
  1700. p->bdev = NULL;
  1701. return -EINVAL;
  1702. }
  1703. p->old_block_size = block_size(p->bdev);
  1704. error = set_blocksize(p->bdev, PAGE_SIZE);
  1705. if (error < 0)
  1706. return error;
  1707. p->flags |= SWP_BLKDEV;
  1708. } else if (S_ISREG(inode->i_mode)) {
  1709. p->bdev = inode->i_sb->s_bdev;
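/*
 * Hold i_mutex for the rest of swapon; sys_swapon() drops it again on
 * both the success and error paths (see its out: and bad_swap: labels).
 */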
  1710. mutex_lock(&inode->i_mutex);
  1711. if (IS_SWAPFILE(inode))
  1712. return -EBUSY;
  1713. } else
  1714. return -EINVAL;
  1715. return 0;
  1716. }
  1717. static unsigned long read_swap_header(struct swap_info_struct *p,
  1718. union swap_header *swap_header,
  1719. struct inode *inode)
  1720. {
  1721. int i;
  1722. unsigned long maxpages;
  1723. unsigned long swapfilepages;
  1724. unsigned long last_page;
  1725. if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
  1726. pr_err("Unable to find swap-space signature\n");
  1727. return 0;
  1728. }
1729. /* swap partition endianness hack... */
  1730. if (swab32(swap_header->info.version) == 1) {
  1731. swab32s(&swap_header->info.version);
  1732. swab32s(&swap_header->info.last_page);
  1733. swab32s(&swap_header->info.nr_badpages);
  1734. for (i = 0; i < swap_header->info.nr_badpages; i++)
  1735. swab32s(&swap_header->info.badpages[i]);
  1736. }
  1737. /* Check the swap header's sub-version */
  1738. if (swap_header->info.version != 1) {
  1739. pr_warn("Unable to handle swap header version %d\n",
  1740. swap_header->info.version);
  1741. return 0;
  1742. }
  1743. p->lowest_bit = 1;
  1744. p->cluster_next = 1;
  1745. p->cluster_nr = 0;
  1746. /*
  1747. * Find out how many pages are allowed for a single swap
  1748. * device. There are two limiting factors: 1) the number
  1749. * of bits for the swap offset in the swp_entry_t type, and
  1750. * 2) the number of bits in the swap pte as defined by the
  1751. * different architectures. In order to find the
  1752. * largest possible bit mask, a swap entry with swap type 0
  1753. * and swap offset ~0UL is created, encoded to a swap pte,
  1754. * decoded to a swp_entry_t again, and finally the swap
  1755. * offset is extracted. This will mask all the bits from
  1756. * the initial ~0UL mask that can't be encoded in either
  1757. * the swp_entry_t or the architecture definition of a
  1758. * swap pte.
  1759. */
  1760. maxpages = swp_offset(pte_to_swp_entry(
  1761. swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
  1762. last_page = swap_header->info.last_page;
  1763. if (last_page > maxpages) {
  1764. pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
  1765. maxpages << (PAGE_SHIFT - 10),
  1766. last_page << (PAGE_SHIFT - 10));
  1767. }
  1768. if (maxpages > last_page) {
  1769. maxpages = last_page + 1;
  1770. /* p->max is an unsigned int: don't overflow it */
  1771. if ((unsigned int)maxpages == 0)
  1772. maxpages = UINT_MAX;
  1773. }
  1774. p->highest_bit = maxpages - 1;
  1775. if (!maxpages)
  1776. return 0;
  1777. swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
  1778. if (swapfilepages && maxpages > swapfilepages) {
  1779. pr_warn("Swap area shorter than signature indicates\n");
  1780. return 0;
  1781. }
  1782. if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
  1783. return 0;
  1784. if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
  1785. return 0;
  1786. return maxpages;
  1787. }
  1788. static int setup_swap_map_and_extents(struct swap_info_struct *p,
  1789. union swap_header *swap_header,
  1790. unsigned char *swap_map,
  1791. unsigned long maxpages,
  1792. sector_t *span)
  1793. {
  1794. int i;
  1795. unsigned int nr_good_pages;
  1796. int nr_extents;
  1797. nr_good_pages = maxpages - 1; /* omit header page */
  1798. for (i = 0; i < swap_header->info.nr_badpages; i++) {
  1799. unsigned int page_nr = swap_header->info.badpages[i];
  1800. if (page_nr == 0 || page_nr > swap_header->info.last_page)
  1801. return -EINVAL;
  1802. if (page_nr < maxpages) {
  1803. swap_map[page_nr] = SWAP_MAP_BAD;
  1804. nr_good_pages--;
  1805. }
  1806. }
  1807. if (nr_good_pages) {
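/* page 0 holds the swap header, so it must never be allocated for data */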
  1808. swap_map[0] = SWAP_MAP_BAD;
  1809. p->max = maxpages;
  1810. p->pages = nr_good_pages;
  1811. nr_extents = setup_swap_extents(p, span);
  1812. if (nr_extents < 0)
  1813. return nr_extents;
  1814. nr_good_pages = p->pages;
  1815. }
  1816. if (!nr_good_pages) {
  1817. pr_warn("Empty swap-file\n");
  1818. return -EINVAL;
  1819. }
  1820. return nr_extents;
  1821. }
  1822. /*
1823. * Helper for sys_swapon() that determines whether a given swap
1824. * backing device's queue supports DISCARD operations.
  1825. */
  1826. static bool swap_discardable(struct swap_info_struct *si)
  1827. {
  1828. struct request_queue *q = bdev_get_queue(si->bdev);
  1829. if (!q || !blk_queue_discard(q))
  1830. return false;
  1831. return true;
  1832. }
  1833. SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
  1834. {
  1835. struct swap_info_struct *p;
  1836. struct filename *name;
  1837. struct file *swap_file = NULL;
  1838. struct address_space *mapping;
  1839. int i;
  1840. int prio;
  1841. int error;
  1842. union swap_header *swap_header;
  1843. int nr_extents;
  1844. sector_t span;
  1845. unsigned long maxpages;
  1846. unsigned char *swap_map = NULL;
  1847. unsigned long *frontswap_map = NULL;
  1848. struct page *page = NULL;
  1849. struct inode *inode = NULL;
  1850. if (swap_flags & ~SWAP_FLAGS_VALID)
  1851. return -EINVAL;
  1852. if (!capable(CAP_SYS_ADMIN))
  1853. return -EPERM;
  1854. p = alloc_swap_info();
  1855. if (IS_ERR(p))
  1856. return PTR_ERR(p);
  1857. name = getname(specialfile);
  1858. if (IS_ERR(name)) {
  1859. error = PTR_ERR(name);
  1860. name = NULL;
  1861. goto bad_swap;
  1862. }
  1863. swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0);
  1864. if (IS_ERR(swap_file)) {
  1865. error = PTR_ERR(swap_file);
  1866. swap_file = NULL;
  1867. goto bad_swap;
  1868. }
  1869. p->swap_file = swap_file;
  1870. mapping = swap_file->f_mapping;
  1871. for (i = 0; i < nr_swapfiles; i++) {
  1872. struct swap_info_struct *q = swap_info[i];
  1873. if (q == p || !q->swap_file)
  1874. continue;
  1875. if (mapping == q->swap_file->f_mapping) {
  1876. error = -EBUSY;
  1877. goto bad_swap;
  1878. }
  1879. }
  1880. inode = mapping->host;
1881. /* If S_ISREG(inode->i_mode), claim_swapfile() will take mutex_lock(&inode->i_mutex) */
  1882. error = claim_swapfile(p, inode);
  1883. if (unlikely(error))
  1884. goto bad_swap;
  1885. /*
  1886. * Read the swap header.
  1887. */
  1888. if (!mapping->a_ops->readpage) {
  1889. error = -EINVAL;
  1890. goto bad_swap;
  1891. }
  1892. page = read_mapping_page(mapping, 0, swap_file);
  1893. if (IS_ERR(page)) {
  1894. error = PTR_ERR(page);
  1895. goto bad_swap;
  1896. }
  1897. swap_header = kmap(page);
  1898. maxpages = read_swap_header(p, swap_header, inode);
  1899. if (unlikely(!maxpages)) {
  1900. error = -EINVAL;
  1901. goto bad_swap;
  1902. }
  1903. /* OK, set up the swap map and apply the bad block list */
  1904. swap_map = vzalloc(maxpages);
  1905. if (!swap_map) {
  1906. error = -ENOMEM;
  1907. goto bad_swap;
  1908. }
  1909. error = swap_cgroup_swapon(p->type, maxpages);
  1910. if (error)
  1911. goto bad_swap;
  1912. nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
  1913. maxpages, &span);
  1914. if (unlikely(nr_extents < 0)) {
  1915. error = nr_extents;
  1916. goto bad_swap;
  1917. }
  1918. /* frontswap enabled? set up bit-per-page map for frontswap */
  1919. if (frontswap_enabled)
  1920. frontswap_map = vzalloc(BITS_TO_LONGS(maxpages) * sizeof(long));
  1921. if (p->bdev) {
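/*
 * Non-rotational (SSD-like) devices pay no seek penalty: mark the area
 * SWP_SOLIDSTATE and start allocation at a random cluster instead of
 * always at the front of the device.
 */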
  1922. if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
  1923. p->flags |= SWP_SOLIDSTATE;
  1924. p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
  1925. }
  1926. if ((swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
  1927. /*
  1928. * When discard is enabled for swap with no particular
  1929. * policy flagged, we set all swap discard flags here in
  1930. * order to sustain backward compatibility with older
  1931. * swapon(8) releases.
  1932. */
  1933. p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
  1934. SWP_PAGE_DISCARD);
  1935. /*
  1936. * By flagging sys_swapon, a sysadmin can tell us to
  1937. * either do single-time area discards only, or to just
  1938. * perform discards for released swap page-clusters.
  1939. * Now it's time to adjust the p->flags accordingly.
  1940. */
  1941. if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
  1942. p->flags &= ~SWP_PAGE_DISCARD;
  1943. else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
  1944. p->flags &= ~SWP_AREA_DISCARD;
  1945. /* issue a swapon-time discard if it's still required */
  1946. if (p->flags & SWP_AREA_DISCARD) {
  1947. int err = discard_swap(p);
  1948. if (unlikely(err))
  1949. pr_err("swapon: discard_swap(%p): %d\n",
  1950. p, err);
  1951. }
  1952. }
  1953. }
  1954. mutex_lock(&swapon_mutex);
  1955. prio = -1;
  1956. if (swap_flags & SWAP_FLAG_PREFER)
  1957. prio =
  1958. (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
  1959. enable_swap_info(p, prio, swap_map, frontswap_map);
  1960. pr_info("Adding %uk swap on %s. "
  1961. "Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
  1962. p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
  1963. nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
  1964. (p->flags & SWP_SOLIDSTATE) ? "SS" : "",
  1965. (p->flags & SWP_DISCARDABLE) ? "D" : "",
  1966. (p->flags & SWP_AREA_DISCARD) ? "s" : "",
  1967. (p->flags & SWP_PAGE_DISCARD) ? "c" : "",
  1968. (frontswap_map) ? "FS" : "");
  1969. mutex_unlock(&swapon_mutex);
  1970. atomic_inc(&proc_poll_event);
  1971. wake_up_interruptible(&proc_poll_wait);
  1972. if (S_ISREG(inode->i_mode))
  1973. inode->i_flags |= S_SWAPFILE;
  1974. error = 0;
  1975. goto out;
  1976. bad_swap:
  1977. if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
  1978. set_blocksize(p->bdev, p->old_block_size);
  1979. blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
  1980. }
  1981. destroy_swap_extents(p);
  1982. swap_cgroup_swapoff(p->type);
  1983. spin_lock(&swap_lock);
  1984. p->swap_file = NULL;
  1985. p->flags = 0;
  1986. spin_unlock(&swap_lock);
  1987. vfree(swap_map);
  1988. if (swap_file) {
  1989. if (inode && S_ISREG(inode->i_mode)) {
  1990. mutex_unlock(&inode->i_mutex);
  1991. inode = NULL;
  1992. }
  1993. filp_close(swap_file, NULL);
  1994. }
  1995. out:
  1996. if (page && !IS_ERR(page)) {
  1997. kunmap(page);
  1998. page_cache_release(page);
  1999. }
  2000. if (name)
  2001. putname(name);
  2002. if (inode && S_ISREG(inode->i_mode))
  2003. mutex_unlock(&inode->i_mutex);
  2004. return error;
  2005. }
  2006. void si_swapinfo(struct sysinfo *val)
  2007. {
  2008. unsigned int type;
  2009. unsigned long nr_to_be_unused = 0;
  2010. spin_lock(&swap_lock);
  2011. for (type = 0; type < nr_swapfiles; type++) {
  2012. struct swap_info_struct *si = swap_info[type];
  2013. if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
  2014. nr_to_be_unused += si->inuse_pages;
  2015. }
  2016. val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
  2017. val->totalswap = total_swap_pages + nr_to_be_unused;
  2018. spin_unlock(&swap_lock);
  2019. }
  2020. /*
  2021. * Verify that a swap entry is valid and increment its swap map count.
  2022. *
2023. * Returns an error code in the following cases:
  2024. * - success -> 0
  2025. * - swp_entry is invalid -> EINVAL
  2026. * - swp_entry is migration entry -> EINVAL
  2027. * - swap-cache reference is requested but there is already one. -> EEXIST
  2028. * - swap-cache reference is requested but the entry is not used. -> ENOENT
  2029. * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
  2030. */
  2031. static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
  2032. {
  2033. struct swap_info_struct *p;
  2034. unsigned long offset, type;
  2035. unsigned char count;
  2036. unsigned char has_cache;
  2037. int err = -EINVAL;
  2038. if (non_swap_entry(entry))
  2039. goto out;
  2040. type = swp_type(entry);
  2041. if (type >= nr_swapfiles)
  2042. goto bad_file;
  2043. p = swap_info[type];
  2044. offset = swp_offset(entry);
  2045. spin_lock(&p->lock);
  2046. if (unlikely(offset >= p->max))
  2047. goto unlock_out;
  2048. count = p->swap_map[offset];
  2049. has_cache = count & SWAP_HAS_CACHE;
  2050. count &= ~SWAP_HAS_CACHE;
  2051. err = 0;
  2052. if (usage == SWAP_HAS_CACHE) {
  2053. /* set SWAP_HAS_CACHE if there is no cache and entry is used */
  2054. if (!has_cache && count)
  2055. has_cache = SWAP_HAS_CACHE;
  2056. else if (has_cache) /* someone else added cache */
  2057. err = -EEXIST;
  2058. else /* no users remaining */
  2059. err = -ENOENT;
  2060. } else if (count || has_cache) {
  2061. if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
  2062. count += usage;
  2063. else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
  2064. err = -EINVAL;
  2065. else if (swap_count_continued(p, offset, count))
  2066. count = COUNT_CONTINUED;
  2067. else
  2068. err = -ENOMEM;
  2069. } else
  2070. err = -ENOENT; /* unused swap entry */
  2071. p->swap_map[offset] = count | has_cache;
  2072. unlock_out:
  2073. spin_unlock(&p->lock);
  2074. out:
  2075. return err;
  2076. bad_file:
  2077. pr_err("swap_dup: %s%08lx\n", Bad_file, entry.val);
  2078. goto out;
  2079. }
  2080. /*
  2081. * Help swapoff by noting that swap entry belongs to shmem/tmpfs
  2082. * (in which case its reference count is never incremented).
  2083. */
  2084. void swap_shmem_alloc(swp_entry_t entry)
  2085. {
  2086. __swap_duplicate(entry, SWAP_MAP_SHMEM);
  2087. }
  2088. /*
  2089. * Increase reference count of swap entry by 1.
  2090. * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
  2091. * but could not be atomically allocated. Returns 0, just as if it succeeded,
  2092. * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
2093. * might occur if a page table entry has been corrupted.
  2094. */
  2095. int swap_duplicate(swp_entry_t entry)
  2096. {
  2097. int err = 0;
  2098. while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
  2099. err = add_swap_count_continuation(entry, GFP_ATOMIC);
  2100. return err;
  2101. }
  2102. /*
  2103. * @entry: swap entry for which we allocate swap cache.
  2104. *
2105. * Called when allocating swap cache for an existing swap entry.
2106. * This can return error codes; it returns 0 on success.
2107. * -EEXIST means there is already a swap cache.
  2108. * Note: return code is different from swap_duplicate().
  2109. */
  2110. int swapcache_prepare(swp_entry_t entry)
  2111. {
  2112. return __swap_duplicate(entry, SWAP_HAS_CACHE);
  2113. }
  2114. struct swap_info_struct *page_swap_info(struct page *page)
  2115. {
  2116. swp_entry_t swap = { .val = page_private(page) };
  2117. BUG_ON(!PageSwapCache(page));
  2118. return swap_info[swp_type(swap)];
  2119. }
  2120. /*
  2121. * out-of-line __page_file_ methods to avoid include hell.
  2122. */
  2123. struct address_space *__page_file_mapping(struct page *page)
  2124. {
  2125. VM_BUG_ON(!PageSwapCache(page));
  2126. return page_swap_info(page)->swap_file->f_mapping;
  2127. }
  2128. EXPORT_SYMBOL_GPL(__page_file_mapping);
  2129. pgoff_t __page_file_index(struct page *page)
  2130. {
  2131. swp_entry_t swap = { .val = page_private(page) };
  2132. VM_BUG_ON(!PageSwapCache(page));
  2133. return swp_offset(swap);
  2134. }
  2135. EXPORT_SYMBOL_GPL(__page_file_index);
  2136. /*
  2137. * add_swap_count_continuation - called when a swap count is duplicated
2138. * beyond SWAP_MAP_MAX: it allocates a new page and links it to the entry's
  2139. * page of the original vmalloc'ed swap_map, to hold the continuation count
  2140. * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called
  2141. * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
  2142. *
  2143. * These continuation pages are seldom referenced: the common paths all work
  2144. * on the original swap_map, only referring to a continuation page when the
  2145. * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
  2146. *
  2147. * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
  2148. * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
  2149. * can be called after dropping locks.
  2150. */
  2151. int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
  2152. {
  2153. struct swap_info_struct *si;
  2154. struct page *head;
  2155. struct page *page;
  2156. struct page *list_page;
  2157. pgoff_t offset;
  2158. unsigned char count;
  2159. /*
  2160. * When debugging, it's easier to use __GFP_ZERO here; but it's better
  2161. * for latency not to zero a page while GFP_ATOMIC and holding locks.
  2162. */
  2163. page = alloc_page(gfp_mask | __GFP_HIGHMEM);
  2164. si = swap_info_get(entry);
  2165. if (!si) {
  2166. /*
  2167. * An acceptable race has occurred since the failing
  2168. * __swap_duplicate(): the swap entry has been freed,
  2169. * perhaps even the whole swap_map cleared for swapoff.
  2170. */
  2171. goto outer;
  2172. }
  2173. offset = swp_offset(entry);
  2174. count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
  2175. if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
  2176. /*
  2177. * The higher the swap count, the more likely it is that tasks
  2178. * will race to add swap count continuation: we need to avoid
  2179. * over-provisioning.
  2180. */
  2181. goto out;
  2182. }
  2183. if (!page) {
  2184. spin_unlock(&si->lock);
  2185. return -ENOMEM;
  2186. }
  2187. /*
  2188. * We are fortunate that although vmalloc_to_page uses pte_offset_map,
  2189. * no architecture is using highmem pages for kernel pagetables: so it
  2190. * will not corrupt the GFP_ATOMIC caller's atomic pagetable kmaps.
  2191. */
  2192. head = vmalloc_to_page(si->swap_map + offset);
  2193. offset &= ~PAGE_MASK;
  2194. /*
  2195. * Page allocation does not initialize the page's lru field,
  2196. * but it does always reset its private field.
  2197. */
  2198. if (!page_private(head)) {
  2199. BUG_ON(count & COUNT_CONTINUED);
  2200. INIT_LIST_HEAD(&head->lru);
  2201. set_page_private(head, SWP_CONTINUED);
  2202. si->flags |= SWP_CONTINUED;
  2203. }
  2204. list_for_each_entry(list_page, &head->lru, lru) {
  2205. unsigned char *map;
  2206. /*
  2207. * If the previous map said no continuation, but we've found
  2208. * a continuation page, free our allocation and use this one.
  2209. */
  2210. if (!(count & COUNT_CONTINUED))
  2211. goto out;
  2212. map = kmap_atomic(list_page) + offset;
  2213. count = *map;
  2214. kunmap_atomic(map);
  2215. /*
  2216. * If this continuation count now has some space in it,
  2217. * free our allocation and use this one.
  2218. */
  2219. if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
  2220. goto out;
  2221. }
  2222. list_add_tail(&page->lru, &head->lru);
  2223. page = NULL; /* now it's attached, don't free it */
  2224. out:
  2225. spin_unlock(&si->lock);
  2226. outer:
  2227. if (page)
  2228. __free_page(page);
  2229. return 0;
  2230. }
  2231. /*
  2232. * swap_count_continued - when the original swap_map count is incremented
  2233. * from SWAP_MAP_MAX, check if there is already a continuation page to carry
  2234. * into, carry if so, or else fail until a new continuation page is allocated;
  2235. * when the original swap_map count is decremented from 0 with continuation,
  2236. * borrow from the continuation and report whether it still holds more.
  2237. * Called while __swap_duplicate() or swap_entry_free() holds swap_lock.
  2238. */
  2239. static bool swap_count_continued(struct swap_info_struct *si,
  2240. pgoff_t offset, unsigned char count)
  2241. {
  2242. struct page *head;
  2243. struct page *page;
  2244. unsigned char *map;
  2245. head = vmalloc_to_page(si->swap_map + offset);
  2246. if (page_private(head) != SWP_CONTINUED) {
  2247. BUG_ON(count & COUNT_CONTINUED);
  2248. return false; /* need to add count continuation */
  2249. }
  2250. offset &= ~PAGE_MASK;
  2251. page = list_entry(head->lru.next, struct page, lru);
  2252. map = kmap_atomic(page) + offset;
  2253. if (count == SWAP_MAP_MAX) /* initial increment from swap_map */
  2254. goto init_map; /* jump over SWAP_CONT_MAX checks */
  2255. if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
  2256. /*
  2257. * Think of how you add 1 to 999
  2258. */
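/*
 * The count is kept as "digits": the low digit lives in swap_map (at
 * most SWAP_MAP_MAX) and each continuation page holds one higher digit
 * (at most SWAP_CONT_MAX). Walking away from head, a digit reading
 * SWAP_CONT_MAX | COUNT_CONTINUED is full with more above and is
 * skipped; one reading exactly SWAP_CONT_MAX has overflowed, so a fresh
 * digit is started on the next page and incremented, and on the way
 * back the overflowed digits wrap to zero (left as bare
 * COUNT_CONTINUED) - just as the trailing 9s of 999 + 1 become 0s.
 */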
  2259. while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
  2260. kunmap_atomic(map);
  2261. page = list_entry(page->lru.next, struct page, lru);
  2262. BUG_ON(page == head);
  2263. map = kmap_atomic(page) + offset;
  2264. }
  2265. if (*map == SWAP_CONT_MAX) {
  2266. kunmap_atomic(map);
  2267. page = list_entry(page->lru.next, struct page, lru);
  2268. if (page == head)
  2269. return false; /* add count continuation */
  2270. map = kmap_atomic(page) + offset;
  2271. init_map: *map = 0; /* we didn't zero the page */
  2272. }
  2273. *map += 1;
  2274. kunmap_atomic(map);
  2275. page = list_entry(page->lru.prev, struct page, lru);
  2276. while (page != head) {
  2277. map = kmap_atomic(page) + offset;
  2278. *map = COUNT_CONTINUED;
  2279. kunmap_atomic(map);
  2280. page = list_entry(page->lru.prev, struct page, lru);
  2281. }
  2282. return true; /* incremented */
  2283. } else { /* decrementing */
  2284. /*
  2285. * Think of how you subtract 1 from 1000
  2286. */
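/*
 * Mirror image of the increment: digits reading bare COUNT_CONTINUED
 * are zero with more above and are skipped; the first non-zero digit is
 * decremented (the borrow), and on the way back the zero digits become
 * SWAP_CONT_MAX - just as the trailing 0s of 1000 - 1 become 9s.
 */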
  2287. BUG_ON(count != COUNT_CONTINUED);
  2288. while (*map == COUNT_CONTINUED) {
  2289. kunmap_atomic(map);
  2290. page = list_entry(page->lru.next, struct page, lru);
  2291. BUG_ON(page == head);
  2292. map = kmap_atomic(page) + offset;
  2293. }
  2294. BUG_ON(*map == 0);
  2295. *map -= 1;
  2296. if (*map == 0)
  2297. count = 0;
  2298. kunmap_atomic(map);
  2299. page = list_entry(page->lru.prev, struct page, lru);
  2300. while (page != head) {
  2301. map = kmap_atomic(page) + offset;
  2302. *map = SWAP_CONT_MAX | count;
  2303. count = COUNT_CONTINUED;
  2304. kunmap_atomic(map);
  2305. page = list_entry(page->lru.prev, struct page, lru);
  2306. }
  2307. return count == COUNT_CONTINUED;
  2308. }
  2309. }
  2310. /*
2311. * free_swap_count_continuations - called at swapoff time to free all the continuation pages
  2312. * appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
  2313. */
  2314. static void free_swap_count_continuations(struct swap_info_struct *si)
  2315. {
  2316. pgoff_t offset;
  2317. for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
  2318. struct page *head;
  2319. head = vmalloc_to_page(si->swap_map + offset);
  2320. if (page_private(head)) {
  2321. struct list_head *this, *next;
  2322. list_for_each_safe(this, next, &head->lru) {
  2323. struct page *page;
  2324. page = list_entry(this, struct page, lru);
  2325. list_del(this);
  2326. __free_page(page);
  2327. }
  2328. }
  2329. }
  2330. }