filemap.c

/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/aio.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include "internal.h"

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_mutex			(truncate_pagecache)
 *    ->private_lock			(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock			(exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_mutex			(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_mutex
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->mapping->tree_lock		(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page			(access_process_vm)
 *
 *  ->i_mutex				(generic_file_buffered_write)
 *    ->mmap_sem			(fault_in_pages_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock				(fs/fs-writeback.c)
 *    ->mapping->tree_lock		(__sync_single_inode)
 *
 *  ->i_mmap_mutex
 *    ->anon_vma.lock			(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock			(try_to_unmap_one)
 *    ->private_lock			(try_to_unmap_one)
 *    ->tree_lock			(try_to_unmap_one)
 *    ->zone.lru_lock			(follow_page->mark_page_accessed)
 *    ->zone.lru_lock			(check_pte_range->isolate_lru_page)
 *    ->private_lock			(page_remove_rmap->set_page_dirty)
 *    ->tree_lock			(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock			(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock			(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock			(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock			(zap_pte_range->set_page_dirty)
 *    ->private_lock			(zap_pte_range->__set_page_dirty_buffers)
 *
 *  (code doesn't rely on that order, so you could switch it around)
 *  ->tasklist_lock			(memory_failure, collect_procs_ao)
 *    ->i_mmap_mutex
 */

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the mapping's tree_lock.
 */
void __delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	/*
	 * if we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries.  We can't leave
	 * stale data around in the cleancache once our page is gone
	 */
	if (PageUptodate(page) && PageMappedToDisk(page))
		cleancache_put_page(page);
	else
		cleancache_flush_page(mapping, page);

	radix_tree_delete(&mapping->page_tree, page->index);
	page->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	if (PageSwapBacked(page))
		__dec_zone_page_state(page, NR_SHMEM);
	BUG_ON(page_mapped(page));

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 *
	 * Fix it up by doing a final dirty accounting check after
	 * having removed the page entirely.
	 */
	if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
		dec_zone_page_state(page, NR_FILE_DIRTY);
		dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
	}
}

/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the page
 * cache and locked.  It will never put the page into the free list, the caller
 * has a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;
	void (*freepage)(struct page *);

	BUG_ON(!PageLocked(page));

	freepage = mapping->a_ops->freepage;
	spin_lock_irq(&mapping->tree_lock);
	__delete_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (freepage)
		freepage(page);
	page_cache_release(page);
}
EXPORT_SYMBOL(delete_from_page_cache);

static int sleep_on_page(void *word)
{
	io_schedule();
	return 0;
}

static int sleep_on_page_killable(void *word)
{
	sleep_on_page(word);
	return fatal_signal_pending(current) ? -EINTR : 0;
}

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	ret = do_writepages(mapping, &wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
	pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;

	if (end_byte < start_byte)
		return 0;

	pagevec_init(&pvec, 0);
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (TestClearPageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/* Check for outstanding write errors */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait - wait for all under-writeback pages to complete
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping->nrpages) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		 * thing (e.g. bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping->nrpages) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
						lstart, lend);
			if (!err)
				err = err2;
		}
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
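/*
 * Example (illustrative only): a data-integrity path such as a filesystem's
 * ->fsync() implementation would typically flush and wait on the affected
 * byte range before syncing its metadata, roughly:
 *
 *	err = filemap_write_and_wait_range(file->f_mapping, start, end);
 *	if (err)
 *		return err;
 *	...write out inode metadata...
 */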
/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old:	page to be replaced
 * @new:	page to replace with
 * @gfp_mask:	allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  The only way this function can fail is
 * memory allocation failure.
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON(!PageLocked(old));
	VM_BUG_ON(!PageLocked(new));
	VM_BUG_ON(new->mapping);

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (!error) {
		struct address_space *mapping = old->mapping;
		void (*freepage)(struct page *);
		pgoff_t offset = old->index;

		freepage = mapping->a_ops->freepage;

		page_cache_get(new);
		new->mapping = mapping;
		new->index = offset;

		spin_lock_irq(&mapping->tree_lock);
		__delete_from_page_cache(old);
		error = radix_tree_insert(&mapping->page_tree, offset, new);
		BUG_ON(error);
		mapping->nrpages++;
		__inc_zone_page_state(new, NR_FILE_PAGES);
		if (PageSwapBacked(new))
			__inc_zone_page_state(new, NR_SHMEM);
		spin_unlock_irq(&mapping->tree_lock);
		/* mem_cgroup codes must not be called under tree_lock */
		mem_cgroup_replace_page_cache(old, new);
		radix_tree_preload_end();
		if (freepage)
			freepage(old);
		page_cache_release(old);
	}

	return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);

/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add a page to the pagecache. It must be locked.
 * This function does not add the page to the LRU.  The caller must do that.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
		pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageSwapBacked(page));

	error = mem_cgroup_cache_charge(page, current->mm,
					gfp_mask & GFP_RECLAIM_MASK);
	if (error)
		goto out;

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error == 0) {
		page_cache_get(page);
		page->mapping = mapping;
		page->index = offset;

		spin_lock_irq(&mapping->tree_lock);
		error = radix_tree_insert(&mapping->page_tree, offset, page);
		if (likely(!error)) {
			mapping->nrpages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			spin_unlock_irq(&mapping->tree_lock);
		} else {
			page->mapping = NULL;
			/* Leave page->index set: truncation relies upon it */
			spin_unlock_irq(&mapping->tree_lock);
			mem_cgroup_uncharge_cache_page(page);
			page_cache_release(page);
		}
		radix_tree_preload_end();
	} else
		mem_cgroup_uncharge_cache_page(page);
out:
	return error;
}
EXPORT_SYMBOL(add_to_page_cache_locked);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t offset, gfp_t gfp_mask)
{
	int ret;

	ret = add_to_page_cache(page, mapping, offset, gfp_mask);
	if (ret == 0)
		lru_cache_add_file(page);
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
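/*
 * Example: the read paths in this file (page_cache_read() and
 * do_generic_file_read() below) allocate a fresh page and insert it with
 * this helper, roughly:
 *
 *	page = page_cache_alloc_cold(mapping);
 *	if (page && !add_to_page_cache_lru(page, mapping, index, GFP_KERNEL))
 *		...the page is now locked, in the cache and on the LRU...
 */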
#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		get_mems_allowed();
		n = cpuset_mem_spread_node();
		page = alloc_pages_exact_node(n, gfp, 0);
		put_mems_allowed();
		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
static wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}

static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

void wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (!test_bit(bit_nr, &page->flags))
		return 0;

	return __wait_on_bit(page_waitqueue(page), &wait,
			     sleep_on_page_killable, TASK_KILLABLE);
}

/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
{
	wait_queue_head_t *q = page_waitqueue(page);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, waiter);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);
/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The mb is necessary to enforce ordering between the clear_bit and the read
 * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
 */
void unlock_page(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));
	clear_bit_unlock(PG_locked, &page->flags);
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);
/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	if (TestClearPageReclaim(page))
		rotate_reclaimable_page(page);

	if (!test_clear_page_writeback(page))
		BUG();

	smp_mb__after_clear_bit();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @page: the page to lock
 */
void __lock_page(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	__wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);
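/*
 * Note: callers normally use lock_page() from <linux/pagemap.h>, which only
 * falls back to this slow path when the fast trylock fails, roughly:
 *
 *	if (!trylock_page(page))
 *		__lock_page(page);
 */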
int __lock_page_killable(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	return __wait_on_bit_lock(page_waitqueue(page), &wait,
					sleep_on_page_killable, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);

int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
			 unsigned int flags)
{
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/*
		 * CAUTION! In this case, mmap_sem is not released
		 * even though 0 is returned.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return 0;

		up_read(&mm->mmap_sem);
		if (flags & FAULT_FLAG_KILLABLE)
			wait_on_page_locked_killable(page);
		else
			wait_on_page_locked(page);
		return 0;
	} else {
		if (flags & FAULT_FLAG_KILLABLE) {
			int ret;

			ret = __lock_page_killable(page);
			if (ret) {
				up_read(&mm->mmap_sem);
				return 0;
			}
		} else
			__lock_page(page);
		return 1;
	}
}

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Is there a pagecache struct page at the given (mapping, offset) tuple?
 * If yes, increment its refcount and return it; if no, return NULL.
 */
struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
{
	void **pagep;
	struct page *page;

	rcu_read_lock();
repeat:
	page = NULL;
	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
	if (pagep) {
		page = radix_tree_deref_slot(pagep);
		if (unlikely(!page))
			goto out;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto repeat;
			/*
			 * Otherwise, shmem/tmpfs must be storing a swap entry
			 * here as an exceptional entry: so return it without
			 * attempting to raise page count.
			 */
			goto out;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/*
		 * Has the page moved?
		 * This is part of the lockless pagecache protocol. See
		 * include/linux/pagemap.h for details.
		 */
		if (unlikely(page != *pagep)) {
			page_cache_release(page);
			goto repeat;
		}
	}
out:
	rcu_read_unlock();

	return page;
}
EXPORT_SYMBOL(find_get_page);
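/*
 * Example: a typical lookup drops the reference once it is done with the
 * page, e.g.:
 *
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		...inspect or copy from the page...
 *		page_cache_release(page);
 *	}
 */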
/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Locates the desired pagecache page, locks it, increments its reference
 * count and returns its address.
 *
 * Returns zero if the page was not present. find_lock_page() may sleep.
 */
struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
{
	struct page *page;

repeat:
	page = find_get_page(mapping, offset);
	if (page && !radix_tree_exception(page)) {
		lock_page(page);
		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		VM_BUG_ON(page->index != offset);
	}
	return page;
}
EXPORT_SYMBOL(find_lock_page);

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Locates a page in the pagecache.  If the page is not present, a new page
 * is allocated using @gfp_mask and is added to the pagecache and to the VM's
 * LRU list.  The returned page is locked and has its reference count
 * incremented.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an atomic
 * allocation!
 *
 * find_or_create_page() returns the desired page's address, or zero on
 * memory exhaustion.
 */
struct page *find_or_create_page(struct address_space *mapping,
		pgoff_t index, gfp_t gfp_mask)
{
	struct page *page;
	int err;
repeat:
	page = find_lock_page(mapping, index);
	if (!page) {
		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return NULL;
		/*
		 * We want a regular kernel memory (not highmem or DMA etc)
		 * allocation for the radix tree nodes, but we need to honour
		 * the context-specific requirements the caller has asked for.
		 * GFP_RECLAIM_MASK collects those requirements.
		 */
		err = add_to_page_cache_lru(page, mapping, index,
			(gfp_mask & GFP_RECLAIM_MASK));
		if (unlikely(err)) {
			page_cache_release(page);
			page = NULL;
			if (err == -EEXIST)
				goto repeat;
		}
	}
	return page;
}
EXPORT_SYMBOL(find_or_create_page);
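/*
 * Note: grab_cache_page() in <linux/pagemap.h> is a thin wrapper around this
 * function, roughly equivalent to:
 *
 *	find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
 */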
/**
 * find_get_pages - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			    unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;
	unsigned int nr_found, nr_skip;

	rcu_read_lock();
restart:
	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
				(void ***)pages, NULL, start, nr_pages);
	ret = 0;
	nr_skip = 0;
	for (i = 0; i < nr_found; i++) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot((void **)pages[i]);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				WARN_ON(start | i);
				goto restart;
			}
			/*
			 * Otherwise, shmem/tmpfs must be storing a swap entry
			 * here as an exceptional entry: so skip over it -
			 * we only reach this from invalidate_mapping_pages().
			 */
			nr_skip++;
			continue;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *((void **)pages[i]))) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		ret++;
	}

	/*
	 * If all entries were removed before we could secure them,
	 * try again, because callers stop trying once 0 is returned.
	 */
	if (unlikely(!ret && nr_found > nr_skip))
		goto restart;
	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping:	The address_space to search
 * @index:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned number of pages are guaranteed to be contiguous.
 *
 * find_get_pages_contig() returns the number of pages which were found.
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
			       unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;
	unsigned int nr_found;

	rcu_read_lock();
restart:
	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
				(void ***)pages, NULL, index, nr_pages);
	ret = 0;
	for (i = 0; i < nr_found; i++) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot((void **)pages[i]);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}
			/*
			 * Otherwise, shmem/tmpfs must be storing a swap entry
			 * here as an exceptional entry: so stop looking for
			 * contiguous pages.
			 */
			break;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *((void **)pages[i]))) {
			page_cache_release(page);
			goto repeat;
		}

		/*
		 * must check mapping and index after taking the ref.
		 * otherwise we can get both false positives and false
		 * negatives, which is just confusing to the caller.
		 */
		if (page->mapping == NULL || page->index != index) {
			page_cache_release(page);
			break;
		}

		pages[ret] = page;
		ret++;
		index++;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(find_get_pages_contig);

/**
 * find_get_pages_tag - find and return pages that match @tag
 * @mapping:	the address_space to search
 * @index:	the starting page index
 * @tag:	the tag index
 * @nr_pages:	the maximum number of pages
 * @pages:	where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag.  We update @index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;
	unsigned int nr_found;

	rcu_read_lock();
restart:
	nr_found = radix_tree_gang_lookup_tag_slot(&mapping->page_tree,
				(void ***)pages, *index, nr_pages, tag);
	ret = 0;
	for (i = 0; i < nr_found; i++) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot((void **)pages[i]);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}
			/*
			 * This function is never used on a shmem/tmpfs
			 * mapping, so a swap entry won't be found here.
			 */
			BUG();
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *((void **)pages[i]))) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		ret++;
	}

	/*
	 * If all entries were removed before we could secure them,
	 * try again, because callers stop trying once 0 is returned.
	 */
	if (unlikely(!ret && nr_found))
		goto restart;
	rcu_read_unlock();

	if (ret)
		*index = pages[ret - 1]->index + 1;

	return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
struct page *
grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);

	if (page) {
		if (trylock_page(page))
			return page;
		page_cache_release(page);
		return NULL;
	}
	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
	if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
		page_cache_release(page);
		page = NULL;
	}
	return page;
}
EXPORT_SYMBOL(grab_cache_page_nowait);

/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the i/o request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                             ^ bad block(assume 4k)
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
 *
 * It is going insane. Fix it by quickly scaling down the readahead size.
 */
static void shrink_readahead_size_eio(struct file *filp,
					struct file_ra_state *ra)
{
	ra->ra_pages /= 4;
}

/**
 * do_generic_file_read - generic file read routine
 * @filp:	the file to read
 * @ppos:	current file position
 * @desc:	read_descriptor
 * @actor:	read method
 *
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 */
static void do_generic_file_read(struct file *filp, loff_t *ppos,
		read_descriptor_t *desc, read_actor_t actor)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	struct file_ra_state *ra = &filp->f_ra;
	pgoff_t index;
	pgoff_t last_index;
	pgoff_t prev_index;
	unsigned long offset;      /* offset into pagecache page */
	unsigned int prev_offset;
	int error;

	index = *ppos >> PAGE_CACHE_SHIFT;
	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page;
		pgoff_t end_index;
		loff_t isize;
		unsigned long nr, ret;

		cond_resched();
find_page:
		page = find_get_page(mapping, index);
		if (!page) {
			page_cache_sync_readahead(mapping,
					ra, filp,
					index, last_index - index);
			page = find_get_page(mapping, index);
			if (unlikely(page == NULL))
				goto no_cached_page;
		}
		if (PageReadahead(page)) {
			page_cache_async_readahead(mapping,
					ra, filp, page,
					index, last_index - index);
		}
		if (!PageUptodate(page)) {
			if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
					!mapping->a_ops->is_partially_uptodate)
				goto page_not_up_to_date;
			if (!trylock_page(page))
				goto page_not_up_to_date;
			/* Did it get truncated before we got the lock? */
			if (!page->mapping)
				goto page_not_up_to_date_locked;
			if (!mapping->a_ops->is_partially_uptodate(page,
								desc, offset))
				goto page_not_up_to_date_locked;
			unlock_page(page);
		}
page_ok:
		/*
		 * i_size must be checked after we know the page is Uptodate.
		 *
		 * Checking i_size after the check allows us to calculate
		 * the correct value for "nr", which means the zero-filled
		 * part of the page is not copied back to userspace (unless
		 * another truncate extends the file - this is desired though).
		 */

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			page_cache_release(page);
			goto out;
		}

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index == end_index) {
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				page_cache_release(page);
				goto out;
			}
		}
		nr = nr - offset;

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * When a sequential read accesses a page several times,
		 * only mark it as accessed the first time.
		 */
		if (prev_index != index || offset != prev_offset)
			mark_page_accessed(page);
		prev_index = index;

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
		prev_offset = offset;

		page_cache_release(page);
		if (ret == nr && desc->count)
			continue;
		goto out;

page_not_up_to_date:
		/* Get exclusive access to the page ... */
		error = lock_page_killable(page);
		if (unlikely(error))
			goto readpage_error;

page_not_up_to_date_locked:
		/* Did it get truncated before we got the lock? */
		if (!page->mapping) {
			unlock_page(page);
			page_cache_release(page);
			continue;
		}

		/* Did somebody else fill it already? */
		if (PageUptodate(page)) {
			unlock_page(page);
			goto page_ok;
		}

readpage:
		/*
		 * A previous I/O error may have been due to temporary
		 * failures, eg. multipath errors.
		 * PG_error will be set again if readpage fails.
		 */
		ClearPageError(page);
		/* Start the actual read. The read will unlock the page. */
		error = mapping->a_ops->readpage(filp, page);

		if (unlikely(error)) {
			if (error == AOP_TRUNCATED_PAGE) {
				page_cache_release(page);
				goto find_page;
			}
			goto readpage_error;
		}

		if (!PageUptodate(page)) {
			error = lock_page_killable(page);
			if (unlikely(error))
				goto readpage_error;
			if (!PageUptodate(page)) {
				if (page->mapping == NULL) {
					/*
					 * invalidate_mapping_pages got it
					 */
					unlock_page(page);
					page_cache_release(page);
					goto find_page;
				}
				unlock_page(page);
				shrink_readahead_size_eio(filp, ra);
				error = -EIO;
				goto readpage_error;
			}
			unlock_page(page);
		}

		goto page_ok;

readpage_error:
		/* UHHUH! A synchronous read error occurred. Report it */
		desc->error = error;
		page_cache_release(page);
		goto out;

no_cached_page:
		/*
		 * Ok, it wasn't cached, so we need to create a new
		 * page..
		 */
		page = page_cache_alloc_cold(mapping);
		if (!page) {
			desc->error = -ENOMEM;
			goto out;
		}
		error = add_to_page_cache_lru(page, mapping,
						index, GFP_KERNEL);
		if (error) {
			page_cache_release(page);
			if (error == -EEXIST)
				goto find_page;
			desc->error = error;
			goto out;
		}
		goto readpage;
	}

out:
	ra->prev_pos = prev_index;
	ra->prev_pos <<= PAGE_CACHE_SHIFT;
	ra->prev_pos |= prev_offset;

	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

int file_read_actor(read_descriptor_t *desc, struct page *page,
			unsigned long offset, unsigned long size)
{
	char *kaddr;
	unsigned long left, count = desc->count;

	if (size > count)
		size = count;

	/*
	 * Faults on the destination of a read are common, so do it before
	 * taking the kmap.
	 */
	if (!fault_in_pages_writeable(desc->arg.buf, size)) {
		kaddr = kmap_atomic(page, KM_USER0);
		left = __copy_to_user_inatomic(desc->arg.buf,
						kaddr + offset, size);
		kunmap_atomic(kaddr, KM_USER0);
		if (left == 0)
			goto success;
	}

	/* Do it the slow way */
	kaddr = kmap(page);
	left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
	kunmap(page);

	if (left) {
		size -= left;
		desc->error = -EFAULT;
	}
success:
	desc->count = count - size;
	desc->written += size;
	desc->arg.buf += size;
	return size;
}

/*
 * Performs necessary checks before doing a write
 * @iov:	io vector request
 * @nr_segs:	number of segments in the iovec
 * @count:	number of bytes to write
 * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE
 *
 * Adjust number of segments and amount of bytes to write (nr_segs should be
 * properly initialized first). Returns appropriate error code that caller
 * should return or zero in case that write should be allowed.
 */
int generic_segment_checks(const struct iovec *iov,
			unsigned long *nr_segs, size_t *count, int access_flags)
{
	unsigned long seg;
	size_t cnt = 0;

	for (seg = 0; seg < *nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		cnt += iv->iov_len;
		if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(access_flags, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		*nr_segs = seg;
		cnt -= iv->iov_len;	/* This segment is no good */
		break;
	}
	*count = cnt;
	return 0;
}
EXPORT_SYMBOL(generic_segment_checks);
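/*
 * Example: generic_file_aio_read() below validates the user iovec with
 *
 *	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
 *
 * i.e. a read checks that the destination buffers are writable, while a
 * write path would pass VERIFY_READ for its source buffers.
 */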
  1248. /**
  1249. * generic_file_aio_read - generic filesystem read routine
  1250. * @iocb: kernel I/O control block
  1251. * @iov: io vector request
  1252. * @nr_segs: number of segments in the iovec
  1253. * @pos: current file position
  1254. *
  1255. * This is the "read()" routine for all filesystems
  1256. * that can use the page cache directly.
  1257. */
  1258. ssize_t
  1259. generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
  1260. unsigned long nr_segs, loff_t pos)
  1261. {
  1262. struct file *filp = iocb->ki_filp;
  1263. ssize_t retval;
  1264. unsigned long seg = 0;
  1265. size_t count;
  1266. loff_t *ppos = &iocb->ki_pos;
  1267. struct blk_plug plug;
  1268. count = 0;
  1269. retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
  1270. if (retval)
  1271. return retval;
  1272. blk_start_plug(&plug);
  1273. /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
  1274. if (filp->f_flags & O_DIRECT) {
  1275. loff_t size;
  1276. struct address_space *mapping;
  1277. struct inode *inode;
  1278. mapping = filp->f_mapping;
  1279. inode = mapping->host;
  1280. if (!count)
  1281. goto out; /* skip atime */
  1282. size = i_size_read(inode);
  1283. if (pos < size) {
  1284. retval = filemap_write_and_wait_range(mapping, pos,
  1285. pos + iov_length(iov, nr_segs) - 1);
  1286. if (!retval) {
  1287. retval = mapping->a_ops->direct_IO(READ, iocb,
  1288. iov, pos, nr_segs);
  1289. }
  1290. if (retval > 0) {
  1291. *ppos = pos + retval;
  1292. count -= retval;
  1293. }
  1294. /*
  1295. * Btrfs can have a short DIO read if we encounter
  1296. * compressed extents, so if there was an error, or if
  1297. * we've already read everything we wanted to, or if
  1298. * there was a short read because we hit EOF, go ahead
  1299. * and return. Otherwise fallthrough to buffered io for
  1300. * the rest of the read.
  1301. */
  1302. if (retval < 0 || !count || *ppos >= size) {
  1303. file_accessed(filp);
  1304. goto out;
  1305. }
  1306. }
  1307. }
  1308. count = retval;
  1309. for (seg = 0; seg < nr_segs; seg++) {
  1310. read_descriptor_t desc;
  1311. loff_t offset = 0;
  1312. /*
  1313. * If we did a short DIO read we need to skip the section of the
  1314. * iov that we've already read data into.
  1315. */
  1316. if (count) {
  1317. if (count > iov[seg].iov_len) {
  1318. count -= iov[seg].iov_len;
  1319. continue;
  1320. }
  1321. offset = count;
  1322. count = 0;
  1323. }
  1324. desc.written = 0;
  1325. desc.arg.buf = iov[seg].iov_base + offset;
  1326. desc.count = iov[seg].iov_len - offset;
  1327. if (desc.count == 0)
  1328. continue;
  1329. desc.error = 0;
  1330. do_generic_file_read(filp, ppos, &desc, file_read_actor);
  1331. retval += desc.written;
  1332. if (desc.error) {
  1333. retval = retval ?: desc.error;
  1334. break;
  1335. }
  1336. if (desc.count > 0)
  1337. break;
  1338. }
  1339. out:
  1340. blk_finish_plug(&plug);
  1341. return retval;
  1342. }
  1343. EXPORT_SYMBOL(generic_file_aio_read);
static ssize_t
do_readahead(struct address_space *mapping, struct file *filp,
	     pgoff_t index, unsigned long nr)
{
	if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
		return -EINVAL;

	force_page_cache_readahead(mapping, filp, index, nr);
	return 0;
}

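/*
 * The readahead(2) syscall: start readahead into the page cache for up to
 * @count bytes of @fd beginning at @offset.  The file must be open for
 * reading; otherwise -EBADF is returned.
 */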
SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
{
	ssize_t ret;
	struct file *file;

	ret = -EBADF;
	file = fget(fd);
	if (file) {
		if (file->f_mode & FMODE_READ) {
			struct address_space *mapping = file->f_mapping;
			pgoff_t start = offset >> PAGE_CACHE_SHIFT;
			pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
			unsigned long len = end - start + 1;
			ret = do_readahead(mapping, file, start, len);
		}
		fput(file);
	}
	return ret;
}

#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
{
	return SYSC_readahead((int) fd, offset, (size_t) count);
}
SYSCALL_ALIAS(sys_readahead, SyS_readahead);
#endif

#ifdef CONFIG_MMU
/**
 * page_cache_read - adds requested page to the page cache if not already there
 * @file:	file to read
 * @offset:	page index
 *
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int page_cache_read(struct file *file, pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int ret;

	do {
		page = page_cache_alloc_cold(mapping);
		if (!page)
			return -ENOMEM;

		ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
		if (ret == 0)
			ret = mapping->a_ops->readpage(file, page);
		else if (ret == -EEXIST)
			ret = 0; /* losing race to add is OK */

		page_cache_release(page);

	} while (ret == AOP_TRUNCATED_PAGE);

	return ret;
}

#define MMAP_LOTSAMISS  (100)

/*
 * Synchronous readahead happens when we don't even find
 * a page in the page cache at all.
 */
static void do_sync_mmap_readahead(struct vm_area_struct *vma,
				   struct file_ra_state *ra,
				   struct file *file,
				   pgoff_t offset)
{
	unsigned long ra_pages;
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (VM_RandomReadHint(vma))
		return;
	if (!ra->ra_pages)
		return;

	if (VM_SequentialReadHint(vma)) {
		page_cache_sync_readahead(mapping, ra, file, offset,
					  ra->ra_pages);
		return;
	}

	/* Avoid banging the cache line if not needed */
	if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
		ra->mmap_miss++;

	/*
	 * Do we miss much more than hit in this file? If so,
	 * stop bothering with read-ahead. It will only hurt.
	 */
	if (ra->mmap_miss > MMAP_LOTSAMISS)
		return;

	/*
	 * mmap read-around
	 */
	ra_pages = max_sane_readahead(ra->ra_pages);
	ra->start = max_t(long, 0, offset - ra_pages / 2);
	ra->size = ra_pages;
	ra->async_size = ra_pages / 4;
	ra_submit(ra, mapping, file);
}

/*
 * Asynchronous readahead happens when we find the page and PG_readahead,
 * so we want to possibly extend the readahead further.
 */
static void do_async_mmap_readahead(struct vm_area_struct *vma,
				    struct file_ra_state *ra,
				    struct file *file,
				    struct page *page,
				    pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (VM_RandomReadHint(vma))
		return;
	if (ra->mmap_miss > 0)
		ra->mmap_miss--;
	if (PageReadahead(page))
		page_cache_async_readahead(mapping, ra, file,
					   page, offset, ra->ra_pages);
}

/**
 * filemap_fault - read in file data for page fault handling
 * @vma:	vma in which the fault was taken
 * @vmf:	struct vm_fault containing details of the fault
 *
 * filemap_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 */
int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int error;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct file_ra_state *ra = &file->f_ra;
	struct inode *inode = mapping->host;
	pgoff_t offset = vmf->pgoff;
	struct page *page;
	pgoff_t size;
	int ret = 0;

	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (offset >= size)
		return VM_FAULT_SIGBUS;

	/*
	 * Do we have something in the page cache already?
	 */
	page = find_get_page(mapping, offset);
	if (likely(page)) {
		/*
		 * We found the page, so try async readahead before
		 * waiting for the lock.
		 */
		do_async_mmap_readahead(vma, ra, file, page, offset);
	} else {
		/* No page in the page cache at all */
		do_sync_mmap_readahead(vma, ra, file, offset);
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
		ret = VM_FAULT_MAJOR;
retry_find:
		page = find_get_page(mapping, offset);
		if (!page)
			goto no_cached_page;
	}

	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
		page_cache_release(page);
		return ret | VM_FAULT_RETRY;
	}

	/* Did it get truncated? */
	if (unlikely(page->mapping != mapping)) {
		unlock_page(page);
		put_page(page);
		goto retry_find;
	}
	VM_BUG_ON(page->index != offset);

	/*
	 * We have a locked page in the page cache, now we need to check
	 * that it's up-to-date. If not, it is going to be due to an error.
	 */
	if (unlikely(!PageUptodate(page)))
		goto page_not_uptodate;

	/*
	 * Found the page and have a reference on it.
	 * We must recheck i_size under page lock.
	 */
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (unlikely(offset >= size)) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return ret | VM_FAULT_LOCKED;

no_cached_page:
	/*
	 * We're only likely to ever get here if MADV_RANDOM is in
	 * effect.
	 */
	error = page_cache_read(file, offset);

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;

page_not_uptodate:
	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			error = -EIO;
	}
	page_cache_release(page);

	if (!error || error == AOP_TRUNCATED_PAGE)
		goto retry_find;

	/* Things didn't work out. Return zero to tell the mm layer so. */
	shrink_readahead_size_eio(file, ra);
	return VM_FAULT_SIGBUS;
}
EXPORT_SYMBOL(filemap_fault);

const struct vm_operations_struct generic_file_vm_ops = {
	.fault		= filemap_fault,
};

/* This is used for a general mmap of a disk file */

int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}

/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;
	return generic_file_mmap(file, vma);
}
#else
int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);

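/*
 * Look up the page at @index, or allocate and insert it if it isn't in the
 * page cache yet; a freshly allocated page is passed to @filler to start the
 * read.  May return an ERR_PTR.  The returned page is not guaranteed to be
 * uptodate or unlocked; callers handle that themselves.
 */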
static struct page *__read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data,
				gfp_t gfp)
{
	struct page *page;
	int err;
repeat:
	page = find_get_page(mapping, index);
	if (!page) {
		page = __page_cache_alloc(gfp | __GFP_COLD);
		if (!page)
			return ERR_PTR(-ENOMEM);
		err = add_to_page_cache_lru(page, mapping, index, gfp);
		if (unlikely(err)) {
			page_cache_release(page);
			if (err == -EEXIST)
				goto repeat;
			/* Presumably ENOMEM for radix tree node */
			return ERR_PTR(err);
		}
		err = filler(data, page);
		if (err < 0) {
			page_cache_release(page);
			page = ERR_PTR(err);
		}
	}
	return page;
}

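/*
 * Common helper for the read_cache_page*() variants: get the page via
 * __read_cache_page() and, if it still isn't uptodate, lock it, re-check
 * that it wasn't truncated, and run @filler once more.  This does not wait
 * for the read to complete; callers that need an uptodate page wait on it
 * themselves (see wait_on_page_read() below).
 */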
static struct page *do_read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data,
				gfp_t gfp)
{
	struct page *page;
	int err;

retry:
	page = __read_cache_page(mapping, index, filler, data, gfp);
	if (IS_ERR(page))
		return page;
	if (PageUptodate(page))
		goto out;

	lock_page(page);
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry;
	}
	if (PageUptodate(page)) {
		unlock_page(page);
		goto out;
	}
	err = filler(data, page);
	if (err < 0) {
		page_cache_release(page);
		return ERR_PTR(err);
	}
out:
	mark_page_accessed(page);
	return page;
}

/**
 * read_cache_page_async - read into page cache, fill it if needed
 * @mapping:	the page's address_space
 * @index:	the page index
 * @filler:	function to perform the read
 * @data:	first arg to filler(data, page) function, often left as NULL
 *
 * Same as read_cache_page, but don't wait for page to become unlocked
 * after submitting it to the filler.
 *
 * Read into the page cache. If a page already exists, and PageUptodate() is
 * not set, try to fill the page but don't wait for it to become unlocked.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page_async(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data)
{
	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_page_async);

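/*
 * Wait for the read started by the filler to finish.  A page that never
 * became uptodate is released and turned into ERR_PTR(-EIO); error pages
 * are passed through untouched.
 */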
static struct page *wait_on_page_read(struct page *page)
{
	if (!IS_ERR(page)) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			page_cache_release(page);
			page = ERR_PTR(-EIO);
		}
	}
	return page;
}

/**
 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
 * any new page allocations done using the specified allocation flags.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index,
				gfp_t gfp)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;

	return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
}
EXPORT_SYMBOL(read_cache_page_gfp);

/**
 * read_cache_page - read into page cache, fill it if needed
 * @mapping:	the page's address_space
 * @index:	the page index
 * @filler:	function to perform the read
 * @data:	first arg to filler(data, page) function, often left as NULL
 *
 * Read into the page cache. If a page already exists, and PageUptodate() is
 * not set, try to fill the page then wait for it to become unlocked.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data)
{
	return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
}
EXPORT_SYMBOL(read_cache_page);

/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */
int should_remove_suid(struct dentry *dentry)
{
	umode_t mode = dentry->d_inode->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone. If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
		return kill;

	return 0;
}
EXPORT_SYMBOL(should_remove_suid);

static int __remove_suid(struct dentry *dentry, int kill)
{
	struct iattr newattrs;

	newattrs.ia_valid = ATTR_FORCE | kill;
	return notify_change(dentry, &newattrs);
}

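/*
 * Remove setuid/setgid bits, and (if the security module asks for it) other
 * file "privileges", before data is written to the file.  The IS_NOSEC() /
 * S_NOSEC handling lets us skip the whole dance when nothing security
 * related is set on the inode.
 */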
int file_remove_suid(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int killsuid;
	int killpriv;
	int error = 0;

	/* Fast path for nothing security related */
	if (IS_NOSEC(inode))
		return 0;

	killsuid = should_remove_suid(dentry);
	killpriv = security_inode_need_killpriv(dentry);

	if (killpriv < 0)
		return killpriv;
	if (killpriv)
		error = security_inode_killpriv(dentry);
	if (!error && killsuid)
		error = __remove_suid(dentry, killsuid);
	if (!error && (inode->i_sb->s_flags & MS_NOSEC))
		inode->i_flags |= S_NOSEC;

	return error;
}
EXPORT_SYMBOL(file_remove_suid);

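/*
 * Copy @bytes from a multi-segment iovec (starting @base bytes into the
 * first segment) into @vaddr, stopping at the first segment that faults.
 * Returns the number of bytes actually copied.
 */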
static size_t __iovec_copy_from_user_inatomic(char *vaddr,
			const struct iovec *iov, size_t base, size_t bytes)
{
	size_t copied = 0, left = 0;

	while (bytes) {
		char __user *buf = iov->iov_base + base;
		int copy = min(bytes, iov->iov_len - base);

		base = 0;
		left = __copy_from_user_inatomic(vaddr, buf, copy);
		copied += copy;
		bytes -= copy;
		vaddr += copy;
		iov++;

		if (unlikely(left))
			break;
	}
	return copied - left;
}

/*
 * Copy as much as we can into the page and return the number of bytes which
 * were successfully copied.  If a fault is encountered then return the number
 * of bytes which were copied before the fault.
 */
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr;
	size_t copied;

	BUG_ON(!in_atomic());
	kaddr = kmap_atomic(page, KM_USER0);
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;
		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
						i->iov, i->iov_offset, bytes);
	}
	kunmap_atomic(kaddr, KM_USER0);

	return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

/*
 * This has the same side effects and return value as
 * iov_iter_copy_from_user_atomic().
 * The difference is that it attempts to resolve faults.
 * Page must not be locked.
 */
size_t iov_iter_copy_from_user(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr;
	size_t copied;

	kaddr = kmap(page);
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;
		left = __copy_from_user(kaddr + offset, buf, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
						i->iov, i->iov_offset, bytes);
	}
	kunmap(page);
	return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user);

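/*
 * Advance the iterator by @bytes.  The single-segment case is a simple
 * offset/count update; the multi-segment case walks the iovec array,
 * skipping zero-length segments along the way.
 */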
void iov_iter_advance(struct iov_iter *i, size_t bytes)
{
	BUG_ON(i->count < bytes);

	if (likely(i->nr_segs == 1)) {
		i->iov_offset += bytes;
		i->count -= bytes;
	} else {
		const struct iovec *iov = i->iov;
		size_t base = i->iov_offset;
		unsigned long nr_segs = i->nr_segs;

		/*
		 * The !iov->iov_len check ensures we skip over unlikely
		 * zero-length segments (without overrunning the iovec).
		 */
		while (bytes || unlikely(i->count && !iov->iov_len)) {
			int copy;

			copy = min(bytes, iov->iov_len - base);
			BUG_ON(!i->count || i->count < copy);
			i->count -= copy;
			bytes -= copy;
			base += copy;
			if (iov->iov_len == base) {
				iov++;
				nr_segs--;
				base = 0;
			}
		}
		i->iov = iov;
		i->iov_offset = base;
		i->nr_segs = nr_segs;
	}
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	char __user *buf = i->iov->iov_base + i->iov_offset;
	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
	return fault_in_pages_readable(buf, bytes);
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(struct iov_iter *i)
{
	const struct iovec *iov = i->iov;
	if (i->nr_segs == 1)
		return i->count;
	else
		return min(i->count, iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

/*
 * Performs necessary checks before doing a write.
 *
 * Can adjust the writing position or the amount of bytes to write.
 * Returns an appropriate error code that the caller should return, or
 * zero in case the write should be allowed.
 */
inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
{
	struct inode *inode = file->f_mapping->host;
	unsigned long limit = rlimit(RLIMIT_FSIZE);

	if (unlikely(*pos < 0))
		return -EINVAL;

	if (!isblk) {
		/* FIXME: this is for backwards compatibility with 2.4 */
		if (file->f_flags & O_APPEND)
			*pos = i_size_read(inode);

		if (limit != RLIM_INFINITY) {
			if (*pos >= limit) {
				send_sig(SIGXFSZ, current, 0);
				return -EFBIG;
			}
			if (*count > limit - (typeof(limit))*pos) {
				*count = limit - (typeof(limit))*pos;
			}
		}
	}

	/*
	 * LFS rule
	 */
	if (unlikely(*pos + *count > MAX_NON_LFS &&
				!(file->f_flags & O_LARGEFILE))) {
		if (*pos >= MAX_NON_LFS) {
			return -EFBIG;
		}
		if (*count > MAX_NON_LFS - (unsigned long)*pos) {
			*count = MAX_NON_LFS - (unsigned long)*pos;
		}
	}

	/*
	 * Are we about to exceed the fs block limit ?
	 *
	 * If we have written data it becomes a short write.  If we have
	 * exceeded without writing data we send a signal and return EFBIG.
	 * Linus's frestrict idea will clean these up nicely..
	 */
	if (likely(!isblk)) {
		if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
			if (*count || *pos > inode->i_sb->s_maxbytes) {
				return -EFBIG;
			}
			/* zero-length writes at ->s_maxbytes are OK */
		}

		if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
			*count = inode->i_sb->s_maxbytes - *pos;
	} else {
#ifdef CONFIG_BLOCK
		loff_t isize;
		if (bdev_read_only(I_BDEV(inode)))
			return -EPERM;
		isize = i_size_read(inode);
		if (*pos >= isize) {
			if (*count || *pos > isize)
				return -ENOSPC;
		}

		if (*pos + *count > isize)
			*count = isize - *pos;
#else
		return -EPERM;
#endif
	}
	return 0;
}
EXPORT_SYMBOL(generic_write_checks);

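/*
 * Thin wrappers around the address_space ->write_begin()/->write_end()
 * methods, for callers that don't want to dereference mapping->a_ops
 * themselves.  pagecache_write_end() also marks the page accessed before
 * handing the result back.
 */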
int pagecache_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_begin(file, mapping, pos, len, flags,
							pagep, fsdata);
}
EXPORT_SYMBOL(pagecache_write_begin);

int pagecache_write_end(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	mark_page_accessed(page);
	return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
}
EXPORT_SYMBOL(pagecache_write_end);

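/*
 * O_DIRECT write path: flush and invalidate any cached pages over the target
 * range, call ->direct_IO(), invalidate again afterwards, and update i_size
 * and *ppos if the write extended the file.  Returns the number of bytes
 * written, 0 when a page could not be invalidated (so the caller falls back
 * to buffered I/O), or a negative error.
 */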
ssize_t
generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long *nr_segs, loff_t pos, loff_t *ppos,
		size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t written;
	size_t write_len;
	pgoff_t end;

	if (count != ocount)
		*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);

	write_len = iov_length(iov, *nr_segs);
	end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;

	written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
	if (written)
		goto out;

	/*
	 * After a write we want buffered reads to be sure to go to disk to get
	 * the new data.  We invalidate clean cached pages from the region we're
	 * about to write.  We do this *before* the write so that we can return
	 * without clobbering -EIOCBQUEUED from ->direct_IO().
	 */
	if (mapping->nrpages) {
		written = invalidate_inode_pages2_range(mapping,
					pos >> PAGE_CACHE_SHIFT, end);
		/*
		 * If a page cannot be invalidated, return 0 to fall back
		 * to buffered write.
		 */
		if (written) {
			if (written == -EBUSY)
				return 0;
			goto out;
		}
	}

	written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);

	/*
	 * Finally, try again to invalidate clean pages which might have been
	 * cached by non-direct readahead, or faulted in by get_user_pages()
	 * if the source of the write was an mmap'ed region of the file
	 * we're writing.  Either one is a pretty crazy thing to do,
	 * so we don't support it 100%.  If this invalidation
	 * fails, tough, the write still worked...
	 */
	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      pos >> PAGE_CACHE_SHIFT, end);
	}

	if (written > 0) {
		pos += written;
		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
			i_size_write(inode, pos);
			mark_inode_dirty(inode);
		}
		*ppos = pos;
	}
out:
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write);

/*
 * Find or create a page at the given pagecache position. Return the locked
 * page. This function is specifically for buffered writes.
 */
struct page *grab_cache_page_write_begin(struct address_space *mapping,
					pgoff_t index, unsigned flags)
{
	int status;
	gfp_t gfp_mask;
	struct page *page;
	gfp_t gfp_notmask = 0;

	gfp_mask = mapping_gfp_mask(mapping) | __GFP_WRITE;
	if (flags & AOP_FLAG_NOFS)
		gfp_notmask = __GFP_FS;
repeat:
	page = find_lock_page(mapping, index);
	if (page)
		goto found;

	page = __page_cache_alloc(gfp_mask & ~gfp_notmask);
	if (!page)
		return NULL;
	status = add_to_page_cache_lru(page, mapping, index,
						GFP_KERNEL & ~gfp_notmask);
	if (unlikely(status)) {
		page_cache_release(page);
		if (status == -EEXIST)
			goto repeat;
		return NULL;
	}
found:
	wait_on_page_writeback(page);
	return page;
}
EXPORT_SYMBOL(grab_cache_page_write_begin);

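/*
 * The buffered-write loop: for each pagecache-page-sized chunk, prefault the
 * user buffer, call ->write_begin(), copy the data in with pagefaults
 * disabled, then call ->write_end() and balance dirty pages.  A zero-byte
 * atomic copy falls back to a single-segment copy to avoid livelocking.
 */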
static ssize_t generic_perform_write(struct file *file,
				struct iov_iter *i, loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = 0;

	/*
	 * Copies from kernel address space cannot fail (NFSD is a big user).
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		flags |= AOP_FLAG_UNINTERRUPTIBLE;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
						iov_iter_count(i));

again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
						&page, &fsdata);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
		pagefault_enable();
		flush_dcache_page(page);

		mark_page_accessed(page);
		status = a_ops->write_end(file, mapping, pos, bytes, copied,
						page, fsdata);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fall back here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;

		balance_dirty_pages_ratelimited(mapping);
		if (fatal_signal_pending(current)) {
			status = -EINTR;
			break;
		}
	} while (iov_iter_count(i));

	return written ? written : status;
}

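/*
 * Buffered part of the generic write path: wrap the iovec in an iov_iter,
 * run generic_perform_write() and fold the result into @written and *ppos.
 */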
ssize_t
generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos, loff_t *ppos,
		size_t count, ssize_t written)
{
	struct file *file = iocb->ki_filp;
	ssize_t status;
	struct iov_iter i;

	iov_iter_init(&i, iov, nr_segs, count, written);
	status = generic_perform_write(file, &i, pos);

	if (likely(status >= 0)) {
		written += status;
		*ppos = pos + status;
	}

	return written ? written : status;
}
EXPORT_SYMBOL(generic_file_buffered_write);

/**
 * __generic_file_aio_write - write data to a file
 * @iocb:	IO state structure (file, offset, etc.)
 * @iov:	vector with data to write
 * @nr_segs:	number of segments in the vector
 * @ppos:	position where to write
 *
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and calls proper subroutines depending on whether we
 * do direct IO or a standard buffered write.
 *
 * It expects i_mutex to be grabbed unless we work on a block device or similar
 * object which does not need locking at all.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_mutex.
 */
ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				 unsigned long nr_segs, loff_t *ppos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t ocount;		/* original count */
	size_t count;		/* after file limit checks */
	struct inode *inode = mapping->host;
	loff_t pos;
	ssize_t written;
	ssize_t err;

	ocount = 0;
	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err)
		return err;

	count = ocount;
	pos = *ppos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;
	written = 0;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	file_update_time(file);

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (unlikely(file->f_flags & O_DIRECT)) {
		loff_t endbyte;
		ssize_t written_buffered;

		written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
							ppos, count, ocount);
		if (written < 0 || written == count)
			goto out;
		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		pos += written;
		count -= written;
		written_buffered = generic_file_buffered_write(iocb, iov,
						nr_segs, pos, ppos, count,
						written);
		/*
		 * If generic_file_buffered_write() returned a synchronous error
		 * then we want to return the number of bytes which were
		 * direct-written, or the error code if that was zero. Note
		 * that this differs from normal direct-io semantics, which
		 * will return -EFOO even if some bytes were written.
		 */
		if (written_buffered < 0) {
			err = written_buffered;
			goto out;
		}

		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics.
		 */
		endbyte = pos + written_buffered - written - 1;
		err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
		if (err == 0) {
			written = written_buffered;
			invalidate_mapping_pages(mapping,
						 pos >> PAGE_CACHE_SHIFT,
						 endbyte >> PAGE_CACHE_SHIFT);
		} else {
			/*
			 * We don't know how much we wrote, so just return
			 * the number of bytes which were direct-written.
			 */
		}
	} else {
		written = generic_file_buffered_write(iocb, iov, nr_segs,
				pos, ppos, count, written);
	}
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
EXPORT_SYMBOL(__generic_file_aio_write);

/**
 * generic_file_aio_write - write data to a file
 * @iocb:	IO state structure
 * @iov:	vector with data to write
 * @nr_segs:	number of segments in the vector
 * @pos:	position in file where to write
 *
 * This is a wrapper around __generic_file_aio_write() to be used by most
 * filesystems. It takes care of syncing the file in case of O_SYNC writes
 * and acquires i_mutex as needed.
 */
ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct blk_plug plug;
	ssize_t ret;

	BUG_ON(iocb->ki_pos != pos);

	mutex_lock(&inode->i_mutex);
	blk_start_plug(&plug);
	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0 || ret == -EIOCBQUEUED) {
		ssize_t err;

		err = generic_write_sync(file, pos, ret);
		if (err < 0 && ret > 0)
			ret = err;
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(generic_file_aio_write);

/**
 * try_to_release_page() - release old fs-specific metadata on a page
 *
 * @page: the page which the kernel is trying to free
 * @gfp_mask: memory allocation flags (and I/O mode)
 *
 * The address_space is asked to try to release any data held against the
 * page (presumably at page->private). If the release was successful,
 * return `1'. Otherwise return zero.
 *
 * This may also be called if PG_fscache is set on a page, indicating that the
 * page is known to the local caching routines.
 *
 * The @gfp_mask argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
 */
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
	struct address_space * const mapping = page->mapping;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping && mapping->a_ops->releasepage)
		return mapping->a_ops->releasepage(page, gfp_mask);
	return try_to_free_buffers(page);
}
EXPORT_SYMBOL(try_to_release_page);