/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"
#include "truncate.h"

#include <trace/events/ext4.h>
#define MPAGE_DA_EXTENT_TAIL 0x01

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}
static void ext4_invalidatepage(struct page *page, unsigned long offset);
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create);
static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	/* i_blocks counts 512-byte sectors, so convert the xattr block */
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}
/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}
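
/*
 * Illustrative sketch (not part of the original file): a truncate-style
 * loop might restart its handle when credits run low.  The helpers
 * example_more_work() and example_free_some_blocks() are hypothetical;
 * ext4_blocks_for_truncate() comes from truncate.h above.
 */
#if 0	/* example only */
static int example_truncate_loop(handle_t *handle, struct inode *inode)
{
	int err = 0;

	while (example_more_work(inode)) {
		if (!ext4_handle_has_enough_credits(handle, 3)) {
			/* commits everything dirtied so far */
			err = ext4_truncate_restart_trans(handle, inode,
					ext4_blocks_for_truncate(inode));
			if (err)
				break;
		}
		err = example_free_some_blocks(handle, inode);
		if (err)
			break;
	}
	return err;
}
#endif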
/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	trace_ext4_evict_inode(inode);

	ext4_ioend_wait(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data, dirty buffers are tracked only in the
		 * journal.  So although mm thinks everything is clean and
		 * ready for reaping, the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed.  Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss.  Also even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped and thus the user could see stale data if they
		 * try to read them before the transaction is checkpointed.
		 * So be careful and force everything to disk here...  We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use page cache.
		 */
		if (ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_log_start_commit(journal, commit_tid);
			jbd2_log_wait_commit(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages(&inode->i_data, 0);
		goto no_delete;
	}

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			ext4_orphan_del(NULL, inode);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}
#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Calculate the number of metadata blocks needed to reserve
 * in order to allocate a block located at @lblock.
 */
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return ext4_ext_calc_metadata_amount(inode, lblock);

	return ext4_ind_calc_metadata_amount(inode, lblock);
}
/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks\n",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
			   used + ei->i_allocated_meta_blocks);
	ei->i_allocated_meta_blocks = 0;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated.  So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}
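
/*
 * Worked example (illustrative, not from the original source): suppose a
 * delalloc write reserved 5 data blocks for an inode.  When 3 of them are
 * finally allocated at writeback time, the caller invokes
 *
 *	ext4_da_update_reserve_space(inode, 3, 1);
 *
 * which drops i_reserved_data_blocks from 5 to 2, releases whatever was
 * accumulated in i_allocated_meta_blocks, and (because quota_claim == 1)
 * converts the quota reservation for those 3 blocks into a real claim.
 */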
static int __check_block_validity(struct inode *inode, const char *func,
				  unsigned int line,
				  struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_len);
		return -EIO;
	}
	return 0;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))
/*
 * Return the number of contiguous dirty pages in a given inode
 * starting at page frame idx.
 */
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
				    unsigned int max_pages)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t	index;
	struct pagevec pvec;
	pgoff_t num = 0;
	int i, nr_pages, done = 0;

	if (max_pages == 0)
		return 0;
	pagevec_init(&pvec, 0);
	while (!done) {
		index = idx;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      (pgoff_t)PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page) ||
			    PageWriteback(page) ||
			    page->index != idx) {
				done = 1;
				unlock_page(page);
				break;
			}
			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					if (!buffer_delay(bh) &&
					    !buffer_unwritten(bh))
						done = 1;
					bh = bh->b_this_page;
				} while (!done && (bh != head));
			}
			unlock_page(page);
			if (done)
				break;
			idx++;
			num++;
			if (num >= max_pages) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
	}
	return num;
}
/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head, and marks it
 * mapped.
 *
 * If the file is extents based, it will call ext4_ext_map_blocks();
 * otherwise, ext4_ind_map_blocks() is called to handle indirect-mapping
 * based files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and uninitialized,
 * the result buffer head is unmapped.  If create == 1, it will make sure
 * the buffer head is mapped.
 *
 * It returns 0 if a plain look up failed (blocks have not been allocated);
 * in that case the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	int retval;

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns with create == 0
	 * and the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		return retval;

	/*
	 * When we call get_blocks without the create flag, the
	 * BH_Unwritten flag could have gotten set if the blocks
	 * requested were part of an uninitialized extent.  We need to
	 * clear this flag now that we are committed to convert all or
	 * part of the uninitialized extent to be an initialized
	 * extent.  This is because we need to avoid the combination
	 * of BH_Unwritten and BH_Mapped flags being simultaneously
	 * set on the buffer_head.
	 */
	map->m_flags &= ~EXT4_MAP_UNWRITTEN;

	/*
	 * New block allocation and/or writing to an uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with the create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * If the caller is from the delayed allocation writeout path,
	 * we have already reserved fs blocks for allocation; let the
	 * underlying get_block() function know to avoid double accounting.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
	/*
	 * We need to check for EXT4_INODE_EXTENTS here because migrate
	 * could have changed the inode type in between.
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing the migrate flag.
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now.  We
		 * don't support fallocate for non-extent files, so we can
		 * update the reserved space here.
		 */
		if ((retval > 0) &&
			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}
	return retval;
}
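
/*
 * Illustrative sketch (not part of the original file): a minimal caller
 * that maps a single logical block, allocating it if necessary.
 */
#if 0	/* example only */
static int example_map_one_block(handle_t *handle, struct inode *inode,
				 ext4_lblk_t lblk, ext4_fsblk_t *pblk)
{
	struct ext4_map_blocks map;
	int ret;

	map.m_lblk = lblk;
	map.m_len = 1;
	ret = ext4_map_blocks(handle, inode, &map, EXT4_GET_BLOCKS_CREATE);
	if (ret <= 0)			/* 0: lookup failed, <0: error */
		return ret ? ret : -EIO;
	*pblk = map.m_pblk;
	return 0;
}
#endif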
/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	handle_t *handle = ext4_journal_current_handle();
	struct ext4_map_blocks map;
	int ret = 0, started = 0;
	int dio_credits;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	if (flags && !handle) {
		/* Direct IO write... */
		if (map.m_len > DIO_MAX_BLOCKS)
			map.m_len = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
		handle = ext4_journal_start(inode, dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			return ret;
		}
		started = 1;
	}

	ret = ext4_map_blocks(handle, inode, &map, flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}
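
/*
 * Illustrative note (not part of the original file): ext4_get_block()
 * matches the generic get_block_t prototype, so buffer/mpage helpers can
 * drive it directly, e.g.
 *
 *	mpage_readpage(page, ext4_get_block);
 *
 * Any helper taking a get_block_t callback works the same way.
 */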
/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);

	if (err < 0)
		*errp = err;
	if (err <= 0)
		return NULL;
	*errp = 0;

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (!bh) {
		*errp = -EIO;
		return NULL;
	}
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		fatal = ext4_journal_get_create_access(handle, bh);
		if (!fatal && !buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (!fatal)
			fatal = err;
	} else {
		BUFFER_TRACE(bh, "not a new buffer");
	}
	if (fatal) {
		*errp = fatal;
		brelse(bh);
		bh = NULL;
	}
	return bh;
}
struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ_META, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}
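
/*
 * Illustrative sketch (not part of the original file): directory code
 * typically reads a metadata block through ext4_bread() and inspects
 * *err when NULL comes back (a hole returns NULL with err == 0).
 */
#if 0	/* example only */
static struct buffer_head *example_read_dir_block(struct inode *dir,
						  ext4_lblk_t block)
{
	struct buffer_head *bh;
	int err = 0;

	bh = ext4_bread(NULL, dir, block, 0, &err);	/* create == 0, no handle */
	if (!bh && err)
		ext4_error(dir->i_sb, "error %d reading directory block %u",
			   err, block);
	return bh;
}
#endif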
static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
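
/*
 * Illustrative note (not part of the original file): a typical call walks
 * the buffers covering byte range [from, to) of a page and applies a
 * journaling callback to each, as ext4_write_begin() does below:
 *
 *	walk_page_buffers(handle, page_buffers(page), from, to,
 *			  NULL, do_journal_get_write_access);
 */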
/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page().  In that case, we *know* that ext4_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
				       struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers.  Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues.  Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}
static int ext4_get_block_write(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create);
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		page_cache_release(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again.  Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}
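
/*
 * Illustrative note (not part of the original file): ->write_begin pairs
 * with one of the ->write_end handlers below.  In outline, the generic
 * write path does, per chunk copied from userspace:
 *
 *	a_ops->write_begin(file, mapping, pos, len, flags, &page, &fsdata);
 *	...copy user data into the page...
 *	a_ops->write_end(file, mapping, pos, len, copied, page, fsdata);
 *
 * so any block allocated here but not fully copied must be cleaned up by
 * the matching write_end (hence the orphan-list handling in both).
 */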
/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

static int ext4_generic_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}

	if (pos + copied > EXT4_I(inode)->i_disksize) {
		/* We need to mark the inode dirty even if
		 * new_i_size is less than inode->i_size
		 * but greater than i_disksize (hint: delalloc).
		 */
		ext4_update_i_disksize(inode, (pos + copied));
		i_size_changed = 1;
	}
	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock.  First, it unnecessarily
	 * makes the holding time of page lock longer.  Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	return copied;
}
/*
 * We need to pick up the new inode size which generic_commit_write gave us
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext4_ordered_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_ordered_write_end(inode, pos, len, copied);
	ret = ext4_jbd2_file_inode(handle, inode);

	if (ret == 0) {
		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
					      page, fsdata);
		copied = ret2;
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			/* if we have allocated more blocks and copied
			 * less, we will have blocks allocated outside
			 * inode->i_size, so truncate them
			 */
			ext4_orphan_add(handle, inode);
		if (ret2 < 0)
			ret = ret2;
	}
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
static int ext4_writeback_write_end(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_writeback_write_end(inode, pos, len, copied);
	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
				      page, fsdata);
	copied = ret2;
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* if we have allocated more blocks and copied
		 * less, we will have blocks allocated outside
		 * inode->i_size, so truncate them
		 */
		ext4_orphan_add(handle, inode);

	if (ret2 < 0)
		ret = ret2;

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	loff_t new_i_size;

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (copied < len) {
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from+copied, to);
	}

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
	if (!partial)
		SetPageUptodate(page);
	new_i_size = pos + copied;
	if (new_i_size > inode->i_size)
		i_size_write(inode, pos+copied);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	if (new_i_size > EXT4_I(inode)->i_disksize) {
		ext4_update_i_disksize(inode, new_i_size);
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	unlock_page(page);
	page_cache_release(page);
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* if we have allocated more blocks and copied
		 * less, we will have blocks allocated outside
		 * inode->i_size, so truncate them
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
/*
 * Reserve a single cluster located at lblock
 */
int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
{
	int retries = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int md_needed;
	int ret;

	/*
	 * Recalculate the amount of metadata blocks to reserve
	 * in order to allocate nrblocks.
	 * The worst case is one extent per block.
	 */
repeat:
	spin_lock(&ei->i_block_reservation_lock);
	md_needed = EXT4_NUM_B2C(sbi,
				 ext4_calc_metadata_amount(inode, lblock));
	trace_ext4_da_reserve_space(inode, md_needed);
	spin_unlock(&ei->i_block_reservation_lock);

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end.  Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
	if (ret)
		return ret;
	/*
	 * We do still charge estimated metadata to the sb though;
	 * we cannot afford to run out of free blocks.
	 */
	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
			yield();
			goto repeat;
		}
		return -ENOSPC;
	}
	spin_lock(&ei->i_block_reservation_lock);
	ei->i_reserved_data_blocks++;
	ei->i_reserved_meta_blocks += md_needed;
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;	/* success */
}
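
/*
 * Illustrative note (not part of the original file): this reservation is
 * taken when a delalloc buffer is first set up at write time, and it is
 * returned on one of two paths: ext4_da_release_space() below when the
 * page is invalidated before writeout, or ext4_da_update_reserve_space()
 * above once the blocks are really allocated.
 */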
static void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	trace_ext4_da_release_space(inode, to_free);
	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * if there aren't enough reserved blocks, then the
		 * counter is messed up somewhere.  Since this
		 * function is called from invalidate page, it's
		 * harmless to return without any action.
		 */
		ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: "
			 "ino %lu, to_free %d with only %d reserved "
			 "data blocks\n", inode->i_ino, to_free,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 * Note that in case of bigalloc, i_reserved_meta_blocks,
		 * i_reserved_data_blocks, etc. refer to number of clusters.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}

	/* update fs dirty data blocks counter */
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}
static void ext4_da_page_release_reservation(struct page *page,
					     unsigned long offset)
{
	int to_release = 0;
	struct buffer_head *head, *bh;
	unsigned int curr_off = 0;
	struct inode *inode = page->mapping->host;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int num_clusters;

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;

		if ((offset <= curr_off) && (buffer_delay(bh))) {
			to_release++;
			clear_buffer_delay(bh);
		}
		curr_off = next_off;
	} while ((bh = bh->b_this_page) != head);

	/* If we have released all the blocks belonging to a cluster, then we
	 * need to release the reserved space for that cluster. */
	num_clusters = EXT4_NUM_B2C(sbi, to_release);
	while (num_clusters > 0) {
		ext4_fsblk_t lblk;
		lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
			((num_clusters - 1) << sbi->s_cluster_bits);
		if (sbi->s_cluster_ratio == 1 ||
		    !ext4_find_delalloc_cluster(inode, lblk, 1))
			ext4_da_release_space(inode, 1);

		num_clusters--;
	}
}

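/*
 * A worked example of the lblk computation above (illustrative numbers):
 * with 4K pages, 1K blocks (i_blkbits == 10, so PAGE_CACHE_SHIFT -
 * i_blkbits == 2) and a bigalloc cluster ratio of 4 (s_cluster_bits == 2),
 * page index 3 covers logical blocks 12..15.  Releasing all four delayed
 * blocks gives to_release == 4 and num_clusters == 1, so the loop probes
 * lblk = (3 << 2) + (0 << 2) = 12, the first block of that cluster.
 */
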
/*
 * Delayed allocation stuff
 */

/*
 * mpage_da_submit_io - walks through the extent of pages and tries to
 * write them out with the writepage() callback
 *
 * @mpd->inode: inode
 * @mpd->first_page: first page of the extent
 * @mpd->next_page: page after the last page of the extent
 *
 * By the time mpage_da_submit_io() is called we expect all blocks
 * to be allocated.  This may be wrong if allocation failed.
 *
 * As pages are already locked by write_cache_pages(), we can't use it
 */
static int mpage_da_submit_io(struct mpage_da_data *mpd,
			      struct ext4_map_blocks *map)
{
	struct pagevec pvec;
	unsigned long index, end;
	int ret = 0, err, nr_pages, i;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;
	loff_t size = i_size_read(inode);
	unsigned int len, block_start;
	struct buffer_head *bh, *page_bufs = NULL;
	int journal_data = ext4_should_journal_data(inode);
	sector_t pblock = 0, cur_logical = 0;
	struct ext4_io_submit io_submit;

	BUG_ON(mpd->next_page <= mpd->first_page);
	memset(&io_submit, 0, sizeof(io_submit));
	/*
	 * We need to start from the first_page to the next_page - 1
	 * to make sure we also write the mapped dirty buffer_heads.
	 * If we look at mpd->b_blocknr we would only be looking
	 * at the currently mapped buffer_heads.
	 */
	index = mpd->first_page;
	end = mpd->next_page - 1;

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			int commit_write = 0, skip_page = 0;
			struct page *page = pvec.pages[i];

			index = page->index;
			if (index > end)
				break;

			if (index == size >> PAGE_CACHE_SHIFT)
				len = size & ~PAGE_CACHE_MASK;
			else
				len = PAGE_CACHE_SIZE;
			if (map) {
				cur_logical = index << (PAGE_CACHE_SHIFT -
							inode->i_blkbits);
				pblock = map->m_pblk + (cur_logical -
							map->m_lblk);
			}
			index++;

			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));

			/*
			 * If the page does not have buffers (for
			 * whatever reason), try to create them using
			 * __block_write_begin.  If this fails,
			 * skip the page and move on.
			 */
			if (!page_has_buffers(page)) {
				if (__block_write_begin(page, 0, len,
						noalloc_get_block_write)) {
				skip_page:
					unlock_page(page);
					continue;
				}
				commit_write = 1;
			}

			bh = page_bufs = page_buffers(page);
			block_start = 0;
			do {
				if (!bh)
					goto skip_page;
				if (map && (cur_logical >= map->m_lblk) &&
				    (cur_logical <= (map->m_lblk +
						     (map->m_len - 1)))) {
					if (buffer_delay(bh)) {
						clear_buffer_delay(bh);
						bh->b_blocknr = pblock;
					}
					if (buffer_unwritten(bh) ||
					    buffer_mapped(bh))
						BUG_ON(bh->b_blocknr != pblock);
					if (map->m_flags & EXT4_MAP_UNINIT)
						set_buffer_uninit(bh);
					clear_buffer_unwritten(bh);
				}

				/* skip page if block allocation undone */
				if (buffer_delay(bh) || buffer_unwritten(bh))
					skip_page = 1;
				bh = bh->b_this_page;
				block_start += bh->b_size;
				cur_logical++;
				pblock++;
			} while (bh != page_bufs);

			if (skip_page)
				goto skip_page;

			if (commit_write)
				/* mark the buffer_heads as dirty & uptodate */
				block_commit_write(page, 0, len);

			clear_page_dirty_for_io(page);
			/*
			 * Delalloc doesn't support data journalling,
			 * but eventually maybe we'll lift this
			 * restriction.
			 */
			if (unlikely(journal_data && PageChecked(page)))
				err = __ext4_journalled_writepage(page, len);
			else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT))
				err = ext4_bio_write_page(&io_submit, page,
							  len, mpd->wbc);
			else if (buffer_uninit(page_bufs)) {
				ext4_set_bh_endio(page_bufs, inode);
				err = block_write_full_page_endio(page,
					noalloc_get_block_write,
					mpd->wbc, ext4_end_io_buffer_write);
			} else
				err = block_write_full_page(page,
					noalloc_get_block_write, mpd->wbc);

			if (!err)
				mpd->pages_written++;
			/*
			 * In error case, we have to continue because
			 * remaining pages are still locked
			 */
			if (ret == 0)
				ret = err;
		}
		pagevec_release(&pvec);
	}
	ext4_io_submit(&io_submit);
	return ret;
}

static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
{
	int nr_pages, i;
	pgoff_t index, end;
	struct pagevec pvec;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	index = mpd->first_page;
	end   = mpd->next_page - 1;
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			if (page->index > end)
				break;
			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));
			block_invalidatepage(page, 0);
			ClearPageUptodate(page);
			unlock_page(page);
		}
		index = pvec.pages[nr_pages - 1]->index + 1;
		pagevec_release(&pvec);
	}
	return;
}

static void ext4_print_free_blocks(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	printk(KERN_CRIT "Total free blocks count %lld\n",
	       EXT4_C2B(EXT4_SB(inode->i_sb),
			ext4_count_free_clusters(inode->i_sb)));
	printk(KERN_CRIT "Free/Dirty block details\n");
	printk(KERN_CRIT "free_blocks=%lld\n",
	       (long long) EXT4_C2B(EXT4_SB(inode->i_sb),
		percpu_counter_sum(&sbi->s_freeclusters_counter)));
	printk(KERN_CRIT "dirty_blocks=%lld\n",
	       (long long) EXT4_C2B(EXT4_SB(inode->i_sb),
		percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
	printk(KERN_CRIT "Block reservation details\n");
	printk(KERN_CRIT "i_reserved_data_blocks=%u\n",
	       EXT4_I(inode)->i_reserved_data_blocks);
	printk(KERN_CRIT "i_reserved_meta_blocks=%u\n",
	       EXT4_I(inode)->i_reserved_meta_blocks);
	return;
}

/*
 * mpage_da_map_and_submit - go through the given space, map it
 * if necessary, and then submit it for I/O
 *
 * @mpd - bh describing space
 *
 * The function skips space we know is already mapped to disk blocks.
 *
 */
static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
{
	int err, blks, get_blocks_flags;
	struct ext4_map_blocks map, *mapp = NULL;
	sector_t next = mpd->b_blocknr;
	unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
	loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
	handle_t *handle = NULL;

	/*
	 * If the blocks are mapped already, or we couldn't accumulate
	 * any blocks, then proceed immediately to the submission stage.
	 */
	if ((mpd->b_size == 0) ||
	    ((mpd->b_state & (1 << BH_Mapped)) &&
	     !(mpd->b_state & (1 << BH_Delay)) &&
	     !(mpd->b_state & (1 << BH_Unwritten))))
		goto submit_io;

	handle = ext4_journal_current_handle();
	BUG_ON(!handle);

	/*
	 * Call ext4_map_blocks() to allocate any delayed allocation
	 * blocks, or to convert an uninitialized extent to be
	 * initialized (in the case where we have written into
	 * one or more preallocated blocks).
	 *
	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
	 * indicate that we are on the delayed allocation path.  This
	 * affects functions in many different parts of the allocation
	 * call path.  This flag exists primarily because we don't
	 * want to change *many* call functions, so ext4_map_blocks()
	 * will set the EXT4_STATE_DELALLOC_RESERVED flag once the
	 * inode's allocation semaphore is taken.
	 *
	 * If the blocks in question were delalloc blocks, set
	 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
	 * variables are updated after the blocks have been allocated.
	 */
	map.m_lblk = next;
	map.m_len = max_blocks;
	get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
	if (ext4_should_dioread_nolock(mpd->inode))
		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
	if (mpd->b_state & (1 << BH_Delay))
		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;

	blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
	if (blks < 0) {
		struct super_block *sb = mpd->inode->i_sb;

		err = blks;
		/*
		 * If get block returns EAGAIN or ENOSPC and there
		 * appears to be free blocks we will just let
		 * mpage_da_submit_io() unlock all of the pages.
		 */
		if (err == -EAGAIN)
			goto submit_io;

		if (err == -ENOSPC && ext4_count_free_clusters(sb)) {
			mpd->retval = err;
			goto submit_io;
		}

		/*
		 * get block failure will cause us to loop in
		 * writepages, because a_ops->writepage won't be able
		 * to make progress.  The page will be redirtied by
		 * writepage and writepages will again try to write
		 * the same.
		 */
		if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) {
			ext4_msg(sb, KERN_CRIT,
				 "delayed block allocation failed for inode %lu "
				 "at logical offset %llu with max blocks %zd "
				 "with error %d", mpd->inode->i_ino,
				 (unsigned long long) next,
				 mpd->b_size >> mpd->inode->i_blkbits, err);
			ext4_msg(sb, KERN_CRIT,
				"This should not happen!! Data will be lost\n");
			if (err == -ENOSPC)
				ext4_print_free_blocks(mpd->inode);
		}
		/* invalidate all the pages */
		ext4_da_block_invalidatepages(mpd);

		/* Mark this page range as having been completed */
		mpd->io_done = 1;
		return;
	}
	BUG_ON(blks == 0);

	mapp = &map;
	if (map.m_flags & EXT4_MAP_NEW) {
		struct block_device *bdev = mpd->inode->i_sb->s_bdev;
		int i;

		for (i = 0; i < map.m_len; i++)
			unmap_underlying_metadata(bdev, map.m_pblk + i);

		if (ext4_should_order_data(mpd->inode)) {
			err = ext4_jbd2_file_inode(handle, mpd->inode);
			if (err)
				/* Only if the journal is aborted */
				return;
		}
	}

	/*
	 * Update on-disk size along with block allocation.
	 */
	disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
	if (disksize > i_size_read(mpd->inode))
		disksize = i_size_read(mpd->inode);
	if (disksize > EXT4_I(mpd->inode)->i_disksize) {
		ext4_update_i_disksize(mpd->inode, disksize);
		err = ext4_mark_inode_dirty(handle, mpd->inode);
		if (err)
			ext4_error(mpd->inode->i_sb,
				   "Failed to mark inode %lu dirty",
				   mpd->inode->i_ino);
	}

submit_io:
	mpage_da_submit_io(mpd, mapp);
	mpd->io_done = 1;
}

#define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
		(1 << BH_Delay) | (1 << BH_Unwritten))

/*
 * mpage_add_bh_to_extent - try to add one more block to extent of blocks
 *
 * @mpd->lbh - extent of blocks
 * @logical - logical number of the block in the file
 * @bh - bh of the block (used to access block's state)
 *
 * The function is used to collect contiguous blocks in the same state
 */
static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
				   sector_t logical, size_t b_size,
				   unsigned long b_state)
{
	sector_t next;
	int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;

	/*
	 * XXX Don't go larger than mballoc is willing to allocate
	 * This is a stopgap solution.  We eventually need to fold
	 * mpage_da_submit_io() into this function and then call
	 * ext4_map_blocks() multiple times in a loop
	 */
	if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
		goto flush_it;

	/* check if the reserved journal credits might overflow */
	if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) {
		if (nrblocks >= EXT4_MAX_TRANS_DATA) {
			/*
			 * With non-extent format we are limited by the journal
			 * credit available.  Total credit needed to insert
			 * nrblocks contiguous blocks is dependent on the
			 * nrblocks.  So limit nrblocks.
			 */
			goto flush_it;
		} else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
				EXT4_MAX_TRANS_DATA) {
			/*
			 * Adding the new buffer_head would make it cross the
			 * allowed limit for which we have journal credit
			 * reserved.  So limit the new bh->b_size
			 */
			b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
						mpd->inode->i_blkbits;
			/* we will do mpage_da_submit_io in the next loop */
		}
	}
	/*
	 * First block in the extent
	 */
	if (mpd->b_size == 0) {
		mpd->b_blocknr = logical;
		mpd->b_size = b_size;
		mpd->b_state = b_state & BH_FLAGS;
		return;
	}

	next = mpd->b_blocknr + nrblocks;
	/*
	 * Can we merge the block to our big extent?
	 */
	if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
		mpd->b_size += b_size;
		return;
	}

flush_it:
	/*
	 * We couldn't merge the block to our extent, so we
	 * need to flush current extent and start new one
	 */
	mpage_da_map_and_submit(mpd);
	return;
}

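/*
 * A worked example of the credit clamp above (illustrative numbers): with
 * 4K blocks on an indirect-mapped (non-extent) inode, suppose the current
 * extent already holds nrblocks == 60 and a 32K buffer range (8 more
 * blocks) arrives.  If EXT4_MAX_TRANS_DATA were 64, then 60 + 8 > 64, so
 * b_size is clamped to (64 - 60) << 12 = 16K and the extent fills up
 * exactly; the leftover blocks are submitted on a later pass, as the
 * "next loop" comment notes.
 */
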
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
{
	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
}

/*
 * This is a special get_blocks_t callback which is used by
 * ext4_da_write_begin().  It will either return a mapped block or
 * reserve space for a single block.
 *
 * For a delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
 * We also have b_blocknr = -1 and b_bdev initialized properly
 *
 * For an unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
 * We also have b_blocknr = the physical block mapping the unwritten extent
 * and b_bdev initialized properly.
 */
static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
				  struct buffer_head *bh, int create)
{
	struct ext4_map_blocks map;
	int ret = 0;
	sector_t invalid_block = ~((sector_t) 0xffff);

	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
		invalid_block = ~0;

	BUG_ON(create == 0);
	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);

	map.m_lblk = iblock;
	map.m_len = 1;

	/*
	 * first, we need to know whether the block is allocated already
	 * preallocated blocks are unmapped but should be treated
	 * the same as allocated blocks.
	 */
	ret = ext4_map_blocks(NULL, inode, &map, 0);
	if (ret < 0)
		return ret;
	if (ret == 0) {
		if (buffer_delay(bh))
			return 0; /* Not sure this could or should happen */
		/*
		 * XXX: __block_write_begin() unmaps passed block, is it OK?
		 */
		/* If the block was allocated from previously allocated cluster,
		 * then we don't need to reserve it again. */
		if (!(map.m_flags & EXT4_MAP_FROM_CLUSTER)) {
			ret = ext4_da_reserve_space(inode, iblock);
			if (ret)
				/* not enough space to reserve */
				return ret;
		}

		map_bh(bh, inode->i_sb, invalid_block);
		set_buffer_new(bh);
		set_buffer_delay(bh);
		return 0;
	}

	map_bh(bh, inode->i_sb, map.m_pblk);
	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;

	if (buffer_unwritten(bh)) {
		/* A delayed write to unwritten bh should be marked
		 * new and mapped.  Mapped ensures that we don't do
		 * get_block multiple times when we write to the same
		 * offset and new ensures that we do proper zero out
		 * for partial write.
		 */
		set_buffer_new(bh);
		set_buffer_mapped(bh);
	}
	return 0;
}

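/*
 * Note on the invalid_block sentinel used above: a delayed buffer_head is
 * mapped to a block number that can never be a real block, so stray uses
 * of b_blocknr are easy to spot.  ~0xffff appears to be preferred over
 * plain ~0 so the delayed sentinel stands out from other "-1" users; on a
 * filesystem large enough that ~0xffff could be a valid block number, the
 * code falls back to ~0.
 */
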
/*
 * This function is used as a standard get_block_t callback function
 * when there is no desire to allocate any blocks.  It is used as a
 * callback function for block_write_begin() and block_write_full_page().
 * These functions should only try to map a single block at a time.
 *
 * Since this function doesn't do block allocations even if the caller
 * requests it by passing in create=1, it is critically important that
 * any caller checks to make sure that any buffer heads returned by
 * this function are either all already mapped or marked for delayed
 * allocation before calling block_write_full_page().  Otherwise,
 * b_blocknr could be left uninitialized, and the page write functions
 * will be taken by surprise.
 */
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
	return _ext4_get_block(inode, iblock, bh_result, 0);
}

static int bget_one(handle_t *handle, struct buffer_head *bh)
{
	get_bh(bh);
	return 0;
}

static int bput_one(handle_t *handle, struct buffer_head *bh)
{
	put_bh(bh);
	return 0;
}

static int __ext4_journalled_writepage(struct page *page,
				       unsigned int len)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct buffer_head *page_bufs;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	ClearPageChecked(page);
	page_bufs = page_buffers(page);
	BUG_ON(!page_bufs);
	walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
	/* As soon as we unlock the page, it can go away, but we have
	 * references to buffers so we are safe */
	unlock_page(page);

	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	BUG_ON(!ext4_handle_valid(handle));

	ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
				do_journal_get_write_access);

	err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
				write_end_fn);
	if (ret == 0)
		ret = err;
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;

	walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
out:
	return ret;
}

static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);

/*
 * Note that we don't need to start a transaction unless we're journaling data
 * because we should have holes filled from ext4_page_mkwrite().  We don't
 * even need to file the inode to the transaction's list in ordered mode
 * because if we are writing back data added by write(), the inode is already
 * there and if we are writing back data modified via mmap(), no one
 * guarantees in which transaction the data will hit the disk.  In case we
 * are journaling data, we cannot start a transaction directly because
 * transaction start ranks above page lock so we have to do some magic.
 *
 * This function can get called via...
 *   - ext4_da_writepages after taking page lock (have journal handle)
 *   - journal_submit_inode_data_buffers (no journal handle)
 *   - shrink_page_list via pdflush (no journal handle)
 *   - grab_page_cache when doing write_begin (have journal handle)
 *
 * We don't do any block allocation in this function.  If we have a page
 * with multiple blocks we need to write those buffer_heads that are mapped.
 * This is important for mmap-based writes.  So if we do with blocksize 1K
 * truncate(f, 1024);
 * a = mmap(f, 0, 4096);
 * a[0] = 'a';
 * truncate(f, 4096);
 * we have in the page the first buffer_head mapped via the page_mkwrite
 * callback, but other buffer_heads would be unmapped but dirty (dirtying
 * done via do_wp_page).  So writepage should write the first block.  If we
 * modify the mmap area beyond 1024 we will again get a page fault and the
 * page_mkwrite callback will do the block allocation and mark the
 * buffer_heads mapped.
 *
 * We redirty the page if we have any buffer_heads that are either delayed
 * or unwritten in the page.
 *
 * We can get recursively called as shown below.
 *
 *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *		ext4_writepage()
 *
 * But since we don't do any block allocation we should not deadlock.
 * The page also has its dirty flag cleared, so we don't recurse on the
 * page lock.
 */
static int ext4_writepage(struct page *page,
			  struct writeback_control *wbc)
{
	int ret = 0, commit_write = 0;
	loff_t size;
	unsigned int len;
	struct buffer_head *page_bufs = NULL;
	struct inode *inode = page->mapping->host;

	trace_ext4_writepage(page);
	size = i_size_read(inode);
	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;

	/*
	 * If the page does not have buffers (for whatever reason),
	 * try to create them using __block_write_begin.  If this
	 * fails, redirty the page and move on.
	 */
	if (!page_has_buffers(page)) {
		if (__block_write_begin(page, 0, len,
					noalloc_get_block_write)) {
		redirty_page:
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return 0;
		}
		commit_write = 1;
	}
	page_bufs = page_buffers(page);
	if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
			      ext4_bh_delay_or_unwritten)) {
		/*
		 * We don't want to do block allocation, so redirty
		 * the page and return.  We may reach here when we do
		 * a journal commit via journal_submit_inode_data_buffers.
		 * We can also reach here via shrink_page_list
		 */
		goto redirty_page;
	}
	if (commit_write)
		/* now mark the buffer_heads as dirty and uptodate */
		block_commit_write(page, 0, len);

	if (PageChecked(page) && ext4_should_journal_data(inode))
		/*
		 * It's mmapped pagecache.  Add buffers and journal it.  There
		 * doesn't seem much point in redirtying the page here.
		 */
		return __ext4_journalled_writepage(page, len);

	if (buffer_uninit(page_bufs)) {
		ext4_set_bh_endio(page_bufs, inode);
		ret = block_write_full_page_endio(page, noalloc_get_block_write,
					    wbc, ext4_end_io_buffer_write);
	} else
		ret = block_write_full_page(page, noalloc_get_block_write,
					    wbc);

	return ret;
}

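/*
 * For illustration, a runnable userspace rendering of the blocksize-1K
 * mmap scenario from the comment above ext4_writepage() (assumes a file
 * "f" on a 1K-blocksize ext4 mount; not part of this translation unit):
 */
#if 0
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	char *a;
	int fd = open("f", O_RDWR | O_CREAT, 0644);

	if (fd < 0 || ftruncate(fd, 1024))
		return 1;
	a = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (a == MAP_FAILED)
		return 1;
	a[0] = 'a';	/* page_mkwrite maps only the first 1K block */
	if (ftruncate(fd, 4096))
		return 1;
	/* writepage must now write just that first mapped block */
	munmap(a, 4096);
	close(fd);
	return 0;
}
#endif
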
/*
 * This is called via ext4_da_writepages() to
 * calculate the total number of credits to reserve to fit
 * a single extent allocation into a single transaction,
 * ext4_da_writepages() will loop calling this before
 * the block allocation.
 */
static int ext4_da_writepages_trans_blocks(struct inode *inode)
{
	int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;

	/*
	 * With non-extent format the journal credit needed to
	 * insert nrblocks contiguous blocks is dependent on the
	 * number of contiguous blocks.  So we will limit the
	 * number of contiguous blocks to a sane value.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
	    (max_blocks > EXT4_MAX_TRANS_DATA))
		max_blocks = EXT4_MAX_TRANS_DATA;

	return ext4_chunk_trans_blocks(inode, max_blocks);
}

/*
 * write_cache_pages_da - walk the list of dirty pages of the given
 * address space and accumulate pages that need writing, and call
 * mpage_da_map_and_submit to map a single contiguous memory region
 * and then write them.
 */
static int write_cache_pages_da(struct address_space *mapping,
				struct writeback_control *wbc,
				struct mpage_da_data *mpd,
				pgoff_t *done_index)
{
	struct buffer_head *bh, *head;
	struct inode *inode = mapping->host;
	struct pagevec pvec;
	unsigned int nr_pages;
	sector_t logical;
	pgoff_t index, end;
	long nr_to_write = wbc->nr_to_write;
	int i, tag, ret = 0;

	memset(mpd, 0, sizeof(struct mpage_da_data));
	mpd->wbc = wbc;
	mpd->inode = inode;
	pagevec_init(&pvec, 0);
	index = wbc->range_start >> PAGE_CACHE_SHIFT;
	end = wbc->range_end >> PAGE_CACHE_SHIFT;

	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

	*done_index = index;
	while (index <= end) {
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			return 0;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or
			 * even swizzled back from swapper_space to tmpfs file
			 * mapping. However, page->index will not change
			 * because we have a reference on the page.
			 */
			if (page->index > end)
				goto out;

			*done_index = page->index + 1;

			/*
			 * If we can't merge this page, and we have
			 * accumulated a contiguous region, write it
			 */
			if ((mpd->next_page != page->index) &&
			    (mpd->next_page != mpd->first_page)) {
				mpage_da_map_and_submit(mpd);
				goto ret_extent_tail;
			}

			lock_page(page);

			/*
			 * If the page is no longer dirty, or its
			 * mapping no longer corresponds to inode we
			 * are writing (which means it has been
			 * truncated or invalidated), or the page is
			 * already under writeback and we are not
			 * doing a data integrity writeback, skip the page
			 */
			if (!PageDirty(page) ||
			    (PageWriteback(page) &&
			     (wbc->sync_mode == WB_SYNC_NONE)) ||
			    unlikely(page->mapping != mapping)) {
				unlock_page(page);
				continue;
			}

			wait_on_page_writeback(page);
			BUG_ON(PageWriteback(page));

			if (mpd->next_page != page->index)
				mpd->first_page = page->index;
			mpd->next_page = page->index + 1;
			logical = (sector_t) page->index <<
				(PAGE_CACHE_SHIFT - inode->i_blkbits);

			if (!page_has_buffers(page)) {
				mpage_add_bh_to_extent(mpd, logical,
						       PAGE_CACHE_SIZE,
						       (1 << BH_Dirty) | (1 << BH_Uptodate));
				if (mpd->io_done)
					goto ret_extent_tail;
			} else {
				/*
				 * Page with regular buffer heads,
				 * just add all dirty ones
				 */
				head = page_buffers(page);
				bh = head;
				do {
					BUG_ON(buffer_locked(bh));
					/*
					 * We need to try to allocate
					 * unmapped blocks in the same page.
					 * Otherwise we won't make progress
					 * with the page in ext4_writepage
					 */
					if (ext4_bh_delay_or_unwritten(NULL, bh)) {
						mpage_add_bh_to_extent(mpd, logical,
								       bh->b_size,
								       bh->b_state);
						if (mpd->io_done)
							goto ret_extent_tail;
					} else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
						/*
						 * mapped dirty buffer.  We need
						 * to update the b_state
						 * because we look at b_state
						 * in mpage_da_map_blocks.  We
						 * don't update b_size because
						 * if we find an unmapped
						 * buffer_head later we need to
						 * use the b_state flag of that
						 * buffer_head.
						 */
						if (mpd->b_size == 0)
							mpd->b_state = bh->b_state & BH_FLAGS;
					}
					logical++;
				} while ((bh = bh->b_this_page) != head);
			}

			if (nr_to_write > 0) {
				nr_to_write--;
				if (nr_to_write == 0 &&
				    wbc->sync_mode == WB_SYNC_NONE)
					/*
					 * We stop writing back only if we are
					 * not doing integrity sync. In case of
					 * integrity sync we have to keep going
					 * because someone may be concurrently
					 * dirtying pages, and we might have
					 * synced a lot of newly appeared dirty
					 * pages, but have not synced all of the
					 * old dirty pages.
					 */
					goto out;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return 0;
ret_extent_tail:
	ret = MPAGE_DA_EXTENT_TAIL;
out:
	pagevec_release(&pvec);
	cond_resched();
	return ret;
}

static int ext4_da_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	pgoff_t index;
	int range_whole = 0;
	handle_t *handle = NULL;
	struct mpage_da_data mpd;
	struct inode *inode = mapping->host;
	int pages_written = 0;
	unsigned int max_pages;
	int range_cyclic, cycled = 1, io_done = 0;
	int needed_blocks, ret = 0;
	long desired_nr_to_write, nr_to_writebump = 0;
	loff_t range_start = wbc->range_start;
	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
	pgoff_t done_index = 0;
	pgoff_t end;

	trace_ext4_da_writepages(inode, wbc);

	/*
	 * No pages to write? This is mainly a kludge to avoid starting
	 * a transaction for special inodes like journal inode on last iput()
	 * because that could violate lock ordering on umount
	 */
	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	/*
	 * If the filesystem has aborted, it is read-only, so return
	 * right away instead of dumping stack traces later on that
	 * will obscure the real source of the problem.  We test
	 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
	 * the latter could be true if the filesystem is mounted
	 * read-only, and in that case, ext4_da_writepages should
	 * *never* be called, so if that ever happens, we would want
	 * the stack trace.
	 */
	if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
		return -EROFS;

	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
		range_whole = 1;

	range_cyclic = wbc->range_cyclic;
	if (wbc->range_cyclic) {
		index = mapping->writeback_index;
		if (index)
			cycled = 0;
		wbc->range_start = index << PAGE_CACHE_SHIFT;
		wbc->range_end = LLONG_MAX;
		wbc->range_cyclic = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
	}

	/*
	 * This works around two forms of stupidity.  The first is in
	 * the writeback code, which caps the maximum number of pages
	 * written to be 1024 pages.  This is wrong on multiple
	 * levels; different architectures have a different page size,
	 * which changes the maximum amount of data which gets
	 * written.  Secondly, 4 megabytes is way too small.  XFS
	 * forces this value to be 16 megabytes by multiplying
	 * nr_to_write parameter by four, and then relies on its
	 * allocator to allocate larger extents to make them
	 * contiguous.  Unfortunately this brings us to the second
	 * stupidity, which is that ext4's mballoc code only allocates
	 * at most 2048 blocks.  So we force contiguous writes up to
	 * the number of dirty blocks in the inode, or
	 * sbi->max_writeback_mb_bump whichever is smaller.
	 */
	max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
	if (!range_cyclic && range_whole) {
		if (wbc->nr_to_write == LONG_MAX)
			desired_nr_to_write = wbc->nr_to_write;
		else
			desired_nr_to_write = wbc->nr_to_write * 8;
	} else
		desired_nr_to_write = ext4_num_dirty_pages(inode, index,
							   max_pages);
	if (desired_nr_to_write > max_pages)
		desired_nr_to_write = max_pages;

	if (wbc->nr_to_write < desired_nr_to_write) {
		nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
		wbc->nr_to_write = desired_nr_to_write;
	}

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);

	while (!ret && wbc->nr_to_write > 0) {

		/*
		 * we insert one extent at a time, so we only need the
		 * credits for a single extent allocation.
		 * journalled mode is currently not supported
		 * by delalloc
		 */
		BUG_ON(ext4_should_journal_data(inode));
		needed_blocks = ext4_da_writepages_trans_blocks(inode);

		/* start a new transaction*/
		handle = ext4_journal_start(inode, needed_blocks);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
			       "%ld pages, ino %lu; err %d", __func__,
				wbc->nr_to_write, inode->i_ino, ret);
			goto out_writepages;
		}

		/*
		 * Now call write_cache_pages_da() to find the next
		 * contiguous region of logical blocks that need
		 * blocks to be allocated by ext4 and submit them.
		 */
		ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
		/*
		 * If we have a contiguous extent of pages and we
		 * haven't done the I/O yet, map the blocks and submit
		 * them for I/O.
		 */
		if (!mpd.io_done && mpd.next_page != mpd.first_page) {
			mpage_da_map_and_submit(&mpd);
			ret = MPAGE_DA_EXTENT_TAIL;
		}
		trace_ext4_da_write_pages(inode, &mpd);
		wbc->nr_to_write -= mpd.pages_written;

		ext4_journal_stop(handle);

		if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
			/* commit the transaction which would
			 * free blocks released in the transaction
			 * and try again
			 */
			jbd2_journal_force_commit_nested(sbi->s_journal);
			ret = 0;
		} else if (ret == MPAGE_DA_EXTENT_TAIL) {
			/*
			 * Got one extent; now try with the
			 * rest of the pages.
			 */
			pages_written += mpd.pages_written;
			ret = 0;
			io_done = 1;
		} else if (wbc->nr_to_write)
			/*
			 * There is no more writeout needed, or we
			 * requested a nonblocking writeout and found
			 * the device congested.
			 */
			break;
	}
	if (!io_done && !cycled) {
		cycled = 1;
		index = 0;
		wbc->range_start = index << PAGE_CACHE_SHIFT;
		wbc->range_end = mapping->writeback_index - 1;
		goto retry;
	}

	/* Update index */
	wbc->range_cyclic = range_cyclic;
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		/*
		 * set the writeback_index so that range_cyclic
		 * mode will write it back later
		 */
		mapping->writeback_index = done_index;

out_writepages:
	wbc->nr_to_write -= nr_to_writebump;
	wbc->range_start = range_start;
	trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
	return ret;
}

#define FALL_BACK_TO_NONDELALLOC 1
static int ext4_nonda_switch(struct super_block *sb)
{
	s64 free_blocks, dirty_blocks;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/*
	 * switch to non delalloc mode if we are running low
	 * on free blocks.  The free block accounting via percpu
	 * counters can get slightly wrong with percpu_counter_batch getting
	 * accumulated on each CPU without updating global counters.
	 * Delalloc needs accurate free block accounting, so switch
	 * to non delalloc when we are near the error range.
	 */
	free_blocks  = EXT4_C2B(sbi,
		percpu_counter_read_positive(&sbi->s_freeclusters_counter));
	dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
	if (2 * free_blocks < 3 * dirty_blocks ||
		free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) {
		/*
		 * free block count is less than 150% of dirty blocks
		 * or free blocks is less than watermark
		 */
		return 1;
	}
	/*
	 * Even if we don't switch but are nearing capacity,
	 * start pushing delalloc when 1/2 of free blocks are dirty.
	 */
	if (free_blocks < 2 * dirty_blocks)
		writeback_inodes_sb_if_idle(sb);

	return 0;
}

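/*
 * A worked example of the checks above (illustrative numbers, assuming
 * dirty_blocks + EXT4_FREEBLOCKS_WATERMARK stays below free_blocks): with
 * free_blocks == 1200 and dirty_blocks == 1000, 2 * 1200 < 3 * 1000, i.e.
 * free is under 150% of dirty, so the caller falls back to non-delalloc
 * mode.  With free_blocks == 1800 the first test passes, but
 * 1800 < 2 * 1000 still kicks off background writeback to push delalloc
 * data out before the gap closes further.
 */
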
static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
			       loff_t pos, unsigned len, unsigned flags,
			       struct page **pagep, void **fsdata)
{
	int ret, retries = 0;
	struct page *page;
	pgoff_t index;
	struct inode *inode = mapping->host;
	handle_t *handle;
	loff_t page_len;

	index = pos >> PAGE_CACHE_SHIFT;

	if (ext4_nonda_switch(inode->i_sb)) {
		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
		return ext4_write_begin(file, mapping, pos,
					len, flags, pagep, fsdata);
	}
	*fsdata = (void *)0;
	trace_ext4_da_write_begin(inode, pos, len, flags);
retry:
	/*
	 * With delayed allocation, we don't log the i_disksize update
	 * if there is delayed block allocation.  But we still need
	 * to journal the i_disksize update if the write goes to the
	 * end of a file whose buffers are already mapped.
	 */
	handle = ext4_journal_start(inode, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}
	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
	if (ret < 0) {
		unlock_page(page);
		ext4_journal_stop(handle);
		page_cache_release(page);
		/*
		 * block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again.  Don't need
		 * i_size_read because we hold i_mutex.
		 */
		if (pos + len > inode->i_size)
			ext4_truncate_failed_write(inode);
	} else {
		page_len = pos & (PAGE_CACHE_SIZE - 1);
		if (page_len > 0) {
			ret = ext4_discard_partial_page_buffers_no_lock(handle,
				inode, page, pos - page_len, page_len,
				EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
		}
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}

/*
 * Check whether we should update i_disksize
 * when writing to the end of the file without requiring block allocation
 */
static int ext4_da_should_update_i_disksize(struct page *page,
					    unsigned long offset)
{
	struct buffer_head *bh;
	struct inode *inode = page->mapping->host;
	unsigned int idx;
	int i;

	bh = page_buffers(page);
	idx = offset >> inode->i_blkbits;

	for (i = 0; i < idx; i++)
		bh = bh->b_this_page;

	if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
		return 0;
	return 1;
}

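/*
 * A worked example of the walk above (illustrative numbers): with 1K
 * blocks (i_blkbits == 10) and offset == 3000 inside the page,
 * idx == 3000 >> 10 == 2, so the loop stops at the page's third
 * buffer_head.  i_disksize is only updated when that buffer is mapped
 * and neither delayed nor unwritten, i.e. the write to end of file
 * needed no block allocation.
 */
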
static int ext4_da_write_end(struct file *file,
			     struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned copied,
			     struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	handle_t *handle = ext4_journal_current_handle();
	loff_t new_i_size;
	unsigned long start, end;
	int write_mode = (int)(unsigned long)fsdata;
	loff_t page_len;

	if (write_mode == FALL_BACK_TO_NONDELALLOC) {
		if (ext4_should_order_data(inode)) {
			return ext4_ordered_write_end(file, mapping, pos,
					len, copied, page, fsdata);
		} else if (ext4_should_writeback_data(inode)) {
			return ext4_writeback_write_end(file, mapping, pos,
					len, copied, page, fsdata);
		} else {
			BUG();
		}
	}

	trace_ext4_da_write_end(inode, pos, len, copied);
	start = pos & (PAGE_CACHE_SIZE - 1);
	end = start + copied - 1;

	/*
	 * generic_write_end() will run mark_inode_dirty() if i_size
	 * changes.  So let's piggyback the i_disksize mark_inode_dirty
	 * into that.
	 */
	new_i_size = pos + copied;
	if (new_i_size > EXT4_I(inode)->i_disksize) {
		if (ext4_da_should_update_i_disksize(page, end)) {
			down_write(&EXT4_I(inode)->i_data_sem);
			if (new_i_size > EXT4_I(inode)->i_disksize) {
				/*
				 * Updating i_disksize when extending file
				 * without needing block allocation
				 */
				if (ext4_should_order_data(inode))
					ret = ext4_jbd2_file_inode(handle,
								   inode);

				EXT4_I(inode)->i_disksize = new_i_size;
			}
			up_write(&EXT4_I(inode)->i_data_sem);
			/* We need to mark the inode dirty even if
			 * new_i_size is less than inode->i_size
			 * but greater than i_disksize (hint: delalloc)
			 */
			ext4_mark_inode_dirty(handle, inode);
		}
	}

	ret2 = generic_write_end(file, mapping, pos, len, copied,
							page, fsdata);

	page_len = PAGE_CACHE_SIZE -
			((pos + copied - 1) & (PAGE_CACHE_SIZE - 1));

	if (page_len > 0) {
		ret = ext4_discard_partial_page_buffers_no_lock(handle,
			inode, page, pos + copied - 1, page_len,
			EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
	}

	copied = ret2;
	if (ret2 < 0)
		ret = ret2;
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	return ret ? ret : copied;
}

static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
{
	/*
	 * Drop reserved blocks
	 */
	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		goto out;

	ext4_da_page_release_reservation(page, offset);

out:
	ext4_invalidatepage(page, offset);

	return;
}

/*
 * Force all delayed allocation blocks to be allocated for a given inode.
 */
int ext4_alloc_da_blocks(struct inode *inode)
{
	trace_ext4_alloc_da_blocks(inode);

	if (!EXT4_I(inode)->i_reserved_data_blocks &&
	    !EXT4_I(inode)->i_reserved_meta_blocks)
		return 0;

	/*
	 * We do something simple for now.  The filemap_flush() will
	 * also start triggering a write of the data blocks, which is
	 * not strictly speaking necessary (and for users of
	 * laptop_mode, not even desirable).  However, to do otherwise
	 * would require replicating code paths in:
	 *
	 * ext4_da_writepages() ->
	 *    write_cache_pages() ---> (via passed in callback function)
	 *       __mpage_da_writepage() -->
	 *          mpage_add_bh_to_extent()
	 *          mpage_da_map_blocks()
	 *
	 * The problem is that write_cache_pages(), located in
	 * mm/page-writeback.c, marks pages clean in preparation for
	 * doing I/O, which is not desirable if we're not planning on
	 * doing I/O at all.
	 *
	 * We could call write_cache_pages(), and then redirty all of
	 * the pages by calling redirty_page_for_writepage() but that
	 * would be ugly in the extreme.  So instead we would need to
	 * replicate parts of the code in the above functions,
	 * simplifying them because we wouldn't actually intend to
	 * write out the pages, but rather only collect contiguous
	 * logical block extents, call the multi-block allocator, and
	 * then update the buffer heads with the block allocations.
	 *
	 * For now, though, we'll cheat by calling filemap_flush(),
	 * which will map the blocks, and start the I/O, but not
	 * actually wait for the I/O to complete.
	 */
	return filemap_flush(inode->i_mapping);
}

/*
 * bmap() is special.  It gets used by applications such as lilo and by
 * the swapper to find the on-disk block of a specific piece of data.
 *
 * Naturally, this is dangerous if the block concerned is still in the
 * journal.  If somebody makes a swapfile on an ext4 data-journaling
 * filesystem and enables swap, then they may get a nasty shock when the
 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zeros written out previously to the journal and
 * awaiting writeback in the kernel's buffer cache.
 *
 * So, if we see any bmap calls here on a modified, data-journaled file,
 * take extra steps to flush any blocks which might be in the cache.
 */
static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	journal_t *journal;
	int err;

	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
			test_opt(inode->i_sb, DELALLOC)) {
		/*
		 * With delalloc we want to sync the file
		 * so that we can make sure we allocate
		 * blocks for the file
		 */
		filemap_write_and_wait(mapping);
	}

	if (EXT4_JOURNAL(inode) &&
	    ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
		/*
		 * This is a REALLY heavyweight approach, but the use of
		 * bmap on dirty files is expected to be extremely rare:
		 * only if we run lilo or swapon on a freshly made file
		 * do we expect this to happen.
		 *
		 * (bmap requires CAP_SYS_RAWIO so this does not
		 * represent an unprivileged user DOS attack --- we'd be
		 * in trouble if mortal users could trigger this path at
		 * will.)
		 *
		 * NB. EXT4_STATE_JDATA is not set on files other than
		 * regular files.  If somebody wants to bmap a directory
		 * or symlink and gets confused because the buffer
		 * hasn't yet been flushed to disk, they deserve
		 * everything they get.
		 */
		ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
		journal = EXT4_JOURNAL(inode);
		jbd2_journal_lock_updates(journal);
		err = jbd2_journal_flush(journal);
		jbd2_journal_unlock_updates(journal);

		if (err)
			return 0;
	}

	return generic_block_bmap(mapping, block, ext4_get_block);
}

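/*
 * For illustration: this is the function the FIBMAP ioctl ultimately
 * reaches.  A minimal userspace sketch (assumes a regular file on an
 * ext4 mount and root privileges for CAP_SYS_RAWIO; not part of this
 * translation unit):
 */
#if 0
#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd, blk = 0;	/* query logical block 0 of the file */

	if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
		return 1;
	if (ioctl(fd, FIBMAP, &blk) < 0) {
		perror("FIBMAP");
		return 1;
	}
	printf("logical block 0 -> physical block %d\n", blk);
	close(fd);
	return 0;
}
#endif
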
static int ext4_readpage(struct file *file, struct page *page)
{
	trace_ext4_readpage(page);
	return mpage_readpage(page, ext4_get_block);
}

static int
ext4_readpages(struct file *file, struct address_space *mapping,
	       struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
}

static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset)
{
	struct buffer_head *head, *bh;
	unsigned int curr_off = 0;

	if (!page_has_buffers(page))
		return;
	head = bh = page_buffers(page);
	do {
		if (offset <= curr_off && test_clear_buffer_uninit(bh)
					&& bh->b_private) {
			ext4_free_io_end(bh->b_private);
			bh->b_private = NULL;
			bh->b_end_io = NULL;
		}
		curr_off = curr_off + bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset)
{
	journal_t *journal = EXT4_JOURNAL(page->mapping->host);

	trace_ext4_invalidatepage(page, offset);

	/*
	 * free any io_end structure allocated for buffers to be discarded
	 */
	if (ext4_should_dioread_nolock(page->mapping->host))
		ext4_invalidatepage_free_endio(page, offset);
	/*
	 * If it's a full truncate we just forget about the pending dirtying
	 */
	if (offset == 0)
		ClearPageChecked(page);

	if (journal)
		jbd2_journal_invalidatepage(journal, page, offset);
	else
		block_invalidatepage(page, offset);
}

static int ext4_releasepage(struct page *page, gfp_t wait)
{
	journal_t *journal = EXT4_JOURNAL(page->mapping->host);

	trace_ext4_releasepage(page);

	WARN_ON(PageChecked(page));
	if (!page_has_buffers(page))
		return 0;
	if (journal)
		return jbd2_journal_try_to_free_buffers(journal, page, wait);
	else
		return try_to_free_buffers(page);
}

/*
 * ext4_get_block used when preparing for a DIO write or buffer write.
 * We allocate an uninitialized extent if blocks haven't been allocated.
 * The extent will be converted to initialized after the IO is complete.
 */
static int ext4_get_block_write(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create)
{
	ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	return _ext4_get_block(inode, iblock, bh_result,
			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
}

static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
			    ssize_t size, void *private, int ret,
			    bool is_async)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	ext4_io_end_t *io_end = iocb->private;
	struct workqueue_struct *wq;
	unsigned long flags;
	struct ext4_inode_info *ei;

	/* if not async direct IO or dio with 0 bytes write, just return */
	if (!io_end || !size)
		goto out;

	ext_debug("ext4_end_io_dio(): io_end 0x%p "
		  "for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
		  iocb->private, io_end->inode->i_ino, iocb, offset,
		  size);

	/* if not aio dio with unwritten extents, just free io and return */
	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
		ext4_free_io_end(io_end);
		iocb->private = NULL;
out:
		if (is_async)
			aio_complete(iocb, ret, 0);
		inode_dio_done(inode);
		return;
	}

	io_end->offset = offset;
	io_end->size = size;
	if (is_async) {
		io_end->iocb = iocb;
		io_end->result = ret;
	}
	wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;

	/* Add the io_end to per-inode completed aio dio list */
	ei = EXT4_I(io_end->inode);
	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	list_add_tail(&io_end->list, &ei->i_completed_io_list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

	/* queue the work to convert unwritten extents to written */
	queue_work(wq, &io_end->work);

	iocb->private = NULL;

	/* XXX: probably should move into the real I/O completion handler */
	inode_dio_done(inode);
}

static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
{
	ext4_io_end_t *io_end = bh->b_private;
	struct workqueue_struct *wq;
	struct inode *inode;
	unsigned long flags;

	if (!test_clear_buffer_uninit(bh) || !io_end)
		goto out;

	if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) {
		printk("sb umounted, discard end_io request for inode %lu\n",
			io_end->inode->i_ino);
		ext4_free_io_end(io_end);
		goto out;
	}

	/*
	 * It may be over-defensive here to check EXT4_IO_END_UNWRITTEN now,
	 * but being careful is always safe for future changes.
	 */
	inode = io_end->inode;
	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
		io_end->flag |= EXT4_IO_END_UNWRITTEN;
		atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
	}

	/* Add the io_end to per-inode completed io list */
	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
	list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);

	wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
	/* queue the work to convert unwritten extents to written */
	queue_work(wq, &io_end->work);
out:
	bh->b_private = NULL;
	bh->b_end_io = NULL;
	clear_buffer_uninit(bh);
	end_buffer_async_write(bh, uptodate);
}

static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
{
	ext4_io_end_t *io_end;
	struct page *page = bh->b_page;
	loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT;
	size_t size = bh->b_size;

retry:
	io_end = ext4_init_io_end(inode, GFP_ATOMIC);
	if (!io_end) {
		pr_warn_ratelimited("%s: allocation fail\n", __func__);
		schedule();
		goto retry;
	}
	io_end->offset = offset;
	io_end->size = size;
	/*
	 * We need to hold a reference to the page to make sure it
	 * doesn't get evicted before ext4_end_io_work() has a chance
	 * to convert the extent from written to unwritten.
	 */
	io_end->page = page;
	get_page(io_end->page);

	bh->b_private = io_end;
	bh->b_end_io = ext4_end_io_buffer_write;
	return 0;
}

  2496. /*
  2497. * For ext4 extent files, ext4 will do direct-io write to holes,
  2498. * preallocated extents, and those write extend the file, no need to
  2499. * fall back to buffered IO.
  2500. *
  2501. * For holes, we fallocate those blocks, mark them as uninitialized
  2502. * If those blocks were preallocated, we mark sure they are splited, but
  2503. * still keep the range to write as uninitialized.
  2504. *
  2505. * The unwrritten extents will be converted to written when DIO is completed.
  2506. * For async direct IO, since the IO may still pending when return, we
  2507. * set up an end_io call back function, which will do the conversion
  2508. * when async direct IO completed.
  2509. *
  2510. * If the O_DIRECT write will extend the file then add this inode to the
  2511. * orphan list. So recovery will truncate it back to the original size
  2512. * if the machine crashes during the write.
  2513. *
  2514. */
  2515. static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
  2516. const struct iovec *iov, loff_t offset,
  2517. unsigned long nr_segs)
  2518. {
  2519. struct file *file = iocb->ki_filp;
  2520. struct inode *inode = file->f_mapping->host;
  2521. ssize_t ret;
  2522. size_t count = iov_length(iov, nr_segs);
  2523. loff_t final_size = offset + count;
  2524. if (rw == WRITE && final_size <= inode->i_size) {
  2525. /*
  2526. * We could direct write to holes and fallocate.
  2527. *
2528. * Allocated blocks to fill the hole are marked as uninitialized
2529. * to prevent a parallel buffered read from exposing the stale data
2530. * before DIO completes the data IO.
2531. *
2532. * As to previously fallocated extents, ext4 get_block
2533. * will just simply mark the buffer mapped but still
2534. * keep the extents uninitialized.
2535. *
2536. * For the non-AIO case, we will convert those unwritten extents
2537. * to written after returning from blockdev_direct_IO.
2538. *
2539. * For async DIO, the conversion needs to be deferred until
2540. * the IO is completed. The ext4 end_io callback function
2541. * will be called to take care of the conversion work.
2542. * Here, for the async case, we allocate an io_end structure to
2543. * hook to the iocb.
  2544. */
  2545. iocb->private = NULL;
  2546. EXT4_I(inode)->cur_aio_dio = NULL;
  2547. if (!is_sync_kiocb(iocb)) {
  2548. iocb->private = ext4_init_io_end(inode, GFP_NOFS);
  2549. if (!iocb->private)
  2550. return -ENOMEM;
  2551. /*
2552. * We save the io structure for the current async
2553. * direct IO, so that later ext4_map_blocks()
2554. * can flag in the io structure whether there
2555. * are unwritten extents that need to be converted
2556. * when the IO is completed.
  2557. */
  2558. EXT4_I(inode)->cur_aio_dio = iocb->private;
  2559. }
  2560. ret = __blockdev_direct_IO(rw, iocb, inode,
  2561. inode->i_sb->s_bdev, iov,
  2562. offset, nr_segs,
  2563. ext4_get_block_write,
  2564. ext4_end_io_dio,
  2565. NULL,
  2566. DIO_LOCKING | DIO_SKIP_HOLES);
  2567. if (iocb->private)
  2568. EXT4_I(inode)->cur_aio_dio = NULL;
  2569. /*
2570. * The io_end structure takes a reference to the inode;
2571. * that structure needs to be destroyed and the
2572. * reference to the inode needs to be dropped when the IO
2573. * completes, even for a 0-byte write or a failed one.
2574. *
2575. * In the successful AIO DIO case, the io_end structure will be
2576. * destroyed and the reference to the inode will be dropped
2577. * after the end_io callback function is called.
2578. *
2579. * In the 0-byte write case, or the error case, since the
2580. * VFS direct IO won't invoke the end_io callback function,
2581. * we need to free the end_io structure here.
  2582. */
  2583. if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
  2584. ext4_free_io_end(iocb->private);
  2585. iocb->private = NULL;
  2586. } else if (ret > 0 && ext4_test_inode_state(inode,
  2587. EXT4_STATE_DIO_UNWRITTEN)) {
  2588. int err;
  2589. /*
2590. * For the non-AIO case, since the IO is already
2591. * completed, we can do the conversion right here.
  2592. */
  2593. err = ext4_convert_unwritten_extents(inode,
  2594. offset, ret);
  2595. if (err < 0)
  2596. ret = err;
  2597. ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
  2598. }
  2599. return ret;
  2600. }
2601. /* for writes extending the end of file, we fall back to the old way */
  2602. return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
  2603. }
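/*
 * Reading aid, not code from this file: a sketch of the async path
 * described above, assuming ext4_end_io_dio() queues the conversion
 * work to dio_unwritten_wq just as the buffer-write completion path
 * above does:
 *
 *	ext4_ext_direct_IO()                     submission context
 *	    iocb->private = ext4_init_io_end(inode, GFP_NOFS);
 *	    __blockdev_direct_IO(..., ext4_end_io_dio, ...);
 *	    returns -EIOCBQUEUED while the IO is in flight
 *
 *	ext4_end_io_dio()                        IO completion
 *	    queue_work(dio_unwritten_wq, &io_end->work);
 *
 *	ext4_end_io_work()                       worker thread
 *	    ext4_convert_unwritten_extents();    unwritten -> written
 */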
  2604. static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
  2605. const struct iovec *iov, loff_t offset,
  2606. unsigned long nr_segs)
  2607. {
  2608. struct file *file = iocb->ki_filp;
  2609. struct inode *inode = file->f_mapping->host;
  2610. ssize_t ret;
  2611. /*
  2612. * If we are doing data journalling we don't support O_DIRECT
  2613. */
  2614. if (ext4_should_journal_data(inode))
  2615. return 0;
  2616. trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
  2617. if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
  2618. ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
  2619. else
  2620. ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
  2621. trace_ext4_direct_IO_exit(inode, offset,
  2622. iov_length(iov, nr_segs), rw, ret);
  2623. return ret;
  2624. }
  2625. /*
  2626. * Pages can be marked dirty completely asynchronously from ext4's journalling
  2627. * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
  2628. * much here because ->set_page_dirty is called under VFS locks. The page is
  2629. * not necessarily locked.
  2630. *
  2631. * We cannot just dirty the page and leave attached buffers clean, because the
  2632. * buffers' dirty state is "definitive". We cannot just set the buffers dirty
  2633. * or jbddirty because all the journalling code will explode.
  2634. *
  2635. * So what we do is to mark the page "pending dirty" and next time writepage
  2636. * is called, propagate that into the buffers appropriately.
  2637. */
  2638. static int ext4_journalled_set_page_dirty(struct page *page)
  2639. {
  2640. SetPageChecked(page);
  2641. return __set_page_dirty_nobuffers(page);
  2642. }
  2643. static const struct address_space_operations ext4_ordered_aops = {
  2644. .readpage = ext4_readpage,
  2645. .readpages = ext4_readpages,
  2646. .writepage = ext4_writepage,
  2647. .write_begin = ext4_write_begin,
  2648. .write_end = ext4_ordered_write_end,
  2649. .bmap = ext4_bmap,
  2650. .invalidatepage = ext4_invalidatepage,
  2651. .releasepage = ext4_releasepage,
  2652. .direct_IO = ext4_direct_IO,
  2653. .migratepage = buffer_migrate_page,
  2654. .is_partially_uptodate = block_is_partially_uptodate,
  2655. .error_remove_page = generic_error_remove_page,
  2656. };
  2657. static const struct address_space_operations ext4_writeback_aops = {
  2658. .readpage = ext4_readpage,
  2659. .readpages = ext4_readpages,
  2660. .writepage = ext4_writepage,
  2661. .write_begin = ext4_write_begin,
  2662. .write_end = ext4_writeback_write_end,
  2663. .bmap = ext4_bmap,
  2664. .invalidatepage = ext4_invalidatepage,
  2665. .releasepage = ext4_releasepage,
  2666. .direct_IO = ext4_direct_IO,
  2667. .migratepage = buffer_migrate_page,
  2668. .is_partially_uptodate = block_is_partially_uptodate,
  2669. .error_remove_page = generic_error_remove_page,
  2670. };
  2671. static const struct address_space_operations ext4_journalled_aops = {
  2672. .readpage = ext4_readpage,
  2673. .readpages = ext4_readpages,
  2674. .writepage = ext4_writepage,
  2675. .write_begin = ext4_write_begin,
  2676. .write_end = ext4_journalled_write_end,
  2677. .set_page_dirty = ext4_journalled_set_page_dirty,
  2678. .bmap = ext4_bmap,
  2679. .invalidatepage = ext4_invalidatepage,
  2680. .releasepage = ext4_releasepage,
  2681. .direct_IO = ext4_direct_IO,
  2682. .is_partially_uptodate = block_is_partially_uptodate,
  2683. .error_remove_page = generic_error_remove_page,
  2684. };
  2685. static const struct address_space_operations ext4_da_aops = {
  2686. .readpage = ext4_readpage,
  2687. .readpages = ext4_readpages,
  2688. .writepage = ext4_writepage,
  2689. .writepages = ext4_da_writepages,
  2690. .write_begin = ext4_da_write_begin,
  2691. .write_end = ext4_da_write_end,
  2692. .bmap = ext4_bmap,
  2693. .invalidatepage = ext4_da_invalidatepage,
  2694. .releasepage = ext4_releasepage,
  2695. .direct_IO = ext4_direct_IO,
  2696. .migratepage = buffer_migrate_page,
  2697. .is_partially_uptodate = block_is_partially_uptodate,
  2698. .error_remove_page = generic_error_remove_page,
  2699. };
  2700. void ext4_set_aops(struct inode *inode)
  2701. {
  2702. if (ext4_should_order_data(inode) &&
  2703. test_opt(inode->i_sb, DELALLOC))
  2704. inode->i_mapping->a_ops = &ext4_da_aops;
  2705. else if (ext4_should_order_data(inode))
  2706. inode->i_mapping->a_ops = &ext4_ordered_aops;
  2707. else if (ext4_should_writeback_data(inode) &&
  2708. test_opt(inode->i_sb, DELALLOC))
  2709. inode->i_mapping->a_ops = &ext4_da_aops;
  2710. else if (ext4_should_writeback_data(inode))
  2711. inode->i_mapping->a_ops = &ext4_writeback_aops;
  2712. else
  2713. inode->i_mapping->a_ops = &ext4_journalled_aops;
  2714. }
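/*
 * For reference, the selection above reduces to this table (derived
 * directly from the code):
 *
 *	data=ordered,   delalloc     ->  ext4_da_aops
 *	data=ordered,   nodelalloc   ->  ext4_ordered_aops
 *	data=writeback, delalloc     ->  ext4_da_aops
 *	data=writeback, nodelalloc   ->  ext4_writeback_aops
 *	data=journal    (any)        ->  ext4_journalled_aops
 */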
  2715. /*
  2716. * ext4_discard_partial_page_buffers()
  2717. * Wrapper function for ext4_discard_partial_page_buffers_no_lock.
  2718. * This function finds and locks the page containing the offset
  2719. * "from" and passes it to ext4_discard_partial_page_buffers_no_lock.
  2720. * Calling functions that already have the page locked should call
  2721. * ext4_discard_partial_page_buffers_no_lock directly.
  2722. */
  2723. int ext4_discard_partial_page_buffers(handle_t *handle,
  2724. struct address_space *mapping, loff_t from,
  2725. loff_t length, int flags)
  2726. {
  2727. struct inode *inode = mapping->host;
  2728. struct page *page;
  2729. int err = 0;
  2730. page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
  2731. mapping_gfp_mask(mapping) & ~__GFP_FS);
  2732. if (!page)
  2733. return -EINVAL;
  2734. err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page,
  2735. from, length, flags);
  2736. unlock_page(page);
  2737. page_cache_release(page);
  2738. return err;
  2739. }
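/*
 * Example call (an illustrative sketch, not a call site in this file):
 * zero the already-released blocks backing byte range [from, from+length)
 * of one page, leaving still-mapped buffer heads alone:
 *
 *	err = ext4_discard_partial_page_buffers(handle, inode->i_mapping,
 *			from, length,
 *			EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
 */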
  2740. /*
  2741. * ext4_discard_partial_page_buffers_no_lock()
  2742. * Zeros a page range of length 'length' starting from offset 'from'.
2743. * Buffer heads that correspond to the block-aligned regions of the
2744. * zeroed range will be unmapped. Non-block-aligned regions
2745. * will have the corresponding buffer head mapped if needed so that
2746. * that region of the page can be updated with the partial zero out.
2747. *
2748. * This function assumes that the page has already been locked.
2749. * The range to be discarded must be contained within the given page.
2750. * If the specified range exceeds the end of the page it will be shortened
2751. * to the end of the page that corresponds to 'from'. This function is
2752. * appropriate for updating a page and its buffer heads to be unmapped and
2753. * zeroed for blocks that have been either released, or are going to be
2754. * released.
  2755. *
  2756. * handle: The journal handle
  2757. * inode: The files inode
  2758. * page: A locked page that contains the offset "from"
2759. * from: The starting byte offset (from the beginning of the file)
  2760. * to begin discarding
  2761. * len: The length of bytes to discard
  2762. * flags: Optional flags that may be used:
  2763. *
  2764. * EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED
  2765. * Only zero the regions of the page whose buffer heads
  2766. * have already been unmapped. This flag is appropriate
2767. * for updating the contents of a page whose blocks may
  2768. * have already been released, and we only want to zero
  2769. * out the regions that correspond to those released blocks.
  2770. *
2771. * Returns zero on success or negative on failure.
  2772. */
  2773. int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
  2774. struct inode *inode, struct page *page, loff_t from,
  2775. loff_t length, int flags)
  2776. {
  2777. ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
  2778. unsigned int offset = from & (PAGE_CACHE_SIZE-1);
  2779. unsigned int blocksize, max, pos;
  2780. unsigned int end_of_block, range_to_discard;
  2781. ext4_lblk_t iblock;
  2782. struct buffer_head *bh;
  2783. int err = 0;
  2784. blocksize = inode->i_sb->s_blocksize;
  2785. max = PAGE_CACHE_SIZE - offset;
  2786. if (index != page->index)
  2787. return -EINVAL;
  2788. /*
  2789. * correct length if it does not fall between
  2790. * 'from' and the end of the page
  2791. */
  2792. if (length > max || length < 0)
  2793. length = max;
  2794. iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
  2795. if (!page_has_buffers(page)) {
  2796. /*
  2797. * If the range to be discarded covers a partial block
  2798. * we need to get the page buffers. This is because
  2799. * partial blocks cannot be released and the page needs
  2800. * to be updated with the contents of the block before
  2801. * we write the zeros on top of it.
  2802. */
2803. if ((from & (blocksize - 1)) ||
2804. ((from + length) & (blocksize - 1))) {
  2805. create_empty_buffers(page, blocksize, 0);
  2806. } else {
  2807. /*
  2808. * If there are no partial blocks,
  2809. * there is nothing to update,
  2810. * so we can return now
  2811. */
  2812. return 0;
  2813. }
  2814. }
  2815. /* Find the buffer that contains "offset" */
  2816. bh = page_buffers(page);
  2817. pos = blocksize;
  2818. while (offset >= pos) {
  2819. bh = bh->b_this_page;
  2820. iblock++;
  2821. pos += blocksize;
  2822. }
  2823. pos = offset;
  2824. while (pos < offset + length) {
  2825. err = 0;
  2826. /* The length of space left to zero and unmap */
  2827. range_to_discard = offset + length - pos;
  2828. /* The length of space until the end of the block */
  2829. end_of_block = blocksize - (pos & (blocksize-1));
  2830. /*
  2831. * Do not unmap or zero past end of block
  2832. * for this buffer head
  2833. */
  2834. if (range_to_discard > end_of_block)
  2835. range_to_discard = end_of_block;
  2836. /*
2837. * Skip this buffer head if we are only zeroing unmapped
  2838. * regions of the page
  2839. */
  2840. if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED &&
  2841. buffer_mapped(bh))
  2842. goto next;
  2843. /* If the range is block aligned, unmap */
  2844. if (range_to_discard == blocksize) {
  2845. clear_buffer_dirty(bh);
  2846. bh->b_bdev = NULL;
  2847. clear_buffer_mapped(bh);
  2848. clear_buffer_req(bh);
  2849. clear_buffer_new(bh);
  2850. clear_buffer_delay(bh);
  2851. clear_buffer_unwritten(bh);
  2852. clear_buffer_uptodate(bh);
  2853. zero_user(page, pos, range_to_discard);
  2854. BUFFER_TRACE(bh, "Buffer discarded");
  2855. goto next;
  2856. }
  2857. /*
  2858. * If this block is not completely contained in the range
  2859. * to be discarded, then it is not going to be released. Because
  2860. * we need to keep this block, we need to make sure this part
  2861. * of the page is uptodate before we modify it by writeing
  2862. * partial zeros on it.
  2863. */
  2864. if (!buffer_mapped(bh)) {
  2865. /*
  2866. * Buffer head must be mapped before we can read
  2867. * from the block
  2868. */
  2869. BUFFER_TRACE(bh, "unmapped");
  2870. ext4_get_block(inode, iblock, bh, 0);
  2871. /* unmapped? It's a hole - nothing to do */
  2872. if (!buffer_mapped(bh)) {
  2873. BUFFER_TRACE(bh, "still unmapped");
  2874. goto next;
  2875. }
  2876. }
  2877. /* Ok, it's mapped. Make sure it's up-to-date */
  2878. if (PageUptodate(page))
  2879. set_buffer_uptodate(bh);
  2880. if (!buffer_uptodate(bh)) {
  2881. err = -EIO;
  2882. ll_rw_block(READ, 1, &bh);
  2883. wait_on_buffer(bh);
  2884. /* Uhhuh. Read error. Complain and punt.*/
  2885. if (!buffer_uptodate(bh))
  2886. goto next;
  2887. }
  2888. if (ext4_should_journal_data(inode)) {
  2889. BUFFER_TRACE(bh, "get write access");
  2890. err = ext4_journal_get_write_access(handle, bh);
  2891. if (err)
  2892. goto next;
  2893. }
  2894. zero_user(page, pos, range_to_discard);
  2895. err = 0;
  2896. if (ext4_should_journal_data(inode)) {
  2897. err = ext4_handle_dirty_metadata(handle, inode, bh);
  2898. } else
  2899. mark_buffer_dirty(bh);
  2900. BUFFER_TRACE(bh, "Partial buffer zeroed");
  2901. next:
  2902. bh = bh->b_this_page;
  2903. iblock++;
  2904. pos += range_to_discard;
  2905. }
  2906. return err;
  2907. }
  2908. /*
  2909. * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
  2910. * up to the end of the block which corresponds to `from'.
2911. * This is required during truncate. We need to physically zero the tail end
  2912. * of that block so it doesn't yield old data if the file is later grown.
  2913. */
  2914. int ext4_block_truncate_page(handle_t *handle,
  2915. struct address_space *mapping, loff_t from)
  2916. {
  2917. unsigned offset = from & (PAGE_CACHE_SIZE-1);
  2918. unsigned length;
  2919. unsigned blocksize;
  2920. struct inode *inode = mapping->host;
  2921. blocksize = inode->i_sb->s_blocksize;
  2922. length = blocksize - (offset & (blocksize - 1));
  2923. return ext4_block_zero_page_range(handle, mapping, from, length);
  2924. }
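/*
 * Worked example, assuming 4096-byte blocks and pages: truncating to
 * from = 10000 gives offset = 10000 & 4095 = 1808, so length =
 * 4096 - 1808 = 2288, and bytes 1808..4095 of the block containing
 * offset 10000 are zeroed.
 */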
  2925. /*
  2926. * ext4_block_zero_page_range() zeros out a mapping of length 'length'
  2927. * starting from file offset 'from'. The range to be zero'd must
2928. * be contained within one block. If the specified range exceeds
2929. * the end of the block it will be shortened to the end of the block
2930. * that corresponds to 'from'.
  2931. */
  2932. int ext4_block_zero_page_range(handle_t *handle,
  2933. struct address_space *mapping, loff_t from, loff_t length)
  2934. {
  2935. ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
  2936. unsigned offset = from & (PAGE_CACHE_SIZE-1);
  2937. unsigned blocksize, max, pos;
  2938. ext4_lblk_t iblock;
  2939. struct inode *inode = mapping->host;
  2940. struct buffer_head *bh;
  2941. struct page *page;
  2942. int err = 0;
  2943. page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
  2944. mapping_gfp_mask(mapping) & ~__GFP_FS);
  2945. if (!page)
  2946. return -EINVAL;
  2947. blocksize = inode->i_sb->s_blocksize;
  2948. max = blocksize - (offset & (blocksize - 1));
  2949. /*
  2950. * correct length if it does not fall between
  2951. * 'from' and the end of the block
  2952. */
  2953. if (length > max || length < 0)
  2954. length = max;
  2955. iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
  2956. if (!page_has_buffers(page))
  2957. create_empty_buffers(page, blocksize, 0);
  2958. /* Find the buffer that contains "offset" */
  2959. bh = page_buffers(page);
  2960. pos = blocksize;
  2961. while (offset >= pos) {
  2962. bh = bh->b_this_page;
  2963. iblock++;
  2964. pos += blocksize;
  2965. }
  2966. err = 0;
  2967. if (buffer_freed(bh)) {
  2968. BUFFER_TRACE(bh, "freed: skip");
  2969. goto unlock;
  2970. }
  2971. if (!buffer_mapped(bh)) {
  2972. BUFFER_TRACE(bh, "unmapped");
  2973. ext4_get_block(inode, iblock, bh, 0);
  2974. /* unmapped? It's a hole - nothing to do */
  2975. if (!buffer_mapped(bh)) {
  2976. BUFFER_TRACE(bh, "still unmapped");
  2977. goto unlock;
  2978. }
  2979. }
  2980. /* Ok, it's mapped. Make sure it's up-to-date */
  2981. if (PageUptodate(page))
  2982. set_buffer_uptodate(bh);
  2983. if (!buffer_uptodate(bh)) {
  2984. err = -EIO;
  2985. ll_rw_block(READ, 1, &bh);
  2986. wait_on_buffer(bh);
  2987. /* Uhhuh. Read error. Complain and punt. */
  2988. if (!buffer_uptodate(bh))
  2989. goto unlock;
  2990. }
  2991. if (ext4_should_journal_data(inode)) {
  2992. BUFFER_TRACE(bh, "get write access");
  2993. err = ext4_journal_get_write_access(handle, bh);
  2994. if (err)
  2995. goto unlock;
  2996. }
  2997. zero_user(page, offset, length);
  2998. BUFFER_TRACE(bh, "zeroed end of block");
  2999. err = 0;
  3000. if (ext4_should_journal_data(inode)) {
  3001. err = ext4_handle_dirty_metadata(handle, inode, bh);
  3002. } else
  3003. mark_buffer_dirty(bh);
  3004. unlock:
  3005. unlock_page(page);
  3006. page_cache_release(page);
  3007. return err;
  3008. }
  3009. int ext4_can_truncate(struct inode *inode)
  3010. {
  3011. if (S_ISREG(inode->i_mode))
  3012. return 1;
  3013. if (S_ISDIR(inode->i_mode))
  3014. return 1;
  3015. if (S_ISLNK(inode->i_mode))
  3016. return !ext4_inode_is_fast_symlink(inode);
  3017. return 0;
  3018. }
  3019. /*
3020. * ext4_punch_hole: punches a hole in a file by releasing the blocks
  3021. * associated with the given offset and length
  3022. *
  3023. * @inode: File inode
  3024. * @offset: The offset where the hole will begin
  3025. * @len: The length of the hole
  3026. *
3027. * Returns: 0 on success or negative on failure
  3028. */
  3029. int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
  3030. {
  3031. struct inode *inode = file->f_path.dentry->d_inode;
  3032. if (!S_ISREG(inode->i_mode))
3033. return -EOPNOTSUPP;
  3034. if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
  3035. /* TODO: Add support for non extent hole punching */
3036. return -EOPNOTSUPP;
  3037. }
  3038. if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) {
  3039. /* TODO: Add support for bigalloc file systems */
3040. return -EOPNOTSUPP;
  3041. }
  3042. return ext4_ext_punch_hole(file, offset, length);
  3043. }
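/*
 * This path is reached from userspace through fallocate(2), e.g.
 * (illustrative userspace sketch):
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, length);
 */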
  3044. /*
  3045. * ext4_truncate()
  3046. *
  3047. * We block out ext4_get_block() block instantiations across the entire
  3048. * transaction, and VFS/VM ensures that ext4_truncate() cannot run
  3049. * simultaneously on behalf of the same inode.
  3050. *
3051. * As we work through the truncate and commit bits of it to the journal there
  3052. * is one core, guiding principle: the file's tree must always be consistent on
  3053. * disk. We must be able to restart the truncate after a crash.
  3054. *
  3055. * The file's tree may be transiently inconsistent in memory (although it
  3056. * probably isn't), but whenever we close off and commit a journal transaction,
  3057. * the contents of (the filesystem + the journal) must be consistent and
  3058. * restartable. It's pretty simple, really: bottom up, right to left (although
  3059. * left-to-right works OK too).
  3060. *
  3061. * Note that at recovery time, journal replay occurs *before* the restart of
  3062. * truncate against the orphan inode list.
  3063. *
  3064. * The committed inode has the new, desired i_size (which is the same as
  3065. * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
  3066. * that this inode's truncate did not complete and it will again call
  3067. * ext4_truncate() to have another go. So there will be instantiated blocks
  3068. * to the right of the truncation point in a crashed ext4 filesystem. But
  3069. * that's fine - as long as they are linked from the inode, the post-crash
  3070. * ext4_truncate() run will find them and release them.
  3071. */
  3072. void ext4_truncate(struct inode *inode)
  3073. {
  3074. trace_ext4_truncate_enter(inode);
  3075. if (!ext4_can_truncate(inode))
  3076. return;
  3077. ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
  3078. if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
  3079. ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
  3080. if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
  3081. ext4_ext_truncate(inode);
  3082. else
  3083. ext4_ind_truncate(inode);
  3084. trace_ext4_truncate_exit(inode);
  3085. }
  3086. /*
  3087. * ext4_get_inode_loc returns with an extra refcount against the inode's
  3088. * underlying buffer_head on success. If 'in_mem' is true, we have all
  3089. * data in memory that is needed to recreate the on-disk version of this
  3090. * inode.
  3091. */
  3092. static int __ext4_get_inode_loc(struct inode *inode,
  3093. struct ext4_iloc *iloc, int in_mem)
  3094. {
  3095. struct ext4_group_desc *gdp;
  3096. struct buffer_head *bh;
  3097. struct super_block *sb = inode->i_sb;
  3098. ext4_fsblk_t block;
  3099. int inodes_per_block, inode_offset;
  3100. iloc->bh = NULL;
  3101. if (!ext4_valid_inum(sb, inode->i_ino))
  3102. return -EIO;
  3103. iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
  3104. gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
  3105. if (!gdp)
  3106. return -EIO;
  3107. /*
  3108. * Figure out the offset within the block group inode table
  3109. */
  3110. inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
  3111. inode_offset = ((inode->i_ino - 1) %
  3112. EXT4_INODES_PER_GROUP(sb));
  3113. block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
  3114. iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
  3115. bh = sb_getblk(sb, block);
  3116. if (!bh) {
  3117. EXT4_ERROR_INODE_BLOCK(inode, block,
  3118. "unable to read itable block");
  3119. return -EIO;
  3120. }
  3121. if (!buffer_uptodate(bh)) {
  3122. lock_buffer(bh);
  3123. /*
  3124. * If the buffer has the write error flag, we have failed
  3125. * to write out another inode in the same block. In this
  3126. * case, we don't have to read the block because we may
  3127. * read the old inode data successfully.
  3128. */
  3129. if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
  3130. set_buffer_uptodate(bh);
  3131. if (buffer_uptodate(bh)) {
  3132. /* someone brought it uptodate while we waited */
  3133. unlock_buffer(bh);
  3134. goto has_buffer;
  3135. }
  3136. /*
  3137. * If we have all information of the inode in memory and this
  3138. * is the only valid inode in the block, we need not read the
  3139. * block.
  3140. */
  3141. if (in_mem) {
  3142. struct buffer_head *bitmap_bh;
  3143. int i, start;
  3144. start = inode_offset & ~(inodes_per_block - 1);
  3145. /* Is the inode bitmap in cache? */
  3146. bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
  3147. if (!bitmap_bh)
  3148. goto make_io;
  3149. /*
  3150. * If the inode bitmap isn't in cache then the
  3151. * optimisation may end up performing two reads instead
  3152. * of one, so skip it.
  3153. */
  3154. if (!buffer_uptodate(bitmap_bh)) {
  3155. brelse(bitmap_bh);
  3156. goto make_io;
  3157. }
  3158. for (i = start; i < start + inodes_per_block; i++) {
  3159. if (i == inode_offset)
  3160. continue;
  3161. if (ext4_test_bit(i, bitmap_bh->b_data))
  3162. break;
  3163. }
  3164. brelse(bitmap_bh);
  3165. if (i == start + inodes_per_block) {
  3166. /* all other inodes are free, so skip I/O */
  3167. memset(bh->b_data, 0, bh->b_size);
  3168. set_buffer_uptodate(bh);
  3169. unlock_buffer(bh);
  3170. goto has_buffer;
  3171. }
  3172. }
  3173. make_io:
  3174. /*
  3175. * If we need to do any I/O, try to pre-readahead extra
  3176. * blocks from the inode table.
  3177. */
  3178. if (EXT4_SB(sb)->s_inode_readahead_blks) {
  3179. ext4_fsblk_t b, end, table;
  3180. unsigned num;
  3181. table = ext4_inode_table(sb, gdp);
  3182. /* s_inode_readahead_blks is always a power of 2 */
  3183. b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
  3184. if (table > b)
  3185. b = table;
  3186. end = b + EXT4_SB(sb)->s_inode_readahead_blks;
  3187. num = EXT4_INODES_PER_GROUP(sb);
  3188. if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
  3189. EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
  3190. num -= ext4_itable_unused_count(sb, gdp);
  3191. table += num / inodes_per_block;
  3192. if (end > table)
  3193. end = table;
  3194. while (b <= end)
  3195. sb_breadahead(sb, b++);
  3196. }
  3197. /*
  3198. * There are other valid inodes in the buffer, this inode
  3199. * has in-inode xattrs, or we don't have this inode in memory.
  3200. * Read the block from disk.
  3201. */
  3202. trace_ext4_load_inode(inode);
  3203. get_bh(bh);
  3204. bh->b_end_io = end_buffer_read_sync;
  3205. submit_bh(READ_META, bh);
  3206. wait_on_buffer(bh);
  3207. if (!buffer_uptodate(bh)) {
  3208. EXT4_ERROR_INODE_BLOCK(inode, block,
  3209. "unable to read itable block");
  3210. brelse(bh);
  3211. return -EIO;
  3212. }
  3213. }
  3214. has_buffer:
  3215. iloc->bh = bh;
  3216. return 0;
  3217. }
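/*
 * Worked example of the arithmetic above, using example geometry
 * (8192 inodes per group, 16 inodes per block, 256-byte inodes):
 * for ino = 8200, block_group = 8199 / 8192 = 1 and inode_offset =
 * 8199 % 8192 = 7, so the inode lives in block 7 / 16 = 0 of group
 * 1's inode table, at byte offset (7 % 16) * 256 = 1792.
 */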
  3218. int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
  3219. {
  3220. /* We have all inode data except xattrs in memory here. */
  3221. return __ext4_get_inode_loc(inode, iloc,
  3222. !ext4_test_inode_state(inode, EXT4_STATE_XATTR));
  3223. }
  3224. void ext4_set_inode_flags(struct inode *inode)
  3225. {
  3226. unsigned int flags = EXT4_I(inode)->i_flags;
  3227. inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
  3228. if (flags & EXT4_SYNC_FL)
  3229. inode->i_flags |= S_SYNC;
  3230. if (flags & EXT4_APPEND_FL)
  3231. inode->i_flags |= S_APPEND;
  3232. if (flags & EXT4_IMMUTABLE_FL)
  3233. inode->i_flags |= S_IMMUTABLE;
  3234. if (flags & EXT4_NOATIME_FL)
  3235. inode->i_flags |= S_NOATIME;
  3236. if (flags & EXT4_DIRSYNC_FL)
  3237. inode->i_flags |= S_DIRSYNC;
  3238. }
  3239. /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
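/*
 * Note: the cmpxchg() loop below updates i_flags without taking a lock;
 * if another CPU modifies ei->i_flags between the read and the cmpxchg,
 * the compare fails and we recompute the new flags and retry.
 */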
  3240. void ext4_get_inode_flags(struct ext4_inode_info *ei)
  3241. {
  3242. unsigned int vfs_fl;
  3243. unsigned long old_fl, new_fl;
  3244. do {
  3245. vfs_fl = ei->vfs_inode.i_flags;
  3246. old_fl = ei->i_flags;
  3247. new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
  3248. EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
  3249. EXT4_DIRSYNC_FL);
  3250. if (vfs_fl & S_SYNC)
  3251. new_fl |= EXT4_SYNC_FL;
  3252. if (vfs_fl & S_APPEND)
  3253. new_fl |= EXT4_APPEND_FL;
  3254. if (vfs_fl & S_IMMUTABLE)
  3255. new_fl |= EXT4_IMMUTABLE_FL;
  3256. if (vfs_fl & S_NOATIME)
  3257. new_fl |= EXT4_NOATIME_FL;
  3258. if (vfs_fl & S_DIRSYNC)
  3259. new_fl |= EXT4_DIRSYNC_FL;
  3260. } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
  3261. }
  3262. static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
  3263. struct ext4_inode_info *ei)
  3264. {
  3265. blkcnt_t i_blocks ;
  3266. struct inode *inode = &(ei->vfs_inode);
  3267. struct super_block *sb = inode->i_sb;
  3268. if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
  3269. EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
  3270. /* we are using combined 48 bit field */
  3271. i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
  3272. le32_to_cpu(raw_inode->i_blocks_lo);
  3273. if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
3274. /* i_blocks is counted in file system blocks */
  3275. return i_blocks << (inode->i_blkbits - 9);
  3276. } else {
  3277. return i_blocks;
  3278. }
  3279. } else {
  3280. return le32_to_cpu(raw_inode->i_blocks_lo);
  3281. }
  3282. }
  3283. struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
  3284. {
  3285. struct ext4_iloc iloc;
  3286. struct ext4_inode *raw_inode;
  3287. struct ext4_inode_info *ei;
  3288. struct inode *inode;
  3289. journal_t *journal = EXT4_SB(sb)->s_journal;
  3290. long ret;
  3291. int block;
  3292. inode = iget_locked(sb, ino);
  3293. if (!inode)
  3294. return ERR_PTR(-ENOMEM);
  3295. if (!(inode->i_state & I_NEW))
  3296. return inode;
  3297. ei = EXT4_I(inode);
  3298. iloc.bh = NULL;
  3299. ret = __ext4_get_inode_loc(inode, &iloc, 0);
  3300. if (ret < 0)
  3301. goto bad_inode;
  3302. raw_inode = ext4_raw_inode(&iloc);
  3303. inode->i_mode = le16_to_cpu(raw_inode->i_mode);
  3304. inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
  3305. inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
  3306. if (!(test_opt(inode->i_sb, NO_UID32))) {
  3307. inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
  3308. inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
  3309. }
  3310. inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
  3311. ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
  3312. ei->i_dir_start_lookup = 0;
  3313. ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
  3314. /* We now have enough fields to check if the inode was active or not.
3315. * This is needed because nfsd might try to access dead inodes;
3316. * the test is the same one that e2fsck uses.
  3317. * NeilBrown 1999oct15
  3318. */
  3319. if (inode->i_nlink == 0) {
  3320. if (inode->i_mode == 0 ||
  3321. !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
  3322. /* this inode is deleted */
  3323. ret = -ESTALE;
  3324. goto bad_inode;
  3325. }
  3326. /* The only unlinked inodes we let through here have
  3327. * valid i_mode and are being read by the orphan
  3328. * recovery code: that's fine, we're about to complete
  3329. * the process of deleting those. */
  3330. }
  3331. ei->i_flags = le32_to_cpu(raw_inode->i_flags);
  3332. inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
  3333. ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
  3334. if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
  3335. ei->i_file_acl |=
  3336. ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
  3337. inode->i_size = ext4_isize(raw_inode);
  3338. ei->i_disksize = inode->i_size;
  3339. #ifdef CONFIG_QUOTA
  3340. ei->i_reserved_quota = 0;
  3341. #endif
  3342. inode->i_generation = le32_to_cpu(raw_inode->i_generation);
  3343. ei->i_block_group = iloc.block_group;
  3344. ei->i_last_alloc_group = ~0;
  3345. /*
  3346. * NOTE! The in-memory inode i_data array is in little-endian order
  3347. * even on big-endian machines: we do NOT byteswap the block numbers!
  3348. */
  3349. for (block = 0; block < EXT4_N_BLOCKS; block++)
  3350. ei->i_data[block] = raw_inode->i_block[block];
  3351. INIT_LIST_HEAD(&ei->i_orphan);
  3352. /*
  3353. * Set transaction id's of transactions that have to be committed
  3354. * to finish f[data]sync. We set them to currently running transaction
  3355. * as we cannot be sure that the inode or some of its metadata isn't
  3356. * part of the transaction - the inode could have been reclaimed and
  3357. * now it is reread from disk.
  3358. */
  3359. if (journal) {
  3360. transaction_t *transaction;
  3361. tid_t tid;
  3362. read_lock(&journal->j_state_lock);
  3363. if (journal->j_running_transaction)
  3364. transaction = journal->j_running_transaction;
  3365. else
  3366. transaction = journal->j_committing_transaction;
  3367. if (transaction)
  3368. tid = transaction->t_tid;
  3369. else
  3370. tid = journal->j_commit_sequence;
  3371. read_unlock(&journal->j_state_lock);
  3372. ei->i_sync_tid = tid;
  3373. ei->i_datasync_tid = tid;
  3374. }
  3375. if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
  3376. ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
  3377. if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
  3378. EXT4_INODE_SIZE(inode->i_sb)) {
  3379. ret = -EIO;
  3380. goto bad_inode;
  3381. }
  3382. if (ei->i_extra_isize == 0) {
  3383. /* The extra space is currently unused. Use it. */
  3384. ei->i_extra_isize = sizeof(struct ext4_inode) -
  3385. EXT4_GOOD_OLD_INODE_SIZE;
  3386. } else {
  3387. __le32 *magic = (void *)raw_inode +
  3388. EXT4_GOOD_OLD_INODE_SIZE +
  3389. ei->i_extra_isize;
  3390. if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
  3391. ext4_set_inode_state(inode, EXT4_STATE_XATTR);
  3392. }
  3393. } else
  3394. ei->i_extra_isize = 0;
  3395. EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
  3396. EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
  3397. EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
  3398. EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
  3399. inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
  3400. if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
  3401. if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
  3402. inode->i_version |=
  3403. (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
  3404. }
  3405. ret = 0;
  3406. if (ei->i_file_acl &&
  3407. !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
  3408. EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
  3409. ei->i_file_acl);
  3410. ret = -EIO;
  3411. goto bad_inode;
  3412. } else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
  3413. if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
  3414. (S_ISLNK(inode->i_mode) &&
  3415. !ext4_inode_is_fast_symlink(inode)))
  3416. /* Validate extent which is part of inode */
  3417. ret = ext4_ext_check_inode(inode);
  3418. } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
  3419. (S_ISLNK(inode->i_mode) &&
  3420. !ext4_inode_is_fast_symlink(inode))) {
  3421. /* Validate block references which are part of inode */
  3422. ret = ext4_ind_check_inode(inode);
  3423. }
  3424. if (ret)
  3425. goto bad_inode;
  3426. if (S_ISREG(inode->i_mode)) {
  3427. inode->i_op = &ext4_file_inode_operations;
  3428. inode->i_fop = &ext4_file_operations;
  3429. ext4_set_aops(inode);
  3430. } else if (S_ISDIR(inode->i_mode)) {
  3431. inode->i_op = &ext4_dir_inode_operations;
  3432. inode->i_fop = &ext4_dir_operations;
  3433. } else if (S_ISLNK(inode->i_mode)) {
  3434. if (ext4_inode_is_fast_symlink(inode)) {
  3435. inode->i_op = &ext4_fast_symlink_inode_operations;
  3436. nd_terminate_link(ei->i_data, inode->i_size,
  3437. sizeof(ei->i_data) - 1);
  3438. } else {
  3439. inode->i_op = &ext4_symlink_inode_operations;
  3440. ext4_set_aops(inode);
  3441. }
  3442. } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
  3443. S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
  3444. inode->i_op = &ext4_special_inode_operations;
  3445. if (raw_inode->i_block[0])
  3446. init_special_inode(inode, inode->i_mode,
  3447. old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
  3448. else
  3449. init_special_inode(inode, inode->i_mode,
  3450. new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
  3451. } else {
  3452. ret = -EIO;
  3453. EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
  3454. goto bad_inode;
  3455. }
  3456. brelse(iloc.bh);
  3457. ext4_set_inode_flags(inode);
  3458. unlock_new_inode(inode);
  3459. return inode;
  3460. bad_inode:
  3461. brelse(iloc.bh);
  3462. iget_failed(inode);
  3463. return ERR_PTR(ret);
  3464. }
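/*
 * Typical calling pattern (a sketch of how lookup-style callers use
 * this function):
 *
 *	inode = ext4_iget(sb, ino);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);	/* in an int-returning caller */
 */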
  3465. static int ext4_inode_blocks_set(handle_t *handle,
  3466. struct ext4_inode *raw_inode,
  3467. struct ext4_inode_info *ei)
  3468. {
  3469. struct inode *inode = &(ei->vfs_inode);
  3470. u64 i_blocks = inode->i_blocks;
  3471. struct super_block *sb = inode->i_sb;
  3472. if (i_blocks <= ~0U) {
  3473. /*
3474. * i_blocks can be represented in a 32 bit variable
3475. * as a multiple of 512 bytes
  3476. */
  3477. raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
  3478. raw_inode->i_blocks_high = 0;
  3479. ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
  3480. return 0;
  3481. }
  3482. if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
  3483. return -EFBIG;
  3484. if (i_blocks <= 0xffffffffffffULL) {
  3485. /*
  3486. * i_blocks can be represented in a 48 bit variable
3487. * as a multiple of 512 bytes
  3488. */
  3489. raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
  3490. raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
  3491. ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
  3492. } else {
  3493. ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
3494. /* i_blocks is stored in units of the file system block size */
  3495. i_blocks = i_blocks >> (inode->i_blkbits - 9);
  3496. raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
  3497. raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
  3498. }
  3499. return 0;
  3500. }
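/*
 * Worked example of the thresholds above: i_blocks is kept in 512-byte
 * units, so the 32-bit form covers up to 2^32 * 512 = 2 TiB and the
 * 48-bit form up to 2^48 * 512 = 128 PiB. Beyond that, with 4096-byte
 * blocks (i_blkbits = 12), i_blocks is stored in filesystem blocks,
 * i.e. shifted right by i_blkbits - 9 = 3.
 */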
  3501. /*
  3502. * Post the struct inode info into an on-disk inode location in the
  3503. * buffer-cache. This gobbles the caller's reference to the
  3504. * buffer_head in the inode location struct.
  3505. *
  3506. * The caller must have write access to iloc->bh.
  3507. */
  3508. static int ext4_do_update_inode(handle_t *handle,
  3509. struct inode *inode,
  3510. struct ext4_iloc *iloc)
  3511. {
  3512. struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
  3513. struct ext4_inode_info *ei = EXT4_I(inode);
  3514. struct buffer_head *bh = iloc->bh;
  3515. int err = 0, rc, block;
3516. /* For fields not tracked in the in-memory inode,
  3517. * initialise them to zero for new inodes. */
  3518. if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
  3519. memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
  3520. ext4_get_inode_flags(ei);
  3521. raw_inode->i_mode = cpu_to_le16(inode->i_mode);
  3522. if (!(test_opt(inode->i_sb, NO_UID32))) {
  3523. raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
  3524. raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
  3525. /*
  3526. * Fix up interoperability with old kernels. Otherwise, old inodes get
  3527. * re-used with the upper 16 bits of the uid/gid intact
  3528. */
  3529. if (!ei->i_dtime) {
  3530. raw_inode->i_uid_high =
  3531. cpu_to_le16(high_16_bits(inode->i_uid));
  3532. raw_inode->i_gid_high =
  3533. cpu_to_le16(high_16_bits(inode->i_gid));
  3534. } else {
  3535. raw_inode->i_uid_high = 0;
  3536. raw_inode->i_gid_high = 0;
  3537. }
  3538. } else {
  3539. raw_inode->i_uid_low =
  3540. cpu_to_le16(fs_high2lowuid(inode->i_uid));
  3541. raw_inode->i_gid_low =
  3542. cpu_to_le16(fs_high2lowgid(inode->i_gid));
  3543. raw_inode->i_uid_high = 0;
  3544. raw_inode->i_gid_high = 0;
  3545. }
  3546. raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
  3547. EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
  3548. EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
  3549. EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
  3550. EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
  3551. if (ext4_inode_blocks_set(handle, raw_inode, ei))
  3552. goto out_brelse;
  3553. raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
  3554. raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
  3555. if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
  3556. cpu_to_le32(EXT4_OS_HURD))
  3557. raw_inode->i_file_acl_high =
  3558. cpu_to_le16(ei->i_file_acl >> 32);
  3559. raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
  3560. ext4_isize_set(raw_inode, ei->i_disksize);
  3561. if (ei->i_disksize > 0x7fffffffULL) {
  3562. struct super_block *sb = inode->i_sb;
  3563. if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
  3564. EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
  3565. EXT4_SB(sb)->s_es->s_rev_level ==
  3566. cpu_to_le32(EXT4_GOOD_OLD_REV)) {
  3567. /* If this is the first large file
  3568. * created, add a flag to the superblock.
  3569. */
  3570. err = ext4_journal_get_write_access(handle,
  3571. EXT4_SB(sb)->s_sbh);
  3572. if (err)
  3573. goto out_brelse;
  3574. ext4_update_dynamic_rev(sb);
  3575. EXT4_SET_RO_COMPAT_FEATURE(sb,
  3576. EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
  3577. sb->s_dirt = 1;
  3578. ext4_handle_sync(handle);
  3579. err = ext4_handle_dirty_metadata(handle, NULL,
  3580. EXT4_SB(sb)->s_sbh);
  3581. }
  3582. }
  3583. raw_inode->i_generation = cpu_to_le32(inode->i_generation);
  3584. if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
  3585. if (old_valid_dev(inode->i_rdev)) {
  3586. raw_inode->i_block[0] =
  3587. cpu_to_le32(old_encode_dev(inode->i_rdev));
  3588. raw_inode->i_block[1] = 0;
  3589. } else {
  3590. raw_inode->i_block[0] = 0;
  3591. raw_inode->i_block[1] =
  3592. cpu_to_le32(new_encode_dev(inode->i_rdev));
  3593. raw_inode->i_block[2] = 0;
  3594. }
  3595. } else
  3596. for (block = 0; block < EXT4_N_BLOCKS; block++)
  3597. raw_inode->i_block[block] = ei->i_data[block];
  3598. raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
  3599. if (ei->i_extra_isize) {
  3600. if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
  3601. raw_inode->i_version_hi =
  3602. cpu_to_le32(inode->i_version >> 32);
  3603. raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
  3604. }
  3605. BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
  3606. rc = ext4_handle_dirty_metadata(handle, NULL, bh);
  3607. if (!err)
  3608. err = rc;
  3609. ext4_clear_inode_state(inode, EXT4_STATE_NEW);
  3610. ext4_update_inode_fsync_trans(handle, inode, 0);
  3611. out_brelse:
  3612. brelse(bh);
  3613. ext4_std_error(inode->i_sb, err);
  3614. return err;
  3615. }
  3616. /*
  3617. * ext4_write_inode()
  3618. *
  3619. * We are called from a few places:
  3620. *
  3621. * - Within generic_file_write() for O_SYNC files.
  3622. * Here, there will be no transaction running. We wait for any running
3623. * transaction to commit.
  3624. *
  3625. * - Within sys_sync(), kupdate and such.
3626. * We wait on commit, if told to.
  3627. *
  3628. * - Within prune_icache() (PF_MEMALLOC == true)
  3629. * Here we simply return. We can't afford to block kswapd on the
  3630. * journal commit.
  3631. *
  3632. * In all cases it is actually safe for us to return without doing anything,
  3633. * because the inode has been copied into a raw inode buffer in
  3634. * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for
  3635. * knfsd.
  3636. *
  3637. * Note that we are absolutely dependent upon all inode dirtiers doing the
  3638. * right thing: they *must* call mark_inode_dirty() after dirtying info in
  3639. * which we are interested.
  3640. *
  3641. * It would be a bug for them to not do this. The code:
  3642. *
  3643. * mark_inode_dirty(inode)
  3644. * stuff();
  3645. * inode->i_size = expr;
  3646. *
  3647. * is in error because a kswapd-driven write_inode() could occur while
  3648. * `stuff()' is running, and the new i_size will be lost. Plus the inode
  3649. * will no longer be on the superblock's dirty inode list.
  3650. */
  3651. int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
  3652. {
  3653. int err;
  3654. if (current->flags & PF_MEMALLOC)
  3655. return 0;
  3656. if (EXT4_SB(inode->i_sb)->s_journal) {
  3657. if (ext4_journal_current_handle()) {
  3658. jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
  3659. dump_stack();
  3660. return -EIO;
  3661. }
  3662. if (wbc->sync_mode != WB_SYNC_ALL)
  3663. return 0;
  3664. err = ext4_force_commit(inode->i_sb);
  3665. } else {
  3666. struct ext4_iloc iloc;
  3667. err = __ext4_get_inode_loc(inode, &iloc, 0);
  3668. if (err)
  3669. return err;
  3670. if (wbc->sync_mode == WB_SYNC_ALL)
  3671. sync_dirty_buffer(iloc.bh);
  3672. if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
  3673. EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
  3674. "IO error syncing inode");
  3675. err = -EIO;
  3676. }
  3677. brelse(iloc.bh);
  3678. }
  3679. return err;
  3680. }
  3681. /*
  3682. * ext4_setattr()
  3683. *
  3684. * Called from notify_change.
  3685. *
  3686. * We want to trap VFS attempts to truncate the file as soon as
  3687. * possible. In particular, we want to make sure that when the VFS
  3688. * shrinks i_size, we put the inode on the orphan list and modify
  3689. * i_disksize immediately, so that during the subsequent flushing of
  3690. * dirty pages and freeing of disk blocks, we can guarantee that any
  3691. * commit will leave the blocks being flushed in an unused state on
  3692. * disk. (On recovery, the inode will get truncated and the blocks will
  3693. * be freed, so we have a strong guarantee that no future commit will
  3694. * leave these blocks visible to the user.)
  3695. *
  3696. * Another thing we have to assure is that if we are in ordered mode
  3697. * and inode is still attached to the committing transaction, we must
3698. * start writeout of all the dirty pages which are being truncated.
  3699. * This way we are sure that all the data written in the previous
  3700. * transaction are already on disk (truncate waits for pages under
  3701. * writeback).
  3702. *
  3703. * Called with inode->i_mutex down.
  3704. */
  3705. int ext4_setattr(struct dentry *dentry, struct iattr *attr)
  3706. {
  3707. struct inode *inode = dentry->d_inode;
  3708. int error, rc = 0;
  3709. int orphan = 0;
  3710. const unsigned int ia_valid = attr->ia_valid;
  3711. error = inode_change_ok(inode, attr);
  3712. if (error)
  3713. return error;
  3714. if (is_quota_modification(inode, attr))
  3715. dquot_initialize(inode);
  3716. if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
  3717. (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
  3718. handle_t *handle;
  3719. /* (user+group)*(old+new) structure, inode write (sb,
  3720. * inode block, ? - but truncate inode update has it) */
  3721. handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
  3722. EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3);
  3723. if (IS_ERR(handle)) {
  3724. error = PTR_ERR(handle);
  3725. goto err_out;
  3726. }
  3727. error = dquot_transfer(inode, attr);
  3728. if (error) {
  3729. ext4_journal_stop(handle);
  3730. return error;
  3731. }
  3732. /* Update corresponding info in inode so that everything is in
  3733. * one transaction */
  3734. if (attr->ia_valid & ATTR_UID)
  3735. inode->i_uid = attr->ia_uid;
  3736. if (attr->ia_valid & ATTR_GID)
  3737. inode->i_gid = attr->ia_gid;
  3738. error = ext4_mark_inode_dirty(handle, inode);
  3739. ext4_journal_stop(handle);
  3740. }
  3741. if (attr->ia_valid & ATTR_SIZE) {
  3742. inode_dio_wait(inode);
  3743. if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
  3744. struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
  3745. if (attr->ia_size > sbi->s_bitmap_maxbytes)
  3746. return -EFBIG;
  3747. }
  3748. }
  3749. if (S_ISREG(inode->i_mode) &&
  3750. attr->ia_valid & ATTR_SIZE &&
  3751. (attr->ia_size < inode->i_size)) {
  3752. handle_t *handle;
  3753. handle = ext4_journal_start(inode, 3);
  3754. if (IS_ERR(handle)) {
  3755. error = PTR_ERR(handle);
  3756. goto err_out;
  3757. }
  3758. if (ext4_handle_valid(handle)) {
  3759. error = ext4_orphan_add(handle, inode);
  3760. orphan = 1;
  3761. }
  3762. EXT4_I(inode)->i_disksize = attr->ia_size;
  3763. rc = ext4_mark_inode_dirty(handle, inode);
  3764. if (!error)
  3765. error = rc;
  3766. ext4_journal_stop(handle);
  3767. if (ext4_should_order_data(inode)) {
  3768. error = ext4_begin_ordered_truncate(inode,
  3769. attr->ia_size);
  3770. if (error) {
  3771. /* Do as much error cleanup as possible */
  3772. handle = ext4_journal_start(inode, 3);
  3773. if (IS_ERR(handle)) {
  3774. ext4_orphan_del(NULL, inode);
  3775. goto err_out;
  3776. }
  3777. ext4_orphan_del(handle, inode);
  3778. orphan = 0;
  3779. ext4_journal_stop(handle);
  3780. goto err_out;
  3781. }
  3782. }
  3783. }
  3784. if (attr->ia_valid & ATTR_SIZE) {
  3785. if (attr->ia_size != i_size_read(inode)) {
  3786. truncate_setsize(inode, attr->ia_size);
  3787. ext4_truncate(inode);
  3788. } else if (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
  3789. ext4_truncate(inode);
  3790. }
  3791. if (!rc) {
  3792. setattr_copy(inode, attr);
  3793. mark_inode_dirty(inode);
  3794. }
  3795. /*
  3796. * If the call to ext4_truncate failed to get a transaction handle at
  3797. * all, we need to clean up the in-core orphan list manually.
  3798. */
  3799. if (orphan && inode->i_nlink)
  3800. ext4_orphan_del(NULL, inode);
  3801. if (!rc && (ia_valid & ATTR_MODE))
  3802. rc = ext4_acl_chmod(inode);
  3803. err_out:
  3804. ext4_std_error(inode->i_sb, error);
  3805. if (!error)
  3806. error = rc;
  3807. return error;
  3808. }
  3809. int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
  3810. struct kstat *stat)
  3811. {
  3812. struct inode *inode;
  3813. unsigned long delalloc_blocks;
  3814. inode = dentry->d_inode;
  3815. generic_fillattr(inode, stat);
  3816. /*
3817. * We can't update i_blocks if the block allocation is delayed,
3818. * otherwise in the case of a system crash before the real block
3819. * allocation is done, we will have i_blocks inconsistent with
3820. * on-disk file blocks.
3821. * We always keep i_blocks updated together with real
3822. * allocation. But so as not to confuse the user, stat
3823. * will return the blocks that include the delayed allocation
3824. * blocks for this file.
  3825. */
  3826. delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
  3827. stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
  3828. return 0;
  3829. }
  3830. static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
  3831. {
  3832. if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
  3833. return ext4_ind_trans_blocks(inode, nrblocks, chunk);
  3834. return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
  3835. }
  3836. /*
3837. * Account for index blocks, block group bitmaps and block group
3838. * descriptor blocks when modifying data blocks and index blocks.
3839. * In the worst case, the index blocks spread over different block groups.
3840. *
3841. * If data blocks are discontiguous, they may spread over
3842. * different block groups too. If they are contiguous, with flexbg,
3843. * they could still cross a block group boundary.
  3844. *
  3845. * Also account for superblock, inode, quota and xattr blocks
  3846. */
  3847. static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
  3848. {
  3849. ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
  3850. int gdpblocks;
  3851. int idxblocks;
  3852. int ret = 0;
  3853. /*
3854. * How many index blocks do we need to touch to modify nrblocks?
3855. * The "chunk" flag indicates whether the nrblocks are
3856. * physically contiguous on disk.
3857. *
3858. * Direct IO and fallocate call get_block to allocate
3859. * one single extent at a time, so they can set the "chunk" flag.
  3860. */
  3861. idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
  3862. ret = idxblocks;
  3863. /*
3864. * Now let's see how many group bitmaps and group descriptor blocks
3865. * need to be accounted for.
  3866. */
  3867. groups = idxblocks;
  3868. if (chunk)
  3869. groups += 1;
  3870. else
  3871. groups += nrblocks;
  3872. gdpblocks = groups;
  3873. if (groups > ngroups)
  3874. groups = ngroups;
  3875. if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
  3876. gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
  3877. /* bitmaps and block group descriptor blocks */
  3878. ret += groups + gdpblocks;
  3879. /* Blocks for super block, inode, quota and xattr blocks */
  3880. ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
  3881. return ret;
  3882. }
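/*
 * Worked example with illustrative numbers: for a contiguous chunk
 * (chunk = 1) needing idxblocks = 3, on a filesystem with more than
 * four groups and at least four group descriptor blocks, groups =
 * 3 + 1 = 4 and gdpblocks = 4, so ret = 3 + (4 + 4) +
 * EXT4_META_TRANS_BLOCKS(sb).
 */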
  3883. /*
  3884. * Calculate the total number of credits to reserve to fit
3885. * the modification of a single page into a single transaction,
  3886. * which may include multiple chunks of block allocations.
  3887. *
  3888. * This could be called via ext4_write_begin()
  3889. *
3890. * We need to consider the worst case, when
3891. * there is one new block per extent.
  3892. */
  3893. int ext4_writepage_trans_blocks(struct inode *inode)
  3894. {
  3895. int bpp = ext4_journal_blocks_per_page(inode);
  3896. int ret;
  3897. ret = ext4_meta_trans_blocks(inode, bpp, 0);
  3898. /* Account for data blocks for journalled mode */
  3899. if (ext4_should_journal_data(inode))
  3900. ret += bpp;
  3901. return ret;
  3902. }
  3903. /*
  3904. * Calculate the journal credits for a chunk of data modification.
  3905. *
3906. * This is called from DIO, fallocate or whatever else calls
  3907. * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
  3908. *
  3909. * journal buffers for data blocks are not included here, as DIO
3910. * and fallocate do not need to journal data buffers.
  3911. */
  3912. int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
  3913. {
  3914. return ext4_meta_trans_blocks(inode, nrblocks, 1);
  3915. }
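/*
 * Typical use (an illustrative sketch; max_blocks is a placeholder for
 * the size of the chunk being mapped): reserve the credits before
 * mapping/allocating the chunk:
 *
 *	handle = ext4_journal_start(inode,
 *			ext4_chunk_trans_blocks(inode, max_blocks));
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ext4_journal_stop(handle);
 */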
  3916. /*
  3917. * The caller must have previously called ext4_reserve_inode_write().
3918. * Given this, we know that the caller already has write access to iloc->bh.
  3919. */
  3920. int ext4_mark_iloc_dirty(handle_t *handle,
  3921. struct inode *inode, struct ext4_iloc *iloc)
  3922. {
  3923. int err = 0;
  3924. if (test_opt(inode->i_sb, I_VERSION))
  3925. inode_inc_iversion(inode);
  3926. /* the do_update_inode consumes one bh->b_count */
  3927. get_bh(iloc->bh);
  3928. /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
  3929. err = ext4_do_update_inode(handle, inode, iloc);
  3930. put_bh(iloc->bh);
  3931. return err;
  3932. }
  3933. /*
3934. * On success, we end up with an outstanding reference count against
  3935. * iloc->bh. This _must_ be cleaned up later.
  3936. */
  3937. int
  3938. ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
  3939. struct ext4_iloc *iloc)
  3940. {
  3941. int err;
  3942. err = ext4_get_inode_loc(inode, iloc);
  3943. if (!err) {
  3944. BUFFER_TRACE(iloc->bh, "get_write_access");
  3945. err = ext4_journal_get_write_access(handle, iloc->bh);
  3946. if (err) {
  3947. brelse(iloc->bh);
  3948. iloc->bh = NULL;
  3949. }
  3950. }
  3951. ext4_std_error(inode->i_sb, err);
  3952. return err;
  3953. }
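/*
 * The canonical inode-update pattern built from these two helpers
 * (this is effectively what ext4_mark_inode_dirty() below does):
 *
 *	err = ext4_reserve_inode_write(handle, inode, &iloc);
 *	if (err)
 *		return err;
 *	... modify the in-core inode ...
 *	err = ext4_mark_iloc_dirty(handle, inode, &iloc);
 */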
/*
 * Expand an inode by new_extra_isize bytes.
 * Returns 0 on success or negative error number on failure.
 */
static int ext4_expand_extra_isize(struct inode *inode,
				   unsigned int new_extra_isize,
				   struct ext4_iloc iloc,
				   handle_t *handle)
{
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;

	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
		return 0;

	raw_inode = ext4_raw_inode(&iloc);
	header = IHDR(inode, raw_inode);

	/* No extended attributes present */
	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
		       new_extra_isize);
		EXT4_I(inode)->i_extra_isize = new_extra_isize;
		return 0;
	}

	/* try to expand with EAs present */
	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
					  raw_inode, handle);
}
/*
 * What we do here is to mark the in-core inode as clean with respect to inode
 * dirtiness (it may still be data-dirty).
 * This means that the in-core inode may be reaped by prune_icache
 * without having to perform any I/O.  This is a very good thing,
 * because *any* task may call prune_icache - even ones which
 * have a transaction open against a different journal.
 *
 * Is this cheating?  Not really.  Sure, we haven't written the
 * inode out, but prune_icache isn't a user-visible syncing function.
 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
 * we start and wait on commits.
 *
 * Is this efficient/effective?  Well, we're being nice to the system
 * by cleaning up our inodes proactively so they can be reaped
 * without I/O.  But we are potentially leaving up to five seconds'
 * worth of inodes floating about which prune_icache wants us to
 * write out.  One way to fix that would be to get prune_icache()
 * to do a write_super() to free up some memory; that would have
 * the desired effect.
 */
int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	static unsigned int mnt_count;
	int err, ret;

	might_sleep();
	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (ext4_handle_valid(handle) &&
	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
	    !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
		/*
		 * We need extra buffer credits since we may write into EA block
		 * with this same handle. If journal_extend fails, then it will
		 * only result in a minor loss of functionality for that inode.
		 * If this is felt to be critical, then e2fsck should be run to
		 * force a large enough s_min_extra_isize.
		 */
		if ((jbd2_journal_extend(handle,
			     EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
			ret = ext4_expand_extra_isize(inode,
						      sbi->s_want_extra_isize,
						      iloc, handle);
			if (ret) {
				ext4_set_inode_state(inode,
						     EXT4_STATE_NO_EXPAND);
				if (mnt_count !=
					le16_to_cpu(sbi->s_es->s_mnt_count)) {
					ext4_warning(inode->i_sb,
					"Unable to expand inode %lu. Delete"
					" some EAs or run e2fsck.",
						inode->i_ino);
					mnt_count =
					  le16_to_cpu(sbi->s_es->s_mnt_count);
				}
			}
		}
	}
	if (!err)
		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
	return err;
}
/*
 * ext4_dirty_inode() is called from __mark_inode_dirty()
 *
 * We're really interested in the case where a file is being extended.
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
 * Also, dquot_alloc_block() will always dirty the inode when blocks
 * are allocated to the file.
 *
 * If the inode is marked synchronous, we don't honour that here - doing
 * so would cause a commit on atime updates, which we don't bother doing.
 * We handle synchronous inodes at the highest possible level.
 */
void ext4_dirty_inode(struct inode *inode, int flags)
{
	handle_t *handle;

	handle = ext4_journal_start(inode, 2);
	if (IS_ERR(handle))
		goto out;

	ext4_mark_inode_dirty(handle, inode);

	ext4_journal_stop(handle);
out:
	return;
}
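/*
 * Illustrative sketch (not built): ext4_dirty_inode() is reached from
 * __mark_inode_dirty() through the super_operations table, roughly as
 * fs/ext4/super.c wires it up.
 */
#if 0
static const struct super_operations example_sops = {
	.dirty_inode	= ext4_dirty_inode,
	/* ... */
};
#endif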
#if 0
/*
 * Bind an inode's backing buffer_head into this transaction, to prevent
 * it from being flushed to disk early.  Unlike
 * ext4_reserve_inode_write, this leaves behind no bh reference and
 * returns no iloc structure, so the caller needs to repeat the iloc
 * lookup to mark the inode dirty later.
 */
static int ext4_pin_inode(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	int err = 0;

	if (handle) {
		err = ext4_get_inode_loc(inode, &iloc);
		if (!err) {
			BUFFER_TRACE(iloc.bh, "get_write_access");
			err = jbd2_journal_get_write_access(handle, iloc.bh);
			if (!err)
				err = ext4_handle_dirty_metadata(handle,
								 NULL,
								 iloc.bh);
			brelse(iloc.bh);
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}
#endif
int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
	journal_t *journal;
	handle_t *handle;
	int err;

	/*
	 * We have to be very careful here: changing a data block's
	 * journaling status dynamically is dangerous.  If we write a
	 * data block to the journal, change the status and then delete
	 * that block, we risk forgetting to revoke the old log record
	 * from the journal and so a subsequent replay can corrupt data.
	 * So, first we make sure that the journal is empty and that
	 * nobody is changing anything.
	 */

	journal = EXT4_JOURNAL(inode);
	if (!journal)
		return 0;
	if (is_journal_aborted(journal))
		return -EROFS;

	jbd2_journal_lock_updates(journal);
	jbd2_journal_flush(journal);

	/*
	 * OK, there are no updates running now, and all cached data is
	 * synced to disk.  We are now in a completely consistent state
	 * which doesn't have anything in the journal, and we know that
	 * no filesystem updates are running, so it is safe to modify
	 * the inode's in-core data-journaling state flag now.
	 */

	if (val)
		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	else
		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	ext4_set_aops(inode);

	jbd2_journal_unlock_updates(journal);

	/* Finally we can mark the inode as dirty. */

	handle = ext4_journal_start(inode, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_mark_inode_dirty(handle, inode);
	ext4_handle_sync(handle);
	ext4_journal_stop(handle);
	ext4_std_error(inode->i_sb, err);

	return err;
}
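/*
 * Illustrative sketch (not built): the EXT4_IOC_SETFLAGS ioctl path in
 * fs/ext4/ioctl.c toggles per-file data journalling roughly like this;
 * the helper name is made up.
 */
#if 0
static int example_toggle_journal_data(struct inode *inode,
				       unsigned int new_flags)
{
	/* Nonzero iff the user asked for data journalling on this file. */
	int jflag = new_flags & EXT4_JOURNAL_DATA_FL;

	return ext4_change_inode_journal_flag(inode, jflag);
}
#endif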
static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
{
	return !buffer_mapped(bh);
}
int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	loff_t size;
	unsigned long len;
	int ret;
	struct file *file = vma->vm_file;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	handle_t *handle;
	get_block_t *get_block;
	int retries = 0;

	/*
	 * This check is racy but catches the common case. We rely on
	 * __block_page_mkwrite() to do a reliable check.
	 */
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	/* Delalloc case is easy... */
	if (test_opt(inode->i_sb, DELALLOC) &&
	    !ext4_should_journal_data(inode) &&
	    !ext4_nonda_switch(inode->i_sb)) {
		do {
			ret = __block_page_mkwrite(vma, vmf,
						   ext4_da_get_block_prep);
		} while (ret == -ENOSPC &&
			 ext4_should_retry_alloc(inode->i_sb, &retries));
		goto out_ret;
	}

	lock_page(page);
	size = i_size_read(inode);
	/* Page got truncated from under us? */
	if (page->mapping != mapping || page_offset(page) > size) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;
	/*
	 * Return if we have all the buffers mapped. This avoids the need to do
	 * journal_start/journal_stop, which can block and take a long time.
	 */
	if (page_has_buffers(page)) {
		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
				       ext4_bh_unmapped)) {
			/* Wait so that we don't change page under IO */
			wait_on_page_writeback(page);
			ret = VM_FAULT_LOCKED;
			goto out;
		}
	}
	unlock_page(page);
	/* OK, we need to fill the hole... */
	if (ext4_should_dioread_nolock(inode))
		get_block = ext4_get_block_write;
	else
		get_block = ext4_get_block;
retry_alloc:
	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}
	ret = __block_page_mkwrite(vma, vmf, get_block);
	if (!ret && ext4_should_journal_data(inode)) {
		if (walk_page_buffers(handle, page_buffers(page), 0,
			  PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
			unlock_page(page);
			ret = VM_FAULT_SIGBUS;
			/* Don't leak the handle on the error path. */
			ext4_journal_stop(handle);
			goto out;
		}
		ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	}
	ext4_journal_stop(handle);
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry_alloc;
out_ret:
	ret = block_page_mkwrite_return(ret);
out:
	return ret;
}
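/*
 * Illustrative sketch (not built): ext4_page_mkwrite() is hooked up
 * through the file's vm_operations_struct, much as ext4_file_vm_ops in
 * fs/ext4/file.c does.
 */
#if 0
static const struct vm_operations_struct example_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= ext4_page_mkwrite,
};
#endif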