/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Goal-directed block allocation by Stephen Tweedie
 * (sct@redhat.com), 1993, 1998
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 * (jj@sunsite.ms.mff.cuni.cz)
 *
 * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"
static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	return jbd2_journal_begin_ordered_truncate(&EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}
/*
 * The ext4 forget function must perform a revoke if we are freeing data
 * which has been journaled.  Metadata (eg. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 */
int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
		struct buffer_head *bh, ext4_fsblk_t blocknr)
{
	int err;

	might_sleep();

	BUFFER_TRACE(bh, "enter");

	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
		  "data mode %lx\n",
		  bh, is_metadata, inode->i_mode,
		  test_opt(inode->i_sb, DATA_FLAGS));

	/* Never use the revoke function if we are doing full data
	 * journaling: there is no need to, and a V1 superblock won't
	 * support it.  Otherwise, only skip the revoke on un-journaled
	 * data blocks. */

	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
	    (!is_metadata && !ext4_should_journal_data(inode))) {
		if (bh) {
			BUFFER_TRACE(bh, "call jbd2_journal_forget");
			return ext4_journal_forget(handle, bh);
		}
		return 0;
	}

	/*
	 * data!=journal && (is_metadata || should_journal_data(inode))
	 */
	BUFFER_TRACE(bh, "call ext4_journal_revoke");
	err = ext4_journal_revoke(handle, blocknr, bh);
	if (err)
		ext4_abort(inode->i_sb, __func__,
			   "error %d when attempting revoke", err);
	BUFFER_TRACE(bh, "exit");
	return err;
}
/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
	ext4_lblk_t needed;

	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

	/* Give ourselves just enough room to cope with inodes in which
	 * i_blocks is corrupt: we've seen disk corruptions in the past
	 * which resulted in random data in an inode which looked enough
	 * like a regular file for ext4 to try to delete it.  Things
	 * will go a bit crazy if that happens, but at least we should
	 * try not to panic the whole kernel. */
	if (needed < 2)
		needed = 2;

	/* But we need to bound the transaction so we don't overflow the
	 * journal. */
	if (needed > EXT4_MAX_TRANS_DATA)
		needed = EXT4_MAX_TRANS_DATA;

	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}
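
/*
 * Worked example of the bound above (illustrative numbers): on a
 * 4KB-block filesystem, s_blocksize_bits is 12, so an inode with
 * i_blocks == 2048 (512-byte units, i.e. 1MB of data) gives
 * needed = 2048 >> 3 == 256.  If that exceeds EXT4_MAX_TRANS_DATA it is
 * clamped, so the handle is never asked for more than
 * EXT4_DATA_TRANS_BLOCKS(sb) + EXT4_MAX_TRANS_DATA credits.
 */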
/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

	result = ext4_journal_start(inode, blocks_for_truncate(inode));
	if (!IS_ERR(result))
		return result;

	ext4_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}
/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (handle->h_buffer_credits > EXT4_RESERVE_TRANS_BLOCKS)
		return 0;
	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
		return 0;
	return 1;
}
/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
{
	jbd_debug(2, "restarting handle %p\n", handle);
	return ext4_journal_restart(handle, blocks_for_truncate(inode));
}
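
/*
 * Sketch of the intended calling pattern for the two helpers above
 * (illustrative only; the real truncate loop lives later in this file):
 *
 *	if (try_to_extend_transaction(handle, inode)) {
 *		// mark everything consistently dirty first, then commit
 *		// the current transaction and start a fresh one
 *		ext4_mark_inode_dirty(handle, inode);
 *		ext4_journal_test_restart(handle, inode);
 *	}
 */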
/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_delete_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		handle->h_sync = 1;
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb, __func__,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (handle->h_buffer_credits < 3) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb, __func__,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	clear_inode(inode);	/* We must guarantee clearing of inode... */
}
typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}
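
/*
 * Illustrative layout: for a depth-2 path through a single indirect
 * block, ext4_get_branch() below fills
 *
 *	chain[0].p   -> slot EXT4_IND_BLOCK in the inode's i_data[]
 *	chain[0].key == block number of the indirect block
 *	chain[0].bh  == NULL (the pointer lives in the inode itself)
 *	chain[1].p   -> a __le32 slot inside chain[0]'s indirect block
 *	chain[1].key == block number of the data block
 *	chain[1].bh  == buffer_head of the indirect block
 */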
/**
 * ext4_block_to_path - parse the block number into array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set this non-zero if the referred-to block is likely to be
 *	followed (on disk) by an indirect block.
 *
 * To store the locations of file's data ext4 uses a data structure common
 * for UNIX filesystems - tree of pointers anchored in the inode, with
 * data blocks at leaves and indirect blocks in intermediate nodes.
 * This function translates the block number into path in that tree -
 * return value is the path length and @offsets[n] is the offset of
 * pointer to (n+1)th node in the nth one.  If @i_block is out of range
 * (negative or too large) a warning is printed and zero returned.
 *
 * Note: function doesn't find node addresses, so no IO is needed.  All
 * we need to know is the capacity of indirect blocks (taken from the
 * inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks.  We might use long long, but that would
 * kill us on x86.  Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */
static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext4_warning(inode->i_sb, "ext4_block_to_path", "block < 0");
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "ext4_block_to_path",
			     "block %lu > max",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
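
/*
 * Worked example (illustrative, assuming a 4KB block size, so
 * EXT4_ADDR_PER_BLOCK == 1024 and EXT4_NDIR_BLOCKS == 12): for
 * i_block == 1234 we have 1234 - 12 == 1222, which is past the 1024
 * single-indirect entries, so the path is doubly indirect:
 * 1222 - 1024 == 198, giving offsets[] = { EXT4_DIND_BLOCK, 198 >> 10,
 * 198 & 1023 } == { EXT4_DIND_BLOCK, 0, 198 } and a return value of 3.
 */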
/**
 * ext4_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * Function fills the array of triples <key, p, bh> and returns %NULL
 * if everything went OK or the pointer to the last filled triple
 * (incomplete one) otherwise.  Upon the return chain[i].key contains
 * the number of (i+1)-th block in the chain (as it is stored in memory,
 * i.e. little-endian 32-bit), chain[i].p contains the address of that
 * number (it points into struct inode for i==0 and into the bh->b_data
 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 * block for i>0 and NULL for i==0.  In other words, it holds the block
 * numbers of the chain, addresses they were taken from (and where we can
 * verify that chain did not change) and buffer_heads hosting these
 * numbers.
 *
 * Function stops when it stumbles upon zero pointer (absent block)
 * (pointer to last triple returned, *@err == 0)
 * or when it gets an IO error reading an indirect block
 * (ditto, *@err == -EIO)
 * or when it reads all @depth-1 indirect blocks successfully and finds
 * the whole chain, all the way to the data (returns %NULL, *err == 0).
 *
 * Need to be called with
 * down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = -EIO;
no_block:
	return p;
}
/**
 * ext4_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block.
 *
 * This function returns the preferred place for block allocation.
 * It is used when the heuristic for sequential allocation fails.
 * Rules are:
 *   + if there is a block to the left of our position - allocate near it.
 *   + if pointer will live in indirect block - allocate near that block.
 *   + if pointer will live in inode - allocate in the same
 *     cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.  The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 * Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * Is it going to be referred to from the inode itself?  OK, just
	 * put it into the same cylinder group then.
	 */
	bg_start = ext4_group_first_block_no(inode->i_sb, ei->i_block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}
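
/*
 * Worked example of the colouring (illustrative, assuming 4KB blocks and
 * the usual 32768 blocks per group): a task with PID 1000 gets
 * 1000 % 16 == 8, so colour = 8 * (32768 / 16) == 16384 and the goal
 * lands half-way into the inode's block group, away from allocations
 * coloured for other PIDs.
 */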
/**
 * ext4_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block: block we want
 * @partial: pointer to the last triple within a chain
 *
 * Normally this function finds the preferred place for block allocation
 * and returns it.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	struct ext4_block_alloc_info *block_i;

	block_i = EXT4_I(inode)->i_block_alloc_info;

	/*
	 * Try the heuristic for sequential allocation,
	 * failing that at least try to get decent locality.
	 */
	if (block_i && (block == block_i->last_alloc_logical_block + 1)
		&& (block_i->last_alloc_physical_block != 0)) {
		return block_i->last_alloc_physical_block + 1;
	}

	return ext4_find_near(inode, partial);
}
/**
 * ext4_blks_to_allocate: Look up the block map and count the number
 * of direct blocks that need to be allocated for the given branch.
 *
 * @branch: chain of indirect blocks
 * @k: number of blocks needed for indirect blocks
 * @blks: number of data blocks to be mapped.
 * @blocks_to_boundary: the offset in the indirect block
 *
 * return the total number of blocks to be allocated, including the
 * direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
				 int blocks_to_boundary)
{
	unsigned long count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) along this path have not
	 * been allocated yet, so clearly no blocks on that path have been
	 * allocated either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
		le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
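
/*
 * Worked example (illustrative numbers): with k == 0, blks == 8 and
 * blocks_to_boundary == 5, the loop above scans the map entries after
 * branch[0].p; if they are all still zero it stops at the boundary and
 * returns 6, so a single allocation never crosses an indirect block.
 */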
/**
 * ext4_alloc_blocks: multiple allocate blocks needed for a branch
 * @indirect_blks: the number of blocks needed to allocate for indirect
 *		   blocks
 * @new_blocks: on return it will store the new block numbers for
 *		the indirect blocks (if needed) and the first direct block,
 * @blks: on return it will store the total number of allocated
 *	  direct blocks
 */
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, ext4_fsblk_t goal,
			     int indirect_blks, int blks,
			     ext4_fsblk_t new_blocks[4], int *err)
{
	int target, i;
	unsigned long count = 0, blk_allocated = 0;
	int index = 0;
	ext4_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks (if not allocated yet), and at least
	 * the first direct block of this branch.  That's the
	 * minimum number of blocks that need to be allocated (required).
	 */
	/* first we try to allocate the indirect blocks */
	target = indirect_blks;
	while (target > 0) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext4_new_meta_blocks(handle, inode,
						     goal, &count, err);
		if (*err)
			goto failed_out;

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}
		if (count > 0) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
			printk(KERN_INFO "%s returned more blocks than "
			       "requested\n", __func__);
			WARN_ON(1);
			break;
		}
	}

	target = blks - count;
	blk_allocated = count;
	if (!target)
		goto allocated;
	/* Now allocate data blocks */
	count = target;
	/* allocating blocks for data blocks */
	current_block = ext4_new_blocks(handle, inode, iblock,
					goal, &count, err);
	if (*err && (target == blks)) {
		/*
		 * if the allocation failed and we didn't allocate
		 * any blocks before
		 */
		goto failed_out;
	}
	if (!*err) {
		if (target == blks) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
		}
		blk_allocated += count;
	}
allocated:
	/* total number of blocks allocated for direct blocks */
	ret = blk_allocated;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
	return ret;
}
/**
 * ext4_alloc_branch - allocate and set up a chain of blocks.
 * @inode: owner
 * @indirect_blks: number of allocated indirect blocks
 * @blks: number of allocated direct blocks
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 * This function allocates blocks, zeroes out all but the last one,
 * links them into chain and (if we are synchronous) writes them to disk.
 * In other words, it prepares a branch that can be spliced onto the
 * inode.  It stores the information about that chain in the branch[], in
 * the same format as ext4_get_branch() would do.  We are calling it after
 * we had read the existing part of chain and partial points to the last
 * triple of that (one with zero ->key).  Upon the exit we have the same
 * picture as after the successful ext4_get_block(), except that in one
 * place chain is disconnected - *branch->p is still zero (we did not
 * set the last link), but branch->key contains the number that should
 * be placed into *branch->p to fill that gap.
 *
 * If allocation fails we free all blocks we've allocated (and forget
 * their buffer_heads) and return the error value from the failed
 * ext4_alloc_block() (normally -ENOSPC).  Otherwise we set the chain
 * as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, int indirect_blks,
			     int *blks, ext4_fsblk_t goal,
			     ext4_lblk_t *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext4_fsblk_t new_blocks[4];
	ext4_fsblk_t current_block;

	num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		branch[n].bh = bh;
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			brelse(bh);
			goto failed;
		}

		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the new allocated
			 * data blocks numbers
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
		err = ext4_journal_dirty_metadata(handle, bh);
		if (err)
			goto failed;
	}
	*blks = num;
	return err;
failed:
	/* Allocation failed, free what we already allocated */
	for (i = 1; i <= n; i++) {
		BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
		ext4_journal_forget(handle, branch[i].bh);
	}
	for (i = 0; i < indirect_blks; i++)
		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);

	ext4_free_blocks(handle, inode, new_blocks[i], num, 0);

	return err;
}
/**
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *	ext4_alloc_branch)
 * @where: location of missing link
 * @num: number of indirect blocks we are adding
 * @blks: number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.).  In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			      ext4_lblk_t block, Indirect *where, int num, int blks)
{
	int i;
	int err = 0;
	struct ext4_block_alloc_info *block_i;
	ext4_fsblk_t current_block;

	block_i = EXT4_I(inode)->i_block_alloc_info;
	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the just-allocated
	 * direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/*
	 * update the most recently allocated logical & physical block
	 * in i_block_alloc_info, to assist finding the proper goal block
	 * for the next allocation
	 */
	if (block_i) {
		block_i->last_alloc_logical_block = block + blks - 1;
		block_i->last_alloc_physical_block =
				le32_to_cpu(where[num].key) + blks - 1;
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);

	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_journal_dirty_metadata");
		err = ext4_journal_dirty_metadata(handle, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 * Inode was dirtied above.
		 */
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
		ext4_journal_forget(handle, where[i].bh);
		ext4_free_blocks(handle, inode,
				 le32_to_cpu(where[i-1].key), 1, 0);
	}
	ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 0);

	return err;
}
/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf.  So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 *
 * Need to be called with
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
 */
int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
			   ext4_lblk_t iblock, unsigned long maxblocks,
			   struct buffer_head *bh_result,
			   int create, int extend_disksize)
{
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext4_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int count = 0;
	ext4_fsblk_t first_block = 0;
	loff_t disksize;

	J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
	J_ASSERT(handle != NULL || create == 0);
	depth = ext4_block_to_path(inode, iblock, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result);
		count++;
		/* map more blocks */
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.  Lazily initialize the block
	 * allocation info here if necessary
	 */
	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
		ext4_init_block_alloc_info(inode);

	goal = ext4_find_goal(inode, iblock, partial);

	/* the number of blocks needed to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext4_blks_to_allocate(partial, indirect_blks,
				      maxblocks, blocks_to_boundary);
	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
				&count, goal,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case. --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, inode, iblock,
					 partial, indirect_blks, count);
	/*
	 * i_disksize growing is protected by i_data_sem.  Don't forget to
	 * protect it if you're about to implement concurrent
	 * ext4_get_block() -bzzz
	 */
	if (!err && extend_disksize) {
		disksize = ((loff_t) iblock + count) << inode->i_blkbits;
		if (disksize > i_size_read(inode))
			disksize = i_size_read(inode);
		if (disksize > ei->i_disksize)
			ei->i_disksize = disksize;
	}
	if (err)
		goto cleanup;

	set_buffer_new(bh_result);
got_it:
	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (count > blocks_to_boundary)
		set_buffer_boundary(bh_result);
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
	BUFFER_TRACE(bh_result, "returned");
out:
	return err;
}
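
/*
 * Example of the return convention above (illustrative numbers): a lookup
 * of iblock 100 with maxblocks == 8 where only blocks 100-104 are mapped
 * contiguously on disk returns 5, with bh_result mapped to the first
 * physical block; a plain lookup (create == 0) that finds nothing
 * returns 0.
 */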
/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate @blocks data blocks for a non-extent-based file
 */
static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
{
	int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ind_blks, dind_blks, tind_blks;

	/* number of new indirect blocks needed */
	ind_blks = (blocks + icap - 1) / icap;

	dind_blks = (ind_blks + icap - 1) / icap;

	tind_blks = 1;

	return ind_blks + dind_blks + tind_blks;
}
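
/*
 * Worked example (illustrative, assuming 4KB blocks, so icap == 1024):
 * reserving for blocks == 1000 gives ind_blks == 1, dind_blks == 1 and
 * tind_blks == 1, i.e. 3 metadata blocks.  This is a pessimistic
 * worst-case estimate; any excess reservation is handed back in
 * ext4_da_update_reserve_space() below.
 */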
/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate the given number of blocks
 */
static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
{
	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
		return ext4_ext_calc_metadata_amount(inode, blocks);

	return ext4_indirect_calc_metadata_amount(inode, blocks);
}
static void ext4_da_update_reserve_space(struct inode *inode, int used)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int total, mdb, mdb_free;

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	/* recalculate the number of metablocks still to be reserved */
	total = EXT4_I(inode)->i_reserved_data_blocks - used;
	mdb = ext4_calc_metadata_amount(inode, total);

	/* figure out how many metablocks to release */
	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
	mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;

	/* Account for allocated meta_blocks */
	mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;

	/* update fs free blocks counter for truncate case */
	percpu_counter_add(&sbi->s_freeblocks_counter, mdb_free);

	/* update per-inode reservations */
	BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
	EXT4_I(inode)->i_reserved_data_blocks -= used;

	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
	EXT4_I(inode)->i_reserved_meta_blocks = mdb;
	EXT4_I(inode)->i_allocated_meta_blocks = 0;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
}
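
/*
 * Accounting example (illustrative numbers): suppose 16 data blocks were
 * reserved along with 6 metadata blocks, and used == 4 data blocks with
 * 1 metadata block actually allocated.  Then total == 12, mdb might drop
 * to, say, 4, and mdb_free == 6 - 4 - 1 == 1 block is handed back to the
 * superblock's free-blocks counter.
 */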
/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096
/*
 * Number of credits we need for writing DIO_MAX_BLOCKS:
 * We need sb + group descriptor + bitmap + inode -> 4
 * For B blocks with A block pointers per block we need:
 * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
 * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
 */
#define DIO_CREDITS 25
  952. /*
  953. * The ext4_get_blocks_wrap() function try to look up the requested blocks,
  954. * and returns if the blocks are already mapped.
  955. *
  956. * Otherwise it takes the write lock of the i_data_sem and allocate blocks
  957. * and store the allocated blocks in the result buffer head and mark it
  958. * mapped.
  959. *
  960. * If file type is extents based, it will call ext4_ext_get_blocks(),
  961. * Otherwise, call with ext4_get_blocks_handle() to handle indirect mapping
  962. * based files
  963. *
  964. * On success, it returns the number of blocks being mapped or allocate.
  965. * if create==0 and the blocks are pre-allocated and uninitialized block,
  966. * the result buffer head is unmapped. If the create ==1, it will make sure
  967. * the buffer head is mapped.
  968. *
  969. * It returns 0 if plain look up failed (blocks have not been allocated), in
  970. * that casem, buffer head is unmapped
  971. *
  972. * It returns the error in case of allocation failure.
  973. */
int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
			unsigned long max_blocks, struct buffer_head *bh,
			int create, int extend_disksize, int flag)
{
	int retval;

	clear_buffer_mapped(bh);

	/*
	 * Try to see if we can get the block without requesting
	 * a new file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
		retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
				bh, 0, 0);
	} else {
		retval = ext4_get_blocks_handle(handle,
				inode, block, max_blocks, bh, 0, 0);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	/* If it is only a block(s) look up */
	if (!create)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_blocks() returns with create = 0
	 * and the buffer head unmapped.
	 */
	if (retval > 0 && buffer_mapped(bh))
		return retval;

	/*
	 * New block allocation and/or writing to an uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * if the caller is from the delayed allocation writeout path,
	 * we have already reserved fs blocks for allocation;
	 * let the underlying get_block() function know to
	 * avoid double accounting
	 */
	if (flag)
		EXT4_I(inode)->i_delalloc_reserved_flag = 1;
	/*
	 * We need to check the extents flag here because migrate
	 * could have changed the inode type in between.
	 */
	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
		retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
				bh, create, extend_disksize);
	} else {
		retval = ext4_get_blocks_handle(handle, inode, block,
				max_blocks, bh, create, extend_disksize);

		if (retval > 0 && buffer_new(bh)) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags &
							~EXT4_EXT_MIGRATE;
		}
	}
	if (flag) {
		EXT4_I(inode)->i_delalloc_reserved_flag = 0;
		/*
		 * Update reserved blocks/metadata blocks
		 * after successful block allocation
		 * which were deferred till now
		 */
		if ((retval > 0) && buffer_delay(bh))
			ext4_da_update_reserve_space(inode, retval);
	}

	up_write((&EXT4_I(inode)->i_data_sem));
	return retval;
}
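
/*
 * A typical lookup-only use, mirroring ext4_da_get_block_prep() and
 * ext4_normal_get_block_write() below: pass a NULL handle and
 * create == 0 to probe whether blocks are mapped without allocating
 * anything, e.g.
 *
 *	ret = ext4_get_blocks_wrap(NULL, inode, iblock, 1, bh, 0, 0, 0);
 *
 * ret > 0 means `ret' blocks were mapped into `bh'; 0 means a hole.
 */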
static int ext4_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	handle_t *handle = ext4_journal_current_handle();
	int ret = 0, started = 0;
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;

	if (create && !handle) {
		/* Direct IO write... */
		if (max_blocks > DIO_MAX_BLOCKS)
			max_blocks = DIO_MAX_BLOCKS;
		handle = ext4_journal_start(inode, DIO_CREDITS +
			      2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb));
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}
		started = 1;
	}

	ret = ext4_get_blocks_wrap(handle, inode, iblock,
					max_blocks, bh_result, create, 0, 0);
	if (ret > 0) {
		bh_result->b_size = (ret << inode->i_blkbits);
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
out:
	return ret;
}
/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct buffer_head dummy;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	buffer_trace_init(&dummy.b_history);
	err = ext4_get_blocks_wrap(handle, inode, block, 1,
					&dummy, create, 1, 0);
	/*
	 * ext4_get_blocks_wrap() returns the number of blocks
	 * mapped, or 0 in case of a HOLE.
	 */
	if (err > 0) {
		if (err > 1)
			WARN_ON(1);
		err = 0;
	}
	*errp = err;
	if (!err && buffer_mapped(&dummy)) {
		struct buffer_head *bh;
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (!bh) {
			*errp = -EIO;
			goto err;
		}
		if (buffer_new(&dummy)) {
			J_ASSERT(create != 0);
			J_ASSERT(handle != NULL);

			/*
			 * Now that we do not always journal data, we should
			 * keep in mind whether this should always journal the
			 * new buffer as metadata.  For now, regular file
			 * writes use ext4_get_block instead, so it's not a
			 * problem.
			 */
			lock_buffer(bh);
			BUFFER_TRACE(bh, "call get_create_access");
			fatal = ext4_journal_get_create_access(handle, bh);
			if (!fatal && !buffer_uptodate(bh)) {
				memset(bh->b_data, 0, inode->i_sb->s_blocksize);
				set_buffer_uptodate(bh);
			}
			unlock_buffer(bh);
			BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
			err = ext4_journal_dirty_metadata(handle, bh);
			if (!fatal)
				fatal = err;
		} else {
			BUFFER_TRACE(bh, "not a new buffer");
		}
		if (fatal) {
			*errp = fatal;
			brelse(bh);
			bh = NULL;
		}
		return bh;
	}
err:
	return NULL;
}
struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ_META, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}
static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next)
	{
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
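
/*
 * Typical use, as in ext4_write_begin() below: apply
 * do_journal_get_write_access() to every buffer backing a write of
 * `len' bytes at page offset `from':
 *
 *	ret = walk_page_buffers(handle, page_buffers(page),
 *				from, from + len, NULL,
 *				do_journal_get_write_access);
 *
 * A non-NULL `partial' reports whether any buffer outside the
 * [from, to) range was not uptodate.
 */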
/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page(). In that case, we *know* that ext4_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
					struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	return ext4_journal_get_write_access(handle, bh);
}
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	page = __grab_cache_page(mapping, index);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		ext4_journal_stop(handle);
		page_cache_release(page);
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}
/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext4_journal_dirty_metadata(handle, bh);
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext4_ordered_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	ret = ext4_jbd2_file_inode(handle, inode);

	if (ret == 0) {
		/*
		 * generic_write_end() will run mark_inode_dirty() if i_size
		 * changes.  So let's piggyback the i_disksize mark_inode_dirty
		 * into that.
		 */
		loff_t new_i_size;

		new_i_size = pos + copied;
		if (new_i_size > EXT4_I(inode)->i_disksize)
			EXT4_I(inode)->i_disksize = new_i_size;
		ret2 = generic_write_end(file, mapping, pos, len, copied,
					 page, fsdata);
		copied = ret2;
		if (ret2 < 0)
			ret = ret2;
	}
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	return ret ? ret : copied;
}
static int ext4_writeback_write_end(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	loff_t new_i_size;

	new_i_size = pos + copied;
	if (new_i_size > EXT4_I(inode)->i_disksize)
		EXT4_I(inode)->i_disksize = new_i_size;

	ret2 = generic_write_end(file, mapping, pos, len, copied,
				 page, fsdata);
	copied = ret2;
	if (ret2 < 0)
		ret = ret2;

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	return ret ? ret : copied;
}
static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;

	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	if (copied < len) {
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from + copied, to);
	}

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
	if (!partial)
		SetPageUptodate(page);
	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);
	EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
	if (inode->i_size > EXT4_I(inode)->i_disksize) {
		EXT4_I(inode)->i_disksize = inode->i_size;
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	unlock_page(page);
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	page_cache_release(page);

	return ret ? ret : copied;
}
static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	unsigned long md_needed, mdblocks, total = 0;

	/*
	 * recalculate the amount of metadata blocks to reserve
	 * in order to allocate nrblocks;
	 * the worst case is one extent per block
	 */
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	total = EXT4_I(inode)->i_reserved_data_blocks + nrblocks;
	mdblocks = ext4_calc_metadata_amount(inode, total);
	BUG_ON(mdblocks < EXT4_I(inode)->i_reserved_meta_blocks);

	md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
	total = md_needed + nrblocks;

	if (ext4_has_free_blocks(sbi, total) < total) {
		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
		return -ENOSPC;
	}
	/* reduce the fs free blocks counter */
	percpu_counter_sub(&sbi->s_freeblocks_counter, total);

	EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
	EXT4_I(inode)->i_reserved_meta_blocks = mdblocks;

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
	return 0;	/* success */
}
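
/*
 * Illustrative walk-through with made-up numbers (the metadata
 * estimate here is hypothetical; the real value comes from
 * ext4_calc_metadata_amount()): suppose i_reserved_data_blocks = 10,
 * i_reserved_meta_blocks = 3 and nrblocks = 5.  If
 * ext4_calc_metadata_amount(inode, 15) were to return 4, then
 * md_needed = 4 - 3 = 1 and total = 1 + 5 = 6, so six blocks are
 * subtracted from s_freeblocks_counter and the per-inode counters
 * become 15 reserved data blocks and 4 reserved metadata blocks.
 */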
static void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int total, mdb, mdb_free, release;

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	/* recalculate the number of metablocks still needing to be reserved */
	total = EXT4_I(inode)->i_reserved_data_blocks - to_free;
	mdb = ext4_calc_metadata_amount(inode, total);

	/* figure out how many metablocks to release */
	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
	mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;

	release = to_free + mdb_free;

	/* update the fs free blocks counter for the truncate case */
	percpu_counter_add(&sbi->s_freeblocks_counter, release);

	/* update per-inode reservations */
	BUG_ON(to_free > EXT4_I(inode)->i_reserved_data_blocks);
	EXT4_I(inode)->i_reserved_data_blocks -= to_free;

	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
	EXT4_I(inode)->i_reserved_meta_blocks = mdb;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
}
static void ext4_da_page_release_reservation(struct page *page,
						unsigned long offset)
{
	int to_release = 0;
	struct buffer_head *head, *bh;
	unsigned int curr_off = 0;

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;

		if ((offset <= curr_off) && (buffer_delay(bh))) {
			to_release++;
			clear_buffer_delay(bh);
		}
		curr_off = next_off;
	} while ((bh = bh->b_this_page) != head);
	ext4_da_release_space(page->mapping->host, to_release);
}
/*
 * Delayed allocation stuff
 */

struct mpage_da_data {
	struct inode *inode;
	struct buffer_head lbh;			/* extent of blocks */
	unsigned long first_page, next_page;	/* extent of pages */
	get_block_t *get_block;
	struct writeback_control *wbc;
};
/*
 * mpage_da_submit_io - walks through the extent of pages and tries to
 * write them with __mpage_writepage()
 *
 * @mpd->inode: inode
 * @mpd->first_page: first page of the extent
 * @mpd->next_page: page after the last page of the extent
 * @mpd->get_block: the filesystem's block mapper function
 *
 * By the time mpage_da_submit_io() is called we expect all blocks
 * to be allocated.  This may be wrong if allocation failed.
 *
 * As pages are already locked by write_cache_pages(), we can't use it
 */
static int mpage_da_submit_io(struct mpage_da_data *mpd)
{
	struct address_space *mapping = mpd->inode->i_mapping;
	struct mpage_data mpd_pp = {
		.bio = NULL,
		.last_block_in_bio = 0,
		.get_block = mpd->get_block,
		.use_writepage = 1,
	};
	int ret = 0, err, nr_pages, i;
	unsigned long index, end;
	struct pagevec pvec;

	BUG_ON(mpd->next_page <= mpd->first_page);

	pagevec_init(&pvec, 0);
	index = mpd->first_page;
	end = mpd->next_page - 1;

	while (index <= end) {
		/* XXX: optimize tail */
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			index = page->index;
			if (index > end)
				break;
			index++;

			err = __mpage_writepage(page, mpd->wbc, &mpd_pp);

			/*
			 * In the error case, we have to continue because
			 * the remaining pages are still locked
			 * XXX: unlock and re-dirty them?
			 */
			if (ret == 0)
				ret = err;
		}
		pagevec_release(&pvec);
	}
	if (mpd_pp.bio)
		mpage_bio_submit(WRITE, mpd_pp.bio);

	return ret;
}
/*
 * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers
 *
 * @mpd->inode - inode to walk through
 * @exbh->b_blocknr - first block on a disk
 * @exbh->b_size - amount of space in bytes
 * @logical - first logical block to start assignment with
 *
 * the function goes through all passed space and puts actual disk
 * block numbers into buffer heads, dropping BH_Delay
 */
static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical,
				 struct buffer_head *exbh)
{
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;
	int blocks = exbh->b_size >> inode->i_blkbits;
	sector_t pblock = exbh->b_blocknr, cur_logical;
	struct buffer_head *head, *bh;
	unsigned long index, end;
	struct pagevec pvec;
	int nr_pages, i;

	index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
	end = (logical + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
	cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	pagevec_init(&pvec, 0);

	while (index <= end) {
		/* XXX: optimize tail */
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			index = page->index;
			if (index > end)
				break;
			index++;

			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));
			BUG_ON(!page_has_buffers(page));

			bh = page_buffers(page);
			head = bh;

			/* skip blocks out of the range */
			do {
				if (cur_logical >= logical)
					break;
				cur_logical++;
			} while ((bh = bh->b_this_page) != head);

			do {
				if (cur_logical >= logical + blocks)
					break;

				if (buffer_delay(bh)) {
					bh->b_blocknr = pblock;
					clear_buffer_delay(bh);
				} else if (buffer_mapped(bh))
					BUG_ON(bh->b_blocknr != pblock);

				cur_logical++;
				pblock++;
			} while ((bh = bh->b_this_page) != head);
		}
		pagevec_release(&pvec);
	}
}
/*
 * __unmap_underlying_blocks - just a helper function to unmap
 * a set of blocks described by @bh
 */
static inline void __unmap_underlying_blocks(struct inode *inode,
					     struct buffer_head *bh)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	int blocks, i;

	blocks = bh->b_size >> inode->i_blkbits;
	for (i = 0; i < blocks; i++)
		unmap_underlying_metadata(bdev, bh->b_blocknr + i);
}
/*
 * mpage_da_map_blocks - go through the given space
 *
 * @mpd->lbh - bh describing space
 * @mpd->get_block - the filesystem's block mapper function
 *
 * The function skips space we know is already mapped to disk blocks.
 *
 * The function ignores errors ->get_block() returns, thus real
 * error handling is postponed to __mpage_writepage()
 */
static void mpage_da_map_blocks(struct mpage_da_data *mpd)
{
	struct buffer_head *lbh = &mpd->lbh;
	int err = 0, remain = lbh->b_size;
	sector_t next = lbh->b_blocknr;
	struct buffer_head new;

	/*
	 * We consider only non-mapped and non-allocated blocks
	 */
	if (buffer_mapped(lbh) && !buffer_delay(lbh))
		return;

	while (remain) {
		new.b_state = lbh->b_state;
		new.b_blocknr = 0;
		new.b_size = remain;
		err = mpd->get_block(mpd->inode, next, &new, 1);
		if (err) {
			/*
			 * Rather than implement our own error handling
			 * here, we just leave the remaining blocks
			 * unallocated and try again with ->writepage()
			 */
			break;
		}
		BUG_ON(new.b_size == 0);

		if (buffer_new(&new))
			__unmap_underlying_blocks(mpd->inode, &new);

		/*
		 * If the blocks are marked delayed, we need to
		 * put the actual blocknr and drop the delayed bit
		 */
		if (buffer_delay(lbh))
			mpage_put_bnr_to_bhs(mpd, next, &new);

		/* go for the remaining blocks */
		next += new.b_size >> mpd->inode->i_blkbits;
		remain -= new.b_size;
	}
}
#define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | (1 << BH_Delay))

/*
 * mpage_add_bh_to_extent - try to add one more block to the extent of blocks
 *
 * @mpd->lbh - extent of blocks
 * @logical - logical number of the block in the file
 * @bh - bh of the block (used to access block's state)
 *
 * the function is used to collect contiguous blocks in the same state
 */
static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
				   sector_t logical, struct buffer_head *bh)
{
	struct buffer_head *lbh = &mpd->lbh;
	sector_t next;

	next = lbh->b_blocknr + (lbh->b_size >> mpd->inode->i_blkbits);

	/*
	 * First block in the extent
	 */
	if (lbh->b_size == 0) {
		lbh->b_blocknr = logical;
		lbh->b_size = bh->b_size;
		lbh->b_state = bh->b_state & BH_FLAGS;
		return;
	}

	/*
	 * Can we merge the block into our big extent?
	 */
	if (logical == next && (bh->b_state & BH_FLAGS) == lbh->b_state) {
		lbh->b_size += bh->b_size;
		return;
	}

	/*
	 * We couldn't merge the block into our extent, so we
	 * need to flush the current extent and start a new one
	 */
	mpage_da_map_blocks(mpd);

	/*
	 * Now start a new extent
	 */
	lbh->b_size = bh->b_size;
	lbh->b_state = bh->b_state & BH_FLAGS;
	lbh->b_blocknr = logical;
}
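
/*
 * Example of the merge rule above: if the current extent covers
 * logical blocks 100..103 (lbh->b_blocknr == 100, lbh->b_size equal
 * to four blocks) and the incoming bh is logical block 104 with the
 * same BH_FLAGS state, the extent simply grows by bh->b_size.  Any
 * mismatch in position or state flushes the extent through
 * mpage_da_map_blocks() and starts a new one at `logical'.
 */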
/*
 * __mpage_da_writepage - finds the extent of pages and blocks
 *
 * @page: page to consider
 * @wbc: not used, we just follow rules
 * @data: context
 *
 * The function finds extents of pages and scans them for all blocks.
 */
static int __mpage_da_writepage(struct page *page,
				struct writeback_control *wbc, void *data)
{
	struct mpage_da_data *mpd = data;
	struct inode *inode = mpd->inode;
	struct buffer_head *bh, *head, fake;
	sector_t logical;

	/*
	 * Can we merge this page into the current extent?
	 */
	if (mpd->next_page != page->index) {
		/*
		 * Nope, we can't. So, we map non-allocated blocks
		 * and start IO on them using __mpage_writepage()
		 */
		if (mpd->next_page != mpd->first_page) {
			mpage_da_map_blocks(mpd);
			mpage_da_submit_io(mpd);
		}

		/*
		 * Start the next extent of pages ...
		 */
		mpd->first_page = page->index;

		/*
		 * ... and blocks
		 */
		mpd->lbh.b_size = 0;
		mpd->lbh.b_state = 0;
		mpd->lbh.b_blocknr = 0;
	}

	mpd->next_page = page->index + 1;
	logical = (sector_t) page->index <<
		  (PAGE_CACHE_SHIFT - inode->i_blkbits);

	if (!page_has_buffers(page)) {
		/*
		 * There are no attached buffer heads yet (mmap?);
		 * we treat the page as full of dirty blocks
		 */
		bh = &fake;
		bh->b_size = PAGE_CACHE_SIZE;
		bh->b_state = 0;
		set_buffer_dirty(bh);
		set_buffer_uptodate(bh);
		mpage_add_bh_to_extent(mpd, logical, bh);
	} else {
		/*
		 * Page with regular buffer heads, just add all dirty ones
		 */
		head = page_buffers(page);
		bh = head;
		do {
			BUG_ON(buffer_locked(bh));
			if (buffer_dirty(bh))
				mpage_add_bh_to_extent(mpd, logical, bh);
			logical++;
		} while ((bh = bh->b_this_page) != head);
	}

	return 0;
}
/*
 * mpage_da_writepages - walks the list of dirty pages of the given
 * address space, allocates non-allocated blocks, maps newly-allocated
 * blocks to existing bhs and issues IO on them
 *
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 *
 * In order to avoid duplication of logic that deals with partial pages,
 * multiple bios per page, etc, we find non-allocated blocks, allocate
 * them with minimal calls to ->get_block() and re-use __mpage_writepage()
 *
 * It's important that we call __mpage_writepage() only once for each
 * involved page, otherwise we'd have to implement more complicated logic
 * to deal with pages w/o PG_lock or w/ PG_writeback and so on.
 *
 * See comments to mpage_writepages()
 */
static int mpage_da_writepages(struct address_space *mapping,
			       struct writeback_control *wbc,
			       get_block_t get_block)
{
	struct mpage_da_data mpd;
	int ret;

	if (!get_block)
		return generic_writepages(mapping, wbc);

	mpd.wbc = wbc;
	mpd.inode = mapping->host;
	mpd.lbh.b_size = 0;
	mpd.lbh.b_state = 0;
	mpd.lbh.b_blocknr = 0;
	mpd.first_page = 0;
	mpd.next_page = 0;
	mpd.get_block = get_block;

	ret = write_cache_pages(mapping, wbc, __mpage_da_writepage, &mpd);

	/*
	 * Handle the last extent of pages
	 */
	if (mpd.next_page != mpd.first_page) {
		mpage_da_map_blocks(&mpd);
		mpage_da_submit_io(&mpd);
	}

	return ret;
}
/*
 * this is a special callback for ->write_begin() only;
 * its intention is to return a mapped block or reserve space
 */
static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
				  struct buffer_head *bh_result, int create)
{
	int ret = 0;

	BUG_ON(create == 0);
	BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);

	/*
	 * first, we need to know whether the block is allocated already;
	 * preallocated blocks are unmapped but should be treated
	 * the same as allocated blocks.
	 */
	ret = ext4_get_blocks_wrap(NULL, inode, iblock, 1, bh_result, 0, 0, 0);
	if ((ret == 0) && !buffer_delay(bh_result)) {
		/* the block isn't (pre)allocated yet, let's reserve space */
		/*
		 * XXX: __block_prepare_write() unmaps the passed block,
		 * is it OK?
		 */
		ret = ext4_da_reserve_space(inode, 1);
		if (ret)
			/* not enough space to reserve */
			return ret;

		map_bh(bh_result, inode->i_sb, 0);
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
	} else if (ret > 0) {
		bh_result->b_size = (ret << inode->i_blkbits);
		ret = 0;
	}

	return ret;
}
#define EXT4_DELALLOC_RSVED	1
static int ext4_da_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	int ret;
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
	loff_t disksize = EXT4_I(inode)->i_disksize;
	handle_t *handle = NULL;

	handle = ext4_journal_current_handle();
	if (!handle) {
		ret = ext4_get_blocks_wrap(handle, inode, iblock, max_blocks,
					   bh_result, 0, 0, 0);
		BUG_ON(!ret);
	} else {
		ret = ext4_get_blocks_wrap(handle, inode, iblock, max_blocks,
					   bh_result, create, 0,
					   EXT4_DELALLOC_RSVED);
	}

	if (ret > 0) {
		bh_result->b_size = (ret << inode->i_blkbits);

		/*
		 * Update the on-disk size along with block allocation;
		 * we don't use 'extend_disksize' as the size may change
		 * within an already allocated block -bzzz
		 */
		disksize = ((loff_t) iblock + ret) << inode->i_blkbits;
		if (disksize > i_size_read(inode))
			disksize = i_size_read(inode);
		if (disksize > EXT4_I(inode)->i_disksize) {
			/*
			 * XXX: replace with a spinlock if seen contended -bzzz
			 */
			down_write(&EXT4_I(inode)->i_data_sem);
			if (disksize > EXT4_I(inode)->i_disksize)
				EXT4_I(inode)->i_disksize = disksize;
			up_write(&EXT4_I(inode)->i_data_sem);

			if (EXT4_I(inode)->i_disksize == disksize) {
				ret = ext4_mark_inode_dirty(handle, inode);
				return ret;
			}
		}
		ret = 0;
	}
	return ret;
}
static int ext4_bh_unmapped_or_delay(handle_t *handle, struct buffer_head *bh)
{
	/*
	 * An unmapped buffer is possible for holes.
	 * A delayed buffer is possible with delayed allocation.
	 */
	return ((!buffer_mapped(bh) || buffer_delay(bh)) && buffer_dirty(bh));
}

static int ext4_normal_get_block_write(struct inode *inode, sector_t iblock,
				       struct buffer_head *bh_result,
				       int create)
{
	int ret = 0;
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;

	/*
	 * we don't want to do block allocation in writepage,
	 * so call ext4_get_blocks_wrap with create = 0
	 */
	ret = ext4_get_blocks_wrap(NULL, inode, iblock, max_blocks,
				   bh_result, 0, 0, 0);
	if (ret > 0) {
		bh_result->b_size = (ret << inode->i_blkbits);
		ret = 0;
	}
	return ret;
}
/*
 * gets called via ext4_da_writepages after taking the page lock (we have a
 * journal handle), via journal_submit_inode_data_buffers (no journal handle),
 * via shrink_page_list via pdflush (no journal handle),
 * or via grab_page_cache when doing write_begin (we have a journal handle)
 */
static int ext4_da_writepage(struct page *page,
			     struct writeback_control *wbc)
{
	int ret = 0;
	loff_t size;
	unsigned long len;
	struct buffer_head *page_bufs;
	struct inode *inode = page->mapping->host;

	size = i_size_read(inode);
	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;

	if (page_has_buffers(page)) {
		page_bufs = page_buffers(page);
		if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
				      ext4_bh_unmapped_or_delay)) {
			/*
			 * We don't want to do block allocation,
			 * so redirty the page and return.
			 * We may reach here when we do a journal commit
			 * via journal_submit_inode_data_buffers;
			 * if we don't have a mapped block we just ignore
			 * it.  We can also reach here via shrink_page_list.
			 */
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return 0;
		}
	} else {
		/*
		 * The test for page_has_buffers() is subtle:
		 * We know the page is dirty but it lost buffers. That means
		 * that at some moment in time after write_begin()/write_end()
		 * has been called all buffers have been clean and thus they
		 * must have been written at least once. So they are all
		 * mapped and we can happily proceed with mapping them
		 * and writing the page.
		 *
		 * Try to initialize the buffer_heads and check whether
		 * all are mapped and non delay. We don't want to
		 * do block allocation here.
		 */
		ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
					  ext4_normal_get_block_write);
		if (!ret) {
			page_bufs = page_buffers(page);
			/* check whether all are mapped and non delay */
			if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
					      ext4_bh_unmapped_or_delay)) {
				redirty_page_for_writepage(wbc, page);
				unlock_page(page);
				return 0;
			}
		} else {
			/*
			 * We can't do block allocation here,
			 * so just redirty the page, unlock it
			 * and return
			 */
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return 0;
		}
	}

	if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
		ret = nobh_writepage(page, ext4_normal_get_block_write, wbc);
	else
		ret = block_write_full_page(page,
					    ext4_normal_get_block_write,
					    wbc);

	return ret;
}
/*
 * For now just follow the DIO way to estimate the max credits
 * needed to write out EXT4_MAX_WRITEBACK_PAGES.
 * todo: need to calculate the max credits needed for
 * extent-based files; currently the DIO credits are based on the
 * indirect-blocks mapping way.
 *
 * Probably should have a generic way to calculate credits
 * for DIO, writepages, and truncate
 */
#define EXT4_MAX_WRITEBACK_PAGES	DIO_MAX_BLOCKS
#define EXT4_MAX_WRITEBACK_CREDITS	DIO_CREDITS
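
/*
 * With the values above, each loop iteration of ext4_da_writepages()
 * below reserves DIO_CREDITS (25) journal credits and caps
 * wbc->nr_to_write at DIO_MAX_BLOCKS (4096) pages per transaction.
 */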
static int ext4_da_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	handle_t *handle = NULL;
	int needed_blocks;
	int ret = 0;
	long to_write;
	loff_t range_start = 0;

	/*
	 * No pages to write? This is mainly a kludge to avoid starting
	 * a transaction for special inodes like the journal inode on last
	 * iput() because that could violate lock ordering on umount
	 */
	if (!mapping->nrpages)
		return 0;

	/*
	 * Estimate the worst-case credits needed to write out
	 * EXT4_MAX_WRITEBACK_PAGES pages
	 */
	needed_blocks = EXT4_MAX_WRITEBACK_CREDITS;

	to_write = wbc->nr_to_write;
	if (!wbc->range_cyclic) {
		/*
		 * If range_cyclic is not set force range_cont
		 * and save the old writeback_index
		 */
		wbc->range_cont = 1;
		range_start = wbc->range_start;
	}

	while (!ret && to_write) {
		/* start a new transaction */
		handle = ext4_journal_start(inode, needed_blocks);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out_writepages;
		}
		if (ext4_should_order_data(inode)) {
			/*
			 * With ordered mode we need to add
			 * the inode to the journal handle
			 * when we do block allocation.
			 */
			ret = ext4_jbd2_file_inode(handle, inode);
			if (ret) {
				ext4_journal_stop(handle);
				goto out_writepages;
			}
		}

		/*
		 * set the max dirty pages that can be written at a time
		 * to fit into the reserved transaction credits
		 */
		if (wbc->nr_to_write > EXT4_MAX_WRITEBACK_PAGES)
			wbc->nr_to_write = EXT4_MAX_WRITEBACK_PAGES;

		to_write -= wbc->nr_to_write;
		ret = mpage_da_writepages(mapping, wbc,
					  ext4_da_get_block_write);
		ext4_journal_stop(handle);
		if (wbc->nr_to_write) {
			/*
			 * There is no more writeout needed,
			 * or we requested a nonblocking writeout
			 * and found the device congested
			 */
			to_write += wbc->nr_to_write;
			break;
		}
		wbc->nr_to_write = to_write;
	}

out_writepages:
	wbc->nr_to_write = to_write;
	if (range_start)
		wbc->range_start = range_start;
	return ret;
}
static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
			       loff_t pos, unsigned len, unsigned flags,
			       struct page **pagep, void **fsdata)
{
	int ret, retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;
	struct inode *inode = mapping->host;
	handle_t *handle;

	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	/*
	 * With delayed allocation, we don't log the i_disksize update
	 * if there is delayed block allocation. But we still need
	 * to journal the i_disksize update if the write extends the
	 * end of file into an already mapped buffer.
	 */
	handle = ext4_journal_start(inode, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	page = __grab_cache_page(mapping, index);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				ext4_da_get_block_prep);
	if (ret < 0) {
		unlock_page(page);
		ext4_journal_stop(handle);
		page_cache_release(page);
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}
/*
 * Check whether we should update i_disksize
 * when the write extends the end of file but does not require
 * block allocation
 */
static int ext4_da_should_update_i_disksize(struct page *page,
					    unsigned long offset)
{
	struct buffer_head *bh;
	struct inode *inode = page->mapping->host;
	unsigned int idx;
	int i;

	bh = page_buffers(page);
	idx = offset >> inode->i_blkbits;

	for (i = 0; i < idx; i++)
		bh = bh->b_this_page;

	if (!buffer_mapped(bh) || (buffer_delay(bh)))
		return 0;
	return 1;
}
static int ext4_da_write_end(struct file *file,
			     struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned copied,
			     struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	handle_t *handle = ext4_journal_current_handle();
	loff_t new_i_size;
	unsigned long start, end;

	start = pos & (PAGE_CACHE_SIZE - 1);
	end = start + copied - 1;

	/*
	 * generic_write_end() will run mark_inode_dirty() if i_size
	 * changes.  So let's piggyback the i_disksize mark_inode_dirty
	 * into that.
	 */
	new_i_size = pos + copied;
	if (new_i_size > EXT4_I(inode)->i_disksize) {
		if (ext4_da_should_update_i_disksize(page, end)) {
			down_write(&EXT4_I(inode)->i_data_sem);
			if (new_i_size > EXT4_I(inode)->i_disksize) {
				/*
				 * Updating i_disksize when extending file
				 * without needing block allocation
				 */
				if (ext4_should_order_data(inode))
					ret = ext4_jbd2_file_inode(handle,
								   inode);

				EXT4_I(inode)->i_disksize = new_i_size;
			}
			up_write(&EXT4_I(inode)->i_data_sem);
		}
	}
	ret2 = generic_write_end(file, mapping, pos, len, copied,
				 page, fsdata);
	copied = ret2;
	if (ret2 < 0)
		ret = ret2;
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	return ret ? ret : copied;
}
static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
{
	/*
	 * Drop reserved blocks
	 */
	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		goto out;

	ext4_da_page_release_reservation(page, offset);

out:
	ext4_invalidatepage(page, offset);

	return;
}
/*
 * bmap() is special.  It gets used by applications such as lilo and by
 * the swapper to find the on-disk block of a specific piece of data.
 *
 * Naturally, this is dangerous if the block concerned is still in the
 * journal.  If somebody makes a swapfile on an ext4 data-journaling
 * filesystem and enables swap, then they may get a nasty shock when the
 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zeros written out previously to the journal and
 * awaiting writeback in the kernel's buffer cache.
 *
 * So, if we see any bmap calls here on a modified, data-journaled file,
 * take extra steps to flush any blocks which might be in the cache.
 */
static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	journal_t *journal;
	int err;

	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
	    test_opt(inode->i_sb, DELALLOC)) {
		/*
		 * With delalloc we want to sync the file
		 * so that we can make sure we allocate
		 * blocks for the file
		 */
		filemap_write_and_wait(mapping);
	}

	if (EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
		/*
		 * This is a REALLY heavyweight approach, but the use of
		 * bmap on dirty files is expected to be extremely rare:
		 * only if we run lilo or swapon on a freshly made file
		 * do we expect this to happen.
		 *
		 * (bmap requires CAP_SYS_RAWIO so this does not
		 * represent an unprivileged user DOS attack --- we'd be
		 * in trouble if mortal users could trigger this path at
		 * will.)
		 *
		 * NB. EXT4_STATE_JDATA is not set on files other than
		 * regular files.  If somebody wants to bmap a directory
		 * or symlink and gets confused because the buffer
		 * hasn't yet been flushed to disk, they deserve
		 * everything they get.
		 */
		EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
		journal = EXT4_JOURNAL(inode);
		jbd2_journal_lock_updates(journal);
		err = jbd2_journal_flush(journal);
		jbd2_journal_unlock_updates(journal);

		if (err)
			return 0;
	}

	return generic_block_bmap(mapping, block, ext4_get_block);
}
static int bget_one(handle_t *handle, struct buffer_head *bh)
{
	get_bh(bh);
	return 0;
}

static int bput_one(handle_t *handle, struct buffer_head *bh)
{
	put_bh(bh);
	return 0;
}
/*
 * Note that we don't need to start a transaction unless we're journaling data
 * because we should have holes filled from ext4_page_mkwrite(). We even don't
 * need to file the inode to the transaction's list in ordered mode because if
 * we are writing back data added by write(), the inode is already there and if
 * we are writing back data modified via mmap(), no one guarantees in which
 * transaction the data will hit the disk. In case we are journaling data, we
 * cannot start a transaction directly because transaction start ranks above
 * the page lock so we have to do some magic.
 *
 * In all journaling modes block_write_full_page() will start the I/O.
 *
 * Problem:
 *
 * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *		ext4_writepage()
 *
 * Similar for:
 *
 * ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
 *
 * Same applies to ext4_get_block().  We will deadlock on various things like
 * lock_journal and i_data_sem
 *
 * Setting PF_MEMALLOC here doesn't work - too many internal memory
 * allocations fail.
 *
 * 16May01: If we're reentered then journal_current_handle() will be
 *	    non-zero. We simply *return*.
 *
 * 1 July 2001: @@@ FIXME:
 *   In journalled data mode, a data buffer may be metadata against the
 *   current transaction.  But the same file is part of a shared mapping
 *   and someone does a writepage() on it.
 *
 *   We will move the buffer onto the async_data list, but *after* it has
 *   been dirtied. So there's a small window where we have dirty data on
 *   BJ_Metadata.
 *
 *   Note that this only applies to the last partial page in the file.  The
 *   bit which block_write_full_page() uses prepare/commit for.  (That's
 *   broken code anyway: it's wrong for msync()).
 *
 *   It's a rare case: it affects the final partial page, for journalled data,
 *   where the file is subject to both write() and writepage() in the same
 *   transaction.  To fix it we'll need a custom block_write_full_page().
 *   We'll probably need that anyway for journalling writepage() output.
 *
 * We don't honour synchronous mounts for writepage().  That would be
 * disastrous.  Any write() or metadata operation will sync the fs for
 * us.
 *
 */
static int __ext4_normal_writepage(struct page *page,
				   struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;

	if (test_opt(inode->i_sb, NOBH))
		return nobh_writepage(page,
					ext4_normal_get_block_write, wbc);
	else
		return block_write_full_page(page,
						ext4_normal_get_block_write,
						wbc);
}
static int ext4_normal_writepage(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	loff_t size = i_size_read(inode);
	loff_t len;

	J_ASSERT(PageLocked(page));
	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;

	if (page_has_buffers(page)) {
		/* if the page has buffers they should all be mapped
		 * and allocated. If there are no buffers attached
		 * to the page we know the page is dirty but it lost
		 * buffers. That means that at some moment in time
		 * after write_begin() / write_end() has been called
		 * all buffers have been clean and thus they must have been
		 * written at least once. So they are all mapped and we can
		 * happily proceed with mapping them and writing the page.
		 */
		BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
					 ext4_bh_unmapped_or_delay));
	}

	if (!ext4_journal_current_handle())
		return __ext4_normal_writepage(page, wbc);

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
static int __ext4_journalled_writepage(struct page *page,
				       struct writeback_control *wbc)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct buffer_head *page_bufs;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
				  ext4_normal_get_block_write);
	if (ret != 0)
		goto out_unlock;

	page_bufs = page_buffers(page);
	walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE, NULL,
			  bget_one);
	/* As soon as we unlock the page, it can go away, but we have
	 * references to buffers so we are safe */
	unlock_page(page);

	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	ret = walk_page_buffers(handle, page_bufs, 0,
			PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);

	err = walk_page_buffers(handle, page_bufs, 0,
			PAGE_CACHE_SIZE, NULL, write_end_fn);
	if (ret == 0)
		ret = err;
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;

	walk_page_buffers(handle, page_bufs, 0,
			PAGE_CACHE_SIZE, NULL, bput_one);
	EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
	goto out;

out_unlock:
	unlock_page(page);
out:
	return ret;
}
static int ext4_journalled_writepage(struct page *page,
				     struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	loff_t size = i_size_read(inode);
	loff_t len;

	J_ASSERT(PageLocked(page));
	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;

	if (page_has_buffers(page)) {
		/* if the page has buffers they should all be mapped
		 * and allocated. If there are no buffers attached
		 * to the page we know the page is dirty but it lost
		 * buffers. That means that at some moment in time
		 * after write_begin() / write_end() has been called
		 * all buffers have been clean and thus they must have been
		 * written at least once. So they are all mapped and we can
		 * happily proceed with mapping them and writing the page.
		 */
		BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
					 ext4_bh_unmapped_or_delay));
	}

	if (ext4_journal_current_handle())
		goto no_write;

	if (PageChecked(page)) {
		/*
		 * It's mmapped pagecache.  Add buffers and journal it.  There
		 * doesn't seem much point in redirtying the page here.
		 */
		ClearPageChecked(page);
		return __ext4_journalled_writepage(page, wbc);
	} else {
		/*
		 * It may be a page full of checkpoint-mode buffers.  We don't
		 * really know unless we go poke around in the buffer_heads.
		 * But block_write_full_page will do the right thing.
		 */
		return block_write_full_page(page,
					     ext4_normal_get_block_write,
					     wbc);
	}
no_write:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
static int ext4_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, ext4_get_block);
}

static int
ext4_readpages(struct file *file, struct address_space *mapping,
	       struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset)
{
	journal_t *journal = EXT4_JOURNAL(page->mapping->host);

	/*
	 * If it's a full truncate we just forget about the pending dirtying
	 */
	if (offset == 0)
		ClearPageChecked(page);

	jbd2_journal_invalidatepage(journal, page, offset);
}

static int ext4_releasepage(struct page *page, gfp_t wait)
{
	journal_t *journal = EXT4_JOURNAL(page->mapping->host);

	WARN_ON(PageChecked(page));
	if (!page_has_buffers(page))
		return 0;
	return jbd2_journal_try_to_free_buffers(journal, page, wait);
}
/*
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list.  So recovery will truncate it back to the original size
 * if the machine crashes during the write.
 *
 * If the O_DIRECT write is instantiating holes inside i_size and the machine
 * crashes then stale disk data _may_ be exposed inside the file. But current
 * VFS code falls back into the buffered path in that case so we are safe.
 */
static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ext4_inode_info *ei = EXT4_I(inode);
	handle_t *handle;
	ssize_t ret;
	int orphan = 0;
	size_t count = iov_length(iov, nr_segs);

	if (rw == WRITE) {
		loff_t final_size = offset + count;

		if (final_size > inode->i_size) {
			/* Credits for sb + inode write */
			handle = ext4_journal_start(inode, 2);
			if (IS_ERR(handle)) {
				ret = PTR_ERR(handle);
				goto out;
			}
			ret = ext4_orphan_add(handle, inode);
			if (ret) {
				ext4_journal_stop(handle);
				goto out;
			}
			orphan = 1;
			ei->i_disksize = inode->i_size;
			ext4_journal_stop(handle);
		}
	}

	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				 offset, nr_segs,
				 ext4_get_block, NULL);

	if (orphan) {
		int err;

		/* Credits for sb + inode write */
		handle = ext4_journal_start(inode, 2);
		if (IS_ERR(handle)) {
			/* This is really bad luck. We've written the data
			 * but cannot extend i_size. Bail out and pretend
			 * the write failed... */
			ret = PTR_ERR(handle);
			goto out;
		}
		if (inode->i_nlink)
			ext4_orphan_del(handle, inode);
		if (ret > 0) {
			loff_t end = offset + ret;
			if (end > inode->i_size) {
				ei->i_disksize = end;
				i_size_write(inode, end);
				/*
				 * We're going to return a positive `ret'
				 * here due to non-zero-length I/O, so there's
				 * no way of reporting error returns from
				 * ext4_mark_inode_dirty() to userspace.  So
				 * ignore it.
				 */
				ext4_mark_inode_dirty(handle, inode);
			}
		}
		err = ext4_journal_stop(handle);
		if (ret == 0)
			ret = err;
	}
out:
	return ret;
}
/*
 * Pages can be marked dirty completely asynchronously from ext4's journalling
 * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
 * much here because ->set_page_dirty is called under VFS locks.  The page is
 * not necessarily locked.
 *
 * We cannot just dirty the page and leave attached buffers clean, because the
 * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
 * or jbddirty because all the journalling code will explode.
 *
 * So what we do is to mark the page "pending dirty" and next time writepage
 * is called, propagate that into the buffers appropriately.
 */
static int ext4_journalled_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_nobuffers(page);
}
  2555. static const struct address_space_operations ext4_ordered_aops = {
  2556. .readpage = ext4_readpage,
  2557. .readpages = ext4_readpages,
  2558. .writepage = ext4_normal_writepage,
  2559. .sync_page = block_sync_page,
  2560. .write_begin = ext4_write_begin,
  2561. .write_end = ext4_ordered_write_end,
  2562. .bmap = ext4_bmap,
  2563. .invalidatepage = ext4_invalidatepage,
  2564. .releasepage = ext4_releasepage,
  2565. .direct_IO = ext4_direct_IO,
  2566. .migratepage = buffer_migrate_page,
  2567. .is_partially_uptodate = block_is_partially_uptodate,
  2568. };
  2569. static const struct address_space_operations ext4_writeback_aops = {
  2570. .readpage = ext4_readpage,
  2571. .readpages = ext4_readpages,
  2572. .writepage = ext4_normal_writepage,
  2573. .sync_page = block_sync_page,
  2574. .write_begin = ext4_write_begin,
  2575. .write_end = ext4_writeback_write_end,
  2576. .bmap = ext4_bmap,
  2577. .invalidatepage = ext4_invalidatepage,
  2578. .releasepage = ext4_releasepage,
  2579. .direct_IO = ext4_direct_IO,
  2580. .migratepage = buffer_migrate_page,
  2581. .is_partially_uptodate = block_is_partially_uptodate,
  2582. };
  2583. static const struct address_space_operations ext4_journalled_aops = {
  2584. .readpage = ext4_readpage,
  2585. .readpages = ext4_readpages,
  2586. .writepage = ext4_journalled_writepage,
  2587. .sync_page = block_sync_page,
  2588. .write_begin = ext4_write_begin,
  2589. .write_end = ext4_journalled_write_end,
  2590. .set_page_dirty = ext4_journalled_set_page_dirty,
  2591. .bmap = ext4_bmap,
  2592. .invalidatepage = ext4_invalidatepage,
  2593. .releasepage = ext4_releasepage,
  2594. .is_partially_uptodate = block_is_partially_uptodate,
  2595. };
  2596. static const struct address_space_operations ext4_da_aops = {
  2597. .readpage = ext4_readpage,
  2598. .readpages = ext4_readpages,
  2599. .writepage = ext4_da_writepage,
  2600. .writepages = ext4_da_writepages,
  2601. .sync_page = block_sync_page,
  2602. .write_begin = ext4_da_write_begin,
  2603. .write_end = ext4_da_write_end,
  2604. .bmap = ext4_bmap,
  2605. .invalidatepage = ext4_da_invalidatepage,
  2606. .releasepage = ext4_releasepage,
  2607. .direct_IO = ext4_direct_IO,
  2608. .migratepage = buffer_migrate_page,
  2609. .is_partially_uptodate = block_is_partially_uptodate,
  2610. };
  2611. void ext4_set_aops(struct inode *inode)
  2612. {
  2613. if (ext4_should_order_data(inode) &&
  2614. test_opt(inode->i_sb, DELALLOC))
  2615. inode->i_mapping->a_ops = &ext4_da_aops;
  2616. else if (ext4_should_order_data(inode))
  2617. inode->i_mapping->a_ops = &ext4_ordered_aops;
  2618. else if (ext4_should_writeback_data(inode) &&
  2619. test_opt(inode->i_sb, DELALLOC))
  2620. inode->i_mapping->a_ops = &ext4_da_aops;
  2621. else if (ext4_should_writeback_data(inode))
  2622. inode->i_mapping->a_ops = &ext4_writeback_aops;
  2623. else
  2624. inode->i_mapping->a_ops = &ext4_journalled_aops;
  2625. }
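/*
 * Summary of the selection above, for reference:
 *
 *	data=ordered			-> ext4_ordered_aops
 *	data=ordered   + delalloc	-> ext4_da_aops
 *	data=writeback			-> ext4_writeback_aops
 *	data=writeback + delalloc	-> ext4_da_aops
 *	data=journal			-> ext4_journalled_aops
 *
 * Note that data=journal ignores the delalloc option here, and that
 * ext4_journalled_aops has no ->direct_IO method, so O_DIRECT is not
 * available in that mode.
 */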
/*
 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
 * up to the end of the block which corresponds to `from'.
 * This is required during truncate.  We need to physically zero the tail end
 * of that block so it doesn't yield old data if the file is later grown.
 */
int ext4_block_truncate_page(handle_t *handle,
		struct address_space *mapping, loff_t from)
{
	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize, length, pos;
	ext4_lblk_t iblock;
	struct inode *inode = mapping->host;
	struct buffer_head *bh;
	struct page *page;
	int err = 0;

	page = grab_cache_page(mapping, from >> PAGE_CACHE_SHIFT);
	if (!page)
		return -EINVAL;

	blocksize = inode->i_sb->s_blocksize;
	length = blocksize - (offset & (blocksize - 1));
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	/*
	 * For "nobh" option, we can only work if we don't need to
	 * read-in the page - otherwise we create buffers to do the IO.
	 */
	if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
	    ext4_should_writeback_data(inode) && PageUptodate(page)) {
		zero_user(page, offset, length);
		set_page_dirty(page);
		goto unlock;
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (buffer_freed(bh)) {
		BUFFER_TRACE(bh, "freed: skip");
		goto unlock;
	}

	if (!buffer_mapped(bh)) {
		BUFFER_TRACE(bh, "unmapped");
		ext4_get_block(inode, iblock, bh, 0);
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh)) {
			BUFFER_TRACE(bh, "still unmapped");
			goto unlock;
		}
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	if (ext4_should_journal_data(inode)) {
		BUFFER_TRACE(bh, "get write access");
		err = ext4_journal_get_write_access(handle, bh);
		if (err)
			goto unlock;
	}

	zero_user(page, offset, length);
	BUFFER_TRACE(bh, "zeroed end of block");

	err = 0;
	if (ext4_should_journal_data(inode)) {
		err = ext4_journal_dirty_metadata(handle, bh);
	} else {
		if (ext4_should_order_data(inode))
			err = ext4_jbd2_file_inode(handle, inode);
		mark_buffer_dirty(bh);
	}

unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
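/*
 * Worked example for the arithmetic above (4 KB pages, 1 KB blocks,
 * truncate to from = 5000): the page index is 5000 >> 12 = 1, the offset
 * within that page is 5000 & 4095 = 904, so the buffer covering "offset"
 * is the first one in the page, and length = 1024 - (904 & 1023) = 120.
 * Bytes 904..1023 of the page are zeroed - exactly the tail of the block
 * that contains the new EOF.
 */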
/*
 * Probably it should be a library function... search for first non-zero word
 * or memcmp with zero_page, whatever is better for particular architecture.
 * Linus?
 */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

/**
 * ext4_find_shared - find the indirect blocks for partial truncation.
 * @inode:   inode in question
 * @depth:   depth of the affected branch
 * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
 * @chain:   place to store the pointers to partial indirect blocks
 * @top:     place to store the (detached) top of the branch
 *
 * This is a helper function used by ext4_truncate().
 *
 * When we do truncate() we may have to clean the ends of several
 * indirect blocks but leave the blocks themselves alive. A block is
 * partially truncated if some data below the new i_size is referred
 * from it (and it is on the path to the first completely truncated
 * data block, indeed).  We have to free the top of that path along
 * with everything to the right of the path. Since no allocation
 * past the truncation point is possible until ext4_truncate()
 * finishes, we may safely do the latter, but top of branch may
 * require special attention - pageout below the truncation point
 * might try to populate it.
 *
 * We atomically detach the top of branch from the tree, store the
 * block number of its root in *@top, pointers to buffer_heads of
 * partially truncated blocks - in @chain[].bh and pointers to
 * their last elements that should not be removed - in
 * @chain[].p. Return value is the pointer to last filled element
 * of @chain.
 *
 * The work left to caller to do the actual freeing of subtrees:
 *	a) free the subtree starting from *@top
 *	b) free the subtrees whose roots are stored in
 *		(@chain[i].p+1 .. end of @chain[i].bh->b_data)
 *	c) free the subtrees growing from the inode past the @chain[0].
 *		(no partially truncated stuff there).
 */
static Indirect *ext4_find_shared(struct inode *inode, int depth,
			ext4_lblk_t offsets[4], Indirect chain[4], __le32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext4_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	for (p = partial; p > chain && all_zeroes((__le32 *)p->bh->b_data, p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext4.  Must leave the tree intact */
#if 0
		*p->p = 0;
#endif
	}
	/* Writer: end */

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
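/*
 * Example (a sketch only): truncating so that the new last block lives in
 * the middle of a single-indirect branch gives depth == 2 with, say,
 * offsets = {12, 5}.  ext4_get_branch() fills chain[0] (the inode slot)
 * and chain[1] (the indirect block), and all_zeroes() decides whether
 * that indirect block still references live data below the cut.  If it
 * does, partial->p is left pointing at the last pointer that must
 * survive, and the caller frees everything to its right; if not, the
 * whole branch is handed back for detaching through *top.
 */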
/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 */
static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
		struct buffer_head *bh, ext4_fsblk_t block_to_free,
		unsigned long count, __le32 *first, __le32 *last)
{
	__le32 *p;
	if (try_to_extend_transaction(handle, inode)) {
		if (bh) {
			BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
			ext4_journal_dirty_metadata(handle, bh);
		}
		ext4_mark_inode_dirty(handle, inode);
		ext4_journal_test_restart(handle, inode);
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			ext4_journal_get_write_access(handle, bh);
		}
	}

	/*
	 * Any buffers which are on the journal will be in memory. We find
	 * them on the hash table so jbd2_journal_revoke() will run
	 * jbd2_journal_forget() on them.  We've already detached each block
	 * from the file, so bforget() in jbd2_journal_forget() should be
	 * safe.
	 *
	 * AKPM: turn on bforget in jbd2_journal_forget()!!!
	 */
	for (p = first; p < last; p++) {
		u32 nr = le32_to_cpu(*p);
		if (nr) {
			struct buffer_head *tbh;

			*p = 0;
			tbh = sb_find_get_block(inode->i_sb, nr);
			ext4_forget(handle, 0, inode, tbh, nr);
		}
	}

	ext4_free_blocks(handle, inode, block_to_free, count, 0);
}

/**
 * ext4_free_data - free a list of data blocks
 * @handle:	handle for this transaction
 * @inode:	inode we are dealing with
 * @this_bh:	indirect buffer_head which contains *@first and *@last
 * @first:	array of block numbers
 * @last:	points immediately past the end of array
 *
 * We are freeing all blocks referred from that array (numbers are stored as
 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
 *
 * We accumulate contiguous runs of blocks to free.  Conveniently, if these
 * blocks are contiguous then releasing them at one time will only affect one
 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
 * actually use a lot of journal space.
 *
 * @this_bh will be %NULL if @first and @last point into the inode's direct
 * block pointers.
 */
static void ext4_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh,
			   __le32 *first, __le32 *last)
{
	ext4_fsblk_t block_to_free = 0;		/* Starting block # of a run */
	unsigned long count = 0;		/* Number of blocks in the run */
	__le32 *block_to_free_p = NULL;		/* Pointer into inode/ind
						   corresponding to
						   block_to_free */
	ext4_fsblk_t nr;			/* Current block # */
	__le32 *p;				/* Pointer into inode/ind
						   for current block */
	int err;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				ext4_clear_blocks(handle, inode, this_bh,
						  block_to_free,
						  count, block_to_free_p, p);
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	if (count > 0)
		ext4_clear_blocks(handle, inode, this_bh, block_to_free,
				  count, block_to_free_p, p);

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext4_journal_dirty_metadata");

		/*
		 * The buffer head should have an attached journal head at this
		 * point. However, if the data is corrupted and an indirect
		 * block pointed to itself, it would have been detached when
		 * the block was cleared. Check for this instead of OOPSing.
		 */
		if (bh2jh(this_bh))
			ext4_journal_dirty_metadata(handle, this_bh);
		else
			ext4_error(inode->i_sb, __func__,
				   "circular indirect block detected, "
				   "inode=%lu, block=%llu",
				   inode->i_ino,
				   (unsigned long long) this_bh->b_blocknr);
	}
}
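/*
 * Example of the run accumulation above: if the pointer array holds the
 * block numbers { 100, 101, 102, 0, 200 }, the loop coalesces 100-102
 * into a single run (count == 3), skips the hole, and the trailing
 * "count > 0" call flushes { 200 } as a second run - two calls to
 * ext4_clear_blocks() instead of four.
 */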
/**
 * ext4_free_branches - free an array of branches
 * @handle:	JBD handle for this transaction
 * @inode:	inode we are dealing with
 * @parent_bh:	the buffer_head which contains *@first and *@last
 * @first:	array of block numbers
 * @last:	pointer immediately past the end of array
 * @depth:	depth of the branches to free
 *
 * We are freeing all blocks referred from these branches (numbers are
 * stored as little-endian 32-bit) and updating @inode->i_blocks
 * appropriately.
 */
static void ext4_free_branches(handle_t *handle, struct inode *inode,
			       struct buffer_head *parent_bh,
			       __le32 *first, __le32 *last, int depth)
{
	ext4_fsblk_t nr;
	__le32 *p;

	if (is_handle_aborted(handle))
		return;

	if (depth--) {
		struct buffer_head *bh;
		int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
		p = last;
		while (--p >= first) {
			nr = le32_to_cpu(*p);
			if (!nr)
				continue;		/* A hole */

			/* Go read the buffer for the next level down */
			bh = sb_bread(inode->i_sb, nr);

			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
			if (!bh) {
				ext4_error(inode->i_sb, "ext4_free_branches",
					   "Read failure, inode=%lu, block=%llu",
					   inode->i_ino, nr);
				continue;
			}

			/* This zaps the entire block.  Bottom up. */
			BUFFER_TRACE(bh, "free child branches");
			ext4_free_branches(handle, inode, bh,
					(__le32 *)bh->b_data,
					(__le32 *)bh->b_data + addr_per_block,
					depth);

			/*
			 * We've probably journalled the indirect block several
			 * times during the truncate.  But it's no longer
			 * needed and we now drop it from the transaction via
			 * jbd2_journal_revoke().
			 *
			 * That's easy if it's exclusively part of this
			 * transaction.  But if it's part of the committing
			 * transaction then jbd2_journal_forget() will simply
			 * brelse() it.  That means that if the underlying
			 * block is reallocated in ext4_get_block(),
			 * unmap_underlying_metadata() will find this block
			 * and will try to get rid of it.  damn, damn.
			 *
			 * If this block has already been committed to the
			 * journal, a revoke record will be written.  And
			 * revoke records must be emitted *before* clearing
			 * this block's bit in the bitmaps.
			 */
			ext4_forget(handle, 1, inode, bh, bh->b_blocknr);

			/*
			 * Everything below this pointer has been
			 * released.  Now let this top-of-subtree go.
			 *
			 * We want the freeing of this indirect block to be
			 * atomic in the journal with the updating of the
			 * bitmap block which owns it.  So make some room in
			 * the journal.
			 *
			 * We zero the parent pointer *after* freeing its
			 * pointee in the bitmaps, so if extend_transaction()
			 * for some reason fails to put the bitmap changes and
			 * the release into the same transaction, recovery
			 * will merely complain about releasing a free block,
			 * rather than leaking blocks.
			 */
			if (is_handle_aborted(handle))
				return;
			if (try_to_extend_transaction(handle, inode)) {
				ext4_mark_inode_dirty(handle, inode);
				ext4_journal_test_restart(handle, inode);
			}

			ext4_free_blocks(handle, inode, nr, 1, 1);

			if (parent_bh) {
				/*
				 * The block which we have just freed is
				 * pointed to by an indirect block: journal it
				 */
				BUFFER_TRACE(parent_bh, "get_write_access");
				if (!ext4_journal_get_write_access(handle,
								   parent_bh)){
					*p = 0;
					BUFFER_TRACE(parent_bh,
					    "call ext4_journal_dirty_metadata");
					ext4_journal_dirty_metadata(handle,
								    parent_bh);
				}
			}
		}
	} else {
		/* We have reached the bottom of the tree. */
		BUFFER_TRACE(parent_bh, "free data blocks");
		ext4_free_data(handle, inode, parent_bh, first, last);
	}
}
int ext4_can_truncate(struct inode *inode)
{
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return 0;
	if (S_ISREG(inode->i_mode))
		return 1;
	if (S_ISDIR(inode->i_mode))
		return 1;
	if (S_ISLNK(inode->i_mode))
		return !ext4_inode_is_fast_symlink(inode);
	return 0;
}

/*
 * ext4_truncate()
 *
 * We block out ext4_get_block() block instantiations across the entire
 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
 * simultaneously on behalf of the same inode.
 *
 * As we work through the truncate and commit bits of it to the journal there
 * is one core, guiding principle: the file's tree must always be consistent on
 * disk.  We must be able to restart the truncate after a crash.
 *
 * The file's tree may be transiently inconsistent in memory (although it
 * probably isn't), but whenever we close off and commit a journal transaction,
 * the contents of (the filesystem + the journal) must be consistent and
 * restartable.  It's pretty simple, really: bottom up, right to left (although
 * left-to-right works OK too).
 *
 * Note that at recovery time, journal replay occurs *before* the restart of
 * truncate against the orphan inode list.
 *
 * The committed inode has the new, desired i_size (which is the same as
 * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
 * that this inode's truncate did not complete and it will again call
 * ext4_truncate() to have another go.  So there will be instantiated blocks
 * to the right of the truncation point in a crashed ext4 filesystem.  But
 * that's fine - as long as they are linked from the inode, the post-crash
 * ext4_truncate() run will find them and release them.
 */
void ext4_truncate(struct inode *inode)
{
	handle_t *handle;
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *i_data = ei->i_data;
	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	__le32 nr = 0;
	int n;
	ext4_lblk_t last_block;
	unsigned blocksize = inode->i_sb->s_blocksize;

	if (!ext4_can_truncate(inode))
		return;

	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
		ext4_ext_truncate(inode);
		return;
	}

	handle = start_transaction(inode);
	if (IS_ERR(handle))
		return;		/* AKPM: return what? */

	last_block = (inode->i_size + blocksize-1)
					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);

	if (inode->i_size & (blocksize - 1))
		if (ext4_block_truncate_page(handle, mapping, inode->i_size))
			goto out_stop;

	n = ext4_block_to_path(inode, last_block, offsets, NULL);
	if (n == 0)
		goto out_stop;	/* error */

	/*
	 * OK.  This truncate is going to happen.  We add the inode to the
	 * orphan list, so that if this truncate spans multiple transactions,
	 * and we crash, we will resume the truncate when the filesystem
	 * recovers.  It also marks the inode dirty, to catch the new size.
	 *
	 * Implication: the file must always be in a sane, consistent
	 * truncatable state while each transaction commits.
	 */
	if (ext4_orphan_add(handle, inode))
		goto out_stop;

	/*
	 * From here we block out all ext4_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	down_write(&ei->i_data_sem);
	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to propagate
	 * the new, shorter inode size (held for now in i_size) into the
	 * on-disk inode.  We do this via i_disksize, which is the value which
	 * ext4 *really* writes onto the disk inode.
	 */
	ei->i_disksize = inode->i_size;

	if (n == 1) {		/* direct blocks */
		ext4_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT4_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext4_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop.  No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext4_free_branches(handle, inode, partial->bh,
					partial->p,
					partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (__le32*)partial->bh->b_data+addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
	default:
		nr = i_data[EXT4_IND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
			i_data[EXT4_IND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_IND_BLOCK:
		nr = i_data[EXT4_DIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
			i_data[EXT4_DIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_DIND_BLOCK:
		nr = i_data[EXT4_TIND_BLOCK];
		if (nr) {
			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
			i_data[EXT4_TIND_BLOCK] = 0;
		}
		/* fall through */
	case EXT4_TIND_BLOCK:
		;
	}

	ext4_discard_reservation(inode);

	up_write(&ei->i_data_sem);
	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);

	/*
	 * In a multi-transaction truncate, we only make the final transaction
	 * synchronous
	 */
	if (IS_SYNC(inode))
		handle->h_sync = 1;
out_stop:
	/*
	 * If this was a simple ftruncate(), and the file will remain alive
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	ext4_journal_stop(handle);
}
static ext4_fsblk_t ext4_get_inode_block(struct super_block *sb,
		unsigned long ino, struct ext4_iloc *iloc)
{
	ext4_group_t block_group;
	unsigned long offset;
	ext4_fsblk_t block;
	struct ext4_group_desc *gdp;

	if (!ext4_valid_inum(sb, ino)) {
		/*
		 * This error is already checked for in namei.c unless we are
		 * looking at an NFS filehandle, in which case no error
		 * report is needed
		 */
		return 0;
	}

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	gdp = ext4_get_group_desc(sb, block_group, NULL);
	if (!gdp)
		return 0;

	/*
	 * Figure out the offset within the block group inode table
	 */
	offset = ((ino - 1) % EXT4_INODES_PER_GROUP(sb)) *
		EXT4_INODE_SIZE(sb);
	block = ext4_inode_table(sb, gdp) +
		(offset >> EXT4_BLOCK_SIZE_BITS(sb));

	iloc->block_group = block_group;
	iloc->offset = offset & (EXT4_BLOCK_SIZE(sb) - 1);
	return block;
}
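/*
 * Worked example of the arithmetic above (assuming 8192 inodes per
 * group, 256-byte inodes and 4 KB blocks): inode 8195 lives in group
 * (8195 - 1) / 8192 = 1, at byte offset (8194 % 8192) * 256 = 512 into
 * that group's inode table, i.e. in the first table block
 * (512 >> 12 == 0) with iloc->offset == 512.
 */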
/*
 * ext4_get_inode_loc returns with an extra refcount against the inode's
 * underlying buffer_head on success. If 'in_mem' is true, we have all
 * data in memory that is needed to recreate the on-disk version of this
 * inode.
 */
static int __ext4_get_inode_loc(struct inode *inode,
				struct ext4_iloc *iloc, int in_mem)
{
	ext4_fsblk_t block;
	struct buffer_head *bh;

	block = ext4_get_inode_block(inode->i_sb, inode->i_ino, iloc);
	if (!block)
		return -EIO;

	bh = sb_getblk(inode->i_sb, block);
	if (!bh) {
		ext4_error(inode->i_sb, "ext4_get_inode_loc",
			   "unable to read inode block - "
			   "inode=%lu, block=%llu",
			   inode->i_ino, block);
		return -EIO;
	}
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);

		/*
		 * If the buffer has the write error flag, we have failed
		 * to write out another inode in the same block.  In this
		 * case, we don't have to read the block because we may
		 * read the old inode data successfully.
		 */
		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
			set_buffer_uptodate(bh);

		if (buffer_uptodate(bh)) {
			/* someone brought it uptodate while we waited */
			unlock_buffer(bh);
			goto has_buffer;
		}

		/*
		 * If we have all information of the inode in memory and this
		 * is the only valid inode in the block, we need not read the
		 * block.
		 */
		if (in_mem) {
			struct buffer_head *bitmap_bh;
			struct ext4_group_desc *desc;
			int inodes_per_buffer;
			int inode_offset, i;
			ext4_group_t block_group;
			int start;

			block_group = (inode->i_ino - 1) /
					EXT4_INODES_PER_GROUP(inode->i_sb);
			inodes_per_buffer = bh->b_size /
				EXT4_INODE_SIZE(inode->i_sb);
			inode_offset = ((inode->i_ino - 1) %
					EXT4_INODES_PER_GROUP(inode->i_sb));
			start = inode_offset & ~(inodes_per_buffer - 1);

			/* Is the inode bitmap in cache? */
			desc = ext4_get_group_desc(inode->i_sb,
						   block_group, NULL);
			if (!desc)
				goto make_io;

			bitmap_bh = sb_getblk(inode->i_sb,
				ext4_inode_bitmap(inode->i_sb, desc));
			if (!bitmap_bh)
				goto make_io;

			/*
			 * If the inode bitmap isn't in cache then the
			 * optimisation may end up performing two reads instead
			 * of one, so skip it.
			 */
			if (!buffer_uptodate(bitmap_bh)) {
				brelse(bitmap_bh);
				goto make_io;
			}
			for (i = start; i < start + inodes_per_buffer; i++) {
				if (i == inode_offset)
					continue;
				if (ext4_test_bit(i, bitmap_bh->b_data))
					break;
			}
			brelse(bitmap_bh);
			if (i == start + inodes_per_buffer) {
				/* all other inodes are free, so skip I/O */
				memset(bh->b_data, 0, bh->b_size);
				set_buffer_uptodate(bh);
				unlock_buffer(bh);
				goto has_buffer;
			}
		}

make_io:
		/*
		 * There are other valid inodes in the buffer, this inode
		 * has in-inode xattrs, or we don't have this inode in memory.
		 * Read the block from disk.
		 */
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ_META, bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			ext4_error(inode->i_sb, "ext4_get_inode_loc",
				   "unable to read inode block - "
				   "inode=%lu, block=%llu",
				   inode->i_ino, block);
			brelse(bh);
			return -EIO;
		}
	}
has_buffer:
	iloc->bh = bh;
	return 0;
}

int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
{
	/* We have all inode data except xattrs in memory here. */
	return __ext4_get_inode_loc(inode, iloc,
		!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
}

void ext4_set_inode_flags(struct inode *inode)
{
	unsigned int flags = EXT4_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
	if (flags & EXT4_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & EXT4_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & EXT4_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & EXT4_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & EXT4_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
}

/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
void ext4_get_inode_flags(struct ext4_inode_info *ei)
{
	unsigned int flags = ei->vfs_inode.i_flags;

	ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
			EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
	if (flags & S_SYNC)
		ei->i_flags |= EXT4_SYNC_FL;
	if (flags & S_APPEND)
		ei->i_flags |= EXT4_APPEND_FL;
	if (flags & S_IMMUTABLE)
		ei->i_flags |= EXT4_IMMUTABLE_FL;
	if (flags & S_NOATIME)
		ei->i_flags |= EXT4_NOATIME_FL;
	if (flags & S_DIRSYNC)
		ei->i_flags |= EXT4_DIRSYNC_FL;
}

static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
				  struct ext4_inode_info *ei)
{
	blkcnt_t i_blocks;
	struct inode *inode = &(ei->vfs_inode);
	struct super_block *sb = inode->i_sb;

	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
		/* we are using combined 48 bit field */
		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
					le32_to_cpu(raw_inode->i_blocks_lo);
		if (ei->i_flags & EXT4_HUGE_FILE_FL) {
			/* i_blocks is in units of file system block size */
			return i_blocks << (inode->i_blkbits - 9);
		} else {
			return i_blocks;
		}
	} else {
		return le32_to_cpu(raw_inode->i_blocks_lo);
	}
}
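/*
 * Decoding example: with the HUGE_FILE feature enabled, a raw inode
 * holding i_blocks_high = 0x0001 and i_blocks_lo = 0 encodes
 * i_blocks = 0x100000000.  If EXT4_HUGE_FILE_FL is also set on a
 * 4 KB-block filesystem (i_blkbits == 12), that count is in filesystem
 * blocks and is scaled back to 512-byte units: 0x100000000 << 3.
 */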
struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
{
	struct ext4_iloc iloc;
	struct ext4_inode *raw_inode;
	struct ext4_inode_info *ei;
	struct buffer_head *bh;
	struct inode *inode;
	long ret;
	int block;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ei = EXT4_I(inode);
#ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
	ei->i_acl = EXT4_ACL_NOT_CACHED;
	ei->i_default_acl = EXT4_ACL_NOT_CACHED;
#endif
	ei->i_block_alloc_info = NULL;

	ret = __ext4_get_inode_loc(inode, &iloc, 0);
	if (ret < 0)
		goto bad_inode;
	bh = iloc.bh;
	raw_inode = ext4_raw_inode(&iloc);
	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);

	ei->i_state = 0;
	ei->i_dir_start_lookup = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes;
	 * the test is the same one that e2fsck uses.
	 * NeilBrown 1999oct15
	 */
	if (inode->i_nlink == 0) {
		if (inode->i_mode == 0 ||
		    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
			/* this inode is deleted */
			brelse(bh);
			ret = -ESTALE;
			goto bad_inode;
		}
		/* The only unlinked inodes we let through here have
		 * valid i_mode and are being read by the orphan
		 * recovery code: that's fine, we're about to complete
		 * the process of deleting those. */
	}
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_HURD)) {
		ei->i_file_acl |=
			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
	}
	inode->i_size = ext4_isize(raw_inode);
	ei->i_disksize = inode->i_size;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_block_group = iloc.block_group;
	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (block = 0; block < EXT4_N_BLOCKS; block++)
		ei->i_data[block] = raw_inode->i_block[block];
	INIT_LIST_HEAD(&ei->i_orphan);

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
		    EXT4_INODE_SIZE(inode->i_sb)) {
			brelse(bh);
			ret = -EIO;
			goto bad_inode;
		}
		if (ei->i_extra_isize == 0) {
			/* The extra space is currently unused. Use it. */
			ei->i_extra_isize = sizeof(struct ext4_inode) -
					    EXT4_GOOD_OLD_INODE_SIZE;
		} else {
			__le32 *magic = (void *)raw_inode +
					EXT4_GOOD_OLD_INODE_SIZE +
					ei->i_extra_isize;
			if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
				ei->i_state |= EXT4_STATE_XATTR;
		}
	} else
		ei->i_extra_isize = 0;

	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);

	inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
			inode->i_version |=
			(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
	}

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext4_file_inode_operations;
		inode->i_fop = &ext4_file_operations;
		ext4_set_aops(inode);
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext4_dir_inode_operations;
		inode->i_fop = &ext4_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext4_inode_is_fast_symlink(inode))
			inode->i_op = &ext4_fast_symlink_inode_operations;
		else {
			inode->i_op = &ext4_symlink_inode_operations;
			ext4_set_aops(inode);
		}
	} else {
		inode->i_op = &ext4_special_inode_operations;
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	}
	brelse(iloc.bh);
	ext4_set_inode_flags(inode);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(ret);
}
static int ext4_inode_blocks_set(handle_t *handle,
				struct ext4_inode *raw_inode,
				struct ext4_inode_info *ei)
{
	struct inode *inode = &(ei->vfs_inode);
	u64 i_blocks = inode->i_blocks;
	struct super_block *sb = inode->i_sb;
	int err = 0;

	if (i_blocks <= ~0U) {
		/*
		 * i_blocks can be represented in a 32 bit variable
		 * as multiple of 512 bytes
		 */
		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = 0;
		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
	} else if (i_blocks <= 0xffffffffffffULL) {
		/*
		 * i_blocks can be represented in a 48 bit variable
		 * as multiple of 512 bytes
		 */
		err = ext4_update_rocompat_feature(handle, sb,
					EXT4_FEATURE_RO_COMPAT_HUGE_FILE);
		if (err)
			goto err_out;
		/* i_blocks is stored in the split 48 bit fields */
		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
		ei->i_flags &= ~EXT4_HUGE_FILE_FL;
	} else {
		/*
		 * i_blocks should be represented in a 48 bit variable
		 * as multiple of file system block size
		 */
		err = ext4_update_rocompat_feature(handle, sb,
					EXT4_FEATURE_RO_COMPAT_HUGE_FILE);
		if (err)
			goto err_out;
		ei->i_flags |= EXT4_HUGE_FILE_FL;
		/* i_blocks is stored in file system block size */
		i_blocks = i_blocks >> (inode->i_blkbits - 9);
		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
	}
err_out:
	return err;
}
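/*
 * The three cases above correspond to block counts (in 512-byte
 * sectors) of: up to 2^32 in the legacy 32-bit field, up to 2^48 in
 * the split high/low fields, and beyond that only by switching the
 * unit to filesystem blocks via EXT4_HUGE_FILE_FL.  For example,
 * i_blocks = 2^49 sectors on a 4 KB-block filesystem is stored as
 * 2^49 >> 3 = 2^46 blocks with the flag set.
 */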
/*
 * Post the struct inode info into an on-disk inode location in the
 * buffer-cache.  This gobbles the caller's reference to the
 * buffer_head in the inode location struct.
 *
 * The caller must have write access to iloc->bh.
 */
static int ext4_do_update_inode(handle_t *handle,
				struct inode *inode,
				struct ext4_iloc *iloc)
{
	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct buffer_head *bh = iloc->bh;
	int err = 0, rc, block;

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ei->i_state & EXT4_STATE_NEW)
		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);

	ext4_get_inode_flags(ei);
	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
		/*
		 * Fix up interoperability with old kernels. Otherwise, old
		 * inodes get re-used with the upper 16 bits of the uid/gid
		 * intact.
		 */
		if (!ei->i_dtime) {
			raw_inode->i_uid_high =
				cpu_to_le16(high_16_bits(inode->i_uid));
			raw_inode->i_gid_high =
				cpu_to_le16(high_16_bits(inode->i_gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low =
			cpu_to_le16(fs_high2lowuid(inode->i_uid));
		raw_inode->i_gid_low =
			cpu_to_le16(fs_high2lowgid(inode->i_gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);

	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);

	if (ext4_inode_blocks_set(handle, raw_inode, ei))
		goto out_brelse;
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	/* clear the migrate flag in the raw_inode */
	raw_inode->i_flags = cpu_to_le32(ei->i_flags & ~EXT4_EXT_MIGRATE);
	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_HURD))
		raw_inode->i_file_acl_high =
			cpu_to_le16(ei->i_file_acl >> 32);
	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
	ext4_isize_set(raw_inode, ei->i_disksize);
	if (ei->i_disksize > 0x7fffffffULL) {
		struct super_block *sb = inode->i_sb;
		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
				EXT4_SB(sb)->s_es->s_rev_level ==
				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
			/* If this is the first large file
			 * created, add a flag to the superblock.
			 */
			err = ext4_journal_get_write_access(handle,
					EXT4_SB(sb)->s_sbh);
			if (err)
				goto out_brelse;
			ext4_update_dynamic_rev(sb);
			EXT4_SET_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
			sb->s_dirt = 1;
			handle->h_sync = 1;
			err = ext4_journal_dirty_metadata(handle,
					EXT4_SB(sb)->s_sbh);
		}
	}
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else for (block = 0; block < EXT4_N_BLOCKS; block++)
		raw_inode->i_block[block] = ei->i_data[block];

	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
	if (ei->i_extra_isize) {
		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
			raw_inode->i_version_hi =
				cpu_to_le32(inode->i_version >> 32);
		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
	}

	BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
	rc = ext4_journal_dirty_metadata(handle, bh);
	if (!err)
		err = rc;
	ei->i_state &= ~EXT4_STATE_NEW;

out_brelse:
	brelse(bh);
	ext4_std_error(inode->i_sb, err);
	return err;
}
/*
 * ext4_write_inode()
 *
 * We are called from a few places:
 *
 * - Within generic_file_write() for O_SYNC files.
 *   Here, there will be no transaction running. We wait for any running
 *   transaction to commit.
 *
 * - Within sys_sync(), kupdate and such.
 *   We wait on commit, if told to.
 *
 * - Within prune_icache() (PF_MEMALLOC == true)
 *   Here we simply return.  We can't afford to block kswapd on the
 *   journal commit.
 *
 * In all cases it is actually safe for us to return without doing anything,
 * because the inode has been copied into a raw inode buffer in
 * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
 * knfsd.
 *
 * Note that we are absolutely dependent upon all inode dirtiers doing the
 * right thing: they *must* call mark_inode_dirty() after dirtying info in
 * which we are interested.
 *
 * It would be a bug for them to not do this.  The code:
 *
 *	mark_inode_dirty(inode)
 *	stuff();
 *	inode->i_size = expr;
 *
 * is in error because a kswapd-driven write_inode() could occur while
 * `stuff()' is running, and the new i_size will be lost.  Plus the inode
 * will no longer be on the superblock's dirty inode list.
 */
int ext4_write_inode(struct inode *inode, int wait)
{
	if (current->flags & PF_MEMALLOC)
		return 0;

	if (ext4_journal_current_handle()) {
		jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
		dump_stack();
		return -EIO;
	}

	if (!wait)
		return 0;

	return ext4_force_commit(inode->i_sb);
}
/*
 * ext4_setattr()
 *
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as soon as
 * possible.  In particular, we want to make sure that when the VFS
 * shrinks i_size, we put the inode on the orphan list and modify
 * i_disksize immediately, so that during the subsequent flushing of
 * dirty pages and freeing of disk blocks, we can guarantee that any
 * commit will leave the blocks being flushed in an unused state on
 * disk.  (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * Another thing we have to assure is that if we are in ordered mode
 * and inode is still attached to the committing transaction, we must
 * start writeout of all the dirty pages which are being truncated.
 * This way we are sure that all the data written in the previous
 * transaction are already on disk (truncate waits for pages under
 * writeback).
 *
 * Called with inode->i_mutex down.
 */
int ext4_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error, rc = 0;
	const unsigned int ia_valid = attr->ia_valid;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
		handle_t *handle;

		/* (user+group)*(old+new) structure, inode write (sb,
		 * inode block, ? - but truncate inode update has it) */
		handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+
					EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}
		error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
		if (error) {
			ext4_journal_stop(handle);
			return error;
		}
		/* Update corresponding info in inode so that everything is in
		 * one transaction */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		error = ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	}

	if (attr->ia_valid & ATTR_SIZE) {
		if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

			if (attr->ia_size > sbi->s_bitmap_maxbytes) {
				error = -EFBIG;
				goto err_out;
			}
		}
	}

	if (S_ISREG(inode->i_mode) &&
	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
		handle_t *handle;

		handle = ext4_journal_start(inode, 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}

		error = ext4_orphan_add(handle, inode);
		EXT4_I(inode)->i_disksize = attr->ia_size;
		rc = ext4_mark_inode_dirty(handle, inode);
		if (!error)
			error = rc;
		ext4_journal_stop(handle);

		if (ext4_should_order_data(inode)) {
			error = ext4_begin_ordered_truncate(inode,
							    attr->ia_size);
			if (error) {
				/* Do as much error cleanup as possible */
				handle = ext4_journal_start(inode, 3);
				if (IS_ERR(handle)) {
					ext4_orphan_del(NULL, inode);
					goto err_out;
				}
				ext4_orphan_del(handle, inode);
				ext4_journal_stop(handle);
				goto err_out;
			}
		}
	}

	rc = inode_setattr(inode, attr);

	/* If inode_setattr's call to ext4_truncate failed to get a
	 * transaction handle at all, we need to clean up the in-core
	 * orphan list manually. */
	if (inode->i_nlink)
		ext4_orphan_del(NULL, inode);

	if (!rc && (ia_valid & ATTR_MODE))
		rc = ext4_acl_chmod(inode);

err_out:
	ext4_std_error(inode->i_sb, error);
	if (!error)
		error = rc;
	return error;
}
int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode;
	unsigned long delalloc_blocks;

	inode = dentry->d_inode;
	generic_fillattr(inode, stat);

	/*
	 * We can't update i_blocks while the block allocation is delayed,
	 * otherwise a system crash before the real block allocation is
	 * done would leave i_blocks inconsistent with the on-disk file
	 * blocks.  We always keep i_blocks updated together with real
	 * allocation; but to avoid confusing userspace, stat returns a
	 * block count that includes the delayed allocation blocks for
	 * this file.
	 */
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
	return 0;
}
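/*
 * The shift pair above converts filesystem blocks into the 512-byte
 * units st_blocks is expressed in: e.g. with 4 KB blocks, 3 reserved
 * delalloc blocks add (3 << 12) >> 9 = 24 sectors to stat->blocks.
 */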
/*
 * How many blocks doth make a writepage()?
 *
 * With N blocks per page, it may be:
 * N data blocks
 * 2 indirect blocks
 * 2 dindirect
 * 1 tindirect
 * N+5 bitmap blocks (from the above)
 * N+5 group descriptor summary blocks
 * 1 inode block
 * 1 superblock.
 * 2 * EXT4_SINGLEDATA_TRANS_BLOCKS for the quota files
 *
 * 3 * (N + 5) + 2 + 2 * EXT4_SINGLEDATA_TRANS_BLOCKS
 *
 * With ordered or writeback data it's the same, less the N data blocks.
 *
 * If the inode's direct blocks can hold an integral number of pages then a
 * page cannot straddle two indirect blocks, and we can only touch one indirect
 * and dindirect block, and the "5" above becomes "3".
 *
 * This still overestimates under most circumstances.  If we were to pass the
 * start and end offsets in here as well we could do block_to_path() on each
 * block and work out the exact number of indirects which are touched.  Pah.
 */
int ext4_writepage_trans_blocks(struct inode *inode)
{
	int bpp = ext4_journal_blocks_per_page(inode);
	int indirects = (EXT4_NDIR_BLOCKS % bpp) ? 5 : 3;
	int ret;

	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
		return ext4_ext_writepage_trans_blocks(inode, bpp);

	if (ext4_should_journal_data(inode))
		ret = 3 * (bpp + indirects) + 2;
	else
		ret = 2 * (bpp + indirects) + 2;

#ifdef CONFIG_QUOTA
	/* We know that structure was already allocated during DQUOT_INIT so
	 * we will be updating only the data blocks + inodes */
	ret += 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
#endif

	return ret;
}
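/*
 * Example credit count: 4 KB pages over 1 KB blocks give bpp == 4, and
 * EXT4_NDIR_BLOCKS (12) % 4 == 0 means a page never straddles two
 * indirect blocks, so indirects == 3.  That yields 3 * (4 + 3) + 2 = 23
 * credits in data=journal mode and 2 * (4 + 3) + 2 = 16 otherwise,
 * before any quota credits are added.
 */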
/*
 * The caller must have previously called ext4_reserve_inode_write().
 * Given this, we know that the caller already has write access to iloc->bh.
 */
int ext4_mark_iloc_dirty(handle_t *handle,
		struct inode *inode, struct ext4_iloc *iloc)
{
	int err = 0;

	if (test_opt(inode->i_sb, I_VERSION))
		inode_inc_iversion(inode);

	/* the do_update_inode consumes one bh->b_count */
	get_bh(iloc->bh);

	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
	err = ext4_do_update_inode(handle, inode, iloc);
	put_bh(iloc->bh);
	return err;
}
/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh.  This _must_ be cleaned up later.
 */
int
ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			 struct ext4_iloc *iloc)
{
	int err = 0;
	if (handle) {
		err = ext4_get_inode_loc(inode, iloc);
		if (!err) {
			BUFFER_TRACE(iloc->bh, "get_write_access");
			err = ext4_journal_get_write_access(handle, iloc->bh);
			if (err) {
				brelse(iloc->bh);
				iloc->bh = NULL;
			}
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}
/*
 * Expand an inode by new_extra_isize bytes.
 * Returns 0 on success or negative error number on failure.
 */
static int ext4_expand_extra_isize(struct inode *inode,
				   unsigned int new_extra_isize,
				   struct ext4_iloc iloc,
				   handle_t *handle)
{
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry;

	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
		return 0;

	raw_inode = ext4_raw_inode(&iloc);

	header = IHDR(inode, raw_inode);
	entry = IFIRST(header);

	/* No extended attributes present */
	if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
			new_extra_isize);
		EXT4_I(inode)->i_extra_isize = new_extra_isize;
		return 0;
	}

	/* try to expand with EAs present */
	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
					  raw_inode, handle);
}

/*
 * What we do here is to mark the in-core inode as clean with respect to inode
 * dirtiness (it may still be data-dirty).
 * This means that the in-core inode may be reaped by prune_icache
 * without having to perform any I/O.  This is a very good thing,
 * because *any* task may call prune_icache - even ones which
 * have a transaction open against a different journal.
 *
 * Is this cheating?  Not really.  Sure, we haven't written the
 * inode out, but prune_icache isn't a user-visible syncing function.
 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
 * we start and wait on commits.
 *
 * Is this efficient/effective?  Well, we're being nice to the system
 * by cleaning up our inodes proactively so they can be reaped
 * without I/O.  But we are potentially leaving up to five seconds'
 * worth of inodes floating about which prune_icache wants us to
 * write out.  One way to fix that would be to get prune_icache()
 * to do a write_super() to free up some memory.  It has the desired
 * effect.
 */
int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	static unsigned int mnt_count;
	int err, ret;

	might_sleep();
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
	    !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
		/*
		 * We need extra buffer credits since we may write into the EA
		 * block with this same handle.  If journal_extend fails, then
		 * it will only result in a minor loss of functionality for
		 * that inode.  If this is felt to be critical, then e2fsck
		 * should be run to force a large enough s_min_extra_isize.
		 */
		if ((jbd2_journal_extend(handle,
			     EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
			ret = ext4_expand_extra_isize(inode,
						      sbi->s_want_extra_isize,
						      iloc, handle);
			if (ret) {
				EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
				if (mnt_count !=
					le16_to_cpu(sbi->s_es->s_mnt_count)) {
					ext4_warning(inode->i_sb, __func__,
					"Unable to expand inode %lu. Delete"
					" some EAs or run e2fsck.",
						inode->i_ino);
					mnt_count =
					  le16_to_cpu(sbi->s_es->s_mnt_count);
				}
			}
		}
	}
	if (!err)
		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
	return err;
}

/*
 * ext4_dirty_inode() is called from __mark_inode_dirty()
 *
 * We're really interested in the case where a file is being extended.
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
 * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
 * are allocated to the file.
 *
 * If the inode is marked synchronous, we don't honour that here - doing
 * so would cause a commit on atime updates, which we don't bother doing.
 * We handle synchronous inodes at the highest possible level.
 */
void ext4_dirty_inode(struct inode *inode)
{
	handle_t *current_handle = ext4_journal_current_handle();
	handle_t *handle;

	handle = ext4_journal_start(inode, 2);
	if (IS_ERR(handle))
		goto out;
	if (current_handle &&
		current_handle->h_transaction != handle->h_transaction) {
		/* This task has a transaction open against a different fs */
		printk(KERN_EMERG "%s: transactions do not match!\n",
		       __func__);
	} else {
		jbd_debug(5, "marking dirty.  outer handle=%p\n",
				current_handle);
		ext4_mark_inode_dirty(handle, inode);
	}
	ext4_journal_stop(handle);
out:
	return;
}

#if 0
/*
 * Bind an inode's backing buffer_head into this transaction, to prevent
 * it from being flushed to disk early.  Unlike
 * ext4_reserve_inode_write, this leaves behind no bh reference and
 * returns no iloc structure, so the caller needs to repeat the iloc
 * lookup to mark the inode dirty later.
 */
static int ext4_pin_inode(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	int err = 0;

	if (handle) {
		err = ext4_get_inode_loc(inode, &iloc);
		if (!err) {
			BUFFER_TRACE(iloc.bh, "get_write_access");
			err = jbd2_journal_get_write_access(handle, iloc.bh);
			if (!err)
				err = ext4_journal_dirty_metadata(handle,
								  iloc.bh);
			brelse(iloc.bh);
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}
#endif

int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
	journal_t *journal;
	handle_t *handle;
	int err;

	/*
	 * We have to be very careful here: changing a data block's
	 * journaling status dynamically is dangerous.  If we write a
	 * data block to the journal, change the status and then delete
	 * that block, we risk forgetting to revoke the old log record
	 * from the journal and so a subsequent replay can corrupt data.
	 * So, first we make sure that the journal is empty and that
	 * nobody is changing anything.
	 */

	journal = EXT4_JOURNAL(inode);
	if (is_journal_aborted(journal))
		return -EROFS;

	jbd2_journal_lock_updates(journal);
	jbd2_journal_flush(journal);

	/*
	 * OK, there are no updates running now, and all cached data is
	 * synced to disk.  We are now in a completely consistent state
	 * which doesn't have anything in the journal, and we know that
	 * no filesystem updates are running, so it is safe to modify
	 * the inode's in-core data-journaling state flag now.
	 */

	if (val)
		EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
	else
		EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
	ext4_set_aops(inode);

	jbd2_journal_unlock_updates(journal);

	/* Finally we can mark the inode as dirty. */

	handle = ext4_journal_start(inode, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_mark_inode_dirty(handle, inode);
	handle->h_sync = 1;
	ext4_journal_stop(handle);
	ext4_std_error(inode->i_sb, err);

	return err;
}
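
#if 0
/*
 * Illustrative sketch only (simplified; the real caller is the
 * EXT4_IOC_SETFLAGS handler in ext4_ioctl(), with locking and permission
 * checks omitted here): the per-inode data-journaling flag is only ever
 * flipped through ext4_change_inode_journal_flag(), so the
 * flush-and-switch sequence above cannot be bypassed.
 */
static int ext4_example_setflags(struct inode *inode, unsigned int flags)
{
	unsigned int jflag = flags & EXT4_JOURNAL_DATA_FL;

	if ((jflag ^ EXT4_I(inode)->i_flags) & EXT4_JOURNAL_DATA_FL)
		return ext4_change_inode_journal_flag(inode, jflag != 0);
	return 0;
}
#endif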

static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
{
	return !buffer_mapped(bh);
}

int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	loff_t size;
	unsigned long len;
	int ret = -EINVAL;
	struct file *file = vma->vm_file;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;

	/*
	 * Get i_alloc_sem to stop truncates messing with the inode.  We
	 * cannot get i_mutex because we are already holding mmap_sem.
	 */
	down_read(&inode->i_alloc_sem);
	size = i_size_read(inode);
	if (page->mapping != mapping || size <= page_offset(page)
	    || !PageUptodate(page)) {
		/* page got truncated from under us? */
		goto out_unlock;
	}
	ret = 0;
	if (PageMappedToDisk(page))
		goto out_unlock;

	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;

	if (page_has_buffers(page)) {
		/* return if we have all the buffers mapped */
		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
				       ext4_bh_unmapped))
			goto out_unlock;
	}
	/*
	 * OK, we need to fill the hole... Do write_begin/write_end
	 * to do the block allocation/reservation.  We are not holding
	 * inode->i_mutex here; that allows parallel write_begin and
	 * write_end calls, but lock_page prevents them from racing
	 * on the same page.
	 */
	ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
			len, AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
	if (ret < 0)
		goto out_unlock;
	ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
			len, len, page, NULL);
	if (ret < 0)
		goto out_unlock;
	ret = 0;
out_unlock:
	up_read(&inode->i_alloc_sem);
	return ret;
}
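
#if 0
/*
 * Illustrative sketch of how this hook is consumed (the real table lives
 * in fs/ext4/file.c): ->page_mkwrite lets the VM call back into ext4 to
 * allocate blocks before a read-only mmap'ed page is made writable.
 */
static struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= ext4_page_mkwrite,
};
#endif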