  1. /*
  2. * linux/fs/ext3/inode.c
  3. *
  4. * Copyright (C) 1992, 1993, 1994, 1995
  5. * Remy Card (card@masi.ibp.fr)
  6. * Laboratoire MASI - Institut Blaise Pascal
  7. * Universite Pierre et Marie Curie (Paris VI)
  8. *
  9. * from
  10. *
  11. * linux/fs/minix/inode.c
  12. *
  13. * Copyright (C) 1991, 1992 Linus Torvalds
  14. *
  15. * Goal-directed block allocation by Stephen Tweedie
  16. * (sct@redhat.com), 1993, 1998
  17. * Big-endian to little-endian byte-swapping/bitmaps by
  18. * David S. Miller (davem@caip.rutgers.edu), 1995
  19. * 64-bit file support on 64-bit platforms by Jakub Jelinek
  20. * (jj@sunsite.ms.mff.cuni.cz)
  21. *
  22. * Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
  23. */
  24. #include <linux/module.h>
  25. #include <linux/fs.h>
  26. #include <linux/time.h>
  27. #include <linux/ext3_jbd.h>
  28. #include <linux/jbd.h>
  29. #include <linux/highuid.h>
  30. #include <linux/pagemap.h>
  31. #include <linux/quotaops.h>
  32. #include <linux/string.h>
  33. #include <linux/buffer_head.h>
  34. #include <linux/writeback.h>
  35. #include <linux/mpage.h>
  36. #include <linux/uio.h>
  37. #include <linux/bio.h>
  38. #include "xattr.h"
  39. #include "acl.h"
  40. static int ext3_writepage_trans_blocks(struct inode *inode);
  41. /*
  42. * Test whether an inode is a fast symlink.
  43. */
  44. static int ext3_inode_is_fast_symlink(struct inode *inode)
  45. {
  46. int ea_blocks = EXT3_I(inode)->i_file_acl ?
  47. (inode->i_sb->s_blocksize >> 9) : 0;
  48. return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
  49. }
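/*
 * Editor's note: a fast symlink keeps its target string inside i_data, so it
 * owns no data blocks at all.  i_blocks is accounted in 512-byte units, which
 * is why an external xattr block (i_file_acl), if present, is subtracted as
 * s_blocksize >> 9 sectors before the comparison with zero above.
 */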
  50. /*
  51. * The ext3 forget function must perform a revoke if we are freeing data
  52. * which has been journaled. Metadata (eg. indirect blocks) must be
  53. * revoked in all cases.
  54. *
  55. * "bh" may be NULL: a metadata block may have been freed from memory
  56. * but there may still be a record of it in the journal, and that record
  57. * still needs to be revoked.
  58. */
  59. int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
  60. struct buffer_head *bh, ext3_fsblk_t blocknr)
  61. {
  62. int err;
  63. might_sleep();
  64. BUFFER_TRACE(bh, "enter");
  65. jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
  66. "data mode %lx\n",
  67. bh, is_metadata, inode->i_mode,
  68. test_opt(inode->i_sb, DATA_FLAGS));
  69. /* Never use the revoke function if we are doing full data
  70. * journaling: there is no need to, and a V1 superblock won't
  71. * support it. Otherwise, only skip the revoke on un-journaled
  72. * data blocks. */
  73. if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
  74. (!is_metadata && !ext3_should_journal_data(inode))) {
  75. if (bh) {
  76. BUFFER_TRACE(bh, "call journal_forget");
  77. return ext3_journal_forget(handle, bh);
  78. }
  79. return 0;
  80. }
  81. /*
  82. * data!=journal && (is_metadata || should_journal_data(inode))
  83. */
  84. BUFFER_TRACE(bh, "call ext3_journal_revoke");
  85. err = ext3_journal_revoke(handle, blocknr, bh);
  86. if (err)
  87. ext3_abort(inode->i_sb, __func__,
  88. "error %d when attempting revoke", err);
  89. BUFFER_TRACE(bh, "exit");
  90. return err;
  91. }
  92. /*
  93. * Work out how many blocks we need to proceed with the next chunk of a
  94. * truncate transaction.
  95. */
  96. static unsigned long blocks_for_truncate(struct inode *inode)
  97. {
  98. unsigned long needed;
  99. needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
  100. /* Give ourselves just enough room to cope with inodes in which
  101. * i_blocks is corrupt: we've seen disk corruptions in the past
  102. * which resulted in random data in an inode which looked enough
  103. * like a regular file for ext3 to try to delete it. Things
  104. * will go a bit crazy if that happens, but at least we should
  105. * try not to panic the whole kernel. */
  106. if (needed < 2)
  107. needed = 2;
  108. /* But we need to bound the transaction so we don't overflow the
  109. * journal. */
  110. if (needed > EXT3_MAX_TRANS_DATA)
  111. needed = EXT3_MAX_TRANS_DATA;
  112. return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
  113. }
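/*
 * Editor's note: i_blocks above is counted in 512-byte sectors, so shifting
 * right by (s_blocksize_bits - 9) converts it to filesystem blocks; the
 * result is then clamped to [2, EXT3_MAX_TRANS_DATA] before the fixed
 * EXT3_DATA_TRANS_BLOCKS overhead is added.
 */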
  114. /*
  115. * Truncate transactions can be complex and absolutely huge. So we need to
  116. * be able to restart the transaction at a convenient checkpoint to make
  117. * sure we don't overflow the journal.
  118. *
  119. * start_transaction gets us a new handle for a truncate transaction,
  120. * and extend_transaction tries to extend the existing one a bit. If
  121. * extend fails, we need to propagate the failure up and restart the
  122. * transaction in the top-level truncate loop. --sct
  123. */
  124. static handle_t *start_transaction(struct inode *inode)
  125. {
  126. handle_t *result;
  127. result = ext3_journal_start(inode, blocks_for_truncate(inode));
  128. if (!IS_ERR(result))
  129. return result;
  130. ext3_std_error(inode->i_sb, PTR_ERR(result));
  131. return result;
  132. }
  133. /*
  134. * Try to extend this transaction for the purposes of truncation.
  135. *
  136. * Returns 0 if we managed to create more room. If we can't create more
  137. * room, the transaction must be restarted, so we return 1.
  138. */
  139. static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
  140. {
  141. if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
  142. return 0;
  143. if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
  144. return 0;
  145. return 1;
  146. }
  147. /*
  148. * Restart the transaction associated with *handle. This does a commit,
  149. * so before we call here everything must be consistently dirtied against
  150. * this transaction.
  151. */
  152. static int ext3_journal_test_restart(handle_t *handle, struct inode *inode)
  153. {
  154. jbd_debug(2, "restarting handle %p\n", handle);
  155. return ext3_journal_restart(handle, blocks_for_truncate(inode));
  156. }
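/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a truncate-style caller might combine the three helpers above.  The
 * nr_chunks bound and the chunk-freeing step are hypothetical placeholders.
 */
static void example_truncate_loop(struct inode *inode, int nr_chunks)
{
	handle_t *handle = start_transaction(inode);
	int i;

	if (IS_ERR(handle))
		return;		/* start_transaction() already reported the error */
	for (i = 0; i < nr_chunks; i++) {
		/* Make sure there is room left before dirtying more metadata. */
		if (try_to_extend_transaction(handle, inode))
			ext3_journal_test_restart(handle, inode);
		/* ... free the next chunk of blocks under `handle' ... */
	}
	ext3_journal_stop(handle);
}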
  157. /*
  158. * Called at the last iput() if i_nlink is zero.
  159. */
  160. void ext3_delete_inode (struct inode * inode)
  161. {
  162. handle_t *handle;
  163. truncate_inode_pages(&inode->i_data, 0);
  164. if (is_bad_inode(inode))
  165. goto no_delete;
  166. handle = start_transaction(inode);
  167. if (IS_ERR(handle)) {
  168. /*
  169. * If we're going to skip the normal cleanup, we still need to
  170. * make sure that the in-core orphan linked list is properly
  171. * cleaned up.
  172. */
  173. ext3_orphan_del(NULL, inode);
  174. goto no_delete;
  175. }
  176. if (IS_SYNC(inode))
  177. handle->h_sync = 1;
  178. inode->i_size = 0;
  179. if (inode->i_blocks)
  180. ext3_truncate(inode);
  181. /*
  182. * Kill off the orphan record which ext3_truncate created.
  183. * AKPM: I think this can be inside the above `if'.
  184. * Note that ext3_orphan_del() has to be able to cope with the
  185. * deletion of a non-existent orphan - this is because we don't
  186. * know if ext3_truncate() actually created an orphan record.
  187. * (Well, we could do this if we need to, but heck - it works)
  188. */
  189. ext3_orphan_del(handle, inode);
  190. EXT3_I(inode)->i_dtime = get_seconds();
  191. /*
  192. * One subtle ordering requirement: if anything has gone wrong
  193. * (transaction abort, IO errors, whatever), then we can still
  194. * do these next steps (the fs will already have been marked as
  195. * having errors), but we can't free the inode if the mark_dirty
  196. * fails.
  197. */
  198. if (ext3_mark_inode_dirty(handle, inode))
  199. /* If that failed, just do the required in-core inode clear. */
  200. clear_inode(inode);
  201. else
  202. ext3_free_inode(handle, inode);
  203. ext3_journal_stop(handle);
  204. return;
  205. no_delete:
  206. clear_inode(inode); /* We must guarantee clearing of inode... */
  207. }
  208. typedef struct {
  209. __le32 *p;
  210. __le32 key;
  211. struct buffer_head *bh;
  212. } Indirect;
  213. static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
  214. {
  215. p->key = *(p->p = v);
  216. p->bh = bh;
  217. }
  218. static int verify_chain(Indirect *from, Indirect *to)
  219. {
  220. while (from <= to && from->key == *from->p)
  221. from++;
  222. return (from > to);
  223. }
  224. /**
  225. * ext3_block_to_path - parse the block number into array of offsets
  226. * @inode: inode in question (we are only interested in its superblock)
  227. * @i_block: block number to be parsed
  228. * @offsets: array to store the offsets in
  229. * @boundary: set this non-zero if the referred-to block is likely to be
  230. * followed (on disk) by an indirect block.
  231. *
  232. * To store the locations of a file's data, ext3 uses a data structure common
  233. * for UNIX filesystems - tree of pointers anchored in the inode, with
  234. * data blocks at leaves and indirect blocks in intermediate nodes.
  235. * This function translates the block number into path in that tree -
  236. * return value is the path length and @offsets[n] is the offset of
  237. * pointer to the (n+1)-th node in the n-th one. If @i_block is out of range
  238. * (negative or too large), a warning is printed and zero is returned.
  239. *
  240. * Note: function doesn't find node addresses, so no IO is needed. All
  241. * we need to know is the capacity of indirect blocks (taken from the
  242. * inode->i_sb).
  243. */
  244. /*
  245. * Portability note: the last comparison (check that we fit into triple
  246. * indirect block) is spelled differently, because otherwise on an
  247. * architecture with 32-bit longs and 8Kb pages we might get into trouble
  248. * if our filesystem had 8Kb blocks. We might use long long, but that would
  249. * kill us on x86. Oh, well, at least the sign propagation does not matter -
  250. * i_block would have to be negative in the very beginning, so we would not
  251. * get there at all.
  252. */
  253. static int ext3_block_to_path(struct inode *inode,
  254. long i_block, int offsets[4], int *boundary)
  255. {
  256. int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
  257. int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
  258. const long direct_blocks = EXT3_NDIR_BLOCKS,
  259. indirect_blocks = ptrs,
  260. double_blocks = (1 << (ptrs_bits * 2));
  261. int n = 0;
  262. int final = 0;
  263. if (i_block < 0) {
  264. ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
  265. } else if (i_block < direct_blocks) {
  266. offsets[n++] = i_block;
  267. final = direct_blocks;
  268. } else if ( (i_block -= direct_blocks) < indirect_blocks) {
  269. offsets[n++] = EXT3_IND_BLOCK;
  270. offsets[n++] = i_block;
  271. final = ptrs;
  272. } else if ((i_block -= indirect_blocks) < double_blocks) {
  273. offsets[n++] = EXT3_DIND_BLOCK;
  274. offsets[n++] = i_block >> ptrs_bits;
  275. offsets[n++] = i_block & (ptrs - 1);
  276. final = ptrs;
  277. } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
  278. offsets[n++] = EXT3_TIND_BLOCK;
  279. offsets[n++] = i_block >> (ptrs_bits * 2);
  280. offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
  281. offsets[n++] = i_block & (ptrs - 1);
  282. final = ptrs;
  283. } else {
  284. ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big");
  285. }
  286. if (boundary)
  287. *boundary = final - 1 - (i_block & (ptrs - 1));
  288. return n;
  289. }
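/*
 * Illustrative, standalone userspace mirror of the mapping above (editor's
 * addition, not part of the original file).  It assumes a 4KB block size,
 * i.e. 1024 block pointers per indirect block, and hard-codes the 12/13/14
 * slot numbers of EXT3_IND_BLOCK/EXT3_DIND_BLOCK/EXT3_TIND_BLOCK.
 */
#include <stdio.h>

static int demo_block_to_path(long i_block, int offsets[4])
{
	const int ptrs = 1024, ptrs_bits = 10, direct = 12;
	int n = 0;

	if (i_block < direct) {
		offsets[n++] = i_block;
	} else if ((i_block -= direct) < ptrs) {
		offsets[n++] = 12;			/* EXT3_IND_BLOCK */
		offsets[n++] = i_block;
	} else if ((i_block -= ptrs) < (1 << (ptrs_bits * 2))) {
		offsets[n++] = 13;			/* EXT3_DIND_BLOCK */
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else {
		i_block -= 1 << (ptrs_bits * 2);
		offsets[n++] = 14;			/* EXT3_TIND_BLOCK */
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
	}
	return n;
}

int main(void)
{
	int off[4], i, depth = demo_block_to_path(5000, off);

	printf("depth %d, path:", depth);	/* expect: depth 3, path: 13 3 892 */
	for (i = 0; i < depth; i++)
		printf(" %d", off[i]);
	printf("\n");
	return 0;
}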
  290. /**
  291. * ext3_get_branch - read the chain of indirect blocks leading to data
  292. * @inode: inode in question
  293. * @depth: depth of the chain (1 - direct pointer, etc.)
  294. * @offsets: offsets of pointers in inode/indirect blocks
  295. * @chain: place to store the result
  296. * @err: here we store the error value
  297. *
  298. * Function fills the array of triples <key, p, bh> and returns %NULL
  299. * if everything went OK or the pointer to the last filled triple
  300. * (incomplete one) otherwise. Upon the return chain[i].key contains
  301. * the number of (i+1)-th block in the chain (as it is stored in memory,
  302. * i.e. little-endian 32-bit), chain[i].p contains the address of that
  303. * number (it points into struct inode for i==0 and into the bh->b_data
  304. * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
  305. * block for i>0 and NULL for i==0. In other words, it holds the block
  306. * numbers of the chain, addresses they were taken from (and where we can
  307. * verify that chain did not change) and buffer_heads hosting these
  308. * numbers.
  309. *
  310. * Function stops when it stumbles upon zero pointer (absent block)
  311. * (pointer to last triple returned, *@err == 0)
  312. * or when it gets an IO error reading an indirect block
  313. * (ditto, *@err == -EIO)
  314. * or when it notices that chain had been changed while it was reading
  315. * (ditto, *@err == -EAGAIN)
  316. * or when it reads all @depth-1 indirect blocks successfully and finds
  317. * the whole chain, all way to the data (returns %NULL, *err == 0).
  318. */
  319. static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
  320. Indirect chain[4], int *err)
  321. {
  322. struct super_block *sb = inode->i_sb;
  323. Indirect *p = chain;
  324. struct buffer_head *bh;
  325. *err = 0;
  326. /* i_data is not going away, no lock needed */
  327. add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets);
  328. if (!p->key)
  329. goto no_block;
  330. while (--depth) {
  331. bh = sb_bread(sb, le32_to_cpu(p->key));
  332. if (!bh)
  333. goto failure;
  334. /* Reader: pointers */
  335. if (!verify_chain(chain, p))
  336. goto changed;
  337. add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
  338. /* Reader: end */
  339. if (!p->key)
  340. goto no_block;
  341. }
  342. return NULL;
  343. changed:
  344. brelse(bh);
  345. *err = -EAGAIN;
  346. goto no_block;
  347. failure:
  348. *err = -EIO;
  349. no_block:
  350. return p;
  351. }
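/*
 * Editor's worked example: for a depth-3 (double-indirect) path, chain[0]
 * lives in the inode, chain[1] in the double-indirect block and chain[2] in
 * the indirect block.  If the double-indirect block exists but its slot for
 * the needed indirect block is zero, the function returns &chain[1] with
 * *err == 0; ext3_get_blocks_handle() below then computes
 * indirect_blks = (chain + 3) - &chain[1] - 1 = 1, i.e. one indirect block
 * plus the data block(s) still have to be allocated.
 */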
  352. /**
  353. * ext3_find_near - find a place for allocation with sufficient locality
  354. * @inode: owner
  355. * @ind: descriptor of indirect block.
  356. *
  357. * This function returns the preferred place for block allocation.
  358. * It is used when heuristic for sequential allocation fails.
  359. * Rules are:
  360. * + if there is a block to the left of our position - allocate near it.
  361. * + if pointer will live in indirect block - allocate near that block.
  362. * + if pointer will live in inode - allocate in the same
  363. * cylinder group.
  364. *
  365. * In the latter case we colour the starting block by the caller's PID to
  366. * prevent it from clashing with concurrent allocations for a different inode
  367. * in the same block group. The PID is used here so that functionally related
  368. * files will be close-by on-disk.
  369. *
  370. * Caller must make sure that @ind is valid and will stay that way.
  371. */
  372. static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind)
  373. {
  374. struct ext3_inode_info *ei = EXT3_I(inode);
  375. __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
  376. __le32 *p;
  377. ext3_fsblk_t bg_start;
  378. ext3_grpblk_t colour;
  379. /* Try to find previous block */
  380. for (p = ind->p - 1; p >= start; p--) {
  381. if (*p)
  382. return le32_to_cpu(*p);
  383. }
  384. /* No such thing, so let's try location of indirect block */
  385. if (ind->bh)
  386. return ind->bh->b_blocknr;
  387. /*
  388. * Is it going to be referred to from the inode itself? OK, just put it
  389. * into the same cylinder group then.
  390. */
  391. bg_start = ext3_group_first_block_no(inode->i_sb, ei->i_block_group);
  392. colour = (current->pid % 16) *
  393. (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
  394. return bg_start + colour;
  395. }
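/*
 * Editor's note, with hypothetical numbers: if EXT3_BLOCKS_PER_GROUP() is
 * 32768 and current->pid is 1234, then
 *	colour = (1234 % 16) * (32768 / 16) = 2 * 2048 = 4096,
 * so this allocation starts 4096 blocks into the group instead of competing
 * with other processes for the very first free block.
 */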
  396. /**
  397. * ext3_find_goal - find a preferred place for allocation.
  398. * @inode: owner
  399. * @block: block we want
  400. * @partial: pointer to the last triple within a chain
  401. *
  402. * Normally this function finds the preferred place for block allocation
  403. * and returns it.
  404. */
  405. static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
  406. Indirect *partial)
  407. {
  408. struct ext3_block_alloc_info *block_i;
  409. block_i = EXT3_I(inode)->i_block_alloc_info;
  410. /*
  411. * try the heuristic for sequential allocation,
  412. * failing that at least try to get decent locality.
  413. */
  414. if (block_i && (block == block_i->last_alloc_logical_block + 1)
  415. && (block_i->last_alloc_physical_block != 0)) {
  416. return block_i->last_alloc_physical_block + 1;
  417. }
  418. return ext3_find_near(inode, partial);
  419. }
  420. /**
  421. * ext3_blks_to_allocate: Look up the block map and count the number
  422. * of direct blocks that need to be allocated for the given branch.
  423. *
  424. * @branch: chain of indirect blocks
  425. * @k: number of blocks needed for indirect blocks
  426. * @blks: number of data blocks to be mapped.
  427. * @blocks_to_boundary: the offset in the indirect block
  428. *
  429. * return the total number of blocks to be allocated, including the
  430. * direct and indirect blocks.
  431. */
  432. static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
  433. int blocks_to_boundary)
  434. {
  435. unsigned long count = 0;
  436. /*
  437. * Simple case: the [t,d]indirect block(s) have not been allocated yet,
  438. * so clearly the blocks on that path have not been allocated either
  439. */
  440. if (k > 0) {
  441. /* right now we don't handle cross boundary allocation */
  442. if (blks < blocks_to_boundary + 1)
  443. count += blks;
  444. else
  445. count += blocks_to_boundary + 1;
  446. return count;
  447. }
  448. count++;
  449. while (count < blks && count <= blocks_to_boundary &&
  450. le32_to_cpu(*(branch[0].p + count)) == 0) {
  451. count++;
  452. }
  453. return count;
  454. }
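/*
 * Editor's worked example: with blks = 8 and blocks_to_boundary = 3, at most
 * 4 direct blocks are counted (up to and including the boundary slot); the
 * k == 0 path additionally stops early at the first slot that is already
 * mapped on disk.
 */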
  455. /**
  456. * ext3_alloc_blocks: multiple allocate blocks needed for a branch
  457. * @indirect_blks: the number of blocks we need to allocate for indirect
  458. * blocks
  459. *
  460. * @new_blocks: on return it will store the new block numbers for
  461. * the indirect blocks(if needed) and the first direct block,
  462. * @blks: on return it will store the total number of allocated
  463. * direct blocks
  464. */
  465. static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
  466. ext3_fsblk_t goal, int indirect_blks, int blks,
  467. ext3_fsblk_t new_blocks[4], int *err)
  468. {
  469. int target, i;
  470. unsigned long count = 0;
  471. int index = 0;
  472. ext3_fsblk_t current_block = 0;
  473. int ret = 0;
  474. /*
  475. * Here we try to allocate the requested multiple blocks at once,
  476. * on a best-effort basis.
  477. * To build a branch, we should allocate blocks for
  478. * the indirect blocks(if not allocated yet), and at least
  479. * the first direct block of this branch. That's the
  480. * minimum number of blocks we need to allocate (required)
  481. */
  482. target = blks + indirect_blks;
  483. while (1) {
  484. count = target;
  485. /* allocating blocks for indirect blocks and direct blocks */
  486. current_block = ext3_new_blocks(handle,inode,goal,&count,err);
  487. if (*err)
  488. goto failed_out;
  489. target -= count;
  490. /* allocate blocks for indirect blocks */
  491. while (index < indirect_blks && count) {
  492. new_blocks[index++] = current_block++;
  493. count--;
  494. }
  495. if (count > 0)
  496. break;
  497. }
  498. /* save the new block number for the first direct block */
  499. new_blocks[index] = current_block;
  500. /* total number of blocks allocated for direct blocks */
  501. ret = count;
  502. *err = 0;
  503. return ret;
  504. failed_out:
  505. for (i = 0; i <index; i++)
  506. ext3_free_blocks(handle, inode, new_blocks[i], 1);
  507. return ret;
  508. }
  509. /**
  510. * ext3_alloc_branch - allocate and set up a chain of blocks.
  511. * @inode: owner
  512. * @indirect_blks: number of allocated indirect blocks
  513. * @blks: number of allocated direct blocks
  514. * @offsets: offsets (in the blocks) to store the pointers to next.
  515. * @branch: place to store the chain in.
  516. *
  517. * This function allocates blocks, zeroes out all but the last one,
  518. * links them into chain and (if we are synchronous) writes them to disk.
  519. * In other words, it prepares a branch that can be spliced onto the
  520. * inode. It stores the information about that chain in the branch[], in
  521. * the same format as ext3_get_branch() would do. We are calling it after
  522. * we had read the existing part of chain and partial points to the last
  523. * triple of that (one with zero ->key). Upon the exit we have the same
  524. * picture as after the successful ext3_get_block(), except that in one
  525. * place chain is disconnected - *branch->p is still zero (we did not
  526. * set the last link), but branch->key contains the number that should
  527. * be placed into *branch->p to fill that gap.
  528. *
  529. * If allocation fails we free all blocks we've allocated (and forget
  530. * their buffer_heads) and return the error value from the failed
  531. * ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
  532. * as described above and return 0.
  533. */
  534. static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
  535. int indirect_blks, int *blks, ext3_fsblk_t goal,
  536. int *offsets, Indirect *branch)
  537. {
  538. int blocksize = inode->i_sb->s_blocksize;
  539. int i, n = 0;
  540. int err = 0;
  541. struct buffer_head *bh;
  542. int num;
  543. ext3_fsblk_t new_blocks[4];
  544. ext3_fsblk_t current_block;
  545. num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
  546. *blks, new_blocks, &err);
  547. if (err)
  548. return err;
  549. branch[0].key = cpu_to_le32(new_blocks[0]);
  550. /*
  551. * metadata blocks and data blocks are allocated.
  552. */
  553. for (n = 1; n <= indirect_blks; n++) {
  554. /*
  555. * Get buffer_head for parent block, zero it out
  556. * and set the pointer to new one, then send
  557. * parent to disk.
  558. */
  559. bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
  560. branch[n].bh = bh;
  561. lock_buffer(bh);
  562. BUFFER_TRACE(bh, "call get_create_access");
  563. err = ext3_journal_get_create_access(handle, bh);
  564. if (err) {
  565. unlock_buffer(bh);
  566. brelse(bh);
  567. goto failed;
  568. }
  569. memset(bh->b_data, 0, blocksize);
  570. branch[n].p = (__le32 *) bh->b_data + offsets[n];
  571. branch[n].key = cpu_to_le32(new_blocks[n]);
  572. *branch[n].p = branch[n].key;
  573. if ( n == indirect_blks) {
  574. current_block = new_blocks[n];
  575. /*
  576. * End of chain, update the last new metablock of
  577. * the chain to point to the new allocated
  578. * data blocks numbers
  579. */
  580. for (i=1; i < num; i++)
  581. *(branch[n].p + i) = cpu_to_le32(++current_block);
  582. }
  583. BUFFER_TRACE(bh, "marking uptodate");
  584. set_buffer_uptodate(bh);
  585. unlock_buffer(bh);
  586. BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
  587. err = ext3_journal_dirty_metadata(handle, bh);
  588. if (err)
  589. goto failed;
  590. }
  591. *blks = num;
  592. return err;
  593. failed:
  594. /* Allocation failed, free what we already allocated */
  595. for (i = 1; i <= n ; i++) {
  596. BUFFER_TRACE(branch[i].bh, "call journal_forget");
  597. ext3_journal_forget(handle, branch[i].bh);
  598. }
  599. for (i = 0; i <indirect_blks; i++)
  600. ext3_free_blocks(handle, inode, new_blocks[i], 1);
  601. ext3_free_blocks(handle, inode, new_blocks[i], num);
  602. return err;
  603. }
  604. /**
  605. * ext3_splice_branch - splice the allocated branch onto inode.
  606. * @inode: owner
  607. * @block: (logical) number of block we are adding
  608. * @chain: chain of indirect blocks (with a missing link - see
  609. * ext3_alloc_branch)
  610. * @where: location of missing link
  611. * @num: number of indirect blocks we are adding
  612. * @blks: number of direct blocks we are adding
  613. *
  614. * This function fills the missing link and does all housekeeping needed in
  615. * inode (->i_blocks, etc.). In case of success we end up with the full
  616. * chain to new block and return 0.
  617. */
  618. static int ext3_splice_branch(handle_t *handle, struct inode *inode,
  619. long block, Indirect *where, int num, int blks)
  620. {
  621. int i;
  622. int err = 0;
  623. struct ext3_block_alloc_info *block_i;
  624. ext3_fsblk_t current_block;
  625. block_i = EXT3_I(inode)->i_block_alloc_info;
  626. /*
  627. * If we're splicing into a [td]indirect block (as opposed to the
  628. * inode) then we need to get write access to the [td]indirect block
  629. * before the splice.
  630. */
  631. if (where->bh) {
  632. BUFFER_TRACE(where->bh, "get_write_access");
  633. err = ext3_journal_get_write_access(handle, where->bh);
  634. if (err)
  635. goto err_out;
  636. }
  637. /* That's it */
  638. *where->p = where->key;
  639. /*
  640. * Update the host buffer_head or inode to point to the newly allocated
  641. * direct blocks
  642. */
  643. if (num == 0 && blks > 1) {
  644. current_block = le32_to_cpu(where->key) + 1;
  645. for (i = 1; i < blks; i++)
  646. *(where->p + i ) = cpu_to_le32(current_block++);
  647. }
  648. /*
  649. * update the most recently allocated logical & physical block
  650. * in i_block_alloc_info, to help find the proper goal block for the next
  651. * allocation
  652. */
  653. if (block_i) {
  654. block_i->last_alloc_logical_block = block + blks - 1;
  655. block_i->last_alloc_physical_block =
  656. le32_to_cpu(where[num].key) + blks - 1;
  657. }
  658. /* We are done with atomic stuff, now do the rest of housekeeping */
  659. inode->i_ctime = CURRENT_TIME_SEC;
  660. ext3_mark_inode_dirty(handle, inode);
  661. /* had we spliced it onto indirect block? */
  662. if (where->bh) {
  663. /*
  664. * If we spliced it onto an indirect block, we haven't
  665. * altered the inode. Note however that if it is being spliced
  666. * onto an indirect block at the very end of the file (the
  667. * file is growing) then we *will* alter the inode to reflect
  668. * the new i_size. But that is not done here - it is done in
  669. * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
  670. */
  671. jbd_debug(5, "splicing indirect only\n");
  672. BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
  673. err = ext3_journal_dirty_metadata(handle, where->bh);
  674. if (err)
  675. goto err_out;
  676. } else {
  677. /*
  678. * OK, we spliced it into the inode itself on a direct block.
  679. * Inode was dirtied above.
  680. */
  681. jbd_debug(5, "splicing direct\n");
  682. }
  683. return err;
  684. err_out:
  685. for (i = 1; i <= num; i++) {
  686. BUFFER_TRACE(where[i].bh, "call journal_forget");
  687. ext3_journal_forget(handle, where[i].bh);
  688. ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
  689. }
  690. ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
  691. return err;
  692. }
  693. /*
  694. * Allocation strategy is simple: if we have to allocate something, we will
  695. * have to go the whole way to leaf. So let's do it before attaching anything
  696. * to tree, set linkage between the newborn blocks, write them if sync is
  697. * required, recheck the path, free and repeat if check fails, otherwise
  698. * set the last missing link (that will protect us from any truncate-generated
  699. * removals - all blocks on the path are immune now) and possibly force the
  700. * write on the parent block.
  701. * That has a nice additional property: no special recovery from the failed
  702. * allocations is needed - we simply release blocks and do not touch anything
  703. * reachable from inode.
  704. *
  705. * `handle' can be NULL if create == 0.
  706. *
  707. * The BKL may not be held on entry here. Be sure to take it early.
  708. * return > 0, # of blocks mapped or allocated.
  709. * return = 0, if plain lookup failed.
  710. * return < 0, error case.
  711. */
  712. int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
  713. sector_t iblock, unsigned long maxblocks,
  714. struct buffer_head *bh_result,
  715. int create, int extend_disksize)
  716. {
  717. int err = -EIO;
  718. int offsets[4];
  719. Indirect chain[4];
  720. Indirect *partial;
  721. ext3_fsblk_t goal;
  722. int indirect_blks;
  723. int blocks_to_boundary = 0;
  724. int depth;
  725. struct ext3_inode_info *ei = EXT3_I(inode);
  726. int count = 0;
  727. ext3_fsblk_t first_block = 0;
  728. J_ASSERT(handle != NULL || create == 0);
  729. depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
  730. if (depth == 0)
  731. goto out;
  732. partial = ext3_get_branch(inode, depth, offsets, chain, &err);
  733. /* Simplest case - block found, no allocation needed */
  734. if (!partial) {
  735. first_block = le32_to_cpu(chain[depth - 1].key);
  736. clear_buffer_new(bh_result);
  737. count++;
  738. /*map more blocks*/
  739. while (count < maxblocks && count <= blocks_to_boundary) {
  740. ext3_fsblk_t blk;
  741. if (!verify_chain(chain, partial)) {
  742. /*
  743. * Indirect block might be removed by
  744. * truncate while we were reading it.
  745. * Handling of that case: forget what we've
  746. * got now. Flag the err as EAGAIN, so it
  747. * will reread.
  748. */
  749. err = -EAGAIN;
  750. count = 0;
  751. break;
  752. }
  753. blk = le32_to_cpu(*(chain[depth-1].p + count));
  754. if (blk == first_block + count)
  755. count++;
  756. else
  757. break;
  758. }
  759. if (err != -EAGAIN)
  760. goto got_it;
  761. }
  762. /* Next simple case - plain lookup or failed read of indirect block */
  763. if (!create || err == -EIO)
  764. goto cleanup;
  765. mutex_lock(&ei->truncate_mutex);
  766. /*
  767. * If the indirect block is missing while we are reading
  768. * the chain(ext3_get_branch() returns -EAGAIN err), or
  769. * if the chain has been changed after we grab the semaphore,
  770. * (either because another process truncated this branch, or
  771. * another get_block allocated this branch) re-grab the chain to see if
  772. * the request block has been allocated or not.
  773. *
  774. * Since we already block the truncate/other get_block
  775. * at this point, we will have the current copy of the chain when we
  776. * splice the branch into the tree.
  777. */
  778. if (err == -EAGAIN || !verify_chain(chain, partial)) {
  779. while (partial > chain) {
  780. brelse(partial->bh);
  781. partial--;
  782. }
  783. partial = ext3_get_branch(inode, depth, offsets, chain, &err);
  784. if (!partial) {
  785. count++;
  786. mutex_unlock(&ei->truncate_mutex);
  787. if (err)
  788. goto cleanup;
  789. clear_buffer_new(bh_result);
  790. goto got_it;
  791. }
  792. }
  793. /*
  794. * Okay, we need to do block allocation. Lazily initialize the block
  795. * allocation info here if necessary
  796. */
  797. if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
  798. ext3_init_block_alloc_info(inode);
  799. goal = ext3_find_goal(inode, iblock, partial);
  800. /* the number of blocks needed to allocate for [d,t]indirect blocks */
  801. indirect_blks = (chain + depth) - partial - 1;
  802. /*
  803. * Next look up the indirect map to count the total number of
  804. * direct blocks to allocate for this branch.
  805. */
  806. count = ext3_blks_to_allocate(partial, indirect_blks,
  807. maxblocks, blocks_to_boundary);
  808. /*
  809. * Block out ext3_truncate while we alter the tree
  810. */
  811. err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
  812. offsets + (partial - chain), partial);
  813. /*
  814. * The ext3_splice_branch call will free and forget any buffers
  815. * on the new chain if there is a failure, but that risks using
  816. * up transaction credits, especially for bitmaps where the
  817. * credits cannot be returned. Can we handle this somehow? We
  818. * may need to return -EAGAIN upwards in the worst case. --sct
  819. */
  820. if (!err)
  821. err = ext3_splice_branch(handle, inode, iblock,
  822. partial, indirect_blks, count);
  823. /*
  824. * i_disksize growing is protected by truncate_mutex. Don't forget to
  825. * protect it if you're about to implement concurrent
  826. * ext3_get_block() -bzzz
  827. */
  828. if (!err && extend_disksize && inode->i_size > ei->i_disksize)
  829. ei->i_disksize = inode->i_size;
  830. mutex_unlock(&ei->truncate_mutex);
  831. if (err)
  832. goto cleanup;
  833. set_buffer_new(bh_result);
  834. got_it:
  835. map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
  836. if (count > blocks_to_boundary)
  837. set_buffer_boundary(bh_result);
  838. err = count;
  839. /* Clean up and exit */
  840. partial = chain + depth - 1; /* the whole chain */
  841. cleanup:
  842. while (partial > chain) {
  843. BUFFER_TRACE(partial->bh, "call brelse");
  844. brelse(partial->bh);
  845. partial--;
  846. }
  847. BUFFER_TRACE(bh_result, "returned");
  848. out:
  849. return err;
  850. }
  851. /* Maximum number of blocks we map for direct IO at once. */
  852. #define DIO_MAX_BLOCKS 4096
  853. /*
  854. * Number of credits we need for writing DIO_MAX_BLOCKS:
  855. * We need sb + group descriptor + bitmap + inode -> 4
  856. * For B blocks with A block pointers per block we need:
  857. * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
  858. * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
  859. */
  860. #define DIO_CREDITS 25
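/*
 * Editor's note, spelling the arithmetic out for B = 4096 and A = 256:
 *   indirect:        4096/256 + 2       = 18
 *   doubly indirect: 4096/(256*256) + 2 =  2  (integer division)
 *   triply indirect:                       1
 *   sb + group descriptor + bitmap + inode = 4
 *   total                                  = 25
 */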
  861. static int ext3_get_block(struct inode *inode, sector_t iblock,
  862. struct buffer_head *bh_result, int create)
  863. {
  864. handle_t *handle = ext3_journal_current_handle();
  865. int ret = 0, started = 0;
  866. unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
  867. if (create && !handle) { /* Direct IO write... */
  868. if (max_blocks > DIO_MAX_BLOCKS)
  869. max_blocks = DIO_MAX_BLOCKS;
  870. handle = ext3_journal_start(inode, DIO_CREDITS +
  871. 2 * EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb));
  872. if (IS_ERR(handle)) {
  873. ret = PTR_ERR(handle);
  874. goto out;
  875. }
  876. started = 1;
  877. }
  878. ret = ext3_get_blocks_handle(handle, inode, iblock,
  879. max_blocks, bh_result, create, 0);
  880. if (ret > 0) {
  881. bh_result->b_size = (ret << inode->i_blkbits);
  882. ret = 0;
  883. }
  884. if (started)
  885. ext3_journal_stop(handle);
  886. out:
  887. return ret;
  888. }
  889. /*
  890. * `handle' can be NULL if create is zero
  891. */
  892. struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
  893. long block, int create, int *errp)
  894. {
  895. struct buffer_head dummy;
  896. int fatal = 0, err;
  897. J_ASSERT(handle != NULL || create == 0);
  898. dummy.b_state = 0;
  899. dummy.b_blocknr = -1000;
  900. buffer_trace_init(&dummy.b_history);
  901. err = ext3_get_blocks_handle(handle, inode, block, 1,
  902. &dummy, create, 1);
  903. /*
  904. * ext3_get_blocks_handle() returns number of blocks
  905. * mapped. 0 in case of a HOLE.
  906. */
  907. if (err > 0) {
  908. if (err > 1)
  909. WARN_ON(1);
  910. err = 0;
  911. }
  912. *errp = err;
  913. if (!err && buffer_mapped(&dummy)) {
  914. struct buffer_head *bh;
  915. bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
  916. if (!bh) {
  917. *errp = -EIO;
  918. goto err;
  919. }
  920. if (buffer_new(&dummy)) {
  921. J_ASSERT(create != 0);
  922. J_ASSERT(handle != NULL);
  923. /*
  924. * Now that we do not always journal data, we should
  925. * keep in mind whether this should always journal the
  926. * new buffer as metadata. For now, regular file
  927. * writes use ext3_get_block instead, so it's not a
  928. * problem.
  929. */
  930. lock_buffer(bh);
  931. BUFFER_TRACE(bh, "call get_create_access");
  932. fatal = ext3_journal_get_create_access(handle, bh);
  933. if (!fatal && !buffer_uptodate(bh)) {
  934. memset(bh->b_data,0,inode->i_sb->s_blocksize);
  935. set_buffer_uptodate(bh);
  936. }
  937. unlock_buffer(bh);
  938. BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
  939. err = ext3_journal_dirty_metadata(handle, bh);
  940. if (!fatal)
  941. fatal = err;
  942. } else {
  943. BUFFER_TRACE(bh, "not a new buffer");
  944. }
  945. if (fatal) {
  946. *errp = fatal;
  947. brelse(bh);
  948. bh = NULL;
  949. }
  950. return bh;
  951. }
  952. err:
  953. return NULL;
  954. }
  955. struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
  956. int block, int create, int *err)
  957. {
  958. struct buffer_head * bh;
  959. bh = ext3_getblk(handle, inode, block, create, err);
  960. if (!bh)
  961. return bh;
  962. if (buffer_uptodate(bh))
  963. return bh;
  964. ll_rw_block(READ_META, 1, &bh);
  965. wait_on_buffer(bh);
  966. if (buffer_uptodate(bh))
  967. return bh;
  968. put_bh(bh);
  969. *err = -EIO;
  970. return NULL;
  971. }
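/*
 * Editor's sketch (hypothetical caller, not from this file): directory and
 * symlink code typically reads metadata blocks through the helper above,
 * e.g.
 *
 *	bh = ext3_bread(handle, dir, blk, 0, &err);
 *	if (!bh)
 *		goto out;	/* hole (err == 0) or read error (err < 0) */
 *	...
 *	brelse(bh);
 */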
  972. static int walk_page_buffers( handle_t *handle,
  973. struct buffer_head *head,
  974. unsigned from,
  975. unsigned to,
  976. int *partial,
  977. int (*fn)( handle_t *handle,
  978. struct buffer_head *bh))
  979. {
  980. struct buffer_head *bh;
  981. unsigned block_start, block_end;
  982. unsigned blocksize = head->b_size;
  983. int err, ret = 0;
  984. struct buffer_head *next;
  985. for ( bh = head, block_start = 0;
  986. ret == 0 && (bh != head || !block_start);
  987. block_start = block_end, bh = next)
  988. {
  989. next = bh->b_this_page;
  990. block_end = block_start + blocksize;
  991. if (block_end <= from || block_start >= to) {
  992. if (partial && !buffer_uptodate(bh))
  993. *partial = 1;
  994. continue;
  995. }
  996. err = (*fn)(handle, bh);
  997. if (!ret)
  998. ret = err;
  999. }
  1000. return ret;
  1001. }
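/*
 * Editor's worked example: on a 4KB page with 1KB buffers, a call with
 * from = 1536 and to = 2560 applies *fn to the two buffers covering
 * [1024, 2048) and [2048, 3072); the first and last buffers fall outside
 * the range and are only checked for the *partial flag.
 */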
  1002. /*
  1003. * To preserve ordering, it is essential that the hole instantiation and
  1004. * the data write be encapsulated in a single transaction. We cannot
  1005. * close off a transaction and start a new one between the ext3_get_block()
  1006. * and the commit_write(). So doing the journal_start at the start of
  1007. * prepare_write() is the right place.
  1008. *
  1009. * Also, this function can nest inside ext3_writepage() ->
  1010. * block_write_full_page(). In that case, we *know* that ext3_writepage()
  1011. * has generated enough buffer credits to do the whole page. So we won't
  1012. * block on the journal in that case, which is good, because the caller may
  1013. * be PF_MEMALLOC.
  1014. *
  1015. * By accident, ext3 can be reentered when a transaction is open via
  1016. * quota file writes. If we were to commit the transaction while thus
  1017. * reentered, there can be a deadlock - we would be holding a quota
  1018. * lock, and the commit would never complete if another thread had a
  1019. * transaction open and was blocking on the quota lock - a ranking
  1020. * violation.
  1021. *
  1022. * So what we do is to rely on the fact that journal_stop/journal_start
  1023. * will _not_ run commit under these circumstances because handle->h_ref
  1024. * is elevated. We'll still have enough credits for the tiny quotafile
  1025. * write.
  1026. */
  1027. static int do_journal_get_write_access(handle_t *handle,
  1028. struct buffer_head *bh)
  1029. {
  1030. if (!buffer_mapped(bh) || buffer_freed(bh))
  1031. return 0;
  1032. return ext3_journal_get_write_access(handle, bh);
  1033. }
  1034. static int ext3_write_begin(struct file *file, struct address_space *mapping,
  1035. loff_t pos, unsigned len, unsigned flags,
  1036. struct page **pagep, void **fsdata)
  1037. {
  1038. struct inode *inode = mapping->host;
  1039. int ret, needed_blocks = ext3_writepage_trans_blocks(inode);
  1040. handle_t *handle;
  1041. int retries = 0;
  1042. struct page *page;
  1043. pgoff_t index;
  1044. unsigned from, to;
  1045. index = pos >> PAGE_CACHE_SHIFT;
  1046. from = pos & (PAGE_CACHE_SIZE - 1);
  1047. to = from + len;
  1048. retry:
  1049. page = __grab_cache_page(mapping, index);
  1050. if (!page)
  1051. return -ENOMEM;
  1052. *pagep = page;
  1053. handle = ext3_journal_start(inode, needed_blocks);
  1054. if (IS_ERR(handle)) {
  1055. unlock_page(page);
  1056. page_cache_release(page);
  1057. ret = PTR_ERR(handle);
  1058. goto out;
  1059. }
  1060. ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
  1061. ext3_get_block);
  1062. if (ret)
  1063. goto write_begin_failed;
  1064. if (ext3_should_journal_data(inode)) {
  1065. ret = walk_page_buffers(handle, page_buffers(page),
  1066. from, to, NULL, do_journal_get_write_access);
  1067. }
  1068. write_begin_failed:
  1069. if (ret) {
  1070. ext3_journal_stop(handle);
  1071. unlock_page(page);
  1072. page_cache_release(page);
  1073. }
  1074. if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
  1075. goto retry;
  1076. out:
  1077. return ret;
  1078. }
  1079. int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
  1080. {
  1081. int err = journal_dirty_data(handle, bh);
  1082. if (err)
  1083. ext3_journal_abort_handle(__func__, __func__,
  1084. bh, handle, err);
  1085. return err;
  1086. }
  1087. /* For write_end() in data=journal mode */
  1088. static int write_end_fn(handle_t *handle, struct buffer_head *bh)
  1089. {
  1090. if (!buffer_mapped(bh) || buffer_freed(bh))
  1091. return 0;
  1092. set_buffer_uptodate(bh);
  1093. return ext3_journal_dirty_metadata(handle, bh);
  1094. }
  1095. /*
  1096. * Generic write_end handler for ordered and writeback ext3 journal modes.
  1097. * We can't use generic_write_end, because that unlocks the page and we need to
  1098. * unlock the page after ext3_journal_stop, but ext3_journal_stop must run
  1099. * after block_write_end.
  1100. */
  1101. static int ext3_generic_write_end(struct file *file,
  1102. struct address_space *mapping,
  1103. loff_t pos, unsigned len, unsigned copied,
  1104. struct page *page, void *fsdata)
  1105. {
  1106. struct inode *inode = file->f_mapping->host;
  1107. copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
  1108. if (pos+copied > inode->i_size) {
  1109. i_size_write(inode, pos+copied);
  1110. mark_inode_dirty(inode);
  1111. }
  1112. return copied;
  1113. }
  1114. /*
  1115. * We need to pick up the new inode size which generic_commit_write gave us
  1116. * `file' can be NULL - eg, when called from page_symlink().
  1117. *
  1118. * ext3 never places buffers on inode->i_mapping->private_list. metadata
  1119. * buffers are managed internally.
  1120. */
  1121. static int ext3_ordered_write_end(struct file *file,
  1122. struct address_space *mapping,
  1123. loff_t pos, unsigned len, unsigned copied,
  1124. struct page *page, void *fsdata)
  1125. {
  1126. handle_t *handle = ext3_journal_current_handle();
  1127. struct inode *inode = file->f_mapping->host;
  1128. unsigned from, to;
  1129. int ret = 0, ret2;
  1130. from = pos & (PAGE_CACHE_SIZE - 1);
  1131. to = from + len;
  1132. ret = walk_page_buffers(handle, page_buffers(page),
  1133. from, to, NULL, ext3_journal_dirty_data);
  1134. if (ret == 0) {
  1135. /*
  1136. * generic_write_end() will run mark_inode_dirty() if i_size
  1137. * changes. So let's piggyback the i_disksize mark_inode_dirty
  1138. * into that.
  1139. */
  1140. loff_t new_i_size;
  1141. new_i_size = pos + copied;
  1142. if (new_i_size > EXT3_I(inode)->i_disksize)
  1143. EXT3_I(inode)->i_disksize = new_i_size;
  1144. ret2 = ext3_generic_write_end(file, mapping, pos, len, copied,
  1145. page, fsdata);
  1146. copied = ret2;
  1147. if (ret2 < 0)
  1148. ret = ret2;
  1149. }
  1150. ret2 = ext3_journal_stop(handle);
  1151. if (!ret)
  1152. ret = ret2;
  1153. unlock_page(page);
  1154. page_cache_release(page);
  1155. return ret ? ret : copied;
  1156. }
  1157. static int ext3_writeback_write_end(struct file *file,
  1158. struct address_space *mapping,
  1159. loff_t pos, unsigned len, unsigned copied,
  1160. struct page *page, void *fsdata)
  1161. {
  1162. handle_t *handle = ext3_journal_current_handle();
  1163. struct inode *inode = file->f_mapping->host;
  1164. int ret = 0, ret2;
  1165. loff_t new_i_size;
  1166. new_i_size = pos + copied;
  1167. if (new_i_size > EXT3_I(inode)->i_disksize)
  1168. EXT3_I(inode)->i_disksize = new_i_size;
  1169. ret2 = ext3_generic_write_end(file, mapping, pos, len, copied,
  1170. page, fsdata);
  1171. copied = ret2;
  1172. if (ret2 < 0)
  1173. ret = ret2;
  1174. ret2 = ext3_journal_stop(handle);
  1175. if (!ret)
  1176. ret = ret2;
  1177. unlock_page(page);
  1178. page_cache_release(page);
  1179. return ret ? ret : copied;
  1180. }
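/*
 * write_end handler for data=journal mode. Instead of simply marking the
 * data buffers dirty, file them as metadata in the current transaction via
 * write_end_fn(), and set EXT3_STATE_JDATA so that ext3_bmap() knows
 * journalled data may still be sitting in the journal and must be flushed
 * before blocks can be mapped.
 */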
  1181. static int ext3_journalled_write_end(struct file *file,
  1182. struct address_space *mapping,
  1183. loff_t pos, unsigned len, unsigned copied,
  1184. struct page *page, void *fsdata)
  1185. {
  1186. handle_t *handle = ext3_journal_current_handle();
  1187. struct inode *inode = mapping->host;
  1188. int ret = 0, ret2;
  1189. int partial = 0;
  1190. unsigned from, to;
  1191. from = pos & (PAGE_CACHE_SIZE - 1);
  1192. to = from + len;
  1193. if (copied < len) {
  1194. if (!PageUptodate(page))
  1195. copied = 0;
  1196. page_zero_new_buffers(page, from+copied, to);
  1197. }
  1198. ret = walk_page_buffers(handle, page_buffers(page), from,
  1199. to, &partial, write_end_fn);
  1200. if (!partial)
  1201. SetPageUptodate(page);
  1202. if (pos+copied > inode->i_size)
  1203. i_size_write(inode, pos+copied);
  1204. EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
  1205. if (inode->i_size > EXT3_I(inode)->i_disksize) {
  1206. EXT3_I(inode)->i_disksize = inode->i_size;
  1207. ret2 = ext3_mark_inode_dirty(handle, inode);
  1208. if (!ret)
  1209. ret = ret2;
  1210. }
  1211. ret2 = ext3_journal_stop(handle);
  1212. if (!ret)
  1213. ret = ret2;
  1214. unlock_page(page);
  1215. page_cache_release(page);
  1216. return ret ? ret : copied;
  1217. }
  1218. /*
  1219. * bmap() is special. It gets used by applications such as lilo and by
  1220. * the swapper to find the on-disk block of a specific piece of data.
  1221. *
  1222. * Naturally, this is dangerous if the block concerned is still in the
  1223. * journal. If somebody makes a swapfile on an ext3 data-journaling
  1224. * filesystem and enables swap, then they may get a nasty shock when the
  1225. * data getting swapped to that swapfile suddenly gets overwritten by
1226. * the original zeros written out previously to the journal and
  1227. * awaiting writeback in the kernel's buffer cache.
  1228. *
  1229. * So, if we see any bmap calls here on a modified, data-journaled file,
  1230. * take extra steps to flush any blocks which might be in the cache.
  1231. */
  1232. static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
  1233. {
  1234. struct inode *inode = mapping->host;
  1235. journal_t *journal;
  1236. int err;
  1237. if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
  1238. /*
  1239. * This is a REALLY heavyweight approach, but the use of
  1240. * bmap on dirty files is expected to be extremely rare:
  1241. * only if we run lilo or swapon on a freshly made file
  1242. * do we expect this to happen.
  1243. *
  1244. * (bmap requires CAP_SYS_RAWIO so this does not
  1245. * represent an unprivileged user DOS attack --- we'd be
  1246. * in trouble if mortal users could trigger this path at
  1247. * will.)
  1248. *
  1249. * NB. EXT3_STATE_JDATA is not set on files other than
  1250. * regular files. If somebody wants to bmap a directory
  1251. * or symlink and gets confused because the buffer
  1252. * hasn't yet been flushed to disk, they deserve
  1253. * everything they get.
  1254. */
  1255. EXT3_I(inode)->i_state &= ~EXT3_STATE_JDATA;
  1256. journal = EXT3_JOURNAL(inode);
  1257. journal_lock_updates(journal);
  1258. err = journal_flush(journal);
  1259. journal_unlock_updates(journal);
  1260. if (err)
  1261. return 0;
  1262. }
1263. return generic_block_bmap(mapping, block, ext3_get_block);
  1264. }
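/*
 * bget_one()/bput_one() are used by ext3_ordered_writepage() to pin the
 * page's buffer_heads across block_write_full_page(): the page may be
 * unlocked and truncated once the write starts, but the elevated reference
 * count keeps the buffers themselves valid.
 */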
  1265. static int bget_one(handle_t *handle, struct buffer_head *bh)
  1266. {
  1267. get_bh(bh);
  1268. return 0;
  1269. }
  1270. static int bput_one(handle_t *handle, struct buffer_head *bh)
  1271. {
  1272. put_bh(bh);
  1273. return 0;
  1274. }
  1275. static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
  1276. {
  1277. if (buffer_mapped(bh))
  1278. return ext3_journal_dirty_data(handle, bh);
  1279. return 0;
  1280. }
  1281. /*
  1282. * Note that we always start a transaction even if we're not journalling
  1283. * data. This is to preserve ordering: any hole instantiation within
  1284. * __block_write_full_page -> ext3_get_block() should be journalled
  1285. * along with the data so we don't crash and then get metadata which
  1286. * refers to old data.
  1287. *
  1288. * In all journalling modes block_write_full_page() will start the I/O.
  1289. *
  1290. * Problem:
  1291. *
  1292. * ext3_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
  1293. * ext3_writepage()
  1294. *
  1295. * Similar for:
  1296. *
  1297. * ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ...
  1298. *
  1299. * Same applies to ext3_get_block(). We will deadlock on various things like
  1300. * lock_journal and i_truncate_mutex.
  1301. *
  1302. * Setting PF_MEMALLOC here doesn't work - too many internal memory
  1303. * allocations fail.
  1304. *
  1305. * 16May01: If we're reentered then journal_current_handle() will be
  1306. * non-zero. We simply *return*.
  1307. *
  1308. * 1 July 2001: @@@ FIXME:
  1309. * In journalled data mode, a data buffer may be metadata against the
  1310. * current transaction. But the same file is part of a shared mapping
  1311. * and someone does a writepage() on it.
  1312. *
  1313. * We will move the buffer onto the async_data list, but *after* it has
  1314. * been dirtied. So there's a small window where we have dirty data on
  1315. * BJ_Metadata.
  1316. *
  1317. * Note that this only applies to the last partial page in the file. The
  1318. * bit which block_write_full_page() uses prepare/commit for. (That's
  1319. * broken code anyway: it's wrong for msync()).
  1320. *
  1321. * It's a rare case: affects the final partial page, for journalled data
1322. * where the file is subject to both write() and writepage() in the same
1323. * transaction. To fix it we'll need a custom block_write_full_page().
  1324. * We'll probably need that anyway for journalling writepage() output.
  1325. *
  1326. * We don't honour synchronous mounts for writepage(). That would be
  1327. * disastrous. Any write() or metadata operation will sync the fs for
  1328. * us.
  1329. *
  1330. * AKPM2: if all the page's buffers are mapped to disk and !data=journal,
  1331. * we don't need to open a transaction here.
  1332. */
  1333. static int ext3_ordered_writepage(struct page *page,
  1334. struct writeback_control *wbc)
  1335. {
  1336. struct inode *inode = page->mapping->host;
  1337. struct buffer_head *page_bufs;
  1338. handle_t *handle = NULL;
  1339. int ret = 0;
  1340. int err;
  1341. J_ASSERT(PageLocked(page));
  1342. /*
  1343. * We give up here if we're reentered, because it might be for a
  1344. * different filesystem.
  1345. */
  1346. if (ext3_journal_current_handle())
  1347. goto out_fail;
  1348. handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
  1349. if (IS_ERR(handle)) {
  1350. ret = PTR_ERR(handle);
  1351. goto out_fail;
  1352. }
  1353. if (!page_has_buffers(page)) {
  1354. create_empty_buffers(page, inode->i_sb->s_blocksize,
  1355. (1 << BH_Dirty)|(1 << BH_Uptodate));
  1356. }
  1357. page_bufs = page_buffers(page);
  1358. walk_page_buffers(handle, page_bufs, 0,
  1359. PAGE_CACHE_SIZE, NULL, bget_one);
  1360. ret = block_write_full_page(page, ext3_get_block, wbc);
  1361. /*
  1362. * The page can become unlocked at any point now, and
  1363. * truncate can then come in and change things. So we
  1364. * can't touch *page from now on. But *page_bufs is
  1365. * safe due to elevated refcount.
  1366. */
  1367. /*
  1368. * And attach them to the current transaction. But only if
  1369. * block_write_full_page() succeeded. Otherwise they are unmapped,
  1370. * and generally junk.
  1371. */
  1372. if (ret == 0) {
  1373. err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
  1374. NULL, journal_dirty_data_fn);
  1375. if (!ret)
  1376. ret = err;
  1377. }
  1378. walk_page_buffers(handle, page_bufs, 0,
  1379. PAGE_CACHE_SIZE, NULL, bput_one);
  1380. err = ext3_journal_stop(handle);
  1381. if (!ret)
  1382. ret = err;
  1383. return ret;
  1384. out_fail:
  1385. redirty_page_for_writepage(wbc, page);
  1386. unlock_page(page);
  1387. return ret;
  1388. }
  1389. static int ext3_writeback_writepage(struct page *page,
  1390. struct writeback_control *wbc)
  1391. {
  1392. struct inode *inode = page->mapping->host;
  1393. handle_t *handle = NULL;
  1394. int ret = 0;
  1395. int err;
  1396. if (ext3_journal_current_handle())
  1397. goto out_fail;
  1398. handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
  1399. if (IS_ERR(handle)) {
  1400. ret = PTR_ERR(handle);
  1401. goto out_fail;
  1402. }
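/*
 * With the "nobh" mount option and data=writeback, nobh_writepage() can
 * (where possible) write the page out without attaching buffer_heads;
 * otherwise fall back to the normal buffer-based path.
 */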
  1403. if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode))
  1404. ret = nobh_writepage(page, ext3_get_block, wbc);
  1405. else
  1406. ret = block_write_full_page(page, ext3_get_block, wbc);
  1407. err = ext3_journal_stop(handle);
  1408. if (!ret)
  1409. ret = err;
  1410. return ret;
  1411. out_fail:
  1412. redirty_page_for_writepage(wbc, page);
  1413. unlock_page(page);
  1414. return ret;
  1415. }
  1416. static int ext3_journalled_writepage(struct page *page,
  1417. struct writeback_control *wbc)
  1418. {
  1419. struct inode *inode = page->mapping->host;
  1420. handle_t *handle = NULL;
  1421. int ret = 0;
  1422. int err;
  1423. if (ext3_journal_current_handle())
  1424. goto no_write;
  1425. handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
  1426. if (IS_ERR(handle)) {
  1427. ret = PTR_ERR(handle);
  1428. goto no_write;
  1429. }
  1430. if (!page_has_buffers(page) || PageChecked(page)) {
  1431. /*
  1432. * It's mmapped pagecache. Add buffers and journal it. There
  1433. * doesn't seem much point in redirtying the page here.
  1434. */
  1435. ClearPageChecked(page);
  1436. ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
  1437. ext3_get_block);
  1438. if (ret != 0) {
  1439. ext3_journal_stop(handle);
  1440. goto out_unlock;
  1441. }
  1442. ret = walk_page_buffers(handle, page_buffers(page), 0,
  1443. PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
  1444. err = walk_page_buffers(handle, page_buffers(page), 0,
  1445. PAGE_CACHE_SIZE, NULL, write_end_fn);
  1446. if (ret == 0)
  1447. ret = err;
  1448. EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
  1449. unlock_page(page);
  1450. } else {
  1451. /*
  1452. * It may be a page full of checkpoint-mode buffers. We don't
  1453. * really know unless we go poke around in the buffer_heads.
  1454. * But block_write_full_page will do the right thing.
  1455. */
  1456. ret = block_write_full_page(page, ext3_get_block, wbc);
  1457. }
  1458. err = ext3_journal_stop(handle);
  1459. if (!ret)
  1460. ret = err;
  1461. out:
  1462. return ret;
  1463. no_write:
  1464. redirty_page_for_writepage(wbc, page);
  1465. out_unlock:
  1466. unlock_page(page);
  1467. goto out;
  1468. }
  1469. static int ext3_readpage(struct file *file, struct page *page)
  1470. {
  1471. return mpage_readpage(page, ext3_get_block);
  1472. }
  1473. static int
  1474. ext3_readpages(struct file *file, struct address_space *mapping,
  1475. struct list_head *pages, unsigned nr_pages)
  1476. {
  1477. return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
  1478. }
  1479. static void ext3_invalidatepage(struct page *page, unsigned long offset)
  1480. {
  1481. journal_t *journal = EXT3_JOURNAL(page->mapping->host);
  1482. /*
  1483. * If it's a full truncate we just forget about the pending dirtying
  1484. */
  1485. if (offset == 0)
  1486. ClearPageChecked(page);
  1487. journal_invalidatepage(journal, page, offset);
  1488. }
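/*
 * Defer to the journal: journal_try_to_free_buffers() will refuse to free
 * buffers that are still attached to a transaction.
 */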
  1489. static int ext3_releasepage(struct page *page, gfp_t wait)
  1490. {
  1491. journal_t *journal = EXT3_JOURNAL(page->mapping->host);
  1492. WARN_ON(PageChecked(page));
  1493. if (!page_has_buffers(page))
  1494. return 0;
  1495. return journal_try_to_free_buffers(journal, page, wait);
  1496. }
  1497. /*
  1498. * If the O_DIRECT write will extend the file then add this inode to the
  1499. * orphan list. So recovery will truncate it back to the original size
  1500. * if the machine crashes during the write.
  1501. *
1502. * If the O_DIRECT write is instantiating holes inside i_size and the machine
1503. * crashes then stale disk data _may_ be exposed inside the file. But current
1504. * VFS code falls back to the buffered path in that case so we are safe.
  1505. */
  1506. static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
  1507. const struct iovec *iov, loff_t offset,
  1508. unsigned long nr_segs)
  1509. {
  1510. struct file *file = iocb->ki_filp;
  1511. struct inode *inode = file->f_mapping->host;
  1512. struct ext3_inode_info *ei = EXT3_I(inode);
  1513. handle_t *handle;
  1514. ssize_t ret;
  1515. int orphan = 0;
  1516. size_t count = iov_length(iov, nr_segs);
  1517. if (rw == WRITE) {
  1518. loff_t final_size = offset + count;
  1519. if (final_size > inode->i_size) {
  1520. /* Credits for sb + inode write */
  1521. handle = ext3_journal_start(inode, 2);
  1522. if (IS_ERR(handle)) {
  1523. ret = PTR_ERR(handle);
  1524. goto out;
  1525. }
  1526. ret = ext3_orphan_add(handle, inode);
  1527. if (ret) {
  1528. ext3_journal_stop(handle);
  1529. goto out;
  1530. }
  1531. orphan = 1;
  1532. ei->i_disksize = inode->i_size;
  1533. ext3_journal_stop(handle);
  1534. }
  1535. }
  1536. ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
  1537. offset, nr_segs,
  1538. ext3_get_block, NULL);
  1539. if (orphan) {
  1540. int err;
  1541. /* Credits for sb + inode write */
  1542. handle = ext3_journal_start(inode, 2);
  1543. if (IS_ERR(handle)) {
  1544. /* This is really bad luck. We've written the data
  1545. * but cannot extend i_size. Bail out and pretend
  1546. * the write failed... */
  1547. ret = PTR_ERR(handle);
  1548. goto out;
  1549. }
  1550. if (inode->i_nlink)
  1551. ext3_orphan_del(handle, inode);
  1552. if (ret > 0) {
  1553. loff_t end = offset + ret;
  1554. if (end > inode->i_size) {
  1555. ei->i_disksize = end;
  1556. i_size_write(inode, end);
  1557. /*
  1558. * We're going to return a positive `ret'
  1559. * here due to non-zero-length I/O, so there's
  1560. * no way of reporting error returns from
  1561. * ext3_mark_inode_dirty() to userspace. So
  1562. * ignore it.
  1563. */
  1564. ext3_mark_inode_dirty(handle, inode);
  1565. }
  1566. }
  1567. err = ext3_journal_stop(handle);
  1568. if (ret == 0)
  1569. ret = err;
  1570. }
  1571. out:
  1572. return ret;
  1573. }
  1574. /*
  1575. * Pages can be marked dirty completely asynchronously from ext3's journalling
  1576. * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
  1577. * much here because ->set_page_dirty is called under VFS locks. The page is
  1578. * not necessarily locked.
  1579. *
  1580. * We cannot just dirty the page and leave attached buffers clean, because the
  1581. * buffers' dirty state is "definitive". We cannot just set the buffers dirty
  1582. * or jbddirty because all the journalling code will explode.
  1583. *
  1584. * So what we do is to mark the page "pending dirty" and next time writepage
  1585. * is called, propagate that into the buffers appropriately.
  1586. */
  1587. static int ext3_journalled_set_page_dirty(struct page *page)
  1588. {
  1589. SetPageChecked(page);
  1590. return __set_page_dirty_nobuffers(page);
  1591. }
  1592. static const struct address_space_operations ext3_ordered_aops = {
  1593. .readpage = ext3_readpage,
  1594. .readpages = ext3_readpages,
  1595. .writepage = ext3_ordered_writepage,
  1596. .sync_page = block_sync_page,
  1597. .write_begin = ext3_write_begin,
  1598. .write_end = ext3_ordered_write_end,
  1599. .bmap = ext3_bmap,
  1600. .invalidatepage = ext3_invalidatepage,
  1601. .releasepage = ext3_releasepage,
  1602. .direct_IO = ext3_direct_IO,
  1603. .migratepage = buffer_migrate_page,
  1604. };
  1605. static const struct address_space_operations ext3_writeback_aops = {
  1606. .readpage = ext3_readpage,
  1607. .readpages = ext3_readpages,
  1608. .writepage = ext3_writeback_writepage,
  1609. .sync_page = block_sync_page,
  1610. .write_begin = ext3_write_begin,
  1611. .write_end = ext3_writeback_write_end,
  1612. .bmap = ext3_bmap,
  1613. .invalidatepage = ext3_invalidatepage,
  1614. .releasepage = ext3_releasepage,
  1615. .direct_IO = ext3_direct_IO,
  1616. .migratepage = buffer_migrate_page,
  1617. };
  1618. static const struct address_space_operations ext3_journalled_aops = {
  1619. .readpage = ext3_readpage,
  1620. .readpages = ext3_readpages,
  1621. .writepage = ext3_journalled_writepage,
  1622. .sync_page = block_sync_page,
  1623. .write_begin = ext3_write_begin,
  1624. .write_end = ext3_journalled_write_end,
  1625. .set_page_dirty = ext3_journalled_set_page_dirty,
  1626. .bmap = ext3_bmap,
  1627. .invalidatepage = ext3_invalidatepage,
  1628. .releasepage = ext3_releasepage,
  1629. };
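/*
 * Pick the address_space operations that match the inode's data journalling
 * mode: ordered, writeback or full data journalling.
 */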
  1630. void ext3_set_aops(struct inode *inode)
  1631. {
  1632. if (ext3_should_order_data(inode))
  1633. inode->i_mapping->a_ops = &ext3_ordered_aops;
  1634. else if (ext3_should_writeback_data(inode))
  1635. inode->i_mapping->a_ops = &ext3_writeback_aops;
  1636. else
  1637. inode->i_mapping->a_ops = &ext3_journalled_aops;
  1638. }
  1639. /*
  1640. * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
  1641. * up to the end of the block which corresponds to `from'.
1642. * This is required during truncate. We need to physically zero the tail end
  1643. * of that block so it doesn't yield old data if the file is later grown.
  1644. */
  1645. static int ext3_block_truncate_page(handle_t *handle, struct page *page,
  1646. struct address_space *mapping, loff_t from)
  1647. {
  1648. ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT;
  1649. unsigned offset = from & (PAGE_CACHE_SIZE-1);
  1650. unsigned blocksize, iblock, length, pos;
  1651. struct inode *inode = mapping->host;
  1652. struct buffer_head *bh;
  1653. int err = 0;
  1654. blocksize = inode->i_sb->s_blocksize;
  1655. length = blocksize - (offset & (blocksize - 1));
  1656. iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
  1657. /*
  1658. * For "nobh" option, we can only work if we don't need to
  1659. * read-in the page - otherwise we create buffers to do the IO.
  1660. */
  1661. if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
  1662. ext3_should_writeback_data(inode) && PageUptodate(page)) {
  1663. zero_user(page, offset, length);
  1664. set_page_dirty(page);
  1665. goto unlock;
  1666. }
  1667. if (!page_has_buffers(page))
  1668. create_empty_buffers(page, blocksize, 0);
  1669. /* Find the buffer that contains "offset" */
  1670. bh = page_buffers(page);
  1671. pos = blocksize;
  1672. while (offset >= pos) {
  1673. bh = bh->b_this_page;
  1674. iblock++;
  1675. pos += blocksize;
  1676. }
  1677. err = 0;
  1678. if (buffer_freed(bh)) {
  1679. BUFFER_TRACE(bh, "freed: skip");
  1680. goto unlock;
  1681. }
  1682. if (!buffer_mapped(bh)) {
  1683. BUFFER_TRACE(bh, "unmapped");
  1684. ext3_get_block(inode, iblock, bh, 0);
  1685. /* unmapped? It's a hole - nothing to do */
  1686. if (!buffer_mapped(bh)) {
  1687. BUFFER_TRACE(bh, "still unmapped");
  1688. goto unlock;
  1689. }
  1690. }
  1691. /* Ok, it's mapped. Make sure it's up-to-date */
  1692. if (PageUptodate(page))
  1693. set_buffer_uptodate(bh);
  1694. if (!buffer_uptodate(bh)) {
  1695. err = -EIO;
  1696. ll_rw_block(READ, 1, &bh);
  1697. wait_on_buffer(bh);
  1698. /* Uhhuh. Read error. Complain and punt. */
  1699. if (!buffer_uptodate(bh))
  1700. goto unlock;
  1701. }
  1702. if (ext3_should_journal_data(inode)) {
  1703. BUFFER_TRACE(bh, "get write access");
  1704. err = ext3_journal_get_write_access(handle, bh);
  1705. if (err)
  1706. goto unlock;
  1707. }
  1708. zero_user(page, offset, length);
  1709. BUFFER_TRACE(bh, "zeroed end of block");
  1710. err = 0;
  1711. if (ext3_should_journal_data(inode)) {
  1712. err = ext3_journal_dirty_metadata(handle, bh);
  1713. } else {
  1714. if (ext3_should_order_data(inode))
  1715. err = ext3_journal_dirty_data(handle, bh);
  1716. mark_buffer_dirty(bh);
  1717. }
  1718. unlock:
  1719. unlock_page(page);
  1720. page_cache_release(page);
  1721. return err;
  1722. }
  1723. /*
  1724. * Probably it should be a library function... search for first non-zero word
  1725. * or memcmp with zero_page, whatever is better for particular architecture.
  1726. * Linus?
  1727. */
  1728. static inline int all_zeroes(__le32 *p, __le32 *q)
  1729. {
  1730. while (p < q)
  1731. if (*p++)
  1732. return 0;
  1733. return 1;
  1734. }
  1735. /**
  1736. * ext3_find_shared - find the indirect blocks for partial truncation.
  1737. * @inode: inode in question
  1738. * @depth: depth of the affected branch
  1739. * @offsets: offsets of pointers in that branch (see ext3_block_to_path)
  1740. * @chain: place to store the pointers to partial indirect blocks
1741. * @top: place to store the (detached) top of the branch
  1742. *
  1743. * This is a helper function used by ext3_truncate().
  1744. *
  1745. * When we do truncate() we may have to clean the ends of several
  1746. * indirect blocks but leave the blocks themselves alive. Block is
1747. * partially truncated if some data below the new i_size is referred
  1748. * from it (and it is on the path to the first completely truncated
  1749. * data block, indeed). We have to free the top of that path along
  1750. * with everything to the right of the path. Since no allocation
  1751. * past the truncation point is possible until ext3_truncate()
  1752. * finishes, we may safely do the latter, but top of branch may
  1753. * require special attention - pageout below the truncation point
  1754. * might try to populate it.
  1755. *
  1756. * We atomically detach the top of branch from the tree, store the
  1757. * block number of its root in *@top, pointers to buffer_heads of
  1758. * partially truncated blocks - in @chain[].bh and pointers to
  1759. * their last elements that should not be removed - in
  1760. * @chain[].p. Return value is the pointer to last filled element
  1761. * of @chain.
  1762. *
1763. * The work left to the caller is the actual freeing of the subtrees:
  1764. * a) free the subtree starting from *@top
  1765. * b) free the subtrees whose roots are stored in
  1766. * (@chain[i].p+1 .. end of @chain[i].bh->b_data)
  1767. * c) free the subtrees growing from the inode past the @chain[0].
  1768. * (no partially truncated stuff there). */
  1769. static Indirect *ext3_find_shared(struct inode *inode, int depth,
  1770. int offsets[4], Indirect chain[4], __le32 *top)
  1771. {
  1772. Indirect *partial, *p;
  1773. int k, err;
  1774. *top = 0;
1775. /* Make k index the deepest non-null offset + 1 */
  1776. for (k = depth; k > 1 && !offsets[k-1]; k--)
  1777. ;
  1778. partial = ext3_get_branch(inode, k, offsets, chain, &err);
  1779. /* Writer: pointers */
  1780. if (!partial)
  1781. partial = chain + k-1;
  1782. /*
  1783. * If the branch acquired continuation since we've looked at it -
  1784. * fine, it should all survive and (new) top doesn't belong to us.
  1785. */
  1786. if (!partial->key && *partial->p)
  1787. /* Writer: end */
  1788. goto no_top;
  1789. for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
  1790. ;
  1791. /*
  1792. * OK, we've found the last block that must survive. The rest of our
  1793. * branch should be detached before unlocking. However, if that rest
  1794. * of branch is all ours and does not grow immediately from the inode
  1795. * it's easier to cheat and just decrement partial->p.
  1796. */
  1797. if (p == chain + k - 1 && p > chain) {
  1798. p->p--;
  1799. } else {
  1800. *top = *p->p;
  1801. /* Nope, don't do this in ext3. Must leave the tree intact */
  1802. #if 0
  1803. *p->p = 0;
  1804. #endif
  1805. }
  1806. /* Writer: end */
  1807. while(partial > p) {
  1808. brelse(partial->bh);
  1809. partial--;
  1810. }
  1811. no_top:
  1812. return partial;
  1813. }
  1814. /*
  1815. * Zero a number of block pointers in either an inode or an indirect block.
  1816. * If we restart the transaction we must again get write access to the
  1817. * indirect block for further modification.
  1818. *
  1819. * We release `count' blocks on disk, but (last - first) may be greater
  1820. * than `count' because there can be holes in there.
  1821. */
  1822. static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
  1823. struct buffer_head *bh, ext3_fsblk_t block_to_free,
  1824. unsigned long count, __le32 *first, __le32 *last)
  1825. {
  1826. __le32 *p;
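/*
 * If the current handle can no longer be stretched (try_to_extend_transaction()
 * returns non-zero), journal whatever metadata we have dirtied so far, restart
 * the transaction via ext3_journal_test_restart() and re-take write access to
 * the indirect block before continuing.
 */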
  1827. if (try_to_extend_transaction(handle, inode)) {
  1828. if (bh) {
  1829. BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
  1830. ext3_journal_dirty_metadata(handle, bh);
  1831. }
  1832. ext3_mark_inode_dirty(handle, inode);
  1833. ext3_journal_test_restart(handle, inode);
  1834. if (bh) {
  1835. BUFFER_TRACE(bh, "retaking write access");
  1836. ext3_journal_get_write_access(handle, bh);
  1837. }
  1838. }
  1839. /*
  1840. * Any buffers which are on the journal will be in memory. We find
  1841. * them on the hash table so journal_revoke() will run journal_forget()
  1842. * on them. We've already detached each block from the file, so
  1843. * bforget() in journal_forget() should be safe.
  1844. *
  1845. * AKPM: turn on bforget in journal_forget()!!!
  1846. */
  1847. for (p = first; p < last; p++) {
  1848. u32 nr = le32_to_cpu(*p);
  1849. if (nr) {
  1850. struct buffer_head *bh;
  1851. *p = 0;
  1852. bh = sb_find_get_block(inode->i_sb, nr);
  1853. ext3_forget(handle, 0, inode, bh, nr);
  1854. }
  1855. }
  1856. ext3_free_blocks(handle, inode, block_to_free, count);
  1857. }
  1858. /**
  1859. * ext3_free_data - free a list of data blocks
  1860. * @handle: handle for this transaction
  1861. * @inode: inode we are dealing with
  1862. * @this_bh: indirect buffer_head which contains *@first and *@last
  1863. * @first: array of block numbers
  1864. * @last: points immediately past the end of array
  1865. *
1866. * We are freeing all blocks referred to from that array (numbers are stored as
  1867. * little-endian 32-bit) and updating @inode->i_blocks appropriately.
  1868. *
  1869. * We accumulate contiguous runs of blocks to free. Conveniently, if these
  1870. * blocks are contiguous then releasing them at one time will only affect one
  1871. * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
  1872. * actually use a lot of journal space.
  1873. *
  1874. * @this_bh will be %NULL if @first and @last point into the inode's direct
  1875. * block pointers.
  1876. */
  1877. static void ext3_free_data(handle_t *handle, struct inode *inode,
  1878. struct buffer_head *this_bh,
  1879. __le32 *first, __le32 *last)
  1880. {
  1881. ext3_fsblk_t block_to_free = 0; /* Starting block # of a run */
  1882. unsigned long count = 0; /* Number of blocks in the run */
  1883. __le32 *block_to_free_p = NULL; /* Pointer into inode/ind
  1884. corresponding to
  1885. block_to_free */
  1886. ext3_fsblk_t nr; /* Current block # */
  1887. __le32 *p; /* Pointer into inode/ind
  1888. for current block */
  1889. int err;
  1890. if (this_bh) { /* For indirect block */
  1891. BUFFER_TRACE(this_bh, "get_write_access");
  1892. err = ext3_journal_get_write_access(handle, this_bh);
  1893. /* Important: if we can't update the indirect pointers
  1894. * to the blocks, we can't free them. */
  1895. if (err)
  1896. return;
  1897. }
  1898. for (p = first; p < last; p++) {
  1899. nr = le32_to_cpu(*p);
  1900. if (nr) {
  1901. /* accumulate blocks to free if they're contiguous */
  1902. if (count == 0) {
  1903. block_to_free = nr;
  1904. block_to_free_p = p;
  1905. count = 1;
  1906. } else if (nr == block_to_free + count) {
  1907. count++;
  1908. } else {
  1909. ext3_clear_blocks(handle, inode, this_bh,
  1910. block_to_free,
  1911. count, block_to_free_p, p);
  1912. block_to_free = nr;
  1913. block_to_free_p = p;
  1914. count = 1;
  1915. }
  1916. }
  1917. }
  1918. if (count > 0)
  1919. ext3_clear_blocks(handle, inode, this_bh, block_to_free,
  1920. count, block_to_free_p, p);
  1921. if (this_bh) {
  1922. BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
  1923. ext3_journal_dirty_metadata(handle, this_bh);
  1924. }
  1925. }
  1926. /**
  1927. * ext3_free_branches - free an array of branches
  1928. * @handle: JBD handle for this transaction
  1929. * @inode: inode we are dealing with
  1930. * @parent_bh: the buffer_head which contains *@first and *@last
  1931. * @first: array of block numbers
  1932. * @last: pointer immediately past the end of array
  1933. * @depth: depth of the branches to free
  1934. *
1935. * We are freeing all blocks referred to from these branches (numbers are
  1936. * stored as little-endian 32-bit) and updating @inode->i_blocks
  1937. * appropriately.
  1938. */
  1939. static void ext3_free_branches(handle_t *handle, struct inode *inode,
  1940. struct buffer_head *parent_bh,
  1941. __le32 *first, __le32 *last, int depth)
  1942. {
  1943. ext3_fsblk_t nr;
  1944. __le32 *p;
  1945. if (is_handle_aborted(handle))
  1946. return;
  1947. if (depth--) {
  1948. struct buffer_head *bh;
  1949. int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
  1950. p = last;
  1951. while (--p >= first) {
  1952. nr = le32_to_cpu(*p);
  1953. if (!nr)
  1954. continue; /* A hole */
  1955. /* Go read the buffer for the next level down */
  1956. bh = sb_bread(inode->i_sb, nr);
  1957. /*
  1958. * A read failure? Report error and clear slot
  1959. * (should be rare).
  1960. */
  1961. if (!bh) {
  1962. ext3_error(inode->i_sb, "ext3_free_branches",
  1963. "Read failure, inode=%lu, block="E3FSBLK,
  1964. inode->i_ino, nr);
  1965. continue;
  1966. }
  1967. /* This zaps the entire block. Bottom up. */
  1968. BUFFER_TRACE(bh, "free child branches");
  1969. ext3_free_branches(handle, inode, bh,
  1970. (__le32*)bh->b_data,
  1971. (__le32*)bh->b_data + addr_per_block,
  1972. depth);
  1973. /*
  1974. * We've probably journalled the indirect block several
  1975. * times during the truncate. But it's no longer
  1976. * needed and we now drop it from the transaction via
  1977. * journal_revoke().
  1978. *
  1979. * That's easy if it's exclusively part of this
  1980. * transaction. But if it's part of the committing
  1981. * transaction then journal_forget() will simply
  1982. * brelse() it. That means that if the underlying
  1983. * block is reallocated in ext3_get_block(),
  1984. * unmap_underlying_metadata() will find this block
  1985. * and will try to get rid of it. damn, damn.
  1986. *
  1987. * If this block has already been committed to the
  1988. * journal, a revoke record will be written. And
  1989. * revoke records must be emitted *before* clearing
  1990. * this block's bit in the bitmaps.
  1991. */
  1992. ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
  1993. /*
1994. * Everything below this pointer has been
  1995. * released. Now let this top-of-subtree go.
  1996. *
  1997. * We want the freeing of this indirect block to be
  1998. * atomic in the journal with the updating of the
  1999. * bitmap block which owns it. So make some room in
  2000. * the journal.
  2001. *
  2002. * We zero the parent pointer *after* freeing its
  2003. * pointee in the bitmaps, so if extend_transaction()
  2004. * for some reason fails to put the bitmap changes and
  2005. * the release into the same transaction, recovery
  2006. * will merely complain about releasing a free block,
  2007. * rather than leaking blocks.
  2008. */
  2009. if (is_handle_aborted(handle))
  2010. return;
  2011. if (try_to_extend_transaction(handle, inode)) {
  2012. ext3_mark_inode_dirty(handle, inode);
  2013. ext3_journal_test_restart(handle, inode);
  2014. }
  2015. ext3_free_blocks(handle, inode, nr, 1);
  2016. if (parent_bh) {
  2017. /*
  2018. * The block which we have just freed is
  2019. * pointed to by an indirect block: journal it
  2020. */
  2021. BUFFER_TRACE(parent_bh, "get_write_access");
  2022. if (!ext3_journal_get_write_access(handle,
  2023. parent_bh)){
  2024. *p = 0;
  2025. BUFFER_TRACE(parent_bh,
  2026. "call ext3_journal_dirty_metadata");
  2027. ext3_journal_dirty_metadata(handle,
  2028. parent_bh);
  2029. }
  2030. }
  2031. }
  2032. } else {
  2033. /* We have reached the bottom of the tree. */
  2034. BUFFER_TRACE(parent_bh, "free data blocks");
  2035. ext3_free_data(handle, inode, parent_bh, first, last);
  2036. }
  2037. }
  2038. /*
  2039. * ext3_truncate()
  2040. *
  2041. * We block out ext3_get_block() block instantiations across the entire
  2042. * transaction, and VFS/VM ensures that ext3_truncate() cannot run
  2043. * simultaneously on behalf of the same inode.
  2044. *
2045. * As we work through the truncate and commit bits of it to the journal there
  2046. * is one core, guiding principle: the file's tree must always be consistent on
  2047. * disk. We must be able to restart the truncate after a crash.
  2048. *
  2049. * The file's tree may be transiently inconsistent in memory (although it
  2050. * probably isn't), but whenever we close off and commit a journal transaction,
  2051. * the contents of (the filesystem + the journal) must be consistent and
  2052. * restartable. It's pretty simple, really: bottom up, right to left (although
  2053. * left-to-right works OK too).
  2054. *
  2055. * Note that at recovery time, journal replay occurs *before* the restart of
  2056. * truncate against the orphan inode list.
  2057. *
  2058. * The committed inode has the new, desired i_size (which is the same as
  2059. * i_disksize in this case). After a crash, ext3_orphan_cleanup() will see
  2060. * that this inode's truncate did not complete and it will again call
  2061. * ext3_truncate() to have another go. So there will be instantiated blocks
  2062. * to the right of the truncation point in a crashed ext3 filesystem. But
  2063. * that's fine - as long as they are linked from the inode, the post-crash
  2064. * ext3_truncate() run will find them and release them.
  2065. */
  2066. void ext3_truncate(struct inode *inode)
  2067. {
  2068. handle_t *handle;
  2069. struct ext3_inode_info *ei = EXT3_I(inode);
  2070. __le32 *i_data = ei->i_data;
  2071. int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
  2072. struct address_space *mapping = inode->i_mapping;
  2073. int offsets[4];
  2074. Indirect chain[4];
  2075. Indirect *partial;
  2076. __le32 nr = 0;
  2077. int n;
  2078. long last_block;
  2079. unsigned blocksize = inode->i_sb->s_blocksize;
  2080. struct page *page;
  2081. if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
  2082. S_ISLNK(inode->i_mode)))
  2083. return;
  2084. if (ext3_inode_is_fast_symlink(inode))
  2085. return;
  2086. if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
  2087. return;
  2088. /*
  2089. * We have to lock the EOF page here, because lock_page() nests
  2090. * outside journal_start().
  2091. */
  2092. if ((inode->i_size & (blocksize - 1)) == 0) {
  2093. /* Block boundary? Nothing to do */
  2094. page = NULL;
  2095. } else {
  2096. page = grab_cache_page(mapping,
  2097. inode->i_size >> PAGE_CACHE_SHIFT);
  2098. if (!page)
  2099. return;
  2100. }
  2101. handle = start_transaction(inode);
  2102. if (IS_ERR(handle)) {
  2103. if (page) {
  2104. clear_highpage(page);
  2105. flush_dcache_page(page);
  2106. unlock_page(page);
  2107. page_cache_release(page);
  2108. }
  2109. return; /* AKPM: return what? */
  2110. }
  2111. last_block = (inode->i_size + blocksize-1)
  2112. >> EXT3_BLOCK_SIZE_BITS(inode->i_sb);
  2113. if (page)
  2114. ext3_block_truncate_page(handle, page, mapping, inode->i_size);
  2115. n = ext3_block_to_path(inode, last_block, offsets, NULL);
  2116. if (n == 0)
  2117. goto out_stop; /* error */
  2118. /*
  2119. * OK. This truncate is going to happen. We add the inode to the
  2120. * orphan list, so that if this truncate spans multiple transactions,
  2121. * and we crash, we will resume the truncate when the filesystem
  2122. * recovers. It also marks the inode dirty, to catch the new size.
  2123. *
  2124. * Implication: the file must always be in a sane, consistent
  2125. * truncatable state while each transaction commits.
  2126. */
  2127. if (ext3_orphan_add(handle, inode))
  2128. goto out_stop;
  2129. /*
  2130. * The orphan list entry will now protect us from any crash which
  2131. * occurs before the truncate completes, so it is now safe to propagate
  2132. * the new, shorter inode size (held for now in i_size) into the
  2133. * on-disk inode. We do this via i_disksize, which is the value which
  2134. * ext3 *really* writes onto the disk inode.
  2135. */
  2136. ei->i_disksize = inode->i_size;
  2137. /*
  2138. * From here we block out all ext3_get_block() callers who want to
  2139. * modify the block allocation tree.
  2140. */
  2141. mutex_lock(&ei->truncate_mutex);
  2142. if (n == 1) { /* direct blocks */
  2143. ext3_free_data(handle, inode, NULL, i_data+offsets[0],
  2144. i_data + EXT3_NDIR_BLOCKS);
  2145. goto do_indirects;
  2146. }
  2147. partial = ext3_find_shared(inode, n, offsets, chain, &nr);
  2148. /* Kill the top of shared branch (not detached) */
  2149. if (nr) {
  2150. if (partial == chain) {
  2151. /* Shared branch grows from the inode */
  2152. ext3_free_branches(handle, inode, NULL,
  2153. &nr, &nr+1, (chain+n-1) - partial);
  2154. *partial->p = 0;
  2155. /*
  2156. * We mark the inode dirty prior to restart,
  2157. * and prior to stop. No need for it here.
  2158. */
  2159. } else {
  2160. /* Shared branch grows from an indirect block */
  2161. BUFFER_TRACE(partial->bh, "get_write_access");
  2162. ext3_free_branches(handle, inode, partial->bh,
  2163. partial->p,
  2164. partial->p+1, (chain+n-1) - partial);
  2165. }
  2166. }
  2167. /* Clear the ends of indirect blocks on the shared branch */
  2168. while (partial > chain) {
  2169. ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
  2170. (__le32*)partial->bh->b_data+addr_per_block,
  2171. (chain+n-1) - partial);
  2172. BUFFER_TRACE(partial->bh, "call brelse");
  2173. brelse (partial->bh);
  2174. partial--;
  2175. }
  2176. do_indirects:
  2177. /* Kill the remaining (whole) subtrees */
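/*
 * The cases below intentionally fall through: offsets[0] tells us the
 * shallowest indirection level that still holds blocks past the new EOF,
 * and every deeper level (double, triple indirect) must be freed as well.
 */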
  2178. switch (offsets[0]) {
  2179. default:
  2180. nr = i_data[EXT3_IND_BLOCK];
  2181. if (nr) {
  2182. ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
  2183. i_data[EXT3_IND_BLOCK] = 0;
  2184. }
  2185. case EXT3_IND_BLOCK:
  2186. nr = i_data[EXT3_DIND_BLOCK];
  2187. if (nr) {
  2188. ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
  2189. i_data[EXT3_DIND_BLOCK] = 0;
  2190. }
  2191. case EXT3_DIND_BLOCK:
  2192. nr = i_data[EXT3_TIND_BLOCK];
  2193. if (nr) {
  2194. ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
  2195. i_data[EXT3_TIND_BLOCK] = 0;
  2196. }
  2197. case EXT3_TIND_BLOCK:
  2198. ;
  2199. }
  2200. ext3_discard_reservation(inode);
  2201. mutex_unlock(&ei->truncate_mutex);
  2202. inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
  2203. ext3_mark_inode_dirty(handle, inode);
  2204. /*
  2205. * In a multi-transaction truncate, we only make the final transaction
  2206. * synchronous
  2207. */
  2208. if (IS_SYNC(inode))
  2209. handle->h_sync = 1;
  2210. out_stop:
  2211. /*
  2212. * If this was a simple ftruncate(), and the file will remain alive
  2213. * then we need to clear up the orphan record which we created above.
  2214. * However, if this was a real unlink then we were called by
  2215. * ext3_delete_inode(), and we allow that function to clean up the
  2216. * orphan info for us.
  2217. */
  2218. if (inode->i_nlink)
  2219. ext3_orphan_del(handle, inode);
  2220. ext3_journal_stop(handle);
  2221. }
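/*
 * Map an inode number to the filesystem block holding its on-disk
 * structure. Fills in iloc->block_group and the byte offset of the inode
 * within that block; returns 0 on a bad inode number or a missing group
 * descriptor.
 */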
  2222. static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb,
  2223. unsigned long ino, struct ext3_iloc *iloc)
  2224. {
  2225. unsigned long block_group;
  2226. unsigned long offset;
  2227. ext3_fsblk_t block;
  2228. struct ext3_group_desc *gdp;
  2229. if (!ext3_valid_inum(sb, ino)) {
  2230. /*
  2231. * This error is already checked for in namei.c unless we are
  2232. * looking at an NFS filehandle, in which case no error
  2233. * report is needed
  2234. */
  2235. return 0;
  2236. }
  2237. block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
  2238. gdp = ext3_get_group_desc(sb, block_group, NULL);
  2239. if (!gdp)
  2240. return 0;
  2241. /*
  2242. * Figure out the offset within the block group inode table
  2243. */
  2244. offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
  2245. EXT3_INODE_SIZE(sb);
  2246. block = le32_to_cpu(gdp->bg_inode_table) +
  2247. (offset >> EXT3_BLOCK_SIZE_BITS(sb));
  2248. iloc->block_group = block_group;
  2249. iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1);
  2250. return block;
  2251. }
  2252. /*
  2253. * ext3_get_inode_loc returns with an extra refcount against the inode's
  2254. * underlying buffer_head on success. If 'in_mem' is true, we have all
  2255. * data in memory that is needed to recreate the on-disk version of this
  2256. * inode.
  2257. */
  2258. static int __ext3_get_inode_loc(struct inode *inode,
  2259. struct ext3_iloc *iloc, int in_mem)
  2260. {
  2261. ext3_fsblk_t block;
  2262. struct buffer_head *bh;
  2263. block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc);
  2264. if (!block)
  2265. return -EIO;
  2266. bh = sb_getblk(inode->i_sb, block);
  2267. if (!bh) {
  2268. ext3_error (inode->i_sb, "ext3_get_inode_loc",
  2269. "unable to read inode block - "
  2270. "inode=%lu, block="E3FSBLK,
  2271. inode->i_ino, block);
  2272. return -EIO;
  2273. }
  2274. if (!buffer_uptodate(bh)) {
  2275. lock_buffer(bh);
  2276. if (buffer_uptodate(bh)) {
  2277. /* someone brought it uptodate while we waited */
  2278. unlock_buffer(bh);
  2279. goto has_buffer;
  2280. }
  2281. /*
  2282. * If we have all information of the inode in memory and this
  2283. * is the only valid inode in the block, we need not read the
  2284. * block.
  2285. */
  2286. if (in_mem) {
  2287. struct buffer_head *bitmap_bh;
  2288. struct ext3_group_desc *desc;
  2289. int inodes_per_buffer;
  2290. int inode_offset, i;
  2291. int block_group;
  2292. int start;
  2293. block_group = (inode->i_ino - 1) /
  2294. EXT3_INODES_PER_GROUP(inode->i_sb);
  2295. inodes_per_buffer = bh->b_size /
  2296. EXT3_INODE_SIZE(inode->i_sb);
  2297. inode_offset = ((inode->i_ino - 1) %
  2298. EXT3_INODES_PER_GROUP(inode->i_sb));
  2299. start = inode_offset & ~(inodes_per_buffer - 1);
  2300. /* Is the inode bitmap in cache? */
  2301. desc = ext3_get_group_desc(inode->i_sb,
  2302. block_group, NULL);
  2303. if (!desc)
  2304. goto make_io;
  2305. bitmap_bh = sb_getblk(inode->i_sb,
  2306. le32_to_cpu(desc->bg_inode_bitmap));
  2307. if (!bitmap_bh)
  2308. goto make_io;
  2309. /*
  2310. * If the inode bitmap isn't in cache then the
  2311. * optimisation may end up performing two reads instead
  2312. * of one, so skip it.
  2313. */
  2314. if (!buffer_uptodate(bitmap_bh)) {
  2315. brelse(bitmap_bh);
  2316. goto make_io;
  2317. }
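/*
 * Scan the bitmap bits of every other inode sharing this buffer; if they
 * are all free there is nothing of value on disk, so we can simply zero
 * the buffer instead of reading it.
 */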
  2318. for (i = start; i < start + inodes_per_buffer; i++) {
  2319. if (i == inode_offset)
  2320. continue;
  2321. if (ext3_test_bit(i, bitmap_bh->b_data))
  2322. break;
  2323. }
  2324. brelse(bitmap_bh);
  2325. if (i == start + inodes_per_buffer) {
  2326. /* all other inodes are free, so skip I/O */
  2327. memset(bh->b_data, 0, bh->b_size);
  2328. set_buffer_uptodate(bh);
  2329. unlock_buffer(bh);
  2330. goto has_buffer;
  2331. }
  2332. }
  2333. make_io:
  2334. /*
  2335. * There are other valid inodes in the buffer, this inode
  2336. * has in-inode xattrs, or we don't have this inode in memory.
  2337. * Read the block from disk.
  2338. */
  2339. get_bh(bh);
  2340. bh->b_end_io = end_buffer_read_sync;
  2341. submit_bh(READ_META, bh);
  2342. wait_on_buffer(bh);
  2343. if (!buffer_uptodate(bh)) {
  2344. ext3_error(inode->i_sb, "ext3_get_inode_loc",
  2345. "unable to read inode block - "
  2346. "inode=%lu, block="E3FSBLK,
  2347. inode->i_ino, block);
  2348. brelse(bh);
  2349. return -EIO;
  2350. }
  2351. }
  2352. has_buffer:
  2353. iloc->bh = bh;
  2354. return 0;
  2355. }
  2356. int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
  2357. {
  2358. /* We have all inode data except xattrs in memory here. */
  2359. return __ext3_get_inode_loc(inode, iloc,
  2360. !(EXT3_I(inode)->i_state & EXT3_STATE_XATTR));
  2361. }
  2362. void ext3_set_inode_flags(struct inode *inode)
  2363. {
  2364. unsigned int flags = EXT3_I(inode)->i_flags;
  2365. inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
  2366. if (flags & EXT3_SYNC_FL)
  2367. inode->i_flags |= S_SYNC;
  2368. if (flags & EXT3_APPEND_FL)
  2369. inode->i_flags |= S_APPEND;
  2370. if (flags & EXT3_IMMUTABLE_FL)
  2371. inode->i_flags |= S_IMMUTABLE;
  2372. if (flags & EXT3_NOATIME_FL)
  2373. inode->i_flags |= S_NOATIME;
  2374. if (flags & EXT3_DIRSYNC_FL)
  2375. inode->i_flags |= S_DIRSYNC;
  2376. }
  2377. /* Propagate flags from i_flags to EXT3_I(inode)->i_flags */
  2378. void ext3_get_inode_flags(struct ext3_inode_info *ei)
  2379. {
  2380. unsigned int flags = ei->vfs_inode.i_flags;
  2381. ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL|
  2382. EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL);
  2383. if (flags & S_SYNC)
  2384. ei->i_flags |= EXT3_SYNC_FL;
  2385. if (flags & S_APPEND)
  2386. ei->i_flags |= EXT3_APPEND_FL;
  2387. if (flags & S_IMMUTABLE)
  2388. ei->i_flags |= EXT3_IMMUTABLE_FL;
  2389. if (flags & S_NOATIME)
  2390. ei->i_flags |= EXT3_NOATIME_FL;
  2391. if (flags & S_DIRSYNC)
  2392. ei->i_flags |= EXT3_DIRSYNC_FL;
  2393. }
  2394. struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
  2395. {
  2396. struct ext3_iloc iloc;
  2397. struct ext3_inode *raw_inode;
  2398. struct ext3_inode_info *ei;
  2399. struct buffer_head *bh;
  2400. struct inode *inode;
  2401. long ret;
  2402. int block;
  2403. inode = iget_locked(sb, ino);
  2404. if (!inode)
  2405. return ERR_PTR(-ENOMEM);
  2406. if (!(inode->i_state & I_NEW))
  2407. return inode;
  2408. ei = EXT3_I(inode);
  2409. #ifdef CONFIG_EXT3_FS_POSIX_ACL
  2410. ei->i_acl = EXT3_ACL_NOT_CACHED;
  2411. ei->i_default_acl = EXT3_ACL_NOT_CACHED;
  2412. #endif
  2413. ei->i_block_alloc_info = NULL;
  2414. ret = __ext3_get_inode_loc(inode, &iloc, 0);
  2415. if (ret < 0)
  2416. goto bad_inode;
  2417. bh = iloc.bh;
  2418. raw_inode = ext3_raw_inode(&iloc);
  2419. inode->i_mode = le16_to_cpu(raw_inode->i_mode);
  2420. inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
  2421. inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
  2422. if(!(test_opt (inode->i_sb, NO_UID32))) {
  2423. inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
  2424. inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
  2425. }
  2426. inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
  2427. inode->i_size = le32_to_cpu(raw_inode->i_size);
  2428. inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
  2429. inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
  2430. inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
  2431. inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
  2432. ei->i_state = 0;
  2433. ei->i_dir_start_lookup = 0;
  2434. ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
  2435. /* We now have enough fields to check if the inode was active or not.
2436. * This is needed because nfsd might try to access dead inodes;
2437. * the test is the same one that e2fsck uses.
2438. * NeilBrown 1999oct15
  2439. */
  2440. if (inode->i_nlink == 0) {
  2441. if (inode->i_mode == 0 ||
  2442. !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
  2443. /* this inode is deleted */
  2444. brelse (bh);
  2445. ret = -ESTALE;
  2446. goto bad_inode;
  2447. }
  2448. /* The only unlinked inodes we let through here have
  2449. * valid i_mode and are being read by the orphan
  2450. * recovery code: that's fine, we're about to complete
  2451. * the process of deleting those. */
  2452. }
  2453. inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
  2454. ei->i_flags = le32_to_cpu(raw_inode->i_flags);
  2455. #ifdef EXT3_FRAGMENTS
  2456. ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
  2457. ei->i_frag_no = raw_inode->i_frag;
  2458. ei->i_frag_size = raw_inode->i_fsize;
  2459. #endif
  2460. ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
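/*
 * The on-disk i_size_high overlays i_dir_acl: only regular files carry a
 * 64-bit size, directories keep the directory ACL in that field.
 */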
  2461. if (!S_ISREG(inode->i_mode)) {
  2462. ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
  2463. } else {
  2464. inode->i_size |=
  2465. ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
  2466. }
  2467. ei->i_disksize = inode->i_size;
  2468. inode->i_generation = le32_to_cpu(raw_inode->i_generation);
  2469. ei->i_block_group = iloc.block_group;
  2470. /*
  2471. * NOTE! The in-memory inode i_data array is in little-endian order
  2472. * even on big-endian machines: we do NOT byteswap the block numbers!
  2473. */
  2474. for (block = 0; block < EXT3_N_BLOCKS; block++)
  2475. ei->i_data[block] = raw_inode->i_block[block];
  2476. INIT_LIST_HEAD(&ei->i_orphan);
  2477. if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 &&
  2478. EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) {
  2479. /*
  2480. * When mke2fs creates big inodes it does not zero out
  2481. * the unused bytes above EXT3_GOOD_OLD_INODE_SIZE,
  2482. * so ignore those first few inodes.
  2483. */
  2484. ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
  2485. if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
  2486. EXT3_INODE_SIZE(inode->i_sb)) {
  2487. brelse (bh);
  2488. ret = -EIO;
  2489. goto bad_inode;
  2490. }
  2491. if (ei->i_extra_isize == 0) {
  2492. /* The extra space is currently unused. Use it. */
  2493. ei->i_extra_isize = sizeof(struct ext3_inode) -
  2494. EXT3_GOOD_OLD_INODE_SIZE;
  2495. } else {
  2496. __le32 *magic = (void *)raw_inode +
  2497. EXT3_GOOD_OLD_INODE_SIZE +
  2498. ei->i_extra_isize;
  2499. if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC))
  2500. ei->i_state |= EXT3_STATE_XATTR;
  2501. }
  2502. } else
  2503. ei->i_extra_isize = 0;
  2504. if (S_ISREG(inode->i_mode)) {
  2505. inode->i_op = &ext3_file_inode_operations;
  2506. inode->i_fop = &ext3_file_operations;
  2507. ext3_set_aops(inode);
  2508. } else if (S_ISDIR(inode->i_mode)) {
  2509. inode->i_op = &ext3_dir_inode_operations;
  2510. inode->i_fop = &ext3_dir_operations;
  2511. } else if (S_ISLNK(inode->i_mode)) {
  2512. if (ext3_inode_is_fast_symlink(inode))
  2513. inode->i_op = &ext3_fast_symlink_inode_operations;
  2514. else {
  2515. inode->i_op = &ext3_symlink_inode_operations;
  2516. ext3_set_aops(inode);
  2517. }
  2518. } else {
  2519. inode->i_op = &ext3_special_inode_operations;
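/*
 * Device numbers: the old 16-bit encoding lives in i_block[0], the newer
 * larger dev_t encoding in i_block[1].
 */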
  2520. if (raw_inode->i_block[0])
  2521. init_special_inode(inode, inode->i_mode,
  2522. old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
  2523. else
  2524. init_special_inode(inode, inode->i_mode,
  2525. new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
  2526. }
  2527. brelse (iloc.bh);
  2528. ext3_set_inode_flags(inode);
  2529. unlock_new_inode(inode);
  2530. return inode;
  2531. bad_inode:
  2532. iget_failed(inode);
  2533. return ERR_PTR(ret);
  2534. }
  2535. /*
  2536. * Post the struct inode info into an on-disk inode location in the
  2537. * buffer-cache. This gobbles the caller's reference to the
  2538. * buffer_head in the inode location struct.
  2539. *
  2540. * The caller must have write access to iloc->bh.
  2541. */
  2542. static int ext3_do_update_inode(handle_t *handle,
  2543. struct inode *inode,
  2544. struct ext3_iloc *iloc)
  2545. {
  2546. struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
  2547. struct ext3_inode_info *ei = EXT3_I(inode);
  2548. struct buffer_head *bh = iloc->bh;
  2549. int err = 0, rc, block;
2550. /* For fields not tracked in the in-memory inode,
  2551. * initialise them to zero for new inodes. */
  2552. if (ei->i_state & EXT3_STATE_NEW)
  2553. memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
  2554. ext3_get_inode_flags(ei);
  2555. raw_inode->i_mode = cpu_to_le16(inode->i_mode);
  2556. if(!(test_opt(inode->i_sb, NO_UID32))) {
  2557. raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
  2558. raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
  2559. /*
  2560. * Fix up interoperability with old kernels. Otherwise, old inodes get
  2561. * re-used with the upper 16 bits of the uid/gid intact
  2562. */
  2563. if(!ei->i_dtime) {
  2564. raw_inode->i_uid_high =
  2565. cpu_to_le16(high_16_bits(inode->i_uid));
  2566. raw_inode->i_gid_high =
  2567. cpu_to_le16(high_16_bits(inode->i_gid));
  2568. } else {
  2569. raw_inode->i_uid_high = 0;
  2570. raw_inode->i_gid_high = 0;
  2571. }
  2572. } else {
  2573. raw_inode->i_uid_low =
  2574. cpu_to_le16(fs_high2lowuid(inode->i_uid));
  2575. raw_inode->i_gid_low =
  2576. cpu_to_le16(fs_high2lowgid(inode->i_gid));
  2577. raw_inode->i_uid_high = 0;
  2578. raw_inode->i_gid_high = 0;
  2579. }
  2580. raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
  2581. raw_inode->i_size = cpu_to_le32(ei->i_disksize);
  2582. raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
  2583. raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
  2584. raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
  2585. raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
  2586. raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
  2587. raw_inode->i_flags = cpu_to_le32(ei->i_flags);
  2588. #ifdef EXT3_FRAGMENTS
  2589. raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
  2590. raw_inode->i_frag = ei->i_frag_no;
  2591. raw_inode->i_fsize = ei->i_frag_size;
  2592. #endif
  2593. raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
  2594. if (!S_ISREG(inode->i_mode)) {
  2595. raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
  2596. } else {
  2597. raw_inode->i_size_high =
  2598. cpu_to_le32(ei->i_disksize >> 32);
  2599. if (ei->i_disksize > 0x7fffffffULL) {
  2600. struct super_block *sb = inode->i_sb;
  2601. if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
  2602. EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
  2603. EXT3_SB(sb)->s_es->s_rev_level ==
  2604. cpu_to_le32(EXT3_GOOD_OLD_REV)) {
  2605. /* If this is the first large file
  2606. * created, add a flag to the superblock.
  2607. */
  2608. err = ext3_journal_get_write_access(handle,
  2609. EXT3_SB(sb)->s_sbh);
  2610. if (err)
  2611. goto out_brelse;
  2612. ext3_update_dynamic_rev(sb);
  2613. EXT3_SET_RO_COMPAT_FEATURE(sb,
  2614. EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
  2615. sb->s_dirt = 1;
  2616. handle->h_sync = 1;
  2617. err = ext3_journal_dirty_metadata(handle,
  2618. EXT3_SB(sb)->s_sbh);
  2619. }
  2620. }
  2621. }
  2622. raw_inode->i_generation = cpu_to_le32(inode->i_generation);
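/*
 * Mirror image of the decode in ext3_iget(): old device-number encoding
 * goes in i_block[0], the new encoding in i_block[1].
 */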
  2623. if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
  2624. if (old_valid_dev(inode->i_rdev)) {
  2625. raw_inode->i_block[0] =
  2626. cpu_to_le32(old_encode_dev(inode->i_rdev));
  2627. raw_inode->i_block[1] = 0;
  2628. } else {
  2629. raw_inode->i_block[0] = 0;
  2630. raw_inode->i_block[1] =
  2631. cpu_to_le32(new_encode_dev(inode->i_rdev));
  2632. raw_inode->i_block[2] = 0;
  2633. }
  2634. } else for (block = 0; block < EXT3_N_BLOCKS; block++)
  2635. raw_inode->i_block[block] = ei->i_data[block];
  2636. if (ei->i_extra_isize)
  2637. raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
  2638. BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
  2639. rc = ext3_journal_dirty_metadata(handle, bh);
  2640. if (!err)
  2641. err = rc;
  2642. ei->i_state &= ~EXT3_STATE_NEW;
  2643. out_brelse:
  2644. brelse (bh);
  2645. ext3_std_error(inode->i_sb, err);
  2646. return err;
  2647. }
  2648. /*
  2649. * ext3_write_inode()
  2650. *
  2651. * We are called from a few places:
  2652. *
  2653. * - Within generic_file_write() for O_SYNC files.
  2654. * Here, there will be no transaction running. We wait for any running
2655. * transaction to commit.
  2656. *
  2657. * - Within sys_sync(), kupdate and such.
  2658. * We wait on commit, if tol to.
  2659. *
  2660. * - Within prune_icache() (PF_MEMALLOC == true)
  2661. * Here we simply return. We can't afford to block kswapd on the
  2662. * journal commit.
  2663. *
  2664. * In all cases it is actually safe for us to return without doing anything,
  2665. * because the inode has been copied into a raw inode buffer in
  2666. * ext3_mark_inode_dirty(). This is a correctness thing for O_SYNC and for
  2667. * knfsd.
  2668. *
  2669. * Note that we are absolutely dependent upon all inode dirtiers doing the
  2670. * right thing: they *must* call mark_inode_dirty() after dirtying info in
  2671. * which we are interested.
  2672. *
  2673. * It would be a bug for them to not do this. The code:
  2674. *
  2675. * mark_inode_dirty(inode)
  2676. * stuff();
  2677. * inode->i_size = expr;
  2678. *
  2679. * is in error because a kswapd-driven write_inode() could occur while
  2680. * `stuff()' is running, and the new i_size will be lost. Plus the inode
  2681. * will no longer be on the superblock's dirty inode list.
  2682. */
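/*
 * For illustration, the safe ordering makes all in-core changes first
 * and marks the inode dirty last:
 *
 *	stuff();
 *	inode->i_size = expr;
 *	mark_inode_dirty(inode);
 */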
int ext3_write_inode(struct inode *inode, int wait)
{
	if (current->flags & PF_MEMALLOC)
		return 0;

	if (ext3_journal_current_handle()) {
		jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
		dump_stack();
		return -EIO;
	}

	if (!wait)
		return 0;

	return ext3_force_commit(inode->i_sb);
}
/*
 * ext3_setattr()
 *
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as soon as
 * possible.  In particular, we want to make sure that when the VFS
 * shrinks i_size, we put the inode on the orphan list and modify
 * i_disksize immediately, so that during the subsequent flushing of
 * dirty pages and freeing of disk blocks, we can guarantee that any
 * commit will leave the blocks being flushed in an unused state on
 * disk.  (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * Called with inode->sem down.
 */
int ext3_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error, rc = 0;
	const unsigned int ia_valid = attr->ia_valid;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
	    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
		handle_t *handle;

		/* (user+group)*(old+new) structure, inode write (sb,
		 * inode block, ? - but truncate inode update has it) */
		handle = ext3_journal_start(inode, 2*(EXT3_QUOTA_INIT_BLOCKS(inode->i_sb)+
					EXT3_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}
		error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
		if (error) {
			ext3_journal_stop(handle);
			return error;
		}
		/* Update corresponding info in inode so that everything is in
		 * one transaction */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		error = ext3_mark_inode_dirty(handle, inode);
		ext3_journal_stop(handle);
	}

	if (S_ISREG(inode->i_mode) &&
	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
		handle_t *handle;

		handle = ext3_journal_start(inode, 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}

		error = ext3_orphan_add(handle, inode);
		EXT3_I(inode)->i_disksize = attr->ia_size;
		rc = ext3_mark_inode_dirty(handle, inode);
		if (!error)
			error = rc;
		ext3_journal_stop(handle);
	}

	rc = inode_setattr(inode, attr);

	/* If inode_setattr's call to ext3_truncate failed to get a
	 * transaction handle at all, we need to clean up the in-core
	 * orphan list manually. */
	if (inode->i_nlink)
		ext3_orphan_del(NULL, inode);

	if (!rc && (ia_valid & ATTR_MODE))
		rc = ext3_acl_chmod(inode);

err_out:
	ext3_std_error(inode->i_sb, error);
	if (!error)
		error = rc;
	return error;
}
/*
 * How many blocks doth make a writepage()?
 *
 * With N blocks per page, it may be:
 * N data blocks
 * 2 indirect blocks
 * 2 dindirect
 * 1 tindirect
 * N+5 bitmap blocks (from the above)
 * N+5 group descriptor summary blocks
 * 1 inode block
 * 1 superblock.
 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
 *
 * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS
 *
 * With ordered or writeback data it's the same, less the N data blocks.
 *
 * If the inode's direct blocks can hold an integral number of pages then a
 * page cannot straddle two indirect blocks, and we can only touch one indirect
 * and dindirect block, and the "5" above becomes "3".
 *
 * This still overestimates under most circumstances.  If we were to pass the
 * start and end offsets in here as well we could do block_to_path() on each
 * block and work out the exact number of indirects which are touched.  Pah.
 * (A worked example of the credit arithmetic follows below.)
 */
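/*
 * As a rough, illustrative sanity check of the arithmetic above (example
 * numbers only): with 4K pages and a 1K block size, bpp == 4 and, since
 * EXT3_NDIR_BLOCKS (12) is a multiple of 4, indirects == 3.  Full data
 * journalling then reserves 3 * (4 + 3) + 2 = 23 credits; ordered or
 * writeback mode reserves 2 * (4 + 3) + 2 = 16, plus the quota credits
 * when CONFIG_QUOTA is enabled.
 */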
static int ext3_writepage_trans_blocks(struct inode *inode)
{
	int bpp = ext3_journal_blocks_per_page(inode);
	int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
	int ret;

	if (ext3_should_journal_data(inode))
		ret = 3 * (bpp + indirects) + 2;
	else
		ret = 2 * (bpp + indirects) + 2;

#ifdef CONFIG_QUOTA
	/* We know that structure was already allocated during DQUOT_INIT so
	 * we will be updating only the data blocks + inodes */
	ret += 2*EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb);
#endif

	return ret;
}
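/*
 * The count computed above is what the page-write paths in this file pass
 * to ext3_journal_start() as their journal credit reservation; as the
 * comment says, it is a deliberate overestimate.
 */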
/*
 * The caller must have previously called ext3_reserve_inode_write().
 * Given this, we know that the caller already has write access to iloc->bh.
 */
int ext3_mark_iloc_dirty(handle_t *handle,
		struct inode *inode, struct ext3_iloc *iloc)
{
	int err = 0;

	/* the do_update_inode consumes one bh->b_count */
	get_bh(iloc->bh);

	/* ext3_do_update_inode() does journal_dirty_metadata */
	err = ext3_do_update_inode(handle, inode, iloc);
	put_bh(iloc->bh);
	return err;
}
/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh.  This _must_ be cleaned up later.
 */
int
ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
			 struct ext3_iloc *iloc)
{
	int err = 0;

	if (handle) {
		err = ext3_get_inode_loc(inode, iloc);
		if (!err) {
			BUFFER_TRACE(iloc->bh, "get_write_access");
			err = ext3_journal_get_write_access(handle, iloc->bh);
			if (err) {
				brelse(iloc->bh);
				iloc->bh = NULL;
			}
		}
	}
	ext3_std_error(inode->i_sb, err);
	return err;
}
/*
 * What we do here is to mark the in-core inode as clean with respect to inode
 * dirtiness (it may still be data-dirty).
 * This means that the in-core inode may be reaped by prune_icache
 * without having to perform any I/O.  This is a very good thing,
 * because *any* task may call prune_icache - even ones which
 * have a transaction open against a different journal.
 *
 * Is this cheating?  Not really.  Sure, we haven't written the
 * inode out, but prune_icache isn't a user-visible syncing function.
 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
 * we start and wait on commits.
 *
 * Is this efficient/effective?  Well, we're being nice to the system
 * by cleaning up our inodes proactively so they can be reaped
 * without I/O.  But we are potentially leaving up to five seconds'
 * worth of inodes floating about which prune_icache wants us to
 * write out.  One way to fix that would be to get prune_icache()
 * to do a write_super() to free up some memory; that would have
 * the desired effect.
 */
int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
	struct ext3_iloc iloc;
	int err;

	might_sleep();
	err = ext3_reserve_inode_write(handle, inode, &iloc);
	if (!err)
		err = ext3_mark_iloc_dirty(handle, inode, &iloc);
	return err;
}
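/*
 * Typical caller pattern, sketched for illustration only (the number of
 * journal credits reserved varies by caller):
 *
 *	handle_t *handle = ext3_journal_start(inode, credits);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	... update the in-core inode ...
 *	err = ext3_mark_inode_dirty(handle, inode);
 *	ext3_journal_stop(handle);
 */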
/*
 * ext3_dirty_inode() is called from __mark_inode_dirty()
 *
 * We're really interested in the case where a file is being extended.
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
 * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
 * are allocated to the file.
 *
 * If the inode is marked synchronous, we don't honour that here - doing
 * so would cause a commit on atime updates, which we don't bother doing.
 * We handle synchronous inodes at the highest possible level.
 */
void ext3_dirty_inode(struct inode *inode)
{
	handle_t *current_handle = ext3_journal_current_handle();
	handle_t *handle;

	handle = ext3_journal_start(inode, 2);
	if (IS_ERR(handle))
		goto out;
	if (current_handle &&
	    current_handle->h_transaction != handle->h_transaction) {
		/* This task has a transaction open against a different fs */
		printk(KERN_EMERG "%s: transactions do not match!\n",
		       __func__);
	} else {
		jbd_debug(5, "marking dirty.  outer handle=%p\n",
				current_handle);
		ext3_mark_inode_dirty(handle, inode);
	}
	ext3_journal_stop(handle);
out:
	return;
}
#if 0
/*
 * Bind an inode's backing buffer_head into this transaction, to prevent
 * it from being flushed to disk early.  Unlike
 * ext3_reserve_inode_write, this leaves behind no bh reference and
 * returns no iloc structure, so the caller needs to repeat the iloc
 * lookup to mark the inode dirty later.
 */
static int ext3_pin_inode(handle_t *handle, struct inode *inode)
{
	struct ext3_iloc iloc;
	int err = 0;

	if (handle) {
		err = ext3_get_inode_loc(inode, &iloc);
		if (!err) {
			BUFFER_TRACE(iloc.bh, "get_write_access");
			err = journal_get_write_access(handle, iloc.bh);
			if (!err)
				err = ext3_journal_dirty_metadata(handle,
								  iloc.bh);
			brelse(iloc.bh);
		}
	}
	ext3_std_error(inode->i_sb, err);
	return err;
}
#endif
int ext3_change_inode_journal_flag(struct inode *inode, int val)
{
	journal_t *journal;
	handle_t *handle;
	int err;

	/*
	 * We have to be very careful here: changing a data block's
	 * journaling status dynamically is dangerous.  If we write a
	 * data block to the journal, change the status and then delete
	 * that block, we risk forgetting to revoke the old log record
	 * from the journal and so a subsequent replay can corrupt data.
	 * So, first we make sure that the journal is empty and that
	 * nobody is changing anything.
	 */

	journal = EXT3_JOURNAL(inode);
	if (is_journal_aborted(journal))
		return -EROFS;

	journal_lock_updates(journal);
	journal_flush(journal);

	/*
	 * OK, there are no updates running now, and all cached data is
	 * synced to disk.  We are now in a completely consistent state
	 * which doesn't have anything in the journal, and we know that
	 * no filesystem updates are running, so it is safe to modify
	 * the inode's in-core data-journaling state flag now.
	 */

	if (val)
		EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL;
	else
		EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL;
	ext3_set_aops(inode);

	journal_unlock_updates(journal);

	/* Finally we can mark the inode as dirty. */

	handle = ext3_journal_start(inode, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext3_mark_inode_dirty(handle, inode);
	handle->h_sync = 1;
	ext3_journal_stop(handle);
	ext3_std_error(inode->i_sb, err);

	return err;
}
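/*
 * For reference: this is normally reached from the EXT3_IOC_SETFLAGS
 * ioctl when the journalled-data flag on an inode is toggled from
 * userspace, roughly as follows (a sketch, not the exact ioctl code):
 *
 *	if ((jflag ^ oldflags) & EXT3_JOURNAL_DATA_FL)
 *		err = ext3_change_inode_journal_flag(inode,
 *				jflag & EXT3_JOURNAL_DATA_FL);
 */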