journal.c

/*
** Write ahead logging implementation copyright Chris Mason 2000
**
** The background commits make this code very interrelated, and
** overly complex.  I need to rethink things a bit....  The major players:
**
** journal_begin -- call with the number of blocks you expect to log.
**                  If the current transaction is too old, it will block
**                  until the current transaction is finished, and then
**                  start a new one.  Usually, your transaction will get
**                  joined in with previous ones for speed.
**
** journal_join -- same as journal_begin, but won't block on the current
**                 transaction regardless of age.  Don't ever call this.
**                 Ever.  There are only two places it should be called
**                 from, and they are both inside this file.
**
** journal_mark_dirty -- adds blocks into this transaction.  clears any
**                       flags that might make them get sent to disk and
**                       then marks them BH_JDirty.  Puts the buffer head
**                       into the current transaction hash.
**
** journal_end -- if the current transaction is batchable, it does nothing.
**                otherwise, it could do an async/synchronous commit, or a
**                full flush of all log and real blocks in the transaction.
**
** flush_old_commits -- if the current transaction is too old, it is ended
**                      and commit blocks are sent to disk.  Forces commit
**                      blocks to disk for all backgrounded commits that
**                      have been around too long.
**                   -- Note, if you call this as an immediate flush from
**                      within kupdate, it will ignore the immediate flag
*/
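/*
** Illustrative sketch (hypothetical caller, not part of this file): a
** typical metadata update wraps its work in a transaction using the
** entry points described above, roughly:
**
**   struct reiserfs_transaction_handle th;
**   int err;
**
**   err = journal_begin(&th, sb, JOURNAL_PER_BALANCE_CNT);
**   if (err)
**       return err;
**   reiserfs_prepare_for_journal(sb, bh, 1);
**   ... modify bh->b_data ...
**   journal_mark_dirty(&th, sb, bh);
**   err = journal_end(&th, sb, JOURNAL_PER_BALANCE_CNT);
**
** journal_end usually just batches the handle into the running
** transaction; the real commit happens later, in do_journal_end or in
** the async commit work queued on commit_wq below.
*/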
#include <linux/config.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/time.h>
#include <asm/semaphore.h>
#include <linux/vmalloc.h>
#include <linux/reiserfs_fs.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
/* gets a struct reiserfs_journal_list * from a list head */
#define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_list))
#define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_working_list))

/* the number of mounted filesystems.  This is used to decide when to
** start and kill the commit workqueue
*/
static int reiserfs_mounted_fs_count;

static struct workqueue_struct *commit_wq;

#define JOURNAL_TRANS_HALF 1018 /* must be correct to keep the desc and commit
                                   structs at 4k */
#define BUFNR 64 /* read ahead */

/* cnode stat bits.  Move these into reiserfs_fs.h */
#define BLOCK_FREED 2          /* this block was freed, and can't be written. */
#define BLOCK_FREED_HOLDER 3   /* this block was freed during this transaction, and can't be written */
#define BLOCK_NEEDS_FLUSH 4    /* used in flush_journal_list */
#define BLOCK_DIRTIED 5

/* journal list state bits */
#define LIST_TOUCHED 1
#define LIST_DIRTY 2
#define LIST_COMMIT_PENDING 4 /* someone will commit this list */

/* flags for do_journal_end */
#define FLUSH_ALL 1  /* flush commit and real blocks */
#define COMMIT_NOW 2 /* end and commit this transaction */
#define WAIT 4       /* wait for the log blocks to hit the disk */

static int do_journal_end(struct reiserfs_transaction_handle *,
                          struct super_block *, unsigned long nblocks,
                          int flags);
static int flush_journal_list(struct super_block *s,
                              struct reiserfs_journal_list *jl, int flushall);
static int flush_commit_list(struct super_block *s,
                             struct reiserfs_journal_list *jl, int flushall);
static int can_dirty(struct reiserfs_journal_cnode *cn);
static int journal_join(struct reiserfs_transaction_handle *th,
                        struct super_block *p_s_sb, unsigned long nblocks);
static int release_journal_dev(struct super_block *super,
                               struct reiserfs_journal *journal);
static int dirty_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl);
static void flush_async_commits(void *p);
static void queue_log_writer(struct super_block *s);

/* values for join in do_journal_begin_r */
enum {
    JBEGIN_REG = 0,   /* regular journal begin */
    JBEGIN_JOIN = 1,  /* join the running transaction if at all possible */
    JBEGIN_ABORT = 2, /* called from cleanup code, ignores aborted flag */
};

static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
                              struct super_block *p_s_sb,
                              unsigned long nblocks, int join);
static void init_journal_hash(struct super_block *p_s_sb) {
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
    memset(journal->j_hash_table, 0,
           JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
}

/*
** clears BH_Dirty and sticks the buffer on the clean list.  Called because
** I can't allow refile_buffer to make schedule happen after I've freed a
** block.  Look at remove_from_transaction and journal_mark_freed for
** more details.
*/
static int reiserfs_clean_and_file_buffer(struct buffer_head *bh) {
    if (bh) {
        clear_buffer_dirty(bh);
        clear_buffer_journal_test(bh);
    }
    return 0;
}
static void disable_barrier(struct super_block *s)
{
    REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_BARRIER_FLUSH);
    printk("reiserfs: disabling flush barriers on %s\n", reiserfs_bdevname(s));
}
static struct reiserfs_bitmap_node *
allocate_bitmap_node(struct super_block *p_s_sb) {
    struct reiserfs_bitmap_node *bn;
    static int id;

    bn = reiserfs_kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS, p_s_sb);
    if (!bn) {
        return NULL;
    }
    bn->data = reiserfs_kmalloc(p_s_sb->s_blocksize, GFP_NOFS, p_s_sb);
    if (!bn->data) {
        reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb);
        return NULL;
    }
    bn->id = id++;
    memset(bn->data, 0, p_s_sb->s_blocksize);
    INIT_LIST_HEAD(&bn->list);
    return bn;
}
static struct reiserfs_bitmap_node *
get_bitmap_node(struct super_block *p_s_sb) {
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
    struct reiserfs_bitmap_node *bn = NULL;
    struct list_head *entry = journal->j_bitmap_nodes.next;

    journal->j_used_bitmap_nodes++;
repeat:
    if (entry != &journal->j_bitmap_nodes) {
        bn = list_entry(entry, struct reiserfs_bitmap_node, list);
        list_del(entry);
        memset(bn->data, 0, p_s_sb->s_blocksize);
        journal->j_free_bitmap_nodes--;
        return bn;
    }
    bn = allocate_bitmap_node(p_s_sb);
    if (!bn) {
        yield();
        goto repeat;
    }
    return bn;
}
static inline void free_bitmap_node(struct super_block *p_s_sb,
                                    struct reiserfs_bitmap_node *bn) {
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
    journal->j_used_bitmap_nodes--;
    if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
        reiserfs_kfree(bn->data, p_s_sb->s_blocksize, p_s_sb);
        reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb);
    } else {
        list_add(&bn->list, &journal->j_bitmap_nodes);
        journal->j_free_bitmap_nodes++;
    }
}
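/*
** Note (illustrative, summarizing the two functions above): the journal
** keeps a small pool of pre-zeroed bitmap nodes on j_bitmap_nodes so that
** set_bit_in_list_bitmap below rarely has to sleep for memory.
** get_bitmap_node pops a node from the pool, or loops over
** allocate_bitmap_node with yield() until kmalloc succeeds, and
** free_bitmap_node returns the node to the pool unless the pool already
** holds more than REISERFS_MAX_BITMAP_NODES entries.
*/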
static void allocate_bitmap_nodes(struct super_block *p_s_sb) {
    int i;
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
    struct reiserfs_bitmap_node *bn = NULL;
    for (i = 0; i < REISERFS_MIN_BITMAP_NODES; i++) {
        bn = allocate_bitmap_node(p_s_sb);
        if (bn) {
            list_add(&bn->list, &journal->j_bitmap_nodes);
            journal->j_free_bitmap_nodes++;
        } else {
            break; /* this is ok, we'll try again when more are needed */
        }
    }
}
static int set_bit_in_list_bitmap(struct super_block *p_s_sb, int block,
                                  struct reiserfs_list_bitmap *jb) {
    int bmap_nr = block / (p_s_sb->s_blocksize << 3);
    int bit_nr = block % (p_s_sb->s_blocksize << 3);

    if (!jb->bitmaps[bmap_nr]) {
        jb->bitmaps[bmap_nr] = get_bitmap_node(p_s_sb);
    }
    set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data);
    return 0;
}
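/*
** Worked example (illustrative): with a 4096-byte block size there are
** s_blocksize << 3 == 32768 bits per on-disk bitmap block, so block 70000
** maps to bmap_nr = 70000 / 32768 = 2 and bit_nr = 70000 % 32768 = 4464.
** Bitmap nodes are allocated lazily, so a list bitmap only pays memory
** for the regions of the disk in which a transaction actually freed
** blocks.
*/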
static void cleanup_bitmap_list(struct super_block *p_s_sb,
                                struct reiserfs_list_bitmap *jb) {
    int i;
    if (jb->bitmaps == NULL)
        return;
    for (i = 0; i < SB_BMAP_NR(p_s_sb); i++) {
        if (jb->bitmaps[i]) {
            free_bitmap_node(p_s_sb, jb->bitmaps[i]);
            jb->bitmaps[i] = NULL;
        }
    }
}
/*
** only call this on FS unmount.
*/
static int free_list_bitmaps(struct super_block *p_s_sb,
                             struct reiserfs_list_bitmap *jb_array) {
    int i;
    struct reiserfs_list_bitmap *jb;
    for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
        jb = jb_array + i;
        jb->journal_list = NULL;
        cleanup_bitmap_list(p_s_sb, jb);
        vfree(jb->bitmaps);
        jb->bitmaps = NULL;
    }
    return 0;
}
static int free_bitmap_nodes(struct super_block *p_s_sb) {
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
    struct list_head *next = journal->j_bitmap_nodes.next;
    struct reiserfs_bitmap_node *bn;

    while (next != &journal->j_bitmap_nodes) {
        bn = list_entry(next, struct reiserfs_bitmap_node, list);
        list_del(next);
        reiserfs_kfree(bn->data, p_s_sb->s_blocksize, p_s_sb);
        reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb);
        next = journal->j_bitmap_nodes.next;
        journal->j_free_bitmap_nodes--;
    }
    return 0;
}
/*
** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
** jb_array is the array to be filled in.
*/
int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
                                   struct reiserfs_list_bitmap *jb_array,
                                   int bmap_nr) {
    int i;
    int failed = 0;
    struct reiserfs_list_bitmap *jb;
    int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *);

    for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
        jb = jb_array + i;
        jb->journal_list = NULL;
        jb->bitmaps = vmalloc(mem);
        if (!jb->bitmaps) {
            reiserfs_warning(p_s_sb, "clm-2000, unable to allocate bitmaps for journal lists");
            failed = 1;
            break;
        }
        memset(jb->bitmaps, 0, mem);
    }
    if (failed) {
        free_list_bitmaps(p_s_sb, jb_array);
        return -1;
    }
    return 0;
}
/*
** find an available list bitmap.  If you can't find one, flush a commit
** list and try again
*/
static struct reiserfs_list_bitmap *
get_list_bitmap(struct super_block *p_s_sb, struct reiserfs_journal_list *jl) {
    int i, j;
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
    struct reiserfs_list_bitmap *jb = NULL;

    for (j = 0; j < (JOURNAL_NUM_BITMAPS * 3); j++) {
        i = journal->j_list_bitmap_index;
        journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS;
        jb = journal->j_list_bitmap + i;
        if (journal->j_list_bitmap[i].journal_list) {
            flush_commit_list(p_s_sb, journal->j_list_bitmap[i].journal_list, 1);
            if (!journal->j_list_bitmap[i].journal_list) {
                break;
            }
        } else {
            break;
        }
    }
    if (jb->journal_list) { /* double check that the flush actually freed it */
        return NULL;
    }
    jb->journal_list = jl;
    return jb;
}
/*
** allocates a new chunk of X nodes, and links them all together as a list.
** Uses the cnode->next and cnode->prev pointers
** returns NULL on failure
*/
static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes) {
    struct reiserfs_journal_cnode *head;
    int i;
    if (num_cnodes <= 0) {
        return NULL;
    }
    head = vmalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode));
    if (!head) {
        return NULL;
    }
    memset(head, 0, num_cnodes * sizeof(struct reiserfs_journal_cnode));
    head[0].prev = NULL;
    head[0].next = head + 1;
    for (i = 1; i < num_cnodes; i++) {
        head[i].prev = head + (i - 1);
        head[i].next = head + (i + 1); /* if last one, overwrite it after the if */
    }
    head[num_cnodes - 1].next = NULL;
    return head;
}
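/*
** Resulting layout (illustrative): for num_cnodes == 4 the loop leaves
**
**   NULL <- head[0] <-> head[1] <-> head[2] <-> head[3] -> NULL
**
** i.e. one contiguous vmalloc'd array threaded into a doubly linked free
** list, which get_cnode and free_cnode below then treat as a stack.
*/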
/*
** pulls a cnode off the free list, or returns NULL on failure
*/
static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb) {
    struct reiserfs_journal_cnode *cn;
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

    reiserfs_check_lock_depth(p_s_sb, "get_cnode");
    if (journal->j_cnode_free <= 0) {
        return NULL;
    }
    journal->j_cnode_used++;
    journal->j_cnode_free--;
    cn = journal->j_cnode_free_list;
    if (!cn) {
        return cn;
    }
    if (cn->next) {
        cn->next->prev = NULL;
    }
    journal->j_cnode_free_list = cn->next;
    memset(cn, 0, sizeof(struct reiserfs_journal_cnode));
    return cn;
}
/*
** returns a cnode to the free list
*/
static void free_cnode(struct super_block *p_s_sb, struct reiserfs_journal_cnode *cn) {
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

    reiserfs_check_lock_depth(p_s_sb, "free_cnode");
    journal->j_cnode_used--;
    journal->j_cnode_free++;
    /* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)); */
    cn->next = journal->j_cnode_free_list;
    if (journal->j_cnode_free_list) {
        journal->j_cnode_free_list->prev = cn;
    }
    cn->prev = NULL; /* not needed with the memset, but I might kill the memset, and forget to do this */
    journal->j_cnode_free_list = cn;
}
static void clear_prepared_bits(struct buffer_head *bh) {
    clear_buffer_journal_prepared(bh);
    clear_buffer_journal_restore_dirty(bh);
}

/* utility function to force a BUG if it is called without the big
** kernel lock held.  caller is the string printed just before calling BUG()
*/
void reiserfs_check_lock_depth(struct super_block *sb, char *caller) {
#ifdef CONFIG_SMP
    if (current->lock_depth < 0) {
        reiserfs_panic(sb, "%s called without kernel lock held", caller);
    }
#else
    ;
#endif
}
/* return a cnode with same dev, block number and size in table, or null if not found */
static inline struct reiserfs_journal_cnode *
get_journal_hash_dev(struct super_block *sb,
                     struct reiserfs_journal_cnode **table,
                     long bl)
{
    struct reiserfs_journal_cnode *cn;
    cn = journal_hash(table, sb, bl);
    while (cn) {
        if (cn->blocknr == bl && cn->sb == sb)
            return cn;
        cn = cn->hnext;
    }
    return NULL;
}
/*
** this actually means 'can this block be reallocated yet?'.  If you set
** search_all, a block can only be allocated if it is not in the current
** transaction, was not freed by the current transaction, and has no chance
** of ever being overwritten by a replay after crashing.
**
** If you don't set search_all, a block can only be allocated if it is not
** in the current transaction.  Since deleting a block removes it from the
** current transaction, this case should never happen.  If you don't set
** search_all, make sure you never write the block without logging it.
**
** next_zero_bit is a suggestion about the next block to try for
** find_forward.  when bl is rejected because it is set in a journal list
** bitmap, we search for the next zero bit in the bitmap that rejected bl.
** Then, we return that through next_zero_bit for find_forward to try.
**
** Just because we return something in next_zero_bit does not mean we won't
** reject it on the next call to reiserfs_in_journal
*/
int reiserfs_in_journal(struct super_block *p_s_sb,
                        int bmap_nr, int bit_nr, int search_all,
                        b_blocknr_t *next_zero_bit) {
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
    struct reiserfs_journal_cnode *cn;
    struct reiserfs_list_bitmap *jb;
    int i;
    unsigned long bl;

    *next_zero_bit = 0; /* always start this at zero. */

    PROC_INFO_INC(p_s_sb, journal.in_journal);
    /* If we aren't doing a search_all, this is a metablock, and it will be
    ** logged before use.  If we crash before the transaction that freed it
    ** commits, this transaction won't have committed either, and the block
    ** will never be written.
    */
    if (search_all) {
        for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
            PROC_INFO_INC(p_s_sb, journal.in_journal_bitmap);
            jb = journal->j_list_bitmap + i;
            if (jb->journal_list && jb->bitmaps[bmap_nr] &&
                test_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data)) {
                *next_zero_bit = find_next_zero_bit((unsigned long *)
                                                    (jb->bitmaps[bmap_nr]->data),
                                                    p_s_sb->s_blocksize << 3,
                                                    bit_nr + 1);
                return 1;
            }
        }
    }

    bl = bmap_nr * (p_s_sb->s_blocksize << 3) + bit_nr;
    /* is it in any old transactions? */
    if (search_all && (cn = get_journal_hash_dev(p_s_sb, journal->j_list_hash_table, bl))) {
        return 1;
    }

    /* is it in the current transaction?  This should never happen */
    if ((cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, bl))) {
        BUG();
        return 1;
    }

    PROC_INFO_INC(p_s_sb, journal.in_journal_reusable);
    /* safe for reuse */
    return 0;
}
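/*
** Hypothetical caller sketch (not from this file): per the contract
** above, an allocator scan loop would use reiserfs_in_journal roughly
** like this, skipping ahead to next_zero_bit whenever a candidate is
** vetoed by a journal list bitmap:
**
**   b_blocknr_t next;
**   if (reiserfs_in_journal(sb, bmap_nr, bit_nr, 1, &next)) {
**       bit_nr = next;   retry from the suggested position
**   } else {
**       bit_nr is safe to allocate
**   }
*/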
/* insert cn into table
*/
static inline void insert_journal_hash(struct reiserfs_journal_cnode **table, struct reiserfs_journal_cnode *cn) {
    struct reiserfs_journal_cnode *cn_orig;

    cn_orig = journal_hash(table, cn->sb, cn->blocknr);
    cn->hnext = cn_orig;
    cn->hprev = NULL;
    if (cn_orig) {
        cn_orig->hprev = cn;
    }
    journal_hash(table, cn->sb, cn->blocknr) = cn;
}
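/*
** Note (illustrative): there are two cnode hash tables with this same
** structure.  journal->j_hash_table tracks blocks in the running
** transaction, while journal->j_list_hash_table tracks blocks in already
** committed but not yet flushed journal lists; reiserfs_in_journal above
** consults both.  Insertion is push-to-front chaining, so the newest
** cnode for a block sits at the head of its chain, and walking hprev
** from any cnode leads to more recently logged entries.  That is what
** lets find_newer_jl_for_cn below find a newer transaction for a block.
*/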
/* lock the current transaction */
static inline void lock_journal(struct super_block *p_s_sb) {
    PROC_INFO_INC(p_s_sb, journal.lock_journal);
    down(&SB_JOURNAL(p_s_sb)->j_lock);
}

/* unlock the current transaction */
static inline void unlock_journal(struct super_block *p_s_sb) {
    up(&SB_JOURNAL(p_s_sb)->j_lock);
}

static inline void get_journal_list(struct reiserfs_journal_list *jl)
{
    jl->j_refcount++;
}

static inline void put_journal_list(struct super_block *s,
                                    struct reiserfs_journal_list *jl)
{
    if (jl->j_refcount < 1) {
        reiserfs_panic(s, "trans id %lu, refcount at %d", jl->j_trans_id,
                       jl->j_refcount);
    }
    if (--jl->j_refcount == 0)
        reiserfs_kfree(jl, sizeof(struct reiserfs_journal_list), s);
}
/*
** this used to be much more involved, and I'm keeping it just in case
** things get ugly again.  it gets called by flush_commit_list, and cleans
** up any data stored about blocks freed during a transaction.
*/
static void cleanup_freed_for_journal_list(struct super_block *p_s_sb, struct reiserfs_journal_list *jl) {
    struct reiserfs_list_bitmap *jb = jl->j_list_bitmap;
    if (jb) {
        cleanup_bitmap_list(p_s_sb, jb);
    }
    jl->j_list_bitmap->journal_list = NULL;
    jl->j_list_bitmap = NULL;
}
static int journal_list_still_alive(struct super_block *s,
                                    unsigned long trans_id)
{
    struct reiserfs_journal *journal = SB_JOURNAL(s);
    struct list_head *entry = &journal->j_journal_list;
    struct reiserfs_journal_list *jl;

    if (!list_empty(entry)) {
        jl = JOURNAL_LIST_ENTRY(entry->next);
        if (jl->j_trans_id <= trans_id) {
            return 1;
        }
    }
    return 0;
}
static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate) {
    char b[BDEVNAME_SIZE];

    if (buffer_journaled(bh)) {
        reiserfs_warning(NULL, "clm-2084: pinned buffer %lu:%s sent to disk",
                         bh->b_blocknr, bdevname(bh->b_bdev, b));
    }
    if (uptodate)
        set_buffer_uptodate(bh);
    else
        clear_buffer_uptodate(bh);
    unlock_buffer(bh);
    put_bh(bh);
}

static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate) {
    if (uptodate)
        set_buffer_uptodate(bh);
    else
        clear_buffer_uptodate(bh);
    unlock_buffer(bh);
    put_bh(bh);
}
static void submit_logged_buffer(struct buffer_head *bh) {
    get_bh(bh);
    bh->b_end_io = reiserfs_end_buffer_io_sync;
    clear_buffer_journal_new(bh);
    clear_buffer_dirty(bh);
    if (!test_clear_buffer_journal_test(bh))
        BUG();
    if (!buffer_uptodate(bh))
        BUG();
    submit_bh(WRITE, bh);
}

static void submit_ordered_buffer(struct buffer_head *bh) {
    get_bh(bh);
    bh->b_end_io = reiserfs_end_ordered_io;
    clear_buffer_dirty(bh);
    if (!buffer_uptodate(bh))
        BUG();
    submit_bh(WRITE, bh);
}

static int submit_barrier_buffer(struct buffer_head *bh) {
    get_bh(bh);
    bh->b_end_io = reiserfs_end_ordered_io;
    clear_buffer_dirty(bh);
    if (!buffer_uptodate(bh))
        BUG();
    return submit_bh(WRITE_BARRIER, bh);
}
static void check_barrier_completion(struct super_block *s,
                                     struct buffer_head *bh) {
    if (buffer_eopnotsupp(bh)) {
        clear_buffer_eopnotsupp(bh);
        disable_barrier(s);
        set_buffer_uptodate(bh);
        set_buffer_dirty(bh);
        sync_dirty_buffer(bh);
    }
}
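/*
** Note (illustrative): barrier handling is strictly best-effort.  If the
** device rejects a WRITE_BARRIER request, either synchronously with
** -EOPNOTSUPP from submit_barrier_buffer or asynchronously via the
** buffer's eopnotsupp flag checked here, the journal permanently turns
** barriers off with disable_barrier and falls back to rewriting the same
** buffer with a plain synchronous sync_dirty_buffer.
*/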
#define CHUNK_SIZE 32
struct buffer_chunk {
    struct buffer_head *bh[CHUNK_SIZE];
    int nr;
};

static void write_chunk(struct buffer_chunk *chunk) {
    int i;
    get_fs_excl();
    for (i = 0; i < chunk->nr; i++) {
        submit_logged_buffer(chunk->bh[i]);
    }
    chunk->nr = 0;
    put_fs_excl();
}

static void write_ordered_chunk(struct buffer_chunk *chunk) {
    int i;
    get_fs_excl();
    for (i = 0; i < chunk->nr; i++) {
        submit_ordered_buffer(chunk->bh[i]);
    }
    chunk->nr = 0;
    put_fs_excl();
}
static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
                        spinlock_t *lock,
                        void (fn)(struct buffer_chunk *))
{
    int ret = 0;
    if (chunk->nr >= CHUNK_SIZE)
        BUG();
    chunk->bh[chunk->nr++] = bh;
    if (chunk->nr >= CHUNK_SIZE) {
        ret = 1;
        if (lock)
            spin_unlock(lock);
        fn(chunk);
        if (lock)
            spin_lock(lock);
    }
    return ret;
}
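/*
** Usage sketch (illustrative): callers batch up to CHUNK_SIZE buffers and
** let add_to_chunk drop the spinlock around the actual I/O submission,
** then flush whatever is left over themselves, e.g.:
**
**   struct buffer_chunk chunk;
**   chunk.nr = 0;
**   spin_lock(lock);
**   for each dirty bh:
**       add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
**   spin_unlock(lock);
**   if (chunk.nr)
**       write_ordered_chunk(&chunk);
**
** write_ordered_buffers below follows exactly this pattern.
*/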
static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0);

static struct reiserfs_jh *alloc_jh(void) {
    struct reiserfs_jh *jh;
    while (1) {
        jh = kmalloc(sizeof(*jh), GFP_NOFS);
        if (jh) {
            atomic_inc(&nr_reiserfs_jh);
            return jh;
        }
        yield();
    }
}
/*
 * we want to free the jh when the buffer has been written
 * and waited on
 */
void reiserfs_free_jh(struct buffer_head *bh) {
    struct reiserfs_jh *jh;

    jh = bh->b_private;
    if (jh) {
        bh->b_private = NULL;
        jh->bh = NULL;
        list_del_init(&jh->list);
        kfree(jh);
        if (atomic_read(&nr_reiserfs_jh) <= 0)
            BUG();
        atomic_dec(&nr_reiserfs_jh);
        put_bh(bh);
    }
}
static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh,
                           int tail)
{
    struct reiserfs_jh *jh;

    if (bh->b_private) {
        spin_lock(&j->j_dirty_buffers_lock);
        if (!bh->b_private) {
            spin_unlock(&j->j_dirty_buffers_lock);
            goto no_jh;
        }
        jh = bh->b_private;
        list_del_init(&jh->list);
    } else {
no_jh:
        get_bh(bh);
        jh = alloc_jh();
        spin_lock(&j->j_dirty_buffers_lock);
        /* buffer must be locked for __add_jh, so it should not be
         * possible to have two adds at the same time
         */
        if (bh->b_private)
            BUG();
        jh->bh = bh;
        bh->b_private = jh;
    }
    jh->jl = j->j_current_jl;
    if (tail)
        list_add_tail(&jh->list, &jh->jl->j_tail_bh_list);
    else {
        list_add_tail(&jh->list, &jh->jl->j_bh_list);
    }
    spin_unlock(&j->j_dirty_buffers_lock);
    return 0;
}

int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh) {
    return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1);
}

int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh) {
    return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0);
}
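/*
** Note (illustrative): the two wrappers differ only in which per-list
** queue the buffer lands on.  j_bh_list holds ordered data buffers that
** must reach disk before the commit block (they are written back by
** write_ordered_buffers from flush_commit_list below), while
** j_tail_bh_list holds buffers from tail conversions, which the journal
** writes back separately when the transaction ends.
*/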
#define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list)
static int write_ordered_buffers(spinlock_t *lock,
                                 struct reiserfs_journal *j,
                                 struct reiserfs_journal_list *jl,
                                 struct list_head *list)
{
    struct buffer_head *bh;
    struct reiserfs_jh *jh;
    int ret = j->j_errno;
    struct buffer_chunk chunk;
    struct list_head tmp;
    INIT_LIST_HEAD(&tmp);

    chunk.nr = 0;
    spin_lock(lock);
    while (!list_empty(list)) {
        jh = JH_ENTRY(list->next);
        bh = jh->bh;
        get_bh(bh);
        if (test_set_buffer_locked(bh)) {
            if (!buffer_dirty(bh)) {
                list_del_init(&jh->list);
                list_add(&jh->list, &tmp);
                goto loop_next;
            }
            spin_unlock(lock);
            if (chunk.nr)
                write_ordered_chunk(&chunk);
            wait_on_buffer(bh);
            cond_resched();
            spin_lock(lock);
            goto loop_next;
        }
        if (buffer_dirty(bh)) {
            list_del_init(&jh->list);
            list_add(&jh->list, &tmp);
            add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
        } else {
            reiserfs_free_jh(bh);
            unlock_buffer(bh);
        }
loop_next:
        put_bh(bh);
        cond_resched_lock(lock);
    }
    if (chunk.nr) {
        spin_unlock(lock);
        write_ordered_chunk(&chunk);
        spin_lock(lock);
    }
    while (!list_empty(&tmp)) {
        jh = JH_ENTRY(tmp.prev);
        bh = jh->bh;
        get_bh(bh);
        reiserfs_free_jh(bh);
        if (buffer_locked(bh)) {
            spin_unlock(lock);
            wait_on_buffer(bh);
            spin_lock(lock);
        }
        if (!buffer_uptodate(bh)) {
            ret = -EIO;
        }
        put_bh(bh);
        cond_resched_lock(lock);
    }
    spin_unlock(lock);
    return ret;
}
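/*
** Note (illustrative): this is a two-pass scheme.  Pass one walks the
** ordered list, submits dirty buffers in CHUNK_SIZE batches (moving them
** to the local tmp list), and simply drops clean ones.  Pass two drains
** tmp, waiting for each submitted buffer and folding any write failure
** into -EIO.  The spinlock is dropped around every submission and wait,
** so it is never held across I/O.
*/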
static int flush_older_commits(struct super_block *s, struct reiserfs_journal_list *jl) {
    struct reiserfs_journal *journal = SB_JOURNAL(s);
    struct reiserfs_journal_list *other_jl;
    struct reiserfs_journal_list *first_jl;
    struct list_head *entry;
    unsigned long trans_id = jl->j_trans_id;
    unsigned long other_trans_id;
    unsigned long first_trans_id;

find_first:
    /*
     * first we walk backwards to find the oldest uncommitted transaction
     */
    first_jl = jl;
    entry = jl->j_list.prev;
    while (1) {
        other_jl = JOURNAL_LIST_ENTRY(entry);
        if (entry == &journal->j_journal_list ||
            atomic_read(&other_jl->j_older_commits_done))
            break;
        first_jl = other_jl;
        entry = other_jl->j_list.prev;
    }

    /* if we didn't find any older uncommitted transactions, return now */
    if (first_jl == jl) {
        return 0;
    }

    first_trans_id = first_jl->j_trans_id;

    entry = &first_jl->j_list;
    while (1) {
        other_jl = JOURNAL_LIST_ENTRY(entry);
        other_trans_id = other_jl->j_trans_id;

        if (other_trans_id < trans_id) {
            if (atomic_read(&other_jl->j_commit_left) != 0) {
                flush_commit_list(s, other_jl, 0);

                /* list we were called with is gone, return */
                if (!journal_list_still_alive(s, trans_id))
                    return 1;

                /* the one we just flushed is gone, this means all
                 * older lists are also gone, so first_jl is no longer
                 * valid either.  Go back to the beginning.
                 */
                if (!journal_list_still_alive(s, other_trans_id)) {
                    goto find_first;
                }
            }
            entry = entry->next;
            if (entry == &journal->j_journal_list)
                return 0;
        } else {
            return 0;
        }
    }
    return 0;
}
int reiserfs_async_progress_wait(struct super_block *s) {
    DEFINE_WAIT(wait);
    struct reiserfs_journal *j = SB_JOURNAL(s);
    if (atomic_read(&j->j_async_throttle))
        blk_congestion_wait(WRITE, HZ / 10);
    return 0;
}
/*
** if this journal list still has commit blocks unflushed, send them to
** disk.
**
** log areas must be flushed in order (transaction 2 can't commit before
** transaction 1).  Before the commit block can be written, every other
** log block must be safely on disk.
*/
static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) {
    int i;
    int bn;
    struct buffer_head *tbh = NULL;
    unsigned long trans_id = jl->j_trans_id;
    struct reiserfs_journal *journal = SB_JOURNAL(s);
    int barrier = 0;
    int retval = 0;

    reiserfs_check_lock_depth(s, "flush_commit_list");

    if (atomic_read(&jl->j_older_commits_done)) {
        return 0;
    }

    get_fs_excl();

    /* before we can put our commit blocks on disk, we have to make sure
    ** everyone older than us is on disk too
    */
    BUG_ON(jl->j_len <= 0);
    BUG_ON(trans_id == journal->j_trans_id);

    get_journal_list(jl);
    if (flushall) {
        if (flush_older_commits(s, jl) == 1) {
            /* list disappeared during flush_older_commits.  return */
            goto put_jl;
        }
    }

    /* make sure nobody is trying to flush this one at the same time */
    down(&jl->j_commit_lock);
    if (!journal_list_still_alive(s, trans_id)) {
        up(&jl->j_commit_lock);
        goto put_jl;
    }
    BUG_ON(jl->j_trans_id == 0);

    /* this commit is done, exit */
    if (atomic_read(&(jl->j_commit_left)) <= 0) {
        if (flushall) {
            atomic_set(&(jl->j_older_commits_done), 1);
        }
        up(&jl->j_commit_lock);
        goto put_jl;
    }

    if (!list_empty(&jl->j_bh_list)) {
        unlock_kernel();
        write_ordered_buffers(&journal->j_dirty_buffers_lock,
                              journal, jl, &jl->j_bh_list);
        lock_kernel();
    }
    BUG_ON(!list_empty(&jl->j_bh_list));
    /*
     * for the description block and all the log blocks, submit any buffers
     * that haven't already reached the disk
     */
    atomic_inc(&journal->j_async_throttle);
    for (i = 0; i < (jl->j_len + 1); i++) {
        bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) %
             SB_ONDISK_JOURNAL_SIZE(s);
        tbh = journal_find_get_block(s, bn);
        if (buffer_dirty(tbh)) /* redundant, ll_rw_block() checks */
            ll_rw_block(WRITE, 1, &tbh);
        put_bh(tbh);
    }
    atomic_dec(&journal->j_async_throttle);

    /* wait on everything written so far before writing the commit.
     * if we are in barrier mode, send the commit down now
     */
    barrier = reiserfs_barrier_flush(s);
    if (barrier) {
        int ret;
        lock_buffer(jl->j_commit_bh);
        ret = submit_barrier_buffer(jl->j_commit_bh);
        if (ret == -EOPNOTSUPP) {
            set_buffer_uptodate(jl->j_commit_bh);
            disable_barrier(s);
            barrier = 0;
        }
    }
    for (i = 0; i < (jl->j_len + 1); i++) {
        bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
             (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
        tbh = journal_find_get_block(s, bn);
        wait_on_buffer(tbh);
        /* since we're using ll_rw_blk above, it might have skipped over
        ** a locked buffer.  Double check here
        */
        if (buffer_dirty(tbh)) /* redundant, sync_dirty_buffer() checks */
            sync_dirty_buffer(tbh);
        if (unlikely(!buffer_uptodate(tbh))) {
#ifdef CONFIG_REISERFS_CHECK
            reiserfs_warning(s, "journal-601, buffer write failed");
#endif
            retval = -EIO;
        }
        put_bh(tbh); /* once for journal_find_get_block */
        put_bh(tbh); /* once due to original getblk in do_journal_end */
        atomic_dec(&(jl->j_commit_left));
    }

    BUG_ON(atomic_read(&(jl->j_commit_left)) != 1);

    if (!barrier) {
        if (buffer_dirty(jl->j_commit_bh))
            BUG();
        mark_buffer_dirty(jl->j_commit_bh);
        sync_dirty_buffer(jl->j_commit_bh);
    } else
        wait_on_buffer(jl->j_commit_bh);

    check_barrier_completion(s, jl->j_commit_bh);

    /* If there was a write error in the journal - we can't commit this
     * transaction - it will be invalid and, if successful, will just end
     * up propagating the write error out to the filesystem. */
    if (unlikely(!buffer_uptodate(jl->j_commit_bh))) {
#ifdef CONFIG_REISERFS_CHECK
        reiserfs_warning(s, "journal-615: buffer write failed");
#endif
        retval = -EIO;
    }
    bforget(jl->j_commit_bh);
    if (journal->j_last_commit_id != 0 &&
        (jl->j_trans_id - journal->j_last_commit_id) != 1) {
        reiserfs_warning(s, "clm-2200: last commit %lu, current %lu",
                         journal->j_last_commit_id,
                         jl->j_trans_id);
    }
    journal->j_last_commit_id = jl->j_trans_id;

    /* now, every commit block is on the disk.  It is safe to allow blocks
    ** freed during this transaction to be reallocated */
    cleanup_freed_for_journal_list(s, jl);

    retval = retval ? retval : journal->j_errno;

    /* mark the metadata dirty */
    if (!retval)
        dirty_one_transaction(s, jl);
    atomic_dec(&(jl->j_commit_left));

    if (flushall) {
        atomic_set(&(jl->j_older_commits_done), 1);
    }
    up(&jl->j_commit_lock);
put_jl:
    put_journal_list(s, jl);

    if (retval)
        reiserfs_abort(s, retval, "Journal write error in %s", __FUNCTION__);
    put_fs_excl();
    return retval;
}
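/*
** Commit ordering summary (illustrative, restating the code above): for
** a transaction to count as committed on disk, the steps are strictly
**
**   1. ordered data buffers on j_bh_list (write_ordered_buffers)
**   2. the description block plus all log blocks, submitted then waited on
**   3. the commit block, via a barrier write when supported, otherwise a
**      plain synchronous write after everything else has completed
**
** Only after step 3 may blocks freed in this transaction be reallocated
** (cleanup_freed_for_journal_list) and the in-memory metadata be marked
** dirty for writeback (dirty_one_transaction).
*/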
/*
** flush_journal_list frequently needs to find a newer transaction for a
** given block.  This does that, or returns NULL if it can't find anything
*/
static struct reiserfs_journal_list *find_newer_jl_for_cn(struct reiserfs_journal_cnode *cn) {
    struct super_block *sb = cn->sb;
    b_blocknr_t blocknr = cn->blocknr;

    cn = cn->hprev;
    while (cn) {
        if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
            return cn->jlist;
        }
        cn = cn->hprev;
    }
    return NULL;
}
static void remove_journal_hash(struct super_block *, struct reiserfs_journal_cnode **,
                                struct reiserfs_journal_list *, unsigned long, int);

/*
** once all the real blocks have been flushed, it is safe to remove them
** from the journal list for this transaction.  Aside from freeing the
** cnode, this also allows the block to be reallocated for data blocks if
** it had been deleted.
*/
static void remove_all_from_journal_list(struct super_block *p_s_sb, struct reiserfs_journal_list *jl, int debug) {
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
    struct reiserfs_journal_cnode *cn, *last;
    cn = jl->j_realblock;

    /* which is better, to lock once around the whole loop, or
    ** to lock for each call to remove_journal_hash?
    */
    while (cn) {
        if (cn->blocknr != 0) {
            if (debug) {
                reiserfs_warning(p_s_sb, "block %u, bh is %d, state %ld",
                                 cn->blocknr, cn->bh ? 1 : 0, cn->state);
            }
            cn->state = 0;
            remove_journal_hash(p_s_sb, journal->j_list_hash_table, jl, cn->blocknr, 1);
        }
        last = cn;
        cn = cn->next;
        free_cnode(p_s_sb, last);
    }
    jl->j_realblock = NULL;
}
/*
** if this timestamp is greater than the timestamp we wrote last to the
** header block, write it to the header block.  once this is done, I can
** safely say the log area for this transaction won't ever be replayed,
** and I can start releasing blocks in this transaction for reuse as data
** blocks.  called by flush_journal_list, before it calls
** remove_all_from_journal_list
*/
static int _update_journal_header_block(struct super_block *p_s_sb, unsigned long offset, unsigned long trans_id) {
    struct reiserfs_journal_header *jh;
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

    if (reiserfs_is_journal_aborted(journal))
        return -EIO;

    if (trans_id >= journal->j_last_flush_trans_id) {
        if (buffer_locked((journal->j_header_bh))) {
            wait_on_buffer((journal->j_header_bh));
            if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
#ifdef CONFIG_REISERFS_CHECK
                reiserfs_warning(p_s_sb, "journal-699: buffer write failed");
#endif
                return -EIO;
            }
        }
        journal->j_last_flush_trans_id = trans_id;
        journal->j_first_unflushed_offset = offset;
        jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data);
        jh->j_last_flush_trans_id = cpu_to_le32(trans_id);
        jh->j_first_unflushed_offset = cpu_to_le32(offset);
        jh->j_mount_id = cpu_to_le32(journal->j_mount_id);

        if (reiserfs_barrier_flush(p_s_sb)) {
            int ret;
            lock_buffer(journal->j_header_bh);
            ret = submit_barrier_buffer(journal->j_header_bh);
            if (ret == -EOPNOTSUPP) {
                set_buffer_uptodate(journal->j_header_bh);
                disable_barrier(p_s_sb);
                goto sync;
            }
            wait_on_buffer(journal->j_header_bh);
            check_barrier_completion(p_s_sb, journal->j_header_bh);
        } else {
sync:
            set_buffer_dirty(journal->j_header_bh);
            sync_dirty_buffer(journal->j_header_bh);
        }
        if (!buffer_uptodate(journal->j_header_bh)) {
            reiserfs_warning(p_s_sb, "journal-837: IO error during journal replay");
            return -EIO;
        }
    }
    return 0;
}

static int update_journal_header_block(struct super_block *p_s_sb,
                                       unsigned long offset,
                                       unsigned long trans_id) {
    return _update_journal_header_block(p_s_sb, offset, trans_id);
}
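/*
** Note (illustrative): the header block is the journal's replay cutoff.
** Per the comment above _update_journal_header_block, once
** j_last_flush_trans_id and j_first_unflushed_offset have been advanced
** on disk, the flushed transaction's log area will never be replayed, so
** its stretch of the circular log can be overwritten by new transactions
** and its freed blocks handed back to the allocator.
*/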
/*
** flush any and all journal lists older than you are
** can only be called from flush_journal_list
*/
static int flush_older_journal_lists(struct super_block *p_s_sb,
                                     struct reiserfs_journal_list *jl)
{
    struct list_head *entry;
    struct reiserfs_journal_list *other_jl;
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
    unsigned long trans_id = jl->j_trans_id;

    /* we know we are the only ones flushing things, no extra race
     * protection is required.
     */
restart:
    entry = journal->j_journal_list.next;
    /* Did we wrap? */
    if (entry == &journal->j_journal_list)
        return 0;
    other_jl = JOURNAL_LIST_ENTRY(entry);
    if (other_jl->j_trans_id < trans_id) {
        BUG_ON(other_jl->j_refcount <= 0);
        /* do not flush all */
        flush_journal_list(p_s_sb, other_jl, 0);

        /* other_jl is now deleted from the list */
        goto restart;
    }
    return 0;
}
static void del_from_work_list(struct super_block *s,
                               struct reiserfs_journal_list *jl) {
    struct reiserfs_journal *journal = SB_JOURNAL(s);
    if (!list_empty(&jl->j_working_list)) {
        list_del_init(&jl->j_working_list);
        journal->j_num_work_lists--;
    }
}
/* flush a journal list, both commit and real blocks
**
** always set flushall to 1, unless you are calling from inside
** flush_journal_list
**
** IMPORTANT.  This can only be called while there are no journal writers,
** and the journal is locked.  That means it can only be called from
** do_journal_end, or by journal_release
*/
static int flush_journal_list(struct super_block *s,
                              struct reiserfs_journal_list *jl, int flushall) {
  struct reiserfs_journal_list *pjl ;
  struct reiserfs_journal_cnode *cn, *last ;
  int count ;
  int was_jwait = 0 ;
  int was_dirty = 0 ;
  struct buffer_head *saved_bh ;
  unsigned long j_len_saved = jl->j_len ;
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  int err = 0;

  BUG_ON (j_len_saved <= 0);

  if (atomic_read(&journal->j_wcount) != 0) {
    reiserfs_warning(s, "clm-2048: flush_journal_list called with wcount %d",
                     atomic_read(&journal->j_wcount)) ;
  }
  BUG_ON (jl->j_trans_id == 0);

  /* if flushall == 0, the lock is already held */
  if (flushall) {
    down(&journal->j_flush_sem);
  } else if (!down_trylock(&journal->j_flush_sem)) {
    BUG();
  }

  count = 0 ;
  if (j_len_saved > journal->j_trans_max) {
    reiserfs_panic(s, "journal-715: flush_journal_list, length is %lu, trans id %lu\n", j_len_saved, jl->j_trans_id);
    return 0 ;
  }

  get_fs_excl();

  /* if all the work is already done, get out of here */
  if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
      atomic_read(&(jl->j_commit_left)) <= 0) {
    goto flush_older_and_return ;
  }

  /* start by putting the commit list on disk.  This will also flush
  ** the commit lists of any older transactions
  */
  flush_commit_list(s, jl, 1) ;

  if (!(jl->j_state & LIST_DIRTY) && !reiserfs_is_journal_aborted (journal))
    BUG();

  /* are we done now? */
  if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
      atomic_read(&(jl->j_commit_left)) <= 0) {
    goto flush_older_and_return ;
  }

  /* loop through each cnode, see if we need to write it,
  ** or wait on a more recent transaction, or just ignore it
  */
  if (atomic_read(&(journal->j_wcount)) != 0) {
    reiserfs_panic(s, "journal-844: panic journal list is flushing, wcount is not 0\n") ;
  }
  cn = jl->j_realblock ;
  while(cn) {
    was_jwait = 0 ;
    was_dirty = 0 ;
    saved_bh = NULL ;
    /* blocknr of 0 is no longer in the hash, ignore it */
    if (cn->blocknr == 0) {
      goto free_cnode ;
    }

    /* This transaction failed commit. Don't write out to the disk */
    if (!(jl->j_state & LIST_DIRTY))
      goto free_cnode;

    pjl = find_newer_jl_for_cn(cn) ;
    /* the order is important here.  We check pjl to make sure we
    ** don't clear BH_JDirty_wait if we aren't the one writing this
    ** block to disk
    */
    if (!pjl && cn->bh) {
      saved_bh = cn->bh ;

      /* we do this to make sure nobody releases the buffer while
      ** we are working with it
      */
      get_bh(saved_bh) ;

      if (buffer_journal_dirty(saved_bh)) {
        BUG_ON (!can_dirty (cn));
        was_jwait = 1 ;
        was_dirty = 1 ;
      } else if (can_dirty(cn)) {
        /* everything with !pjl && jwait should be writable */
        BUG();
      }
    }

    /* if someone has this block in a newer transaction, just make
    ** sure they are committed, and don't try writing it to disk
    */
    if (pjl) {
      if (atomic_read(&pjl->j_commit_left))
        flush_commit_list(s, pjl, 1) ;
      goto free_cnode ;
    }

    /* bh == NULL when the block got to disk on its own, OR,
    ** the block got freed in a future transaction
    */
    if (saved_bh == NULL) {
      goto free_cnode ;
    }

    /* this should never happen.  kupdate_one_transaction has this list
    ** locked while it works, so we should never see a buffer here that
    ** is not marked JDirty_wait
    */
    if ((!was_jwait) && !buffer_locked(saved_bh)) {
      reiserfs_warning (s, "journal-813: BAD! buffer %llu %cdirty %cjwait, "
                        "not in a newer transaction",
                        (unsigned long long)saved_bh->b_blocknr,
                        was_dirty ? ' ' : '!', was_jwait ? ' ' : '!') ;
    }
    if (was_dirty) {
      /* we inc again because saved_bh gets decremented at free_cnode */
      get_bh(saved_bh) ;
      set_bit(BLOCK_NEEDS_FLUSH, &cn->state) ;
      lock_buffer(saved_bh);
      BUG_ON (cn->blocknr != saved_bh->b_blocknr);
      if (buffer_dirty(saved_bh))
        submit_logged_buffer(saved_bh) ;
      else
        unlock_buffer(saved_bh);
      count++ ;
    } else {
      reiserfs_warning (s, "clm-2082: Unable to flush buffer %llu in %s",
                        (unsigned long long)saved_bh->b_blocknr, __FUNCTION__);
    }
free_cnode:
    last = cn ;
    cn = cn->next ;
    if (saved_bh) {
      /* we incremented this to keep others from taking the buffer head away */
      put_bh(saved_bh) ;
      if (atomic_read(&(saved_bh->b_count)) < 0) {
        reiserfs_warning (s, "journal-945: saved_bh->b_count < 0");
      }
    }
  }
  if (count > 0) {
    cn = jl->j_realblock ;
    while(cn) {
      if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
        if (!cn->bh) {
          reiserfs_panic(s, "journal-1011: cn->bh is NULL\n") ;
        }
        wait_on_buffer(cn->bh) ;
        if (!cn->bh) {
          reiserfs_panic(s, "journal-1012: cn->bh is NULL\n") ;
        }
        if (unlikely (!buffer_uptodate(cn->bh))) {
#ifdef CONFIG_REISERFS_CHECK
          reiserfs_warning(s, "journal-949: buffer write failed\n") ;
#endif
          err = -EIO;
        }
        /* note, we must clear the JDirty_wait bit after the up to date
        ** check, otherwise we race against our flushpage routine
        */
        BUG_ON (!test_clear_buffer_journal_dirty (cn->bh));

        /* undo the inc from journal_mark_dirty */
        put_bh(cn->bh) ;
        brelse(cn->bh) ;
      }
      cn = cn->next ;
    }
  }

  if (err)
    reiserfs_abort (s, -EIO, "Write error while pushing transaction to disk in %s", __FUNCTION__);
flush_older_and_return:

  /* before we can update the journal header block, we _must_ flush all
  ** real blocks from all older transactions to disk.  This is because
  ** once the header block is updated, this transaction will not be
  ** replayed after a crash
  */
  if (flushall) {
    flush_older_journal_lists(s, jl);
  }

  err = journal->j_errno;
  /* before we can remove everything from the hash tables for this
  ** transaction, we must make sure it can never be replayed
  **
  ** since we are only called from do_journal_end, we know for sure there
  ** are no allocations going on while we are flushing journal lists.  So,
  ** we only need to update the journal header block for the last list
  ** being flushed
  */
  if (!err && flushall) {
    err = update_journal_header_block(s, (jl->j_start + jl->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(s), jl->j_trans_id) ;
    if (err)
      reiserfs_abort (s, -EIO, "Write error while updating journal header in %s", __FUNCTION__);
  }
  remove_all_from_journal_list(s, jl, 0) ;
  list_del_init(&jl->j_list);
  journal->j_num_lists--;
  del_from_work_list(s, jl);

  if (journal->j_last_flush_id != 0 &&
      (jl->j_trans_id - journal->j_last_flush_id) != 1) {
    reiserfs_warning(s, "clm-2201: last flush %lu, current %lu",
                     journal->j_last_flush_id, jl->j_trans_id);
  }
  journal->j_last_flush_id = jl->j_trans_id;

  /* not strictly required since we are freeing the list, but it should
   * help find code using dead lists later on
   */
  jl->j_len = 0 ;
  atomic_set(&(jl->j_nonzerolen), 0) ;
  jl->j_start = 0 ;
  jl->j_realblock = NULL ;
  jl->j_commit_bh = NULL ;
  jl->j_trans_id = 0 ;
  jl->j_state = 0;
  put_journal_list(s, jl);
  if (flushall)
    up(&journal->j_flush_sem);
  put_fs_excl();
  return err ;
}
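
/* to summarize the ordering flush_journal_list enforces: commit blocks go
** to disk first (flush_commit_list), then this list's real blocks, then
** everything older (flush_older_and_return), and only then is the header
** block allowed to advance past this transaction.  Breaking that order
** would let a crash land between a header update and the data it claims
** is safe.
*/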
static int write_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl,
                                 struct buffer_chunk *chunk)
{
  struct reiserfs_journal_cnode *cn;
  int ret = 0 ;

  jl->j_state |= LIST_TOUCHED;
  del_from_work_list(s, jl);
  if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0) {
    return 0;
  }

  cn = jl->j_realblock ;
  while(cn) {
    /* if the blocknr == 0, this has been cleared from the hash,
    ** skip it
    */
    if (cn->blocknr == 0) {
      goto next ;
    }
    if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) {
      struct buffer_head *tmp_bh;
      /* we can race against journal_mark_freed when we try
       * to lock_buffer(cn->bh), so we have to inc the buffer
       * count, and recheck things after locking
       */
      tmp_bh = cn->bh;
      get_bh(tmp_bh);
      lock_buffer(tmp_bh);
      if (cn->bh && can_dirty(cn) && buffer_dirty(tmp_bh)) {
        if (!buffer_journal_dirty(tmp_bh) ||
            buffer_journal_prepared(tmp_bh))
          BUG();
        add_to_chunk(chunk, tmp_bh, NULL, write_chunk);
        ret++;
      } else {
        /* note, cn->bh might be null now */
        unlock_buffer(tmp_bh);
      }
      put_bh(tmp_bh);
    }
next:
    cn = cn->next ;
    cond_resched();
  }
  return ret ;
}
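
/* note: add_to_chunk() only queues the locked buffer; the I/O is actually
** issued by the write_chunk() callback, either when the chunk fills up or
** when the caller drains any leftovers explicitly (as kupdate_transactions
** does below).  The chunk size itself is defined with the buffer_chunk
** helpers earlier in this file.
*/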
/* used by flush_commit_list */
static int dirty_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl)
{
  struct reiserfs_journal_cnode *cn;
  struct reiserfs_journal_list *pjl;
  int ret = 0 ;

  jl->j_state |= LIST_DIRTY;
  cn = jl->j_realblock ;
  while(cn) {
    /* look for a more recent transaction that logged this
    ** buffer.  Only the most recent transaction with a buffer in
    ** it is allowed to send that buffer to disk
    */
    pjl = find_newer_jl_for_cn(cn) ;
    if (!pjl && cn->blocknr && cn->bh && buffer_journal_dirty(cn->bh))
    {
      BUG_ON (!can_dirty(cn));
      /* if the buffer is prepared, it will either be logged
       * or restored.  If restored, we need to make sure
       * it actually gets marked dirty
       */
      clear_buffer_journal_new (cn->bh);
      if (buffer_journal_prepared (cn->bh)) {
        set_buffer_journal_restore_dirty (cn->bh);
      } else {
        set_buffer_journal_test (cn->bh);
        mark_buffer_dirty(cn->bh);
      }
    }
    cn = cn->next ;
  }
  return ret ;
}
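
/* so after dirty_one_transaction, every buffer this list still owns is in
** one of two states: journal_prepared (a writer holds it, and the
** restore_dirty bit set above will redirty it when the writer is done),
** or journal_test + dirty, meaning normal writeback may now push it to
** its real location on disk.
*/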
static int kupdate_transactions(struct super_block *s,
                                struct reiserfs_journal_list *jl,
                                struct reiserfs_journal_list **next_jl,
                                unsigned long *next_trans_id,
                                int num_blocks,
                                int num_trans) {
  int ret = 0;
  int written = 0 ;
  int transactions_flushed = 0;
  unsigned long orig_trans_id = jl->j_trans_id;
  struct buffer_chunk chunk;
  struct list_head *entry;
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  chunk.nr = 0;

  down(&journal->j_flush_sem);
  if (!journal_list_still_alive(s, orig_trans_id)) {
    goto done;
  }

  /* we've got j_flush_sem held, nobody is going to delete any
   * of these lists out from underneath us
   */
  while((num_trans && transactions_flushed < num_trans) ||
        (!num_trans && written < num_blocks)) {

    if (jl->j_len == 0 || (jl->j_state & LIST_TOUCHED) ||
        atomic_read(&jl->j_commit_left) || !(jl->j_state & LIST_DIRTY))
    {
      del_from_work_list(s, jl);
      break;
    }
    ret = write_one_transaction(s, jl, &chunk);

    if (ret < 0)
      goto done;
    transactions_flushed++;
    written += ret;
    entry = jl->j_list.next;

    /* did we wrap? */
    if (entry == &journal->j_journal_list) {
      break;
    }
    jl = JOURNAL_LIST_ENTRY(entry);

    /* don't bother with older transactions */
    if (jl->j_trans_id <= orig_trans_id)
      break;
  }
  if (chunk.nr) {
    write_chunk(&chunk);
  }

done:
  up(&journal->j_flush_sem);
  return ret;
}
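
/* note: next_jl and next_trans_id look like resume cursors for the caller,
** but this version never writes through them; flush_used_journal_lists
** below just passes placeholders it never reads back.
*/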
/* o_sync and fsync heavy applications tend to use up
** all the journal list slots with tiny transactions.  These
** trigger lots and lots of calls to update the header block, which
** adds seeks and slows things down.
**
** This function tries to clear out a large chunk of the journal lists
** at once, which makes everything faster since only the newest journal
** list updates the header block
*/
static int flush_used_journal_lists(struct super_block *s,
                                    struct reiserfs_journal_list *jl) {
  unsigned long len = 0;
  unsigned long cur_len;
  int ret;
  int i;
  int limit = 256;
  struct reiserfs_journal_list *tjl;
  struct reiserfs_journal_list *flush_jl;
  unsigned long trans_id;
  struct reiserfs_journal *journal = SB_JOURNAL (s);

  flush_jl = tjl = jl;

  /* in data logging mode, try harder to flush a lot of blocks */
  if (reiserfs_data_log(s))
    limit = 1024;
  /* flush for 256 transactions or limit blocks, whichever comes first */
  for(i = 0 ; i < 256 && len < limit ; i++) {
    if (atomic_read(&tjl->j_commit_left) ||
        tjl->j_trans_id < jl->j_trans_id) {
      break;
    }
    cur_len = atomic_read(&tjl->j_nonzerolen);
    if (cur_len > 0) {
      tjl->j_state &= ~LIST_TOUCHED;
    }
    len += cur_len;
    flush_jl = tjl;
    if (tjl->j_list.next == &journal->j_journal_list)
      break;
    tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
  }
  /* try to find a group of blocks we can flush across all the
  ** transactions, but only bother if we've actually spanned
  ** across multiple lists
  */
  if (flush_jl != jl) {
    ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
  }
  flush_journal_list(s, flush_jl, 1);
  return 0;
}
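
/* a worked example of the batching above: with data logging off
** (limit == 256) and four idle lists of 100 dirty blocks each, the scan
** accumulates len = 100, 200, 300 and then stops because len >= limit.
** flush_jl is the third list, kupdate_transactions() writes those ~300
** blocks in big chunks, and flush_journal_list() then updates the header
** block once for all three transactions instead of three separate times.
*/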
/*
** removes any nodes in table with name block and dev as bh.
** only touches the hnext and hprev pointers.
*/
void remove_journal_hash(struct super_block *sb,
                         struct reiserfs_journal_cnode **table,
                         struct reiserfs_journal_list *jl,
                         unsigned long block, int remove_freed)
{
  struct reiserfs_journal_cnode *cur ;
  struct reiserfs_journal_cnode **head ;

  head = &(journal_hash(table, sb, block)) ;
  if (!head) {
    return ;
  }
  cur = *head ;
  while(cur) {
    if (cur->blocknr == block && cur->sb == sb && (jl == NULL || jl == cur->jlist) &&
        (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) {
      if (cur->hnext) {
        cur->hnext->hprev = cur->hprev ;
      }
      if (cur->hprev) {
        cur->hprev->hnext = cur->hnext ;
      } else {
        *head = cur->hnext ;
      }
      cur->blocknr = 0 ;
      cur->sb = NULL ;
      cur->state = 0 ;
      if (cur->bh && cur->jlist) /* anybody who clears the cur->bh will also dec the nonzerolen */
        atomic_dec(&(cur->jlist->j_nonzerolen)) ;
      cur->bh = NULL ;
      cur->jlist = NULL ;
    }
    cur = cur->hnext ;
  }
}
static void free_journal_ram(struct super_block *p_s_sb) {
  struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
  reiserfs_kfree(journal->j_current_jl,
                 sizeof(struct reiserfs_journal_list), p_s_sb);
  journal->j_num_lists--;

  vfree(journal->j_cnode_free_orig) ;
  free_list_bitmaps(p_s_sb, journal->j_list_bitmap) ;
  free_bitmap_nodes(p_s_sb) ; /* must be after free_list_bitmaps */
  if (journal->j_header_bh) {
    brelse(journal->j_header_bh) ;
  }
  /* j_header_bh is on the journal dev, make sure not to release the journal
   * dev until we brelse j_header_bh
   */
  release_journal_dev(p_s_sb, journal);
  vfree(journal) ;
}

/*
** call on unmount.  Only set error to 1 if you haven't made your way out
** of read_super() yet.  Any other caller must keep error at 0.
*/
static int do_journal_release(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, int error) {
  struct reiserfs_transaction_handle myth ;
  int flushed = 0;
  struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

  /* we only want to flush out transactions if we were called with error == 0
  */
  if (!error && !(p_s_sb->s_flags & MS_RDONLY)) {
    /* end the current trans */
    BUG_ON (!th->t_trans_id);
    do_journal_end(th, p_s_sb, 10, FLUSH_ALL) ;

    /* make sure something gets logged to force our way into the flush code */
    if (!journal_join(&myth, p_s_sb, 1)) {
      reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
      journal_mark_dirty(&myth, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
      do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL) ;
      flushed = 1;
    }
  }

  /* this also catches errors during the do_journal_end above */
  if (!error && reiserfs_is_journal_aborted(journal)) {
    memset(&myth, 0, sizeof(myth));
    if (!journal_join_abort(&myth, p_s_sb, 1)) {
      reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
      journal_mark_dirty(&myth, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
      do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL) ;
    }
  }

  reiserfs_mounted_fs_count-- ;
  /* wait for all commits to finish */
  cancel_delayed_work(&SB_JOURNAL(p_s_sb)->j_work);
  flush_workqueue(commit_wq);
  if (!reiserfs_mounted_fs_count) {
    destroy_workqueue(commit_wq);
    commit_wq = NULL;
  }

  free_journal_ram(p_s_sb) ;

  return 0 ;
}

/*
** call on unmount.  flush all journal trans, release all alloc'd ram
*/
int journal_release(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb) {
  return do_journal_release(th, p_s_sb, 0) ;
}

/*
** only call from an error condition inside reiserfs_read_super!
*/
int journal_release_error(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb) {
  return do_journal_release(th, p_s_sb, 1) ;
}

/* compares description block with commit block.  returns 1 if they differ, 0 if they are the same */
static int journal_compare_desc_commit(struct super_block *p_s_sb, struct reiserfs_journal_desc *desc,
                                       struct reiserfs_journal_commit *commit) {
  if (get_commit_trans_id (commit) != get_desc_trans_id (desc) ||
      get_commit_trans_len (commit) != get_desc_trans_len (desc) ||
      get_commit_trans_len (commit) > SB_JOURNAL(p_s_sb)->j_trans_max ||
      get_commit_trans_len (commit) <= 0
  ) {
    return 1 ;
  }
  return 0 ;
}
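
/* the on-disk layout being validated here is, per transaction:
**
**   desc block | trans_len real blocks | commit block
**
** all at consecutive (mod journal size) offsets, which is why the commit
** block is read at (offset + trans_len + 1) below and the next
** transaction's desc block starts at (offset + trans_len + 2).
*/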
/* returns 0 if it did not find a description block
** returns -1 if it found a corrupt commit block
** returns 1 if both desc and commit were valid
*/
static int journal_transaction_is_valid(struct super_block *p_s_sb, struct buffer_head *d_bh, unsigned long *oldest_invalid_trans_id, unsigned long *newest_mount_id) {
  struct reiserfs_journal_desc *desc ;
  struct reiserfs_journal_commit *commit ;
  struct buffer_head *c_bh ;
  unsigned long offset ;

  if (!d_bh)
    return 0 ;

  desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
  if (get_desc_trans_len(desc) > 0 && !memcmp(get_journal_desc_magic (d_bh), JOURNAL_DESC_MAGIC, 8)) {
    if (oldest_invalid_trans_id && *oldest_invalid_trans_id && get_desc_trans_id(desc) > *oldest_invalid_trans_id) {
      reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-986: transaction "
                     "is valid returning because trans_id %d is greater than "
                     "oldest_invalid %lu", get_desc_trans_id(desc),
                     *oldest_invalid_trans_id);
      return 0 ;
    }
    if (newest_mount_id && *newest_mount_id > get_desc_mount_id (desc)) {
      reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1087: transaction "
                     "is valid returning because mount_id %d is less than "
                     "newest_mount_id %lu", get_desc_mount_id (desc),
                     *newest_mount_id) ;
      return -1 ;
    }
    if ( get_desc_trans_len(desc) > SB_JOURNAL(p_s_sb)->j_trans_max ) {
      reiserfs_warning(p_s_sb, "journal-2018: Bad transaction length %d encountered, ignoring transaction", get_desc_trans_len(desc));
      return -1 ;
    }
    offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;

    /* ok, we have a journal description block, lets see if the transaction was valid */
    c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                         ((offset + get_desc_trans_len(desc) + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
    if (!c_bh)
      return 0 ;
    commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
    if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
      reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
                     "journal_transaction_is_valid, commit offset %ld had bad "
                     "time %d or length %d",
                     c_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                     get_commit_trans_id (commit),
                     get_commit_trans_len(commit));
      brelse(c_bh) ;
      if (oldest_invalid_trans_id) {
        *oldest_invalid_trans_id = get_desc_trans_id(desc) ;
        reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1004: "
                       "transaction_is_valid setting oldest invalid trans_id "
                       "to %d", get_desc_trans_id(desc)) ;
      }
      return -1;
    }
    brelse(c_bh) ;
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1006: found valid "
                   "transaction start offset %llu, len %d id %d",
                   d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                   get_desc_trans_len(desc), get_desc_trans_id(desc)) ;
    return 1 ;
  } else {
    return 0 ;
  }
}

static void brelse_array(struct buffer_head **heads, int num) {
  int i ;
  for (i = 0 ; i < num ; i++) {
    brelse(heads[i]) ;
  }
}

/*
** given the start, and values for the oldest acceptable transactions,
** this either reads in and replays a transaction, or returns because the
** transaction is invalid, or too old.
*/
static int journal_read_transaction(struct super_block *p_s_sb, unsigned long cur_dblock, unsigned long oldest_start,
                                    unsigned long oldest_trans_id, unsigned long newest_mount_id) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_journal_desc *desc ;
  struct reiserfs_journal_commit *commit ;
  unsigned long trans_id = 0 ;
  struct buffer_head *c_bh ;
  struct buffer_head *d_bh ;
  struct buffer_head **log_blocks = NULL ;
  struct buffer_head **real_blocks = NULL ;
  unsigned long trans_offset ;
  int i;
  int trans_half;

  d_bh = journal_bread(p_s_sb, cur_dblock) ;
  if (!d_bh)
    return 1 ;
  desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
  trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
  reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1037: "
                 "journal_read_transaction, offset %llu, len %d mount_id %d",
                 d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                 get_desc_trans_len(desc), get_desc_mount_id(desc)) ;
  if (get_desc_trans_id(desc) < oldest_trans_id) {
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1039: "
                   "journal_read_trans skipping because %lu is too old",
                   cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)) ;
    brelse(d_bh) ;
    return 1 ;
  }
  if (get_desc_mount_id(desc) != newest_mount_id) {
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1146: "
                   "journal_read_trans skipping because %d is != "
                   "newest_mount_id %lu", get_desc_mount_id(desc),
                   newest_mount_id) ;
    brelse(d_bh) ;
    return 1 ;
  }
  c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                       ((trans_offset + get_desc_trans_len(desc) + 1) %
                        SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
  if (!c_bh) {
    brelse(d_bh) ;
    return 1 ;
  }
  commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
  if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal_read_transaction, "
                   "commit offset %llu had bad time %d or length %d",
                   c_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                   get_commit_trans_id(commit), get_commit_trans_len(commit));
    brelse(c_bh) ;
    brelse(d_bh) ;
    return 1;
  }
  trans_id = get_desc_trans_id(desc) ;
  /* now we know we've got a good transaction, and it was inside the valid time ranges */
  log_blocks = reiserfs_kmalloc(get_desc_trans_len(desc) * sizeof(struct buffer_head *), GFP_NOFS, p_s_sb) ;
  real_blocks = reiserfs_kmalloc(get_desc_trans_len(desc) * sizeof(struct buffer_head *), GFP_NOFS, p_s_sb) ;
  if (!log_blocks || !real_blocks) {
    brelse(c_bh) ;
    brelse(d_bh) ;
    reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
    reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
    reiserfs_warning(p_s_sb, "journal-1169: kmalloc failed, unable to mount FS") ;
    return -1 ;
  }
  /* get all the buffer heads */
  trans_half = journal_trans_half (p_s_sb->s_blocksize) ;
  for(i = 0 ; i < get_desc_trans_len(desc) ; i++) {
    log_blocks[i] = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + (trans_offset + 1 + i) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
    if (i < trans_half) {
      real_blocks[i] = sb_getblk(p_s_sb, le32_to_cpu(desc->j_realblock[i])) ;
    } else {
      real_blocks[i] = sb_getblk(p_s_sb, le32_to_cpu(commit->j_realblock[i - trans_half])) ;
    }
    if ( real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(p_s_sb) ) {
      reiserfs_warning(p_s_sb, "journal-1207: REPLAY FAILURE fsck required! Block to replay is outside of filesystem");
      goto abort_replay;
    }
    /* make sure we don't try to replay onto log or reserved area */
    if (is_block_in_log_or_reserved_area(p_s_sb, real_blocks[i]->b_blocknr)) {
      reiserfs_warning(p_s_sb, "journal-1204: REPLAY FAILURE fsck required! Trying to replay onto a log block") ;
abort_replay:
      brelse_array(log_blocks, i) ;
      brelse_array(real_blocks, i) ;
      brelse(c_bh) ;
      brelse(d_bh) ;
      reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      return -1 ;
    }
  }
  /* read in the log blocks, memcpy to the corresponding real block */
  ll_rw_block(READ, get_desc_trans_len(desc), log_blocks) ;
  for (i = 0 ; i < get_desc_trans_len(desc) ; i++) {
    wait_on_buffer(log_blocks[i]) ;
    if (!buffer_uptodate(log_blocks[i])) {
      reiserfs_warning(p_s_sb, "journal-1212: REPLAY FAILURE fsck required! buffer read failed") ;
      brelse_array(log_blocks + i, get_desc_trans_len(desc) - i) ;
      brelse_array(real_blocks, get_desc_trans_len(desc)) ;
      brelse(c_bh) ;
      brelse(d_bh) ;
      reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      return -1 ;
    }
    memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data, real_blocks[i]->b_size) ;
    set_buffer_uptodate(real_blocks[i]) ;
    brelse(log_blocks[i]) ;
  }
  /* flush out the real blocks */
  for (i = 0 ; i < get_desc_trans_len(desc) ; i++) {
    set_buffer_dirty(real_blocks[i]) ;
    ll_rw_block(WRITE, 1, real_blocks + i) ;
  }
  for (i = 0 ; i < get_desc_trans_len(desc) ; i++) {
    wait_on_buffer(real_blocks[i]) ;
    if (!buffer_uptodate(real_blocks[i])) {
      reiserfs_warning(p_s_sb, "journal-1226: REPLAY FAILURE, fsck required! buffer write failed") ;
      brelse_array(real_blocks + i, get_desc_trans_len(desc) - i) ;
      brelse(c_bh) ;
      brelse(d_bh) ;
      reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      return -1 ;
    }
    brelse(real_blocks[i]) ;
  }
  cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + ((trans_offset + get_desc_trans_len(desc) + 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)) ;
  reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1095: setting journal "
                 "start to offset %ld",
                 cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)) ;

  /* init starting values for the first transaction, in case this is the last transaction to be replayed. */
  journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
  journal->j_last_flush_trans_id = trans_id ;
  journal->j_trans_id = trans_id + 1;
  brelse(c_bh) ;
  brelse(d_bh) ;
  reiserfs_kfree(log_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
  reiserfs_kfree(real_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
  return 0 ;
}

/* This function reads blocks of bufsize size, starting at block and going
   no further than max_block (and no more than BUFNR blocks at a time).
   This proved to improve mounting speed on self-rebuilding raid5 arrays
   at least.  Right now it is only used from journal code.  But later we
   might use it from other places.
   Note: Do not use journal_getblk/sb_getblk functions here! */
static struct buffer_head * reiserfs_breada (struct block_device *dev, int block, int bufsize,
                                             unsigned int max_block)
{
  struct buffer_head * bhlist[BUFNR];
  unsigned int blocks = BUFNR;
  struct buffer_head * bh;
  int i, j;

  bh = __getblk (dev, block, bufsize);
  if (buffer_uptodate (bh))
    return (bh);

  if (block + BUFNR > max_block) {
    blocks = max_block - block;
  }
  bhlist[0] = bh;
  j = 1;
  for (i = 1; i < blocks; i++) {
    bh = __getblk (dev, block + i, bufsize);
    if (buffer_uptodate (bh)) {
      brelse (bh);
      break;
    } else
      bhlist[j++] = bh;
  }
  ll_rw_block (READ, j, bhlist);
  for (i = 1; i < j; i++)
    brelse (bhlist[i]);
  bh = bhlist[0];
  wait_on_buffer (bh);
  if (buffer_uptodate (bh))
    return bh;
  brelse (bh);
  return NULL;
}
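
/* note: only bhlist[0] is waited on above; the other j - 1 buffers are
** pure readahead.  They are released right after submission, but stay in
** the page cache while their I/O completes, so the next call usually
** finds them already uptodate and returns immediately.
*/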

/*
** read and replay the log
** on a clean unmount, the journal header's next unflushed pointer will be to an invalid
** transaction.  This tests that before finding all the transactions in the log, which makes normal mount times fast.
**
** After a crash, this starts with the next unflushed transaction, and replays until it finds one too old, or invalid.
**
** On exit, it sets things up so the first transaction will work correctly.
*/
static int journal_read(struct super_block *p_s_sb) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_journal_desc *desc ;
  unsigned long oldest_trans_id = 0;
  unsigned long oldest_invalid_trans_id = 0 ;
  time_t start ;
  unsigned long oldest_start = 0;
  unsigned long cur_dblock = 0 ;
  unsigned long newest_mount_id = 9 ;
  struct buffer_head *d_bh ;
  struct reiserfs_journal_header *jh ;
  int valid_journal_header = 0 ;
  int replay_count = 0 ;
  int continue_replay = 1 ;
  int ret ;
  char b[BDEVNAME_SIZE];

  cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
  reiserfs_info (p_s_sb, "checking transaction log (%s)\n",
                 bdevname(journal->j_dev_bd, b));
  start = get_seconds();

  /* step 1, read in the journal header block.  Check the transaction it says
  ** is the first unflushed, and if that transaction is not valid,
  ** replay is done
  */
  journal->j_header_bh = journal_bread(p_s_sb,
                                       SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                                       SB_ONDISK_JOURNAL_SIZE(p_s_sb));
  if (!journal->j_header_bh) {
    return 1 ;
  }
  jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data) ;
  if (le32_to_cpu(jh->j_first_unflushed_offset) >= 0 &&
      le32_to_cpu(jh->j_first_unflushed_offset) < SB_ONDISK_JOURNAL_SIZE(p_s_sb) &&
      le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
    oldest_start = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                   le32_to_cpu(jh->j_first_unflushed_offset) ;
    oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
    newest_mount_id = le32_to_cpu(jh->j_mount_id);
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1153: found in "
                   "header: first_unflushed_offset %d, last_flushed_trans_id "
                   "%lu", le32_to_cpu(jh->j_first_unflushed_offset),
                   le32_to_cpu(jh->j_last_flush_trans_id)) ;
    valid_journal_header = 1 ;

    /* now, we try to read the first unflushed offset.  If it is not valid,
    ** there is nothing more we can do, and it makes no sense to read
    ** through the whole log.
    */
    d_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + le32_to_cpu(jh->j_first_unflushed_offset)) ;
    ret = journal_transaction_is_valid(p_s_sb, d_bh, NULL, NULL) ;
    if (!ret) {
      continue_replay = 0 ;
    }
    brelse(d_bh) ;
    goto start_log_replay;
  }

  if (continue_replay && bdev_read_only(p_s_sb->s_bdev)) {
    reiserfs_warning (p_s_sb,
                      "clm-2076: device is readonly, unable to replay log") ;
    return -1 ;
  }

  /* ok, there are transactions that need to be replayed.  start with the first log block, find
  ** all the valid transactions, and pick out the oldest.
  */
  while(continue_replay && cur_dblock < (SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb))) {
    /* Note that it is required for blocksize of primary fs device and journal
       device to be the same */
    d_bh = reiserfs_breada(journal->j_dev_bd, cur_dblock, p_s_sb->s_blocksize,
                           SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb)) ;
    ret = journal_transaction_is_valid(p_s_sb, d_bh, &oldest_invalid_trans_id, &newest_mount_id) ;
    if (ret == 1) {
      desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
      if (oldest_start == 0) { /* init all oldest_ values */
        oldest_trans_id = get_desc_trans_id(desc) ;
        oldest_start = d_bh->b_blocknr ;
        newest_mount_id = get_desc_mount_id(desc) ;
        reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1179: Setting "
                       "oldest_start to offset %llu, trans_id %lu",
                       oldest_start - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                       oldest_trans_id) ;
      } else if (oldest_trans_id > get_desc_trans_id(desc)) {
        /* one we just read was older */
        oldest_trans_id = get_desc_trans_id(desc) ;
        oldest_start = d_bh->b_blocknr ;
        reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1180: Resetting "
                       "oldest_start to offset %lu, trans_id %lu",
                       oldest_start - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                       oldest_trans_id) ;
      }
      if (newest_mount_id < get_desc_mount_id(desc)) {
        newest_mount_id = get_desc_mount_id(desc) ;
        reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
                       "newest_mount_id to %d", get_desc_mount_id(desc));
      }
      cur_dblock += get_desc_trans_len(desc) + 2 ;
    } else {
      cur_dblock++ ;
    }
    brelse(d_bh) ;
  }

start_log_replay:
  cur_dblock = oldest_start ;
  if (oldest_trans_id) {
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1206: Starting replay "
                   "from offset %llu, trans_id %lu",
                   cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                   oldest_trans_id) ;
  }
  replay_count = 0 ;
  while(continue_replay && oldest_trans_id > 0) {
    ret = journal_read_transaction(p_s_sb, cur_dblock, oldest_start, oldest_trans_id, newest_mount_id) ;
    if (ret < 0) {
      return ret ;
    } else if (ret != 0) {
      break ;
    }
    cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + journal->j_start ;
    replay_count++ ;
    if (cur_dblock == oldest_start)
      break;
  }

  if (oldest_trans_id == 0) {
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1225: No valid "
                   "transactions found") ;
  }
  /* j_start does not get set correctly if we don't replay any transactions.
  ** if we had a valid journal_header, set j_start to the first unflushed transaction value,
  ** copy the trans_id from the header
  */
  if (valid_journal_header && replay_count == 0) {
    journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset) ;
    journal->j_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
    journal->j_last_flush_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) ;
    journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
  } else {
    journal->j_mount_id = newest_mount_id + 1 ;
  }
  reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
                 "newest_mount_id to %lu", journal->j_mount_id) ;
  journal->j_first_unflushed_offset = journal->j_start ;
  if (replay_count > 0) {
    reiserfs_info (p_s_sb, "replayed %d transactions in %lu seconds\n",
                   replay_count, get_seconds() - start) ;
  }
  if (!bdev_read_only(p_s_sb->s_bdev) &&
      _update_journal_header_block(p_s_sb, journal->j_start,
                                   journal->j_last_flush_trans_id))
  {
    /* replay failed, caller must call free_journal_ram and abort
    ** the mount
    */
    return -1 ;
  }
  return 0 ;
}

static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s)
{
  struct reiserfs_journal_list *jl;
retry:
  jl = reiserfs_kmalloc(sizeof(struct reiserfs_journal_list), GFP_NOFS, s);
  if (!jl) {
    yield();
    goto retry;
  }
  memset(jl, 0, sizeof(*jl));
  INIT_LIST_HEAD(&jl->j_list);
  INIT_LIST_HEAD(&jl->j_working_list);
  INIT_LIST_HEAD(&jl->j_tail_bh_list);
  INIT_LIST_HEAD(&jl->j_bh_list);
  sema_init(&jl->j_commit_lock, 1);
  SB_JOURNAL(s)->j_num_lists++;
  get_journal_list(jl);
  return jl;
}
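
/* the retry/yield loop above relies on this being a small allocation:
** rather than failing the caller it spins until kmalloc succeeds, so it
** must never be used for anything that might not eventually fit.
*/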

static void journal_list_init(struct super_block *p_s_sb) {
  SB_JOURNAL(p_s_sb)->j_current_jl = alloc_journal_list(p_s_sb);
}

static int release_journal_dev(struct super_block *super,
                               struct reiserfs_journal *journal)
{
  int result;

  result = 0;

  if (journal->j_dev_file != NULL) {
    result = filp_close(journal->j_dev_file, NULL);
    journal->j_dev_file = NULL;
    journal->j_dev_bd = NULL;
  } else if (journal->j_dev_bd != NULL) {
    result = blkdev_put(journal->j_dev_bd);
    journal->j_dev_bd = NULL;
  }

  if (result != 0) {
    reiserfs_warning(super, "sh-457: release_journal_dev: Cannot release journal device: %i", result);
  }
  return result;
}

static int journal_init_dev(struct super_block *super,
                            struct reiserfs_journal *journal,
                            const char *jdev_name)
{
  int result;
  dev_t jdev;
  int blkdev_mode = FMODE_READ | FMODE_WRITE;
  char b[BDEVNAME_SIZE];

  result = 0;

  journal->j_dev_bd = NULL;
  journal->j_dev_file = NULL;
  jdev = SB_ONDISK_JOURNAL_DEVICE(super) ?
         new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;

  if (bdev_read_only(super->s_bdev))
    blkdev_mode = FMODE_READ;

  /* there is no "jdev" option and journal is on separate device */
  if ((!jdev_name || !jdev_name[0])) {
    journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode);
    if (IS_ERR(journal->j_dev_bd)) {
      result = PTR_ERR(journal->j_dev_bd);
      journal->j_dev_bd = NULL;
      reiserfs_warning (super, "sh-458: journal_init_dev: "
                        "cannot init journal device '%s': %i",
                        __bdevname(jdev, b), result);
      return result;
    } else if (jdev != super->s_dev)
      set_blocksize(journal->j_dev_bd, super->s_blocksize);
    return 0;
  }

  journal->j_dev_file = filp_open(jdev_name, 0, 0);
  if (!IS_ERR(journal->j_dev_file)) {
    struct inode *jdev_inode = journal->j_dev_file->f_mapping->host;
    if (!S_ISBLK(jdev_inode->i_mode)) {
      reiserfs_warning(super, "journal_init_dev: '%s' is "
                       "not a block device", jdev_name);
      result = -ENOTBLK;
      release_journal_dev(super, journal);
    } else {
      /* ok */
      journal->j_dev_bd = I_BDEV(jdev_inode);
      set_blocksize(journal->j_dev_bd, super->s_blocksize);
      reiserfs_info(super, "journal_init_dev: journal device: %s\n",
                    bdevname(journal->j_dev_bd, b));
    }
  } else {
    result = PTR_ERR(journal->j_dev_file);
    journal->j_dev_file = NULL;
    reiserfs_warning (super,
                      "journal_init_dev: Cannot open '%s': %i",
                      jdev_name, result);
  }
  return result;
}
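
/* so there are two ways onto a separate journal device: the on-disk super
** block can record a device number (opened above via open_by_devnum), or
** the user can pass a path through the "jdev" mount option, in which case
** it is opened by name with filp_open().  Either way the journal device
** is forced to the filesystem's block size, which replay depends on.
*/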

/*
** must be called once on fs mount.  calls journal_read for you
*/
int journal_init(struct super_block *p_s_sb, const char * j_dev_name, int old_format, unsigned int commit_max_age) {
  int num_cnodes = SB_ONDISK_JOURNAL_SIZE(p_s_sb) * 2 ;
  struct buffer_head *bhjh;
  struct reiserfs_super_block * rs;
  struct reiserfs_journal_header *jh;
  struct reiserfs_journal *journal;
  struct reiserfs_journal_list *jl;
  char b[BDEVNAME_SIZE];

  journal = SB_JOURNAL(p_s_sb) = vmalloc(sizeof (struct reiserfs_journal)) ;
  if (!journal) {
    reiserfs_warning (p_s_sb, "journal-1256: unable to get memory for journal structure") ;
    return 1 ;
  }
  memset(journal, 0, sizeof(struct reiserfs_journal)) ;
  INIT_LIST_HEAD(&journal->j_bitmap_nodes) ;
  INIT_LIST_HEAD (&journal->j_prealloc_list);
  INIT_LIST_HEAD(&journal->j_working_list);
  INIT_LIST_HEAD(&journal->j_journal_list);
  journal->j_persistent_trans = 0;
  if (reiserfs_allocate_list_bitmaps(p_s_sb,
                                     journal->j_list_bitmap,
                                     SB_BMAP_NR(p_s_sb)))
    goto free_and_return ;
  allocate_bitmap_nodes(p_s_sb) ;

  /* reserved for journal area support */
  SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) = (old_format ?
                                           REISERFS_OLD_DISK_OFFSET_IN_BYTES / p_s_sb->s_blocksize +
                                           SB_BMAP_NR(p_s_sb) + 1 :
                                           REISERFS_DISK_OFFSET_IN_BYTES / p_s_sb->s_blocksize + 2);

  /* Sanity check to see if the standard journal fits within the first
     bitmap (relevant for small block sizes) */
  if ( !SB_ONDISK_JOURNAL_DEVICE( p_s_sb ) &&
       (SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb) > p_s_sb->s_blocksize * 8) ) {
    reiserfs_warning (p_s_sb, "journal-1393: journal does not fit in the area "
                      "addressed by the first bitmap block. It starts at "
                      "%u and its size is %u. Block size %ld",
                      SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb),
                      SB_ONDISK_JOURNAL_SIZE(p_s_sb), p_s_sb->s_blocksize);
    goto free_and_return;
  }

  if( journal_init_dev( p_s_sb, journal, j_dev_name ) != 0 ) {
    reiserfs_warning (p_s_sb, "sh-462: unable to initialize journal device");
    goto free_and_return;
  }

  rs = SB_DISK_SUPER_BLOCK(p_s_sb);

  /* read journal header */
  bhjh = journal_bread(p_s_sb,
                       SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb));
  if (!bhjh) {
    reiserfs_warning (p_s_sb, "sh-459: unable to read journal header");
    goto free_and_return;
  }
  jh = (struct reiserfs_journal_header *)(bhjh->b_data);

  /* make sure that the journal matches the super block */
  if (is_reiserfs_jr(rs) && (le32_to_cpu(jh->jh_journal.jp_journal_magic) != sb_jp_journal_magic(rs))) {
    reiserfs_warning (p_s_sb, "sh-460: journal header magic %x "
                      "(device %s) does not match the magic found in the "
                      "super block %x",
                      jh->jh_journal.jp_journal_magic,
                      bdevname( journal->j_dev_bd, b),
                      sb_jp_journal_magic(rs));
    brelse (bhjh);
    goto free_and_return;
  }

  journal->j_trans_max = le32_to_cpu (jh->jh_journal.jp_journal_trans_max);
  journal->j_max_batch = le32_to_cpu (jh->jh_journal.jp_journal_max_batch);
  journal->j_max_commit_age = le32_to_cpu (jh->jh_journal.jp_journal_max_commit_age);
  journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE;

  if (journal->j_trans_max) {
    /* make sure these parameters are sane, and reassign them if they are not */
    __u32 initial = journal->j_trans_max;
    __u32 ratio = 1;

    if (p_s_sb->s_blocksize < 4096)
      ratio = 4096 / p_s_sb->s_blocksize;

    if (SB_ONDISK_JOURNAL_SIZE(p_s_sb)/journal->j_trans_max < JOURNAL_MIN_RATIO)
      journal->j_trans_max = SB_ONDISK_JOURNAL_SIZE(p_s_sb) / JOURNAL_MIN_RATIO;
    if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio)
      journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT / ratio;
    if (journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio)
      journal->j_trans_max = JOURNAL_TRANS_MIN_DEFAULT / ratio;

    if (journal->j_trans_max != initial)
      reiserfs_warning (p_s_sb, "sh-461: journal_init: wrong transaction max size (%u). Changed to %u",
                        initial, journal->j_trans_max);

    journal->j_max_batch = journal->j_trans_max *
                           JOURNAL_MAX_BATCH_DEFAULT / JOURNAL_TRANS_MAX_DEFAULT;
  }

  if (!journal->j_trans_max) {
    /* the file system was created by an old version of mkreiserfs,
       so this field contains zero */
    journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT ;
    journal->j_max_batch = JOURNAL_MAX_BATCH_DEFAULT ;
    journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE ;

    /* for blocksize >= 4096 - max transaction size is 1024.  For block size < 4096
       trans max size is decreased proportionally */
    if (p_s_sb->s_blocksize < 4096) {
      journal->j_trans_max /= (4096 / p_s_sb->s_blocksize) ;
      journal->j_max_batch = (journal->j_trans_max) * 9 / 10 ;
    }
  }
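
  /* a worked example of the defaulting above, assuming the usual
  ** JOURNAL_TRANS_MAX_DEFAULT of 1024: on a 1k-blocksize filesystem made
  ** by an old mkreiserfs, j_trans_max becomes 1024 / (4096/1024) = 256 and
  ** j_max_batch becomes 256 * 9 / 10 = 230.  On a 4k filesystem the
  ** defaults stand unscaled.
  */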

  journal->j_default_max_commit_age = journal->j_max_commit_age;

  if (commit_max_age != 0) {
    journal->j_max_commit_age = commit_max_age;
    journal->j_max_trans_age = commit_max_age;
  }

  reiserfs_info (p_s_sb, "journal params: device %s, size %u, "
                 "journal first block %u, max trans len %u, max batch %u, "
                 "max commit age %u, max trans age %u\n",
                 bdevname( journal->j_dev_bd, b),
                 SB_ONDISK_JOURNAL_SIZE(p_s_sb),
                 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                 journal->j_trans_max,
                 journal->j_max_batch,
                 journal->j_max_commit_age,
                 journal->j_max_trans_age);

  brelse (bhjh);

  journal->j_list_bitmap_index = 0 ;
  journal_list_init(p_s_sb) ;

  memset(journal->j_list_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)) ;

  INIT_LIST_HEAD(&journal->j_dirty_buffers) ;
  spin_lock_init(&journal->j_dirty_buffers_lock) ;

  journal->j_start = 0 ;
  journal->j_len = 0 ;
  journal->j_len_alloc = 0 ;
  atomic_set(&(journal->j_wcount), 0) ;
  atomic_set(&(journal->j_async_throttle), 0) ;
  journal->j_bcount = 0 ;
  journal->j_trans_start_time = 0 ;
  journal->j_last = NULL ;
  journal->j_first = NULL ;
  init_waitqueue_head(&(journal->j_join_wait)) ;
  sema_init(&journal->j_lock, 1);
  sema_init(&journal->j_flush_sem, 1);

  journal->j_trans_id = 10 ;
  journal->j_mount_id = 10 ;
  journal->j_state = 0 ;
  atomic_set(&(journal->j_jlock), 0) ;
  journal->j_cnode_free_list = allocate_cnodes(num_cnodes) ;
  journal->j_cnode_free_orig = journal->j_cnode_free_list ;
  journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0 ;
  journal->j_cnode_used = 0 ;
  journal->j_must_wait = 0 ;

  init_journal_hash(p_s_sb) ;
  jl = journal->j_current_jl;
  jl->j_list_bitmap = get_list_bitmap(p_s_sb, jl);
  if (!jl->j_list_bitmap) {
    reiserfs_warning(p_s_sb, "journal-2005, get_list_bitmap failed for journal list 0") ;
    goto free_and_return;
  }
  if (journal_read(p_s_sb) < 0) {
    reiserfs_warning(p_s_sb, "Replay Failure, unable to mount") ;
    goto free_and_return;
  }

  reiserfs_mounted_fs_count++ ;
  if (reiserfs_mounted_fs_count <= 1)
    commit_wq = create_workqueue("reiserfs");

  INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb);
  return 0 ;
free_and_return:
  free_journal_ram(p_s_sb);
  return 1;
}

/*
** test for a polite end of the current transaction.  Used by file_write, and should
** be used by delete to make sure they don't write more than can fit inside a single
** transaction
*/
int journal_transaction_should_end(struct reiserfs_transaction_handle *th, int new_alloc) {
  struct reiserfs_journal *journal = SB_JOURNAL (th->t_super);
  time_t now = get_seconds() ;
  /* cannot restart while nested */
  BUG_ON (!th->t_trans_id);
  if (th->t_refcount > 1)
    return 0 ;
  if ( journal->j_must_wait > 0 ||
       (journal->j_len_alloc + new_alloc) >= journal->j_max_batch ||
       atomic_read(&(journal->j_jlock)) ||
       (now - journal->j_trans_start_time) > journal->j_max_trans_age ||
       journal->j_cnode_free < (journal->j_trans_max * 3)) {
    return 1 ;
  }
  return 0 ;
}
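
/* a typical caller pattern, as a sketch only (the write path wraps this in
** its own restart helper): when this returns 1, the caller does
**
**     journal_end(th, sb, nblocks) ;
**     journal_begin(th, sb, nblocks) ;
**
** to politely finish the fat transaction and continue in a fresh one.
*/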

/* this must be called inside a transaction, and requires the
** kernel_lock to be held
*/
void reiserfs_block_writes(struct reiserfs_transaction_handle *th) {
  struct reiserfs_journal *journal = SB_JOURNAL (th->t_super);
  BUG_ON (!th->t_trans_id);
  journal->j_must_wait = 1 ;
  set_bit(J_WRITERS_BLOCKED, &journal->j_state) ;
  return ;
}

/* this must be called without a transaction started, and does not
** require BKL
*/
void reiserfs_allow_writes(struct super_block *s) {
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  clear_bit(J_WRITERS_BLOCKED, &journal->j_state) ;
  wake_up(&journal->j_join_wait) ;
}

/* this must be called without a transaction started, and does not
** require BKL
*/
void reiserfs_wait_on_write_block(struct super_block *s) {
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  wait_event(journal->j_join_wait,
             !test_bit(J_WRITERS_BLOCKED, &journal->j_state)) ;
}

static void queue_log_writer(struct super_block *s) {
  wait_queue_t wait;
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  set_bit(J_WRITERS_QUEUED, &journal->j_state);

  /*
   * we don't want to use wait_event here because
   * we only want to wait once.
   */
  init_waitqueue_entry(&wait, current);
  add_wait_queue(&journal->j_join_wait, &wait);
  set_current_state(TASK_UNINTERRUPTIBLE);
  if (test_bit(J_WRITERS_QUEUED, &journal->j_state))
    schedule();
  current->state = TASK_RUNNING;
  remove_wait_queue(&journal->j_join_wait, &wait);
}

static void wake_queued_writers(struct super_block *s) {
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state))
    wake_up(&journal->j_join_wait);
}

static void let_transaction_grow(struct super_block *sb,
                                 unsigned long trans_id)
{
  struct reiserfs_journal *journal = SB_JOURNAL (sb);
  unsigned long bcount = journal->j_bcount;
  while(1) {
    set_current_state(TASK_UNINTERRUPTIBLE);
    schedule_timeout(1);
    journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
    while ((atomic_read(&journal->j_wcount) > 0 ||
            atomic_read(&journal->j_jlock)) &&
           journal->j_trans_id == trans_id) {
      queue_log_writer(sb);
    }
    if (journal->j_trans_id != trans_id)
      break;
    if (bcount == journal->j_bcount)
      break;
    bcount = journal->j_bcount;
  }
}
/* join == true if you must join an existing transaction.
** join == false if you can deal with waiting for others to finish
**
** this will block until the transaction is joinable.  send the number of
** blocks you expect to use in nblocks.
*/
static int do_journal_begin_r(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb, unsigned long nblocks, int join) {
    time_t now = get_seconds() ;
    int old_trans_id ;
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
    struct reiserfs_transaction_handle myth;
    int sched_count = 0;
    int retval;

    reiserfs_check_lock_depth(p_s_sb, "journal_begin") ;
    if (nblocks > journal->j_trans_max)
        BUG();

    PROC_INFO_INC( p_s_sb, journal.journal_being );
    /* set here for journal_join */
    th->t_refcount = 1;
    th->t_super = p_s_sb ;

relock:
    lock_journal(p_s_sb) ;
    if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted (journal)) {
        unlock_journal (p_s_sb);
        retval = journal->j_errno;
        goto out_fail;
    }
    journal->j_bcount++;

    if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
        unlock_journal(p_s_sb) ;
        reiserfs_wait_on_write_block(p_s_sb) ;
        PROC_INFO_INC( p_s_sb, journal.journal_relock_writers );
        goto relock ;
    }
    now = get_seconds();

    /* if there is no room in the journal OR
    ** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning
    ** we don't sleep if there aren't other writers
    */
    if ( (!join && journal->j_must_wait > 0) ||
         ( !join && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch) ||
         (!join && atomic_read(&journal->j_wcount) > 0 && journal->j_trans_start_time > 0 &&
          (now - journal->j_trans_start_time) > journal->j_max_trans_age) ||
         (!join && atomic_read(&journal->j_jlock)) ||
         (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {

        old_trans_id = journal->j_trans_id;
        unlock_journal(p_s_sb) ; /* allow others to finish this transaction */

        if (!join && (journal->j_len_alloc + nblocks + 2) >=
            journal->j_max_batch &&
            ((journal->j_len + nblocks + 2) * 100) < (journal->j_len_alloc * 75))
        {
            if (atomic_read(&journal->j_wcount) > 10) {
                sched_count++;
                queue_log_writer(p_s_sb);
                goto relock;
            }
        }
        /* don't mess with joining the transaction if all we have to do is
         * wait for someone else to do a commit
         */
        if (atomic_read(&journal->j_jlock)) {
            while (journal->j_trans_id == old_trans_id &&
                   atomic_read(&journal->j_jlock)) {
                queue_log_writer(p_s_sb);
            }
            goto relock;
        }
        retval = journal_join(&myth, p_s_sb, 1) ;
        if (retval)
            goto out_fail;

        /* someone might have ended the transaction while we joined */
        if (old_trans_id != journal->j_trans_id) {
            retval = do_journal_end(&myth, p_s_sb, 1, 0) ;
        } else {
            retval = do_journal_end(&myth, p_s_sb, 1, COMMIT_NOW) ;
        }

        if (retval)
            goto out_fail;

        PROC_INFO_INC( p_s_sb, journal.journal_relock_wcount );
        goto relock ;
    }
    /* we are the first writer, set trans_id */
    if (journal->j_trans_start_time == 0) {
        journal->j_trans_start_time = get_seconds();
    }
    atomic_inc(&(journal->j_wcount)) ;
    journal->j_len_alloc += nblocks ;
    th->t_blocks_logged = 0 ;
    th->t_blocks_allocated = nblocks ;
    th->t_trans_id = journal->j_trans_id ;
    unlock_journal(p_s_sb) ;
    INIT_LIST_HEAD (&th->t_list);
    get_fs_excl();
    return 0 ;

out_fail:
    memset (th, 0, sizeof (*th));
    /* Re-set th->t_super, so we can properly keep track of how many
     * persistent transactions there are. We need to do this so if this
     * call is part of a failed restart_transaction, we can free it later */
    th->t_super = p_s_sb;
    return retval;
}
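
/*
 * Usage sketch (illustrative; the caller shown here is hypothetical, not a
 * copy of any real call site): a metadata update wraps its work in a
 * journal_begin/journal_end pair, reserving the blocks it expects to log.
 *
 *    struct reiserfs_transaction_handle th;
 *    int err;
 *
 *    err = journal_begin(&th, sb, JOURNAL_PER_BALANCE_CNT);
 *    if (err)
 *        return err;
 *    // ... reiserfs_prepare_for_journal() + journal_mark_dirty() on each
 *    // buffer being changed ...
 *    err = journal_end(&th, sb, JOURNAL_PER_BALANCE_CNT);
 */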
struct reiserfs_transaction_handle *
reiserfs_persistent_transaction(struct super_block *s, int nblocks) {
    int ret ;
    struct reiserfs_transaction_handle *th ;

    /* if we're nesting into an existing transaction, it will be
    ** persistent on its own
    */
    if (reiserfs_transaction_running(s)) {
        th = current->journal_info ;
        th->t_refcount++ ;
        if (th->t_refcount < 2) {
            BUG() ;
        }
        return th ;
    }
    th = reiserfs_kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS, s) ;
    if (!th)
        return NULL;
    ret = journal_begin(th, s, nblocks) ;
    if (ret) {
        reiserfs_kfree(th, sizeof(struct reiserfs_transaction_handle), s) ;
        return NULL;
    }

    SB_JOURNAL(s)->j_persistent_trans++;
    return th ;
}

int
reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th) {
    struct super_block *s = th->t_super;
    int ret = 0;

    if (th->t_trans_id)
        ret = journal_end(th, th->t_super, th->t_blocks_allocated);
    else
        ret = -EIO;

    if (th->t_refcount == 0) {
        SB_JOURNAL(s)->j_persistent_trans--;
        reiserfs_kfree(th, sizeof(struct reiserfs_transaction_handle), s) ;
    }
    return ret;
}
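
/*
 * Usage sketch (illustrative): a persistent handle lives on the heap, so it
 * can outlive the function that started it.  The error handling below is an
 * assumption about a reasonable caller, not a copy of a specific one.
 *
 *    struct reiserfs_transaction_handle *th;
 *    int err;
 *
 *    th = reiserfs_persistent_transaction(sb, 1);
 *    if (!th)
 *        return -ENOMEM;
 *    // ... log some buffers ...
 *    err = reiserfs_end_persistent_transaction(th);
 */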
static int journal_join(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
    struct reiserfs_transaction_handle *cur_th = current->journal_info;

    /* this keeps do_journal_end from NULLing out the current->journal_info
    ** pointer
    */
    th->t_handle_save = cur_th ;
    if (cur_th && cur_th->t_refcount > 1) {
        BUG() ;
    }
    return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_JOIN) ;
}

int journal_join_abort(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
    struct reiserfs_transaction_handle *cur_th = current->journal_info;

    /* this keeps do_journal_end from NULLing out the current->journal_info
    ** pointer
    */
    th->t_handle_save = cur_th ;
    if (cur_th && cur_th->t_refcount > 1) {
        BUG() ;
    }
    return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_ABORT) ;
}
int journal_begin(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb, unsigned long nblocks) {
    struct reiserfs_transaction_handle *cur_th = current->journal_info ;
    int ret ;

    th->t_handle_save = NULL ;
    if (cur_th) {
        /* we are nesting into the current transaction */
        if (cur_th->t_super == p_s_sb) {
            BUG_ON (!cur_th->t_refcount);
            cur_th->t_refcount++ ;
            memcpy(th, cur_th, sizeof(*th));
            if (th->t_refcount <= 1)
                reiserfs_warning (p_s_sb, "BAD: refcount <= 1, but journal_info != 0");
            return 0;
        } else {
            /* we've ended up with a handle from a different filesystem.
            ** save it and restore on journal_end.  This should never
            ** really happen...
            */
            reiserfs_warning(p_s_sb, "clm-2100: nesting into a different FS") ;
            th->t_handle_save = current->journal_info ;
            current->journal_info = th;
        }
    } else {
        current->journal_info = th;
    }
    ret = do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_REG) ;
    if (current->journal_info != th)
        BUG() ;

    /* I guess this boils down to being the reciprocal of clm-2100 above.
     * If do_journal_begin_r fails, we need to put it back, since journal_end
     * won't be called to do it. */
    if (ret)
        current->journal_info = th->t_handle_save;
    else
        BUG_ON (!th->t_refcount);

    return ret ;
}
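
/*
 * Nesting sketch (illustrative): a second journal_begin on the same
 * filesystem does not start a new transaction; it bumps t_refcount on the
 * running handle and copies it, and only the outermost journal_end really
 * ends the transaction.
 *
 *    journal_begin(&outer, sb, 10);    // t_refcount == 1
 *    journal_begin(&inner, sb, 5);     // nests: t_refcount == 2
 *    journal_end(&inner, sb, 5);       // t_refcount back to 1
 *    journal_end(&outer, sb, 10);      // do_journal_end runs here
 */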
/*
** puts bh into the current transaction.  If it was already there, it removes
** the old pointers from the hash and puts new ones in (to make sure replay
** happens in the right order).
**
** if it was dirty, it is cleaned and filed onto the clean list.  I can't let
** it be dirty again until the transaction is committed.
**
** if j_len is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
*/
int journal_mark_dirty(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, struct buffer_head *bh) {
    struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
    struct reiserfs_journal_cnode *cn = NULL;
    int count_already_incd = 0 ;
    int prepared = 0 ;
    BUG_ON (!th->t_trans_id);

    PROC_INFO_INC( p_s_sb, journal.mark_dirty );
    if (th->t_trans_id != journal->j_trans_id) {
        reiserfs_panic(th->t_super, "journal-1577: handle trans id %ld != current trans id %ld\n",
                       th->t_trans_id, journal->j_trans_id);
    }

    p_s_sb->s_dirt = 1;

    prepared = test_clear_buffer_journal_prepared (bh);
    clear_buffer_journal_restore_dirty (bh);
    /* already in this transaction, we are done */
    if (buffer_journaled(bh)) {
        PROC_INFO_INC( p_s_sb, journal.mark_dirty_already );
        return 0 ;
    }

    /* this must be turned into a panic instead of a warning.  We can't allow
    ** a dirty or journal_dirty or locked buffer to be logged, as some changes
    ** could get to disk too early.  NOT GOOD.
    */
    if (!prepared || buffer_dirty(bh)) {
        reiserfs_warning (p_s_sb, "journal-1777: buffer %llu bad state "
                          "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
                          (unsigned long long)bh->b_blocknr, prepared ? ' ' : '!',
                          buffer_locked(bh) ? ' ' : '!',
                          buffer_dirty(bh) ? ' ' : '!',
                          buffer_journal_dirty(bh) ? ' ' : '!') ;
    }

    if (atomic_read(&(journal->j_wcount)) <= 0) {
        reiserfs_warning (p_s_sb, "journal-1409: journal_mark_dirty returning because j_wcount was %d", atomic_read(&(journal->j_wcount))) ;
        return 1 ;
    }
    /* this error means I've screwed up, and we've overflowed the transaction.
    ** Nothing can be done here, except make the FS readonly or panic.
    */
    if (journal->j_len >= journal->j_trans_max) {
        reiserfs_panic(th->t_super, "journal-1413: journal_mark_dirty: j_len (%lu) is too big\n", journal->j_len) ;
    }

    if (buffer_journal_dirty(bh)) {
        count_already_incd = 1 ;
        PROC_INFO_INC( p_s_sb, journal.mark_dirty_notjournal );
        clear_buffer_journal_dirty (bh);
    }

    if (journal->j_len > journal->j_len_alloc) {
        journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT ;
    }

    set_buffer_journaled (bh);

    /* now put this guy on the end */
    if (!cn) {
        cn = get_cnode(p_s_sb) ;
        if (!cn) {
            reiserfs_panic(p_s_sb, "get_cnode failed!\n");
        }

        if (th->t_blocks_logged == th->t_blocks_allocated) {
            th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT ;
            journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT ;
        }
        th->t_blocks_logged++ ;
        journal->j_len++ ;

        cn->bh = bh ;
        cn->blocknr = bh->b_blocknr ;
        cn->sb = p_s_sb;
        cn->jlist = NULL ;
        insert_journal_hash(journal->j_hash_table, cn) ;
        if (!count_already_incd) {
            get_bh(bh) ;
        }
    }
    cn->next = NULL ;
    cn->prev = journal->j_last ;
    cn->bh = bh ;
    if (journal->j_last) {
        journal->j_last->next = cn ;
        journal->j_last = cn ;
    } else {
        journal->j_first = cn ;
        journal->j_last = cn ;
    }
    return 0 ;
}
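
/*
 * Protocol sketch (illustrative): journal_mark_dirty expects the buffer to
 * have gone through reiserfs_prepare_for_journal first; otherwise the
 * journal-1777 warning above fires.  A hypothetical single-buffer update:
 *
 *    reiserfs_prepare_for_journal(sb, bh, 1);    // clean + lock the buffer
 *    // ... modify bh->b_data ...
 *    journal_mark_dirty(&th, sb, bh);            // file it in the trans
 */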
int journal_end(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
    if (!current->journal_info && th->t_refcount > 1)
        reiserfs_warning (p_s_sb, "REISER-NESTING: th NULL, refcount %d",
                          th->t_refcount);

    if (!th->t_trans_id) {
        WARN_ON (1);
        return -EIO;
    }

    th->t_refcount--;
    if (th->t_refcount > 0) {
        struct reiserfs_transaction_handle *cur_th = current->journal_info ;

        /* we aren't allowed to close a nested transaction on a different
        ** filesystem from the one in the task struct
        */
        if (cur_th->t_super != th->t_super)
            BUG() ;

        if (th != cur_th) {
            memcpy(current->journal_info, th, sizeof(*th));
            th->t_trans_id = 0;
        }
        return 0;
    } else {
        return do_journal_end(th, p_s_sb, nblocks, 0) ;
    }
}
/* removes from the current transaction, releasing and decrementing any counters.
** also files the removed buffer directly onto the clean list
**
** called by journal_mark_freed when a block has been deleted
**
** returns 1 if it cleaned and released the buffer. 0 otherwise
*/
static int remove_from_transaction(struct super_block *p_s_sb, b_blocknr_t blocknr, int already_cleaned) {
    struct buffer_head *bh ;
    struct reiserfs_journal_cnode *cn ;
    struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
    int ret = 0;

    cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr) ;
    if (!cn || !cn->bh) {
        return ret ;
    }
    bh = cn->bh ;
    if (cn->prev) {
        cn->prev->next = cn->next ;
    }
    if (cn->next) {
        cn->next->prev = cn->prev ;
    }
    if (cn == journal->j_first) {
        journal->j_first = cn->next ;
    }
    if (cn == journal->j_last) {
        journal->j_last = cn->prev ;
    }
    if (bh)
        remove_journal_hash(p_s_sb, journal->j_hash_table, NULL, bh->b_blocknr, 0) ;
    clear_buffer_journaled (bh); /* don't log this one */

    if (!already_cleaned) {
        clear_buffer_journal_dirty (bh);
        clear_buffer_dirty(bh);
        clear_buffer_journal_test (bh);
        put_bh(bh) ;
        if (atomic_read(&(bh->b_count)) < 0) {
            reiserfs_warning (p_s_sb, "journal-1752: remove from trans, b_count < 0");
        }
        ret = 1 ;
    }
    journal->j_len-- ;
    journal->j_len_alloc-- ;
    free_cnode(p_s_sb, cn) ;
    return ret ;
}
/*
** for any cnode in a journal list, it can only be dirtied if all the
** transactions that include it are committed to disk.
** this checks through each transaction, and returns 1 if you are allowed to dirty,
** and 0 if you aren't
**
** it is called by dirty_journal_list, which is called after flush_commit_list has gotten all the log
** blocks for a given transaction on disk
**
*/
static int can_dirty(struct reiserfs_journal_cnode *cn) {
    struct super_block *sb = cn->sb;
    b_blocknr_t blocknr = cn->blocknr ;
    struct reiserfs_journal_cnode *cur = cn->hprev ;
    int can_dirty = 1 ;

    /* first test hprev.  These are all newer than cn, so any node here
    ** with the same block number and dev means this node can't be sent
    ** to disk right now.
    */
    while(cur && can_dirty) {
        if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
            cur->blocknr == blocknr) {
            can_dirty = 0 ;
        }
        cur = cur->hprev ;
    }
    /* then test hnext.  These are all older than cn.  As long as they
    ** are committed to the log, it is safe to write cn to disk
    */
    cur = cn->hnext ;
    while(cur && can_dirty) {
        if (cur->jlist && cur->jlist->j_len > 0 &&
            atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh &&
            cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
            can_dirty = 0 ;
        }
        cur = cur->hnext ;
    }
    return can_dirty ;
}
/* syncs the commit blocks, but does not force the real buffers to disk
** will wait until the current transaction is done/committed before returning
*/
int journal_end_sync(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
    struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);

    BUG_ON (!th->t_trans_id);
    /* you can sync while nested, very, very bad */
    if (th->t_refcount > 1) {
        BUG() ;
    }
    if (journal->j_len == 0) {
        reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
        journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
    }
    return do_journal_end(th, p_s_sb, nblocks, COMMIT_NOW | WAIT) ;
}
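
/*
 * Usage sketch (illustrative): journal_end_sync is for callers that need the
 * commit block on disk before returning, e.g. a sync(2)-style path.  It must
 * only be used on an un-nested handle (t_refcount == 1), per the BUG above.
 *
 *    journal_begin(&th, sb, 1);
 *    // ... log buffers ...
 *    err = journal_end_sync(&th, sb, 1);    // waits for the commit
 */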
/*
** writeback the pending async commits to disk
*/
static void flush_async_commits(void *p) {
    struct super_block *p_s_sb = p;
    struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
    struct reiserfs_journal_list *jl;
    struct list_head *entry;

    lock_kernel();
    if (!list_empty(&journal->j_journal_list)) {
        /* last entry is the youngest, commit it and you get everything */
        entry = journal->j_journal_list.prev;
        jl = JOURNAL_LIST_ENTRY(entry);
        flush_commit_list(p_s_sb, jl, 1);
    }
    unlock_kernel();

    /*
     * this is a little racy, but there's no harm in missing
     * the filemap_fdatawrite
     */
    if (!atomic_read(&journal->j_async_throttle) && !reiserfs_is_journal_aborted (journal)) {
        atomic_inc(&journal->j_async_throttle);
        filemap_fdatawrite(p_s_sb->s_bdev->bd_inode->i_mapping);
        atomic_dec(&journal->j_async_throttle);
    }
}
/*
** flushes any old transactions to disk
** ends the current transaction if it is too old
*/
int reiserfs_flush_old_commits(struct super_block *p_s_sb) {
    time_t now ;
    struct reiserfs_transaction_handle th ;
    struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);

    now = get_seconds();
    /* safety check so we don't flush while we are replaying the log during
     * mount
     */
    if (list_empty(&journal->j_journal_list)) {
        return 0 ;
    }

    /* check the current transaction.  If there are no writers, and it is
     * too old, finish it, and force the commit blocks to disk
     */
    if (atomic_read(&journal->j_wcount) <= 0 &&
        journal->j_trans_start_time > 0 &&
        journal->j_len > 0 &&
        (now - journal->j_trans_start_time) > journal->j_max_trans_age)
    {
        if (!journal_join(&th, p_s_sb, 1)) {
            reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
            journal_mark_dirty(&th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;

            /* we're only being called from kreiserfsd, it makes no sense to do
            ** an async commit so that kreiserfsd can do it later
            */
            do_journal_end(&th, p_s_sb, 1, COMMIT_NOW | WAIT) ;
        }
    }
    return p_s_sb->s_dirt;
}
/*
** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
**
** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
** the writers are done.  By the time it wakes up, the transaction it was called in has already ended, so it just
** flushes the commit list and returns 0.
**
** Won't batch when flush or commit_now is set.  Also won't batch when others are waiting on j_join_wait.
**
** Note, we can't allow the journal_end to proceed while there are still writers in the log.
*/
static int check_journal_end(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb,
                             unsigned long nblocks, int flags) {

    time_t now ;
    int flush = flags & FLUSH_ALL ;
    int commit_now = flags & COMMIT_NOW ;
    int wait_on_commit = flags & WAIT ;
    struct reiserfs_journal_list *jl;
    struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);

    BUG_ON (!th->t_trans_id);

    if (th->t_trans_id != journal->j_trans_id) {
        reiserfs_panic(th->t_super, "journal-1577: handle trans id %ld != current trans id %ld\n",
                       th->t_trans_id, journal->j_trans_id);
    }

    journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged) ;
    if (atomic_read(&(journal->j_wcount)) > 0) { /* <= 0 is allowed.  unmounting might not call begin */
        atomic_dec(&(journal->j_wcount)) ;
    }

    /* BUG, deal with the case where j_len is 0, but people previously freed blocks that
    ** still need to be released.  It will be dealt with by the next transaction that
    ** actually writes something, but should be taken care of in this trans
    */
    if (journal->j_len == 0) {
        BUG();
    }
    /* if wcount > 0, and we are called with flush or commit_now,
    ** we wait on j_join_wait.  We will wake up when the last writer has
    ** finished the transaction, and started it on its way to the disk.
    ** Then, we flush the commit or journal list, and just return 0
    ** because the rest of journal end was already done for this transaction.
    */
    if (atomic_read(&(journal->j_wcount)) > 0) {
        if (flush || commit_now) {
            unsigned trans_id ;

            jl = journal->j_current_jl;
            trans_id = jl->j_trans_id;
            if (wait_on_commit)
                jl->j_state |= LIST_COMMIT_PENDING;
            atomic_set(&(journal->j_jlock), 1) ;
            if (flush) {
                journal->j_next_full_flush = 1 ;
            }
            unlock_journal(p_s_sb) ;

            /* sleep while the current transaction is still j_jlocked */
            while(journal->j_trans_id == trans_id) {
                if (atomic_read(&journal->j_jlock)) {
                    queue_log_writer(p_s_sb);
                } else {
                    lock_journal(p_s_sb);
                    if (journal->j_trans_id == trans_id) {
                        atomic_set(&(journal->j_jlock), 1) ;
                    }
                    unlock_journal(p_s_sb);
                }
            }
            if (journal->j_trans_id == trans_id) {
                BUG();
            }
            if (commit_now && journal_list_still_alive(p_s_sb, trans_id) &&
                wait_on_commit)
            {
                flush_commit_list(p_s_sb, jl, 1) ;
            }
            return 0 ;
        }
        unlock_journal(p_s_sb) ;
        return 0 ;
    }

    /* deal with old transactions where we are the last writers */
    now = get_seconds();
    if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) {
        commit_now = 1 ;
        journal->j_next_async_flush = 1 ;
    }
    /* don't batch when someone is waiting on j_join_wait */
    /* don't batch when syncing the commit or flushing the whole trans */
    if (!(journal->j_must_wait > 0) && !(atomic_read(&(journal->j_jlock))) && !flush && !commit_now &&
        (journal->j_len < journal->j_max_batch) &&
        journal->j_len_alloc < journal->j_max_batch && journal->j_cnode_free > (journal->j_trans_max * 3)) {
        journal->j_bcount++ ;
        unlock_journal(p_s_sb) ;
        return 0 ;
    }

    if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
        reiserfs_panic(p_s_sb, "journal-003: journal_end: j_start (%ld) is too high\n", journal->j_start) ;
    }
    return 1 ;
}
/*
** Does all the work that makes deleting blocks safe.
** when deleting a block marked BH_JNew, just remove it from the current transaction, clean its buffer_head and move on.
**
** otherwise:
** set a bit for the block in the journal bitmap.  That will prevent it from being allocated for unformatted nodes
** before this transaction has finished.
**
** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers.  That will prevent any old transactions with
** this block from trying to flush to the real location.  Since we aren't removing the cnode from the journal_list_hash,
** the block can't be reallocated yet.
**
** Then remove it from the current transaction, decrementing any counters and filing it on the clean list.
*/
int journal_mark_freed(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, b_blocknr_t blocknr) {
    struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
    struct reiserfs_journal_cnode *cn = NULL ;
    struct buffer_head *bh = NULL ;
    struct reiserfs_list_bitmap *jb = NULL ;
    int cleaned = 0 ;
    BUG_ON (!th->t_trans_id);

    cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr);
    if (cn && cn->bh) {
        bh = cn->bh ;
        get_bh(bh) ;
    }
    /* if it is journal new, we just remove it from this transaction */
    if (bh && buffer_journal_new(bh)) {
        clear_buffer_journal_new (bh);
        clear_prepared_bits(bh) ;
        reiserfs_clean_and_file_buffer(bh) ;
        cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned) ;
    } else {
        /* set the bit for this block in the journal bitmap for this transaction */
        jb = journal->j_current_jl->j_list_bitmap;
        if (!jb) {
            reiserfs_panic(p_s_sb, "journal-1702: journal_mark_freed, journal_list_bitmap is NULL\n") ;
        }
        set_bit_in_list_bitmap(p_s_sb, blocknr, jb) ;

        /* Note, the entire while loop is not allowed to schedule. */

        if (bh) {
            clear_prepared_bits(bh) ;
            reiserfs_clean_and_file_buffer(bh) ;
        }
        cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned) ;

        /* find all older transactions with this block, make sure they don't try to write it out */
        cn = get_journal_hash_dev(p_s_sb, journal->j_list_hash_table, blocknr) ;
        while (cn) {
            if (p_s_sb == cn->sb && blocknr == cn->blocknr) {
                set_bit(BLOCK_FREED, &cn->state) ;
                if (cn->bh) {
                    if (!cleaned) {
                        /* remove_from_transaction will brelse the buffer if it was
                        ** in the current trans
                        */
                        clear_buffer_journal_dirty (cn->bh);
                        clear_buffer_dirty(cn->bh);
                        clear_buffer_journal_test(cn->bh);
                        cleaned = 1 ;
                        put_bh(cn->bh) ;
                        if (atomic_read(&(cn->bh->b_count)) < 0) {
                            reiserfs_warning (p_s_sb, "journal-2138: cn->bh->b_count < 0");
                        }
                    }
                    if (cn->jlist) { /* since we are clearing the bh, we MUST dec nonzerolen */
                        atomic_dec(&(cn->jlist->j_nonzerolen)) ;
                    }
                    cn->bh = NULL ;
                }
            }
            cn = cn->hnext ;
        }
    }

    if (bh) {
        put_bh(bh) ; /* get_hash grabs the buffer */
        if (atomic_read(&(bh->b_count)) < 0) {
            reiserfs_warning (p_s_sb, "journal-2165: bh->b_count < 0");
        }
    }
    return 0 ;
}
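
/*
 * Usage sketch (illustrative): block-freeing code calls this from inside a
 * transaction so the journal can defer reuse of the block until every
 * transaction that logged it has committed.
 *
 *    journal_mark_freed(&th, sb, blocknr);
 *    // ... then clear the block's bit in the on-disk bitmap as usual ...
 */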
void reiserfs_update_inode_transaction(struct inode *inode) {
    struct reiserfs_journal *journal = SB_JOURNAL (inode->i_sb);
    REISERFS_I(inode)->i_jl = journal->j_current_jl;
    REISERFS_I(inode)->i_trans_id = journal->j_trans_id ;
}
/*
 * returns -1 on error, 0 if no commits/barriers were done and 1
 * if a transaction was actually committed and the barrier was done
 */
static int __commit_trans_jl(struct inode *inode, unsigned long id,
                             struct reiserfs_journal_list *jl)
{
    struct reiserfs_transaction_handle th ;
    struct super_block *sb = inode->i_sb ;
    struct reiserfs_journal *journal = SB_JOURNAL (sb);
    int ret = 0;

    /* is it from the current transaction, or from an unknown transaction? */
    if (id == journal->j_trans_id) {
        jl = journal->j_current_jl;
        /* try to let other writers come in and grow this transaction */
        let_transaction_grow(sb, id);
        if (journal->j_trans_id != id) {
            goto flush_commit_only;
        }

        ret = journal_begin(&th, sb, 1) ;
        if (ret)
            return ret;

        /* someone might have ended this transaction while we joined */
        if (journal->j_trans_id != id) {
            reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1) ;
            journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb)) ;
            ret = journal_end(&th, sb, 1) ;
            goto flush_commit_only;
        }

        ret = journal_end_sync(&th, sb, 1) ;
        if (!ret)
            ret = 1;

    } else {
        /* this gets tricky, we have to make sure the journal list in
         * the inode still exists.  We know the list is still around
         * if we've got a larger transaction id than the oldest list
         */
flush_commit_only:
        if (journal_list_still_alive(inode->i_sb, id)) {
            /*
             * we only set ret to 1 when we know for sure
             * the barrier hasn't been started yet on the commit
             * block.
             */
            if (atomic_read(&jl->j_commit_left) > 1)
                ret = 1;
            flush_commit_list(sb, jl, 1) ;
            if (journal->j_errno)
                ret = journal->j_errno;
        }
    }
    /* otherwise the list is gone, and long since committed */
    return ret;
}
int reiserfs_commit_for_inode(struct inode *inode) {
    unsigned long id = REISERFS_I(inode)->i_trans_id;
    struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl;

    /* for the whole inode, assume unset id means it was
     * changed in the current transaction.  More conservative
     */
    if (!id || !jl) {
        reiserfs_update_inode_transaction(inode) ;
        id = REISERFS_I(inode)->i_trans_id;
        /* jl will be updated in __commit_trans_jl */
    }

    return __commit_trans_jl(inode, id, jl);
}
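
/*
 * Usage sketch (illustrative): an fsync-style caller commits whatever
 * transaction last touched the inode.  Return values follow
 * __commit_trans_jl above: < 0 error, 0 nothing to do, 1 a commit was done.
 *
 *    ret = reiserfs_commit_for_inode(inode);
 *    if (ret < 0)
 *        return ret;
 */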
void reiserfs_restore_prepared_buffer(struct super_block *p_s_sb,
                                      struct buffer_head *bh) {
    struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
    PROC_INFO_INC( p_s_sb, journal.restore_prepared );
    if (!bh) {
        return ;
    }
    if (test_clear_buffer_journal_restore_dirty (bh) &&
        buffer_journal_dirty(bh)) {
        struct reiserfs_journal_cnode *cn;
        cn = get_journal_hash_dev(p_s_sb,
                                  journal->j_list_hash_table,
                                  bh->b_blocknr);
        if (cn && can_dirty(cn)) {
            set_buffer_journal_test (bh);
            mark_buffer_dirty(bh);
        }
    }
    clear_buffer_journal_prepared (bh);
}
extern struct tree_balance *cur_tb ;
/*
** before we can change a metadata block, we have to make sure it won't
** be written to disk while we are altering it.  So, we must:
** clean it
** wait on it.
**
*/
int reiserfs_prepare_for_journal(struct super_block *p_s_sb,
                                 struct buffer_head *bh, int wait) {
    PROC_INFO_INC( p_s_sb, journal.prepare );

    if (test_set_buffer_locked(bh)) {
        if (!wait)
            return 0;
        lock_buffer(bh);
    }
    set_buffer_journal_prepared (bh);
    if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) {
        clear_buffer_journal_test (bh);
        set_buffer_journal_restore_dirty (bh);
    }
    unlock_buffer(bh);
    return 1;
}
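
/*
 * Protocol sketch (illustrative): a caller that prepares a buffer and then
 * decides not to log it must undo the preparation, or the buffer's saved
 * dirty state stays hidden.  `we_still_want_it` is a hypothetical condition:
 *
 *    if (reiserfs_prepare_for_journal(sb, bh, 0)) {
 *        if (we_still_want_it)
 *            journal_mark_dirty(&th, sb, bh);
 *        else
 *            reiserfs_restore_prepared_buffer(sb, bh);
 *    }
 */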
static void flush_old_journal_lists(struct super_block *s) {
    struct reiserfs_journal *journal = SB_JOURNAL (s);
    struct reiserfs_journal_list *jl;
    struct list_head *entry;
    time_t now = get_seconds();

    while(!list_empty(&journal->j_journal_list)) {
        entry = journal->j_journal_list.next;
        jl = JOURNAL_LIST_ENTRY(entry);
        /* this check should always be run, to send old lists to disk */
        if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4))) {
            flush_used_journal_lists(s, jl);
        } else {
            break;
        }
    }
}
/*
** long and ugly.  If flush, will not return until all commit
** blocks and all real buffers in the trans are on disk.
** If no_async, won't return until all commit blocks are on disk.
**
** keep reading, there are comments as you go along
**
** If the journal is aborted, we just clean up.  Things like flushing
** journal lists, etc just won't happen.
*/
static int do_journal_end(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb, unsigned long nblocks,
                          int flags) {
    struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
    struct reiserfs_journal_cnode *cn, *next, *jl_cn;
    struct reiserfs_journal_cnode *last_cn = NULL;
    struct reiserfs_journal_desc *desc ;
    struct reiserfs_journal_commit *commit ;
    struct buffer_head *c_bh ; /* commit bh */
    struct buffer_head *d_bh ; /* desc bh */
    int cur_write_start = 0 ; /* start index of current log write */
    int old_start ;
    int i ;
    int flush = flags & FLUSH_ALL ;
    int wait_on_commit = flags & WAIT ;
    struct reiserfs_journal_list *jl, *temp_jl;
    struct list_head *entry, *safe;
    unsigned long jindex;
    unsigned long commit_trans_id;
    int trans_half;

    BUG_ON (th->t_refcount > 1);
    BUG_ON (!th->t_trans_id);

    put_fs_excl();
    current->journal_info = th->t_handle_save;
    reiserfs_check_lock_depth(p_s_sb, "journal end");
    if (journal->j_len == 0) {
        reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
        journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
    }

    lock_journal(p_s_sb) ;
    if (journal->j_next_full_flush) {
        flags |= FLUSH_ALL ;
        flush = 1 ;
    }
    if (journal->j_next_async_flush) {
        flags |= COMMIT_NOW | WAIT;
        wait_on_commit = 1;
    }

    /* check_journal_end locks the journal, and unlocks if it does not return 1
    ** it tells us if we should continue with the journal_end, or just return
    */
    if (!check_journal_end(th, p_s_sb, nblocks, flags)) {
        p_s_sb->s_dirt = 1;
        wake_queued_writers(p_s_sb);
        reiserfs_async_progress_wait(p_s_sb);
        goto out ;
    }

    /* check_journal_end might set these, check again */
    if (journal->j_next_full_flush) {
        flush = 1 ;
    }

    /*
    ** j must wait means we have to flush the log blocks, and the real blocks for
    ** this transaction
    */
    if (journal->j_must_wait > 0) {
        flush = 1 ;
    }

#ifdef REISERFS_PREALLOCATE
    /* quota ops might need to nest, setup the journal_info pointer for them */
    current->journal_info = th ;
    reiserfs_discard_all_prealloc(th); /* it should not involve new blocks into
                                        * the transaction */
    current->journal_info = th->t_handle_save ;
#endif

    /* setup description block */
    d_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + journal->j_start) ;
    set_buffer_uptodate(d_bh);
    desc = (struct reiserfs_journal_desc *)(d_bh)->b_data ;
    memset(d_bh->b_data, 0, d_bh->b_size) ;
    memcpy(get_journal_desc_magic (d_bh), JOURNAL_DESC_MAGIC, 8) ;
    set_desc_trans_id(desc, journal->j_trans_id) ;

    /* setup commit block.  Don't write (keep it clean too) this one until after everyone else is written */
    c_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                          ((journal->j_start + journal->j_len + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
    commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
    memset(c_bh->b_data, 0, c_bh->b_size) ;
    set_commit_trans_id(commit, journal->j_trans_id) ;
    set_buffer_uptodate(c_bh) ;

    /* init this journal list */
    jl = journal->j_current_jl;

    /* we lock the commit before doing anything because
     * we want to make sure nobody tries to run flush_commit_list until
     * the new transaction is fully setup, and we've already flushed the
     * ordered bh list
     */
    down(&jl->j_commit_lock);

    /* save the transaction id in case we need to commit it later */
    commit_trans_id = jl->j_trans_id;

    atomic_set(&jl->j_older_commits_done, 0) ;
    jl->j_trans_id = journal->j_trans_id ;
    jl->j_timestamp = journal->j_trans_start_time ;
    jl->j_commit_bh = c_bh ;
    jl->j_start = journal->j_start ;
    jl->j_len = journal->j_len ;
    atomic_set(&jl->j_nonzerolen, journal->j_len) ;
    atomic_set(&jl->j_commit_left, journal->j_len + 2);
    jl->j_realblock = NULL ;

    /* The ENTIRE FOR LOOP MUST not cause schedule to occur.
    ** for each real block, add it to the journal list hash,
    ** copy into real block index array in the commit or desc block
    */
    trans_half = journal_trans_half(p_s_sb->s_blocksize);
    for (i = 0, cn = journal->j_first ; cn ; cn = cn->next, i++) {
        if (buffer_journaled (cn->bh)) {
            jl_cn = get_cnode(p_s_sb) ;
            if (!jl_cn) {
                reiserfs_panic(p_s_sb, "journal-1676, get_cnode returned NULL\n") ;
            }
            if (i == 0) {
                jl->j_realblock = jl_cn ;
            }
            jl_cn->prev = last_cn ;
            jl_cn->next = NULL ;
            if (last_cn) {
                last_cn->next = jl_cn ;
            }
            last_cn = jl_cn ;
            /* make sure the block we are trying to log is not a block
               of journal or reserved area */
            if (is_block_in_log_or_reserved_area(p_s_sb, cn->bh->b_blocknr)) {
                reiserfs_panic(p_s_sb, "journal-2332: Trying to log block %lu, which is a log block\n", cn->bh->b_blocknr) ;
            }
            jl_cn->blocknr = cn->bh->b_blocknr ;
            jl_cn->state = 0 ;
            jl_cn->sb = p_s_sb;
            jl_cn->bh = cn->bh ;
            jl_cn->jlist = jl;
            insert_journal_hash(journal->j_list_hash_table, jl_cn) ;
            if (i < trans_half) {
                desc->j_realblock[i] = cpu_to_le32(cn->bh->b_blocknr) ;
            } else {
                commit->j_realblock[i - trans_half] = cpu_to_le32(cn->bh->b_blocknr) ;
            }
        } else {
            i-- ;
        }
    }
    set_desc_trans_len(desc, journal->j_len) ;
    set_desc_mount_id(desc, journal->j_mount_id) ;
    set_desc_trans_id(desc, journal->j_trans_id) ;
    set_commit_trans_len(commit, journal->j_len);

    /* special check in case all buffers in the journal were marked for not logging */
    if (journal->j_len == 0) {
        BUG();
    }

    /* we're about to dirty all the log blocks, mark the description block
     * dirty now too.  Don't mark the commit block dirty until all the
     * others are on disk
     */
    mark_buffer_dirty(d_bh);

    /* first data block is j_start + 1, so add one to cur_write_start wherever you use it */
    cur_write_start = journal->j_start ;
    cn = journal->j_first ;
    jindex = 1 ; /* start at one so we don't get the desc again */
    while(cn) {
        clear_buffer_journal_new (cn->bh);
        /* copy all the real blocks into log area.  dirty log blocks */
        if (buffer_journaled (cn->bh)) {
            struct buffer_head *tmp_bh ;
            char *addr;
            struct page *page;
            tmp_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                                    ((cur_write_start + jindex) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
            set_buffer_uptodate(tmp_bh);
            page = cn->bh->b_page;
            addr = kmap(page);
            memcpy(tmp_bh->b_data, addr + offset_in_page(cn->bh->b_data),
                   cn->bh->b_size);
            kunmap(page);
            mark_buffer_dirty(tmp_bh);
            jindex++ ;
            set_buffer_journal_dirty (cn->bh);
            clear_buffer_journaled (cn->bh);
        } else {
            /* JDirty cleared sometime during transaction.  don't log this one */
            reiserfs_warning(p_s_sb, "journal-2048: do_journal_end: BAD, buffer in journal hash, but not JDirty!") ;
            brelse(cn->bh) ;
        }
        next = cn->next ;
        free_cnode(p_s_sb, cn) ;
        cn = next ;
        cond_resched();
    }

    /* we are done with both the c_bh and d_bh, but
    ** c_bh must be written after all other commit blocks,
    ** so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
    */

    journal->j_current_jl = alloc_journal_list(p_s_sb);

    /* now it is safe to insert this transaction on the main list */
    list_add_tail(&jl->j_list, &journal->j_journal_list);
    list_add_tail(&jl->j_working_list, &journal->j_working_list);
    journal->j_num_work_lists++;

    /* reset journal values for the next transaction */
    old_start = journal->j_start ;
    journal->j_start = (journal->j_start + journal->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb);
    atomic_set(&(journal->j_wcount), 0) ;
    journal->j_bcount = 0 ;
    journal->j_last = NULL ;
    journal->j_first = NULL ;
    journal->j_len = 0 ;
    journal->j_trans_start_time = 0 ;
    journal->j_trans_id++ ;
    journal->j_current_jl->j_trans_id = journal->j_trans_id;
    journal->j_must_wait = 0 ;
    journal->j_len_alloc = 0 ;
    journal->j_next_full_flush = 0 ;
    journal->j_next_async_flush = 0 ;
    init_journal_hash(p_s_sb) ;

    // make sure reiserfs_add_jh sees the new current_jl before we
    // write out the tails
    smp_mb();

    /* tail conversion targets have to hit the disk before we end the
     * transaction.  Otherwise a later transaction might repack the tail
     * before this transaction commits, leaving the data block unflushed and
     * clean, if we crash before the later transaction commits, the data block
     * is lost.
     */
    if (!list_empty(&jl->j_tail_bh_list)) {
        unlock_kernel();
        write_ordered_buffers(&journal->j_dirty_buffers_lock,
                              journal, jl, &jl->j_tail_bh_list);
        lock_kernel();
    }
    if (!list_empty(&jl->j_tail_bh_list))
        BUG();
    up(&jl->j_commit_lock);

    /* honor the flush wishes from the caller, simple commits can
    ** be done outside the journal lock, they are done below
    **
    ** if we don't flush the commit list right now, we put it into
    ** the work queue so the people waiting on the async progress work
    ** queue don't wait for this proc to flush journal lists and such.
    */
    if (flush) {
        flush_commit_list(p_s_sb, jl, 1) ;
        flush_journal_list(p_s_sb, jl, 1) ;
    } else if (!(jl->j_state & LIST_COMMIT_PENDING))
        queue_delayed_work(commit_wq, &journal->j_work, HZ/10);

    /* if the next transaction has any chance of wrapping, flush
    ** transactions that might get overwritten.  If any journal lists are very
    ** old flush them as well.
    */
first_jl:
    list_for_each_safe(entry, safe, &journal->j_journal_list) {
        temp_jl = JOURNAL_LIST_ENTRY(entry);
        if (journal->j_start <= temp_jl->j_start) {
            if ((journal->j_start + journal->j_trans_max + 1) >=
                temp_jl->j_start)
            {
                flush_used_journal_lists(p_s_sb, temp_jl);
                goto first_jl;
            } else if ((journal->j_start +
                        journal->j_trans_max + 1) <
                        SB_ONDISK_JOURNAL_SIZE(p_s_sb))
            {
                /* if we don't cross into the next transaction and we don't
                 * wrap, there is no way we can overlap any later transactions
                 * break now
                 */
                break;
            }
        } else if ((journal->j_start +
                    journal->j_trans_max + 1) >
                    SB_ONDISK_JOURNAL_SIZE(p_s_sb))
        {
            if (((journal->j_start + journal->j_trans_max + 1) %
                  SB_ONDISK_JOURNAL_SIZE(p_s_sb)) >= temp_jl->j_start)
            {
                flush_used_journal_lists(p_s_sb, temp_jl);
                goto first_jl;
            } else {
                /* we don't overlap anything from our start to the end of the
                 * log, and our wrapped portion doesn't overlap anything at
                 * the start of the log.  We can break
                 */
                break;
            }
        }
    }
    flush_old_journal_lists(p_s_sb);

    journal->j_current_jl->j_list_bitmap = get_list_bitmap(p_s_sb, journal->j_current_jl) ;

    if (!(journal->j_current_jl->j_list_bitmap)) {
        reiserfs_panic(p_s_sb, "journal-1996: do_journal_end, could not get a list bitmap\n") ;
    }

    atomic_set(&(journal->j_jlock), 0) ;
    unlock_journal(p_s_sb) ;
    /* wake up anybody waiting to join. */
    clear_bit(J_WRITERS_QUEUED, &journal->j_state);
    wake_up(&(journal->j_join_wait)) ;

    if (!flush && wait_on_commit &&
        journal_list_still_alive(p_s_sb, commit_trans_id)) {
        flush_commit_list(p_s_sb, jl, 1) ;
    }
out:
    reiserfs_check_lock_depth(p_s_sb, "journal end2");

    memset (th, 0, sizeof (*th));
    /* Re-set th->t_super, so we can properly keep track of how many
     * persistent transactions there are. We need to do this so if this
     * call is part of a failed restart_transaction, we can free it later */
    th->t_super = p_s_sb;

    return journal->j_errno;
}
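
/*
 * On-disk layout written by do_journal_end (sketch derived from the code
 * above; block numbers are offsets from SB_ONDISK_JOURNAL_1st_BLOCK and
 * wrap modulo SB_ONDISK_JOURNAL_SIZE(p_s_sb)):
 *
 *    j_start:                          description block (d_bh)
 *    j_start + 1 .. j_start + j_len:   copies of the logged real blocks
 *    j_start + j_len + 1:              commit block (c_bh)
 *
 * The commit block is only dirtied in flush_commit_list, after everything
 * else for the transaction is on disk.
 */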
static void
__reiserfs_journal_abort_hard (struct super_block *sb)
{
    struct reiserfs_journal *journal = SB_JOURNAL (sb);
    if (test_bit (J_ABORTED, &journal->j_state))
        return;

    printk (KERN_CRIT "REISERFS: Aborting journal for filesystem on %s\n",
            reiserfs_bdevname (sb));

    sb->s_flags |= MS_RDONLY;
    set_bit (J_ABORTED, &journal->j_state);

#ifdef CONFIG_REISERFS_CHECK
    dump_stack();
#endif
}

static void
__reiserfs_journal_abort_soft (struct super_block *sb, int errno)
{
    struct reiserfs_journal *journal = SB_JOURNAL (sb);
    if (test_bit (J_ABORTED, &journal->j_state))
        return;

    if (!journal->j_errno)
        journal->j_errno = errno;

    __reiserfs_journal_abort_hard (sb);
}

void
reiserfs_journal_abort (struct super_block *sb, int errno)
{
    return __reiserfs_journal_abort_soft (sb, errno);
}
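
/*
 * Usage sketch (illustrative; the check shown is an assumed call site, not
 * one from this file): error paths that detect corruption or I/O failure
 * abort the journal rather than risk replaying bad data.  The filesystem is
 * forced read-only and the first errno is preserved in j_errno.
 *
 *    if (buffer_write_io_error(bh))
 *        reiserfs_journal_abort(sb, -EIO);
 */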