/*
** Write ahead logging implementation copyright Chris Mason 2000
**
** The background commits make this code very interrelated, and
** overly complex.  I need to rethink things a bit....The major players:
**
** journal_begin -- call with the number of blocks you expect to log.
**                  If the current transaction is too
**                  old, it will block until the current transaction is
**                  finished, and then start a new one.
**                  Usually, your transaction will get joined in with
**                  previous ones for speed.
**
** journal_join  -- same as journal_begin, but won't block on the current
**                  transaction regardless of age.  Don't ever call
**                  this.  Ever.  There are only two places it should be
**                  called from, and they are both inside this file.
**
** journal_mark_dirty -- adds blocks into this transaction.  clears any flags
**                       that might make them get sent to disk
**                       and then marks them BH_JDirty.  Puts the buffer head
**                       into the current transaction hash.
**
** journal_end -- if the current transaction is batchable, it does nothing
**                otherwise, it could do an async/synchronous commit, or
**                a full flush of all log and real blocks in the
**                transaction.
**
** flush_old_commits -- if the current transaction is too old, it is ended and
**                      commit blocks are sent to disk.  Forces commit blocks
**                      to disk for all backgrounded commits that have been
**                      around too long.
**                   -- Note, if you call this as an immediate flush from
**                      within kupdate, it will ignore the immediate flag
*/
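
/*
 * Added commentary, not in the original file: a minimal sketch of the
 * caller-side pattern described above, assuming the journal_begin /
 * journal_mark_dirty / journal_end prototypes from reiserfs_fs.h.  The
 * block estimate of 10 and the surrounding error handling are made up.
 *
 *	struct reiserfs_transaction_handle th ;
 *	if (journal_begin(&th, sb, 10))    // may join a running transaction
 *	  return -EIO ;
 *	// ... modify the metadata buffer bh ...
 *	journal_mark_dirty(&th, sb, bh) ;  // pin bh into this transaction
 *	journal_end(&th, sb, 10) ;         // batchable; commit may be deferred
 */
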
#include <linux/config.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/time.h>
#include <asm/semaphore.h>
#include <linux/vmalloc.h>
#include <linux/reiserfs_fs.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>

/* gets a struct reiserfs_journal_list * from a list head */
#define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_list))
#define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_working_list))

/* the number of mounted filesystems.  This is used to decide when to
** start and kill the commit workqueue
*/
static int reiserfs_mounted_fs_count;

static struct workqueue_struct *commit_wq;

#define JOURNAL_TRANS_HALF 1018   /* must be correct to keep the desc and commit
                                     structs at 4k */
#define BUFNR 64 /*read ahead */

/* cnode stat bits.  Move these into reiserfs_fs.h */
#define BLOCK_FREED 2           /* this block was freed, and can't be written. */
#define BLOCK_FREED_HOLDER 3    /* this block was freed during this transaction, and can't be written */
#define BLOCK_NEEDS_FLUSH 4     /* used in flush_journal_list */
#define BLOCK_DIRTIED 5

/* journal list state bits */
#define LIST_TOUCHED 1
#define LIST_DIRTY   2
#define LIST_COMMIT_PENDING  4  /* someone will commit this list */

/* flags for do_journal_end */
#define FLUSH_ALL   1           /* flush commit and real blocks */
#define COMMIT_NOW  2           /* end and commit this transaction */
#define WAIT        4           /* wait for the log blocks to hit the disk*/

static int do_journal_end(struct reiserfs_transaction_handle *,
                          struct super_block *, unsigned long nblocks,
                          int flags) ;
static int flush_journal_list(struct super_block *s,
                              struct reiserfs_journal_list *jl, int flushall) ;
static int flush_commit_list(struct super_block *s,
                             struct reiserfs_journal_list *jl, int flushall) ;
static int can_dirty(struct reiserfs_journal_cnode *cn) ;
static int journal_join(struct reiserfs_transaction_handle *th,
                        struct super_block *p_s_sb, unsigned long nblocks);
static int release_journal_dev(struct super_block *super,
                               struct reiserfs_journal *journal);
static int dirty_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl);
static void flush_async_commits(void *p);
static void queue_log_writer(struct super_block *s);

/* values for join in do_journal_begin_r */
enum {
  JBEGIN_REG = 0,   /* regular journal begin */
  JBEGIN_JOIN = 1,  /* join the running transaction if at all possible */
  JBEGIN_ABORT = 2, /* called from cleanup code, ignores aborted flag */
};

static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
                              struct super_block *p_s_sb,
                              unsigned long nblocks, int join);

static void init_journal_hash(struct super_block *p_s_sb) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  memset(journal->j_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)) ;
}

/*
** clears BH_Dirty and sticks the buffer on the clean list.  Called because I can't allow refile_buffer to
** make schedule happen after I've freed a block.  Look at remove_from_transaction and journal_mark_freed for
** more details.
*/
static int reiserfs_clean_and_file_buffer(struct buffer_head *bh) {
  if (bh) {
    clear_buffer_dirty(bh);
    clear_buffer_journal_test(bh);
  }
  return 0 ;
}

static void disable_barrier(struct super_block *s)
{
  REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_BARRIER_FLUSH);
  printk("reiserfs: disabling flush barriers on %s\n", reiserfs_bdevname(s));
}

static struct reiserfs_bitmap_node *
allocate_bitmap_node(struct super_block *p_s_sb) {
  struct reiserfs_bitmap_node *bn ;
  static int id;

  bn = reiserfs_kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS, p_s_sb) ;
  if (!bn) {
    return NULL ;
  }
  bn->data = reiserfs_kmalloc(p_s_sb->s_blocksize, GFP_NOFS, p_s_sb) ;
  if (!bn->data) {
    reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb) ;
    return NULL ;
  }
  bn->id = id++ ;
  memset(bn->data, 0, p_s_sb->s_blocksize) ;
  INIT_LIST_HEAD(&bn->list) ;
  return bn ;
}

static struct reiserfs_bitmap_node *
get_bitmap_node(struct super_block *p_s_sb) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_bitmap_node *bn = NULL;
  struct list_head *entry = journal->j_bitmap_nodes.next ;

  journal->j_used_bitmap_nodes++ ;
repeat:
  if (entry != &journal->j_bitmap_nodes) {
    bn = list_entry(entry, struct reiserfs_bitmap_node, list) ;
    list_del(entry) ;
    memset(bn->data, 0, p_s_sb->s_blocksize) ;
    journal->j_free_bitmap_nodes-- ;
    return bn ;
  }
  bn = allocate_bitmap_node(p_s_sb) ;
  if (!bn) {
    yield();
    goto repeat ;
  }
  return bn ;
}

static inline void free_bitmap_node(struct super_block *p_s_sb,
                                    struct reiserfs_bitmap_node *bn) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  journal->j_used_bitmap_nodes-- ;
  if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
    reiserfs_kfree(bn->data, p_s_sb->s_blocksize, p_s_sb) ;
    reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb) ;
  } else {
    list_add(&bn->list, &journal->j_bitmap_nodes) ;
    journal->j_free_bitmap_nodes++ ;
  }
}

static void allocate_bitmap_nodes(struct super_block *p_s_sb) {
  int i ;
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_bitmap_node *bn = NULL ;
  for (i = 0 ; i < REISERFS_MIN_BITMAP_NODES ; i++) {
    bn = allocate_bitmap_node(p_s_sb) ;
    if (bn) {
      list_add(&bn->list, &journal->j_bitmap_nodes) ;
      journal->j_free_bitmap_nodes++ ;
    } else {
      break ; // this is ok, we'll try again when more are needed
    }
  }
}

static int set_bit_in_list_bitmap(struct super_block *p_s_sb, int block,
                                  struct reiserfs_list_bitmap *jb) {
  int bmap_nr = block / (p_s_sb->s_blocksize << 3) ;
  int bit_nr = block % (p_s_sb->s_blocksize << 3) ;

  if (!jb->bitmaps[bmap_nr]) {
    jb->bitmaps[bmap_nr] = get_bitmap_node(p_s_sb) ;
  }
  set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data) ;
  return 0 ;
}
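
/*
 * Added note, not in the original: each list-bitmap node covers
 * s_blocksize << 3 blocks (one bit per block).  With a 4 KiB block size
 * that is 4096 * 8 = 32768 blocks per node, so e.g. block 40000 lands in
 * node bmap_nr = 1 at bit_nr = 40000 - 32768 = 7232.
 */
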
static void cleanup_bitmap_list(struct super_block *p_s_sb,
                                struct reiserfs_list_bitmap *jb) {
  int i;
  if (jb->bitmaps == NULL)
    return;

  for (i = 0 ; i < SB_BMAP_NR(p_s_sb) ; i++) {
    if (jb->bitmaps[i]) {
      free_bitmap_node(p_s_sb, jb->bitmaps[i]) ;
      jb->bitmaps[i] = NULL ;
    }
  }
}

/*
** only call this on FS unmount.
*/
static int free_list_bitmaps(struct super_block *p_s_sb,
                             struct reiserfs_list_bitmap *jb_array) {
  int i ;
  struct reiserfs_list_bitmap *jb ;
  for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
    jb = jb_array + i ;
    jb->journal_list = NULL ;
    cleanup_bitmap_list(p_s_sb, jb) ;
    vfree(jb->bitmaps) ;
    jb->bitmaps = NULL ;
  }
  return 0;
}

static int free_bitmap_nodes(struct super_block *p_s_sb) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct list_head *next = journal->j_bitmap_nodes.next ;
  struct reiserfs_bitmap_node *bn ;

  while(next != &journal->j_bitmap_nodes) {
    bn = list_entry(next, struct reiserfs_bitmap_node, list) ;
    list_del(next) ;
    reiserfs_kfree(bn->data, p_s_sb->s_blocksize, p_s_sb) ;
    reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb) ;
    next = journal->j_bitmap_nodes.next ;
    journal->j_free_bitmap_nodes-- ;
  }

  return 0 ;
}

/*
** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
** jb_array is the array to be filled in.
*/
int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
                                   struct reiserfs_list_bitmap *jb_array,
                                   int bmap_nr) {
  int i ;
  int failed = 0 ;
  struct reiserfs_list_bitmap *jb ;
  int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *) ;

  for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
    jb = jb_array + i ;
    jb->journal_list = NULL ;
    jb->bitmaps = vmalloc( mem ) ;
    if (!jb->bitmaps) {
      reiserfs_warning(p_s_sb, "clm-2000, unable to allocate bitmaps for journal lists") ;
      failed = 1;
      break ;
    }
    memset(jb->bitmaps, 0, mem) ;
  }
  if (failed) {
    free_list_bitmaps(p_s_sb, jb_array) ;
    return -1 ;
  }
  return 0 ;
}

/*
** find an available list bitmap.  If you can't find one, flush a commit list
** and try again
*/
static struct reiserfs_list_bitmap *
get_list_bitmap(struct super_block *p_s_sb, struct reiserfs_journal_list *jl) {
  int i,j ;
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_list_bitmap *jb = NULL ;

  for (j = 0 ; j < (JOURNAL_NUM_BITMAPS * 3) ; j++) {
    i = journal->j_list_bitmap_index ;
    journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS ;
    jb = journal->j_list_bitmap + i ;
    if (journal->j_list_bitmap[i].journal_list) {
      flush_commit_list(p_s_sb, journal->j_list_bitmap[i].journal_list, 1) ;
      if (!journal->j_list_bitmap[i].journal_list) {
        break ;
      }
    } else {
      break ;
    }
  }
  if (jb->journal_list) { /* double check to make sure it flushed correctly */
    return NULL ;
  }
  jb->journal_list = jl ;
  return jb ;
}

/*
** allocates a new chunk of X nodes, and links them all together as a list.
** Uses the cnode->next and cnode->prev pointers
** returns NULL on failure
*/
static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes) {
  struct reiserfs_journal_cnode *head ;
  int i ;
  if (num_cnodes <= 0) {
    return NULL ;
  }
  head = vmalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode)) ;
  if (!head) {
    return NULL ;
  }
  memset(head, 0, num_cnodes * sizeof(struct reiserfs_journal_cnode)) ;
  head[0].prev = NULL ;
  head[0].next = head + 1 ;
  for (i = 1 ; i < num_cnodes; i++) {
    head[i].prev = head + (i - 1) ;
    head[i].next = head + (i + 1) ; /* if last one, overwrite it after the if */
  }
  head[num_cnodes -1].next = NULL ;
  return head ;
}
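
/*
 * Added note, not in the original: inside the loop the last node's ->next
 * briefly points one element past the array; the assignment after the
 * loop (which the "overwrite it after the if" comment refers to) then
 * terminates the free list with NULL.
 */
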
/*
** pulls a cnode off the free list, or returns NULL on failure
*/
static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb) {
  struct reiserfs_journal_cnode *cn ;
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);

  reiserfs_check_lock_depth(p_s_sb, "get_cnode") ;

  if (journal->j_cnode_free <= 0) {
    return NULL ;
  }
  journal->j_cnode_used++ ;
  journal->j_cnode_free-- ;
  cn = journal->j_cnode_free_list ;
  if (!cn) {
    return cn ;
  }
  if (cn->next) {
    cn->next->prev = NULL ;
  }
  journal->j_cnode_free_list = cn->next ;
  memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ;
  return cn ;
}

/*
** returns a cnode to the free list
*/
static void free_cnode(struct super_block *p_s_sb, struct reiserfs_journal_cnode *cn) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);

  reiserfs_check_lock_depth(p_s_sb, "free_cnode") ;

  journal->j_cnode_used-- ;
  journal->j_cnode_free++ ;
  /* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */
  cn->next = journal->j_cnode_free_list ;
  if (journal->j_cnode_free_list) {
    journal->j_cnode_free_list->prev = cn ;
  }
  cn->prev = NULL ; /* not needed with the memset, but I might kill the memset, and forget to do this */
  journal->j_cnode_free_list = cn ;
}

static void clear_prepared_bits(struct buffer_head *bh) {
  clear_buffer_journal_prepared (bh);
  clear_buffer_journal_restore_dirty (bh);
}

/* utility function to force a BUG if it is called without the big
** kernel lock held.  caller is the string printed just before calling BUG()
*/
void reiserfs_check_lock_depth(struct super_block *sb, char *caller) {
#ifdef CONFIG_SMP
  if (current->lock_depth < 0) {
    reiserfs_panic (sb, "%s called without kernel lock held", caller) ;
  }
#else
  ;
#endif
}

/* return a cnode with same dev, block number and size in table, or null if not found */
static inline struct reiserfs_journal_cnode *
get_journal_hash_dev(struct super_block *sb,
                     struct reiserfs_journal_cnode **table,
                     long bl)
{
  struct reiserfs_journal_cnode *cn ;
  cn = journal_hash(table, sb, bl) ;
  while(cn) {
    if (cn->blocknr == bl && cn->sb == sb)
      return cn ;
    cn = cn->hnext ;
  }
  return (struct reiserfs_journal_cnode *)0 ;
}

/*
** this actually means 'can this block be reallocated yet?'.  If you set search_all, a block can only be allocated
** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever
** being overwritten by a replay after crashing.
**
** If you don't set search_all, a block can only be allocated if it is not in the current transaction.  Since deleting
** a block removes it from the current transaction, this case should never happen.  If you don't set search_all, make
** sure you never write the block without logging it.
**
** next_zero_bit is a suggestion about the next block to try for find_forward.
** when bl is rejected because it is set in a journal list bitmap, we search
** for the next zero bit in the bitmap that rejected bl.  Then, we return that
** through next_zero_bit for find_forward to try.
**
** Just because we return something in next_zero_bit does not mean we won't
** reject it on the next call to reiserfs_in_journal
**
*/
int reiserfs_in_journal(struct super_block *p_s_sb,
                        int bmap_nr, int bit_nr, int search_all,
                        b_blocknr_t *next_zero_bit) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_journal_cnode *cn ;
  struct reiserfs_list_bitmap *jb ;
  int i ;
  unsigned long bl;

  *next_zero_bit = 0 ; /* always start this at zero. */

  PROC_INFO_INC( p_s_sb, journal.in_journal );
  /* If we aren't doing a search_all, this is a metablock, and it will be logged before use.
  ** if we crash before the transaction that freed it commits, this transaction won't
  ** have committed either, and the block will never be written
  */
  if (search_all) {
    for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
      PROC_INFO_INC( p_s_sb, journal.in_journal_bitmap );
      jb = journal->j_list_bitmap + i ;
      if (jb->journal_list && jb->bitmaps[bmap_nr] &&
          test_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data)) {
        *next_zero_bit = find_next_zero_bit((unsigned long *)
                                            (jb->bitmaps[bmap_nr]->data),
                                            p_s_sb->s_blocksize << 3, bit_nr+1) ;
        return 1 ;
      }
    }
  }

  bl = bmap_nr * (p_s_sb->s_blocksize << 3) + bit_nr;
  /* is it in any old transactions? */
  if (search_all && (cn = get_journal_hash_dev(p_s_sb, journal->j_list_hash_table, bl))) {
    return 1;
  }

  /* is it in the current transaction?  This should never happen */
  if ((cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, bl))) {
    BUG();
    return 1;
  }

  PROC_INFO_INC( p_s_sb, journal.in_journal_reusable );
  /* safe for reuse */
  return 0 ;
}

/* insert cn into table */
static inline void insert_journal_hash(struct reiserfs_journal_cnode **table, struct reiserfs_journal_cnode *cn) {
  struct reiserfs_journal_cnode *cn_orig ;

  cn_orig = journal_hash(table, cn->sb, cn->blocknr) ;
  cn->hnext = cn_orig ;
  cn->hprev = NULL ;
  if (cn_orig) {
    cn_orig->hprev = cn ;
  }
  journal_hash(table, cn->sb, cn->blocknr) = cn ;
}
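
/*
 * Added note, not in the original: journal_hash() is assumed to expand to
 * the hash bucket itself (an lvalue), which is why it can be assigned to
 * above; cn is pushed onto the front of the bucket's hprev/hnext chain.
 */
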
/* lock the current transaction */
inline static void lock_journal(struct super_block *p_s_sb) {
  PROC_INFO_INC( p_s_sb, journal.lock_journal );
  down(&SB_JOURNAL(p_s_sb)->j_lock);
}

/* unlock the current transaction */
inline static void unlock_journal(struct super_block *p_s_sb) {
  up(&SB_JOURNAL(p_s_sb)->j_lock);
}

static inline void get_journal_list(struct reiserfs_journal_list *jl)
{
  jl->j_refcount++;
}

static inline void put_journal_list(struct super_block *s,
                                    struct reiserfs_journal_list *jl)
{
  if (jl->j_refcount < 1) {
    reiserfs_panic (s, "trans id %lu, refcount at %d", jl->j_trans_id,
                    jl->j_refcount);
  }
  if (--jl->j_refcount == 0)
    reiserfs_kfree(jl, sizeof(struct reiserfs_journal_list), s);
}

/*
** this used to be much more involved, and I'm keeping it just in case things get ugly again.
** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a
** transaction.
*/
static void cleanup_freed_for_journal_list(struct super_block *p_s_sb, struct reiserfs_journal_list *jl) {
  struct reiserfs_list_bitmap *jb = jl->j_list_bitmap ;
  if (jb) {
    cleanup_bitmap_list(p_s_sb, jb) ;
  }
  jl->j_list_bitmap->journal_list = NULL ;
  jl->j_list_bitmap = NULL ;
}

static int journal_list_still_alive(struct super_block *s,
                                    unsigned long trans_id)
{
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  struct list_head *entry = &journal->j_journal_list;
  struct reiserfs_journal_list *jl;

  if (!list_empty(entry)) {
    jl = JOURNAL_LIST_ENTRY(entry->next);
    if (jl->j_trans_id <= trans_id) {
      return 1;
    }
  }
  return 0;
}

static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate) {
  char b[BDEVNAME_SIZE];

  if (buffer_journaled(bh)) {
    reiserfs_warning(NULL, "clm-2084: pinned buffer %lu:%s sent to disk",
                     bh->b_blocknr, bdevname(bh->b_bdev, b)) ;
  }
  if (uptodate)
    set_buffer_uptodate(bh) ;
  else
    clear_buffer_uptodate(bh) ;
  unlock_buffer(bh) ;
  put_bh(bh) ;
}

static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate) {
  if (uptodate)
    set_buffer_uptodate(bh) ;
  else
    clear_buffer_uptodate(bh) ;
  unlock_buffer(bh) ;
  put_bh(bh) ;
}

static void submit_logged_buffer(struct buffer_head *bh) {
  get_bh(bh) ;
  bh->b_end_io = reiserfs_end_buffer_io_sync ;
  clear_buffer_journal_new (bh);
  clear_buffer_dirty(bh) ;
  if (!test_clear_buffer_journal_test (bh))
    BUG();
  if (!buffer_uptodate(bh))
    BUG();
  submit_bh(WRITE, bh) ;
}

static void submit_ordered_buffer(struct buffer_head *bh) {
  get_bh(bh) ;
  bh->b_end_io = reiserfs_end_ordered_io;
  clear_buffer_dirty(bh) ;
  if (!buffer_uptodate(bh))
    BUG();
  submit_bh(WRITE, bh) ;
}

static int submit_barrier_buffer(struct buffer_head *bh) {
  get_bh(bh) ;
  bh->b_end_io = reiserfs_end_ordered_io;
  clear_buffer_dirty(bh) ;
  if (!buffer_uptodate(bh))
    BUG();
  return submit_bh(WRITE_BARRIER, bh) ;
}

static void check_barrier_completion(struct super_block *s,
                                     struct buffer_head *bh) {
  if (buffer_eopnotsupp(bh)) {
    clear_buffer_eopnotsupp(bh);
    disable_barrier(s);
    set_buffer_uptodate(bh);
    set_buffer_dirty(bh);
    sync_dirty_buffer(bh);
  }
}
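
/*
 * Added note, not in the original: this is the recovery path for devices
 * that reject WRITE_BARRIER with -EOPNOTSUPP after the fact.  Barriers are
 * turned off for the mount and the buffer is rewritten as an ordinary
 * synchronous write so the commit still reaches disk.
 */
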
#define CHUNK_SIZE 32
struct buffer_chunk {
  struct buffer_head *bh[CHUNK_SIZE];
  int nr;
};

static void write_chunk(struct buffer_chunk *chunk) {
  int i;
  for (i = 0; i < chunk->nr ; i++) {
    submit_logged_buffer(chunk->bh[i]) ;
  }
  chunk->nr = 0;
}

static void write_ordered_chunk(struct buffer_chunk *chunk) {
  int i;
  for (i = 0; i < chunk->nr ; i++) {
    submit_ordered_buffer(chunk->bh[i]) ;
  }
  chunk->nr = 0;
}

static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
                        spinlock_t *lock,
                        void (fn)(struct buffer_chunk *))
{
  int ret = 0;
  if (chunk->nr >= CHUNK_SIZE)
    BUG();
  chunk->bh[chunk->nr++] = bh;
  if (chunk->nr >= CHUNK_SIZE) {
    ret = 1;
    if (lock)
      spin_unlock(lock);
    fn(chunk);
    if (lock)
      spin_lock(lock);
  }
  return ret;
}
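
/*
 * Added note, not in the original: the spinlock is dropped around fn()
 * because both flush callbacks end in submit_bh(), which may sleep, and
 * I/O must not be submitted under a spinlock.  The return value of 1
 * tells the caller the chunk was flushed and the lock was released and
 * reacquired along the way.
 */
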
static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0);
static struct reiserfs_jh *alloc_jh(void) {
  struct reiserfs_jh *jh;
  while(1) {
    jh = kmalloc(sizeof(*jh), GFP_NOFS);
    if (jh) {
      atomic_inc(&nr_reiserfs_jh);
      return jh;
    }
    yield();
  }
}

/*
 * we want to free the jh when the buffer has been written
 * and waited on
 */
void reiserfs_free_jh(struct buffer_head *bh) {
  struct reiserfs_jh *jh;

  jh = bh->b_private;
  if (jh) {
    bh->b_private = NULL;
    jh->bh = NULL;
    list_del_init(&jh->list);
    kfree(jh);
    if (atomic_read(&nr_reiserfs_jh) <= 0)
      BUG();
    atomic_dec(&nr_reiserfs_jh);
    put_bh(bh);
  }
}

static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh,
                           int tail)
{
  struct reiserfs_jh *jh;

  if (bh->b_private) {
    spin_lock(&j->j_dirty_buffers_lock);
    if (!bh->b_private) {
      spin_unlock(&j->j_dirty_buffers_lock);
      goto no_jh;
    }
    jh = bh->b_private;
    list_del_init(&jh->list);
  } else {
no_jh:
    get_bh(bh);
    jh = alloc_jh();
    spin_lock(&j->j_dirty_buffers_lock);
    /* buffer must be locked for __add_jh, should be able to have
     * two adds at the same time
     */
    if (bh->b_private)
      BUG();
    jh->bh = bh;
    bh->b_private = jh;
  }
  jh->jl = j->j_current_jl;
  if (tail)
    list_add_tail(&jh->list, &jh->jl->j_tail_bh_list);
  else {
    list_add_tail(&jh->list, &jh->jl->j_bh_list);
  }
  spin_unlock(&j->j_dirty_buffers_lock);
  return 0;
}
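
/*
 * Added note, not in the original: the unlocked b_private test above is a
 * double-checked locking pattern.  The cheap check avoids taking
 * j_dirty_buffers_lock when no jh is attached, and the test is repeated
 * under the lock before the jh is trusted.
 */
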
int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh) {
  return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1);
}
int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh) {
  return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0);
}

#define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list)
static int write_ordered_buffers(spinlock_t *lock,
                                 struct reiserfs_journal *j,
                                 struct reiserfs_journal_list *jl,
                                 struct list_head *list)
{
  struct buffer_head *bh;
  struct reiserfs_jh *jh;
  int ret = j->j_errno;
  struct buffer_chunk chunk;
  struct list_head tmp;
  INIT_LIST_HEAD(&tmp);

  chunk.nr = 0;
  spin_lock(lock);
  while(!list_empty(list)) {
    jh = JH_ENTRY(list->next);
    bh = jh->bh;
    get_bh(bh);
    if (test_set_buffer_locked(bh)) {
      if (!buffer_dirty(bh)) {
        list_del_init(&jh->list);
        list_add(&jh->list, &tmp);
        goto loop_next;
      }
      spin_unlock(lock);
      if (chunk.nr)
        write_ordered_chunk(&chunk);
      wait_on_buffer(bh);
      cond_resched();
      spin_lock(lock);
      goto loop_next;
    }
    if (buffer_dirty(bh)) {
      list_del_init(&jh->list);
      list_add(&jh->list, &tmp);
      add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
    } else {
      reiserfs_free_jh(bh);
      unlock_buffer(bh);
    }
loop_next:
    put_bh(bh);
    cond_resched_lock(lock);
  }
  if (chunk.nr) {
    spin_unlock(lock);
    write_ordered_chunk(&chunk);
    spin_lock(lock);
  }
  while(!list_empty(&tmp)) {
    jh = JH_ENTRY(tmp.prev);
    bh = jh->bh;
    get_bh(bh);
    reiserfs_free_jh(bh);

    if (buffer_locked(bh)) {
      spin_unlock(lock);
      wait_on_buffer(bh);
      spin_lock(lock);
    }
    if (!buffer_uptodate(bh)) {
      ret = -EIO;
    }
    put_bh(bh);
    cond_resched_lock(lock);
  }
  spin_unlock(lock);
  return ret;
}
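
/*
 * Added note, not in the original: this runs in two passes.  Pass one
 * walks the list, submitting dirty buffers in CHUNK_SIZE batches and
 * parking each submitted (or already-clean-but-locked) jh on a private
 * tmp list; pass two walks tmp, waits for the I/O to finish, and folds
 * any write failure into ret as -EIO.
 */
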
static int flush_older_commits(struct super_block *s, struct reiserfs_journal_list *jl) {
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  struct reiserfs_journal_list *other_jl;
  struct reiserfs_journal_list *first_jl;
  struct list_head *entry;
  unsigned long trans_id = jl->j_trans_id;
  unsigned long other_trans_id;
  unsigned long first_trans_id;

find_first:
  /*
   * first we walk backwards to find the oldest uncommitted transaction
   */
  first_jl = jl;
  entry = jl->j_list.prev;
  while(1) {
    other_jl = JOURNAL_LIST_ENTRY(entry);
    if (entry == &journal->j_journal_list ||
        atomic_read(&other_jl->j_older_commits_done))
      break;
    first_jl = other_jl;
    entry = other_jl->j_list.prev;
  }

  /* if we didn't find any older uncommitted transactions, return now */
  if (first_jl == jl) {
    return 0;
  }

  first_trans_id = first_jl->j_trans_id;

  entry = &first_jl->j_list;
  while(1) {
    other_jl = JOURNAL_LIST_ENTRY(entry);
    other_trans_id = other_jl->j_trans_id;

    if (other_trans_id < trans_id) {
      if (atomic_read(&other_jl->j_commit_left) != 0) {
        flush_commit_list(s, other_jl, 0);

        /* list we were called with is gone, return */
        if (!journal_list_still_alive(s, trans_id))
          return 1;

        /* the one we just flushed is gone, this means all
         * older lists are also gone, so first_jl is no longer
         * valid either.  Go back to the beginning.
         */
        if (!journal_list_still_alive(s, other_trans_id)) {
          goto find_first;
        }
      }
      entry = entry->next;
      if (entry == &journal->j_journal_list)
        return 0;
    } else {
      return 0;
    }
  }
  return 0;
}

int reiserfs_async_progress_wait(struct super_block *s) {
  DEFINE_WAIT(wait);
  struct reiserfs_journal *j = SB_JOURNAL(s);
  if (atomic_read(&j->j_async_throttle))
    blk_congestion_wait(WRITE, HZ/10);
  return 0;
}

/*
** if this journal list still has commit blocks unflushed, send them to disk.
**
** log areas must be flushed in order (transaction 2 can't commit before transaction 1)
** Before the commit block can be written, every other log block must be safely on disk
**
*/
static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) {
  int i;
  int bn ;
  struct buffer_head *tbh = NULL ;
  unsigned long trans_id = jl->j_trans_id;
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  int barrier = 0;
  int retval = 0;

  reiserfs_check_lock_depth(s, "flush_commit_list") ;

  if (atomic_read(&jl->j_older_commits_done)) {
    return 0 ;
  }

  /* before we can put our commit blocks on disk, we have to make sure everyone older than
  ** us is on disk too
  */
  BUG_ON (jl->j_len <= 0);
  BUG_ON (trans_id == journal->j_trans_id);

  get_journal_list(jl);
  if (flushall) {
    if (flush_older_commits(s, jl) == 1) {
      /* list disappeared during flush_older_commits.  return */
      goto put_jl;
    }
  }

  /* make sure nobody is trying to flush this one at the same time */
  down(&jl->j_commit_lock);
  if (!journal_list_still_alive(s, trans_id)) {
    up(&jl->j_commit_lock);
    goto put_jl;
  }
  BUG_ON (jl->j_trans_id == 0);

  /* this commit is done, exit */
  if (atomic_read(&(jl->j_commit_left)) <= 0) {
    if (flushall) {
      atomic_set(&(jl->j_older_commits_done), 1) ;
    }
    up(&jl->j_commit_lock);
    goto put_jl;
  }

  if (!list_empty(&jl->j_bh_list)) {
    unlock_kernel();
    write_ordered_buffers(&journal->j_dirty_buffers_lock,
                          journal, jl, &jl->j_bh_list);
    lock_kernel();
  }
  BUG_ON (!list_empty(&jl->j_bh_list));
  /*
   * for the description block and all the log blocks, submit any buffers
   * that haven't already reached the disk
   */
  atomic_inc(&journal->j_async_throttle);
  for (i = 0 ; i < (jl->j_len + 1) ; i++) {
    bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start+i) %
         SB_ONDISK_JOURNAL_SIZE(s);
    tbh = journal_find_get_block(s, bn) ;
    if (buffer_dirty(tbh)) /* redundant, ll_rw_block() checks */
      ll_rw_block(WRITE, 1, &tbh) ;
    put_bh(tbh) ;
  }
  atomic_dec(&journal->j_async_throttle);

  /* wait on everything written so far before writing the commit
   * if we are in barrier mode, send the commit down now
   */
  barrier = reiserfs_barrier_flush(s);
  if (barrier) {
    int ret;
    lock_buffer(jl->j_commit_bh);
    ret = submit_barrier_buffer(jl->j_commit_bh);
    if (ret == -EOPNOTSUPP) {
      set_buffer_uptodate(jl->j_commit_bh);
      disable_barrier(s);
      barrier = 0;
    }
  }
  for (i = 0 ; i < (jl->j_len + 1) ; i++) {
    bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
         (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s) ;
    tbh = journal_find_get_block(s, bn) ;
    wait_on_buffer(tbh) ;
    // since we're using ll_rw_block() above, it might have skipped over
    // a locked buffer.  Double check here
    //
    if (buffer_dirty(tbh)) /* redundant, sync_dirty_buffer() checks */
      sync_dirty_buffer(tbh);
    if (unlikely (!buffer_uptodate(tbh))) {
#ifdef CONFIG_REISERFS_CHECK
      reiserfs_warning(s, "journal-601, buffer write failed") ;
#endif
      retval = -EIO;
    }
    put_bh(tbh) ; /* once for journal_find_get_block */
    put_bh(tbh) ; /* once due to original getblk in do_journal_end */
    atomic_dec(&(jl->j_commit_left)) ;
  }

  BUG_ON (atomic_read(&(jl->j_commit_left)) != 1);

  if (!barrier) {
    if (buffer_dirty(jl->j_commit_bh))
      BUG();
    mark_buffer_dirty(jl->j_commit_bh) ;
    sync_dirty_buffer(jl->j_commit_bh) ;
  } else
    wait_on_buffer(jl->j_commit_bh);

  check_barrier_completion(s, jl->j_commit_bh);

  /* If there was a write error in the journal - we can't commit this
   * transaction - it will be invalid and, if successful, will just end
   * up propagating the write error out to the filesystem. */
  if (unlikely (!buffer_uptodate(jl->j_commit_bh))) {
#ifdef CONFIG_REISERFS_CHECK
    reiserfs_warning(s, "journal-615: buffer write failed") ;
#endif
    retval = -EIO;
  }
  bforget(jl->j_commit_bh) ;
  if (journal->j_last_commit_id != 0 &&
      (jl->j_trans_id - journal->j_last_commit_id) != 1) {
    reiserfs_warning(s, "clm-2200: last commit %lu, current %lu",
                     journal->j_last_commit_id,
                     jl->j_trans_id);
  }
  journal->j_last_commit_id = jl->j_trans_id;

  /* now, every commit block is on the disk.  It is safe to allow blocks freed during this transaction to be reallocated */
  cleanup_freed_for_journal_list(s, jl) ;

  retval = retval ? retval : journal->j_errno;

  /* mark the metadata dirty */
  if (!retval)
    dirty_one_transaction(s, jl);
  atomic_dec(&(jl->j_commit_left)) ;

  if (flushall) {
    atomic_set(&(jl->j_older_commits_done), 1) ;
  }
  up(&jl->j_commit_lock);
put_jl:
  put_journal_list(s, jl);

  if (retval)
    reiserfs_abort (s, retval, "Journal write error in %s", __FUNCTION__);
  return retval;
}
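
/*
 * Added note, not in the original: the ordering that makes the commit
 * valid is enforced above in sequence -- older transactions commit first,
 * then the ordered data buffers are written, then the description and log
 * blocks are submitted and waited on, and only after all of them are on
 * disk is the commit block itself written (or, in barrier mode, allowed
 * to complete).
 */
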
  947. /*
  948. ** flush_journal_list frequently needs to find a newer transaction for a given block. This does that, or
  949. ** returns NULL if it can't find anything
  950. */
  951. static struct reiserfs_journal_list *find_newer_jl_for_cn(struct reiserfs_journal_cnode *cn) {
  952. struct super_block *sb = cn->sb;
  953. b_blocknr_t blocknr = cn->blocknr ;
  954. cn = cn->hprev ;
  955. while(cn) {
  956. if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
  957. return cn->jlist ;
  958. }
  959. cn = cn->hprev ;
  960. }
  961. return NULL ;
  962. }
  963. static void remove_journal_hash(struct super_block *, struct reiserfs_journal_cnode **,
  964. struct reiserfs_journal_list *, unsigned long, int);
  965. /*
  966. ** once all the real blocks have been flushed, it is safe to remove them from the
  967. ** journal list for this transaction. Aside from freeing the cnode, this also allows the
  968. ** block to be reallocated for data blocks if it had been deleted.
  969. */
  970. static void remove_all_from_journal_list(struct super_block *p_s_sb, struct reiserfs_journal_list *jl, int debug) {
  971. struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  972. struct reiserfs_journal_cnode *cn, *last ;
  973. cn = jl->j_realblock ;
  974. /* which is better, to lock once around the whole loop, or
  975. ** to lock for each call to remove_journal_hash?
  976. */
  977. while(cn) {
  978. if (cn->blocknr != 0) {
  979. if (debug) {
  980. reiserfs_warning (p_s_sb, "block %u, bh is %d, state %ld", cn->blocknr,
  981. cn->bh ? 1: 0, cn->state) ;
  982. }
  983. cn->state = 0 ;
  984. remove_journal_hash(p_s_sb, journal->j_list_hash_table, jl, cn->blocknr, 1) ;
  985. }
  986. last = cn ;
  987. cn = cn->next ;
  988. free_cnode(p_s_sb, last) ;
  989. }
  990. jl->j_realblock = NULL ;
  991. }
  992. /*
  993. ** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.
  994. ** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start
  995. ** releasing blocks in this transaction for reuse as data blocks.
  996. ** called by flush_journal_list, before it calls remove_all_from_journal_list
  997. **
  998. */
  999. static int _update_journal_header_block(struct super_block *p_s_sb, unsigned long offset, unsigned long trans_id) {
  1000. struct reiserfs_journal_header *jh ;
  1001. struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  1002. if (reiserfs_is_journal_aborted (journal))
  1003. return -EIO;
  1004. if (trans_id >= journal->j_last_flush_trans_id) {
  1005. if (buffer_locked((journal->j_header_bh))) {
  1006. wait_on_buffer((journal->j_header_bh)) ;
  1007. if (unlikely (!buffer_uptodate(journal->j_header_bh))) {
  1008. #ifdef CONFIG_REISERFS_CHECK
  1009. reiserfs_warning (p_s_sb, "journal-699: buffer write failed") ;
  1010. #endif
  1011. return -EIO;
  1012. }
  1013. }
  1014. journal->j_last_flush_trans_id = trans_id ;
  1015. journal->j_first_unflushed_offset = offset ;
  1016. jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data) ;
  1017. jh->j_last_flush_trans_id = cpu_to_le32(trans_id) ;
  1018. jh->j_first_unflushed_offset = cpu_to_le32(offset) ;
  1019. jh->j_mount_id = cpu_to_le32(journal->j_mount_id) ;
  1020. if (reiserfs_barrier_flush(p_s_sb)) {
  1021. int ret;
  1022. lock_buffer(journal->j_header_bh);
  1023. ret = submit_barrier_buffer(journal->j_header_bh);
  1024. if (ret == -EOPNOTSUPP) {
  1025. set_buffer_uptodate(journal->j_header_bh);
  1026. disable_barrier(p_s_sb);
  1027. goto sync;
  1028. }
  1029. wait_on_buffer(journal->j_header_bh);
  1030. check_barrier_completion(p_s_sb, journal->j_header_bh);
  1031. } else {
  1032. sync:
  1033. set_buffer_dirty(journal->j_header_bh) ;
  1034. sync_dirty_buffer(journal->j_header_bh) ;
  1035. }
  1036. if (!buffer_uptodate(journal->j_header_bh)) {
  1037. reiserfs_warning (p_s_sb, "journal-837: IO error during journal replay");
  1038. return -EIO ;
  1039. }
  1040. }
  1041. return 0 ;
  1042. }
  1043. static int update_journal_header_block(struct super_block *p_s_sb,
  1044. unsigned long offset,
  1045. unsigned long trans_id) {
  1046. return _update_journal_header_block(p_s_sb, offset, trans_id);
  1047. }
  1048. /*
  1049. ** flush any and all journal lists older than you are
  1050. ** can only be called from flush_journal_list
  1051. */
  1052. static int flush_older_journal_lists(struct super_block *p_s_sb,
  1053. struct reiserfs_journal_list *jl)
  1054. {
  1055. struct list_head *entry;
  1056. struct reiserfs_journal_list *other_jl ;
  1057. struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  1058. unsigned long trans_id = jl->j_trans_id;
  1059. /* we know we are the only ones flushing things, no extra race
  1060. * protection is required.
  1061. */
  1062. restart:
  1063. entry = journal->j_journal_list.next;
  1064. /* Did we wrap? */
  1065. if (entry == &journal->j_journal_list)
  1066. return 0;
  1067. other_jl = JOURNAL_LIST_ENTRY(entry);
  1068. if (other_jl->j_trans_id < trans_id) {
  1069. BUG_ON (other_jl->j_refcount <= 0);
  1070. /* do not flush all */
  1071. flush_journal_list(p_s_sb, other_jl, 0) ;
  1072. /* other_jl is now deleted from the list */
  1073. goto restart;
  1074. }
  1075. return 0 ;
  1076. }
  1077. static void del_from_work_list(struct super_block *s,
  1078. struct reiserfs_journal_list *jl) {
  1079. struct reiserfs_journal *journal = SB_JOURNAL (s);
  1080. if (!list_empty(&jl->j_working_list)) {
  1081. list_del_init(&jl->j_working_list);
  1082. journal->j_num_work_lists--;
  1083. }
  1084. }
/* flush a journal list, both commit and real blocks
**
** always set flushall to 1, unless you are calling from inside
** flush_journal_list
**
** IMPORTANT. This can only be called while there are no journal writers,
** and the journal is locked. That means it can only be called from
** do_journal_end, or by journal_release
*/
static int flush_journal_list(struct super_block *s,
                              struct reiserfs_journal_list *jl, int flushall) {
  struct reiserfs_journal_list *pjl ;
  struct reiserfs_journal_cnode *cn, *last ;
  int count ;
  int was_jwait = 0 ;
  int was_dirty = 0 ;
  struct buffer_head *saved_bh ;
  unsigned long j_len_saved = jl->j_len ;
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  int err = 0;

  BUG_ON (j_len_saved <= 0);

  if (atomic_read(&journal->j_wcount) != 0) {
    reiserfs_warning(s, "clm-2048: flush_journal_list called with wcount %d",
                     atomic_read(&journal->j_wcount)) ;
  }
  BUG_ON (jl->j_trans_id == 0);

  /* if flushall == 0, the lock is already held */
  if (flushall) {
    down(&journal->j_flush_sem);
  } else if (!down_trylock(&journal->j_flush_sem)) {
    BUG();
  }

  count = 0 ;
  if (j_len_saved > journal->j_trans_max) {
    reiserfs_panic(s, "journal-715: flush_journal_list, length is %lu, trans id %lu\n", j_len_saved, jl->j_trans_id);
    return 0 ;
  }

  /* if all the work is already done, get out of here */
  if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
      atomic_read(&(jl->j_commit_left)) <= 0) {
    goto flush_older_and_return ;
  }

  /* start by putting the commit list on disk. This will also flush
  ** the commit lists of any older transactions
  */
  flush_commit_list(s, jl, 1) ;

  if (!(jl->j_state & LIST_DIRTY) && !reiserfs_is_journal_aborted (journal))
    BUG();

  /* are we done now? */
  if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
      atomic_read(&(jl->j_commit_left)) <= 0) {
    goto flush_older_and_return ;
  }

  /* loop through each cnode, see if we need to write it,
  ** or wait on a more recent transaction, or just ignore it
  */
  if (atomic_read(&(journal->j_wcount)) != 0) {
    reiserfs_panic(s, "journal-844: panic journal list is flushing, wcount is not 0\n") ;
  }
  cn = jl->j_realblock ;
  while(cn) {
    was_jwait = 0 ;
    was_dirty = 0 ;
    saved_bh = NULL ;
    /* blocknr of 0 is no longer in the hash, ignore it */
    if (cn->blocknr == 0) {
      goto free_cnode ;
    }

    /* This transaction failed commit. Don't write out to the disk */
    if (!(jl->j_state & LIST_DIRTY))
      goto free_cnode;

    pjl = find_newer_jl_for_cn(cn) ;
    /* the order is important here. We check pjl to make sure we
    ** don't clear BH_JDirty_wait if we aren't the one writing this
    ** block to disk
    */
    if (!pjl && cn->bh) {
      saved_bh = cn->bh ;

      /* we do this to make sure nobody releases the buffer while
      ** we are working with it
      */
      get_bh(saved_bh) ;

      if (buffer_journal_dirty(saved_bh)) {
        BUG_ON (!can_dirty (cn));
        was_jwait = 1 ;
        was_dirty = 1 ;
      } else if (can_dirty(cn)) {
        /* everything with !pjl && jwait should be writable */
        BUG();
      }
    }

    /* if someone has this block in a newer transaction, just make
    ** sure they are committed, and don't try writing it to disk
    */
    if (pjl) {
      if (atomic_read(&pjl->j_commit_left))
        flush_commit_list(s, pjl, 1) ;
      goto free_cnode ;
    }

    /* bh == NULL when the block got to disk on its own, OR,
    ** the block got freed in a future transaction
    */
    if (saved_bh == NULL) {
      goto free_cnode ;
    }

    /* this should never happen. kupdate_one_transaction has this list
    ** locked while it works, so we should never see a buffer here that
    ** is not marked JDirty_wait
    */
    if ((!was_jwait) && !buffer_locked(saved_bh)) {
      reiserfs_warning (s, "journal-813: BAD! buffer %llu %cdirty %cjwait, "
                        "not in a newer transaction",
                        (unsigned long long)saved_bh->b_blocknr,
                        was_dirty ? ' ' : '!', was_jwait ? ' ' : '!') ;
    }
    if (was_dirty) {
      /* we inc again because saved_bh gets decremented at free_cnode */
      get_bh(saved_bh) ;
      set_bit(BLOCK_NEEDS_FLUSH, &cn->state) ;
      lock_buffer(saved_bh);
      BUG_ON (cn->blocknr != saved_bh->b_blocknr);
      if (buffer_dirty(saved_bh))
        submit_logged_buffer(saved_bh) ;
      else
        unlock_buffer(saved_bh);
      count++ ;
    } else {
      reiserfs_warning (s, "clm-2082: Unable to flush buffer %llu in %s",
                        (unsigned long long)saved_bh->b_blocknr, __FUNCTION__);
    }
free_cnode:
    last = cn ;
    cn = cn->next ;
    if (saved_bh) {
      /* we incremented this to keep others from taking the buffer head away */
      put_bh(saved_bh) ;
      if (atomic_read(&(saved_bh->b_count)) < 0) {
        reiserfs_warning (s, "journal-945: saved_bh->b_count < 0");
      }
    }
  }
  if (count > 0) {
    cn = jl->j_realblock ;
    while(cn) {
      if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
        if (!cn->bh) {
          reiserfs_panic(s, "journal-1011: cn->bh is NULL\n") ;
        }
        wait_on_buffer(cn->bh) ;
        if (!cn->bh) {
          reiserfs_panic(s, "journal-1012: cn->bh is NULL\n") ;
        }
        if (unlikely (!buffer_uptodate(cn->bh))) {
#ifdef CONFIG_REISERFS_CHECK
          reiserfs_warning(s, "journal-949: buffer write failed\n") ;
#endif
          err = -EIO;
        }
        /* note, we must clear the JDirty_wait bit after the up to date
        ** check, otherwise we race against our flushpage routine
        */
        BUG_ON (!test_clear_buffer_journal_dirty (cn->bh));

        /* undo the inc from journal_mark_dirty */
        put_bh(cn->bh) ;
        brelse(cn->bh) ;
      }
      cn = cn->next ;
    }
  }

  if (err)
    reiserfs_abort (s, -EIO, "Write error while pushing transaction to disk in %s", __FUNCTION__);
flush_older_and_return:

  /* before we can update the journal header block, we _must_ flush all
  ** real blocks from all older transactions to disk. This is because
  ** once the header block is updated, this transaction will not be
  ** replayed after a crash
  */
  if (flushall) {
    flush_older_journal_lists(s, jl);
  }

  err = journal->j_errno;
  /* before we can remove everything from the hash tables for this
  ** transaction, we must make sure it can never be replayed
  **
  ** since we are only called from do_journal_end, we know for sure there
  ** are no allocations going on while we are flushing journal lists. So,
  ** we only need to update the journal header block for the last list
  ** being flushed
  */
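  /* the new header offset below skips past this transaction's on-disk
   * footprint: j_len logged blocks plus 2 for the description and
   * commit blocks, wrapped modulo the on-disk journal size since the
   * log is circular
   */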
  if (!err && flushall) {
    err = update_journal_header_block(s, (jl->j_start + jl->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(s), jl->j_trans_id) ;
    if (err)
      reiserfs_abort (s, -EIO, "Write error while updating journal header in %s", __FUNCTION__);
  }
  remove_all_from_journal_list(s, jl, 0) ;
  list_del_init(&jl->j_list);
  journal->j_num_lists--;
  del_from_work_list(s, jl);

  if (journal->j_last_flush_id != 0 &&
      (jl->j_trans_id - journal->j_last_flush_id) != 1) {
    reiserfs_warning(s, "clm-2201: last flush %lu, current %lu",
                     journal->j_last_flush_id,
                     jl->j_trans_id);
  }
  journal->j_last_flush_id = jl->j_trans_id;

  /* not strictly required since we are freeing the list, but it should
   * help find code using dead lists later on
   */
  jl->j_len = 0 ;
  atomic_set(&(jl->j_nonzerolen), 0) ;
  jl->j_start = 0 ;
  jl->j_realblock = NULL ;
  jl->j_commit_bh = NULL ;
  jl->j_trans_id = 0 ;
  jl->j_state = 0;
  put_journal_list(s, jl);
  if (flushall)
    up(&journal->j_flush_sem);
  return err ;
}
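/* informal summary of the sequence above (the comments in the function
 * are authoritative): commit blocks reach the log via flush_commit_list,
 * real blocks are submitted and waited on, all older lists are flushed,
 * and only then does the journal header advance past this transaction --
 * so anything the header calls flushed is already durable in its home
 * location
 */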
static int write_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl,
                                 struct buffer_chunk *chunk)
{
  struct reiserfs_journal_cnode *cn;
  int ret = 0 ;

  jl->j_state |= LIST_TOUCHED;
  del_from_work_list(s, jl);
  if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0) {
    return 0;
  }

  cn = jl->j_realblock ;
  while(cn) {
    /* if the blocknr == 0, this has been cleared from the hash,
    ** skip it
    */
    if (cn->blocknr == 0) {
      goto next ;
    }
    if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) {
      struct buffer_head *tmp_bh;
      /* we can race against journal_mark_freed when we try
       * to lock_buffer(cn->bh), so we have to inc the buffer
       * count, and recheck things after locking
       */
      tmp_bh = cn->bh;
      get_bh(tmp_bh);
      lock_buffer(tmp_bh);
      if (cn->bh && can_dirty(cn) && buffer_dirty(tmp_bh)) {
        if (!buffer_journal_dirty(tmp_bh) ||
            buffer_journal_prepared(tmp_bh))
          BUG();
        add_to_chunk(chunk, tmp_bh, NULL, write_chunk);
        ret++;
      } else {
        /* note, cn->bh might be null now */
        unlock_buffer(tmp_bh);
      }
      put_bh(tmp_bh);
    }
next:
    cn = cn->next ;
    cond_resched();
  }
  return ret ;
}
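/* note: write_one_transaction only queues writable dirty buffers into
 * the caller-supplied chunk; the caller must submit any partially
 * filled chunk itself, as kupdate_transactions below does with its
 * final write_chunk call
 */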
/* used by flush_commit_list */
static int dirty_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl)
{
  struct reiserfs_journal_cnode *cn;
  struct reiserfs_journal_list *pjl;
  int ret = 0 ;

  jl->j_state |= LIST_DIRTY;
  cn = jl->j_realblock ;
  while(cn) {
    /* look for a more recent transaction that logged this
    ** buffer. Only the most recent transaction with a buffer in
    ** it is allowed to send that buffer to disk
    */
    pjl = find_newer_jl_for_cn(cn) ;
    if (!pjl && cn->blocknr && cn->bh && buffer_journal_dirty(cn->bh))
    {
      BUG_ON (!can_dirty(cn));
      /* if the buffer is prepared, it will either be logged
       * or restored. If restored, we need to make sure
       * it actually gets marked dirty
       */
      clear_buffer_journal_new (cn->bh);
      if (buffer_journal_prepared (cn->bh)) {
        set_buffer_journal_restore_dirty (cn->bh);
      } else {
        set_buffer_journal_test (cn->bh);
        mark_buffer_dirty(cn->bh);
      }
    }
    cn = cn->next ;
  }
  return ret ;
}
static int kupdate_transactions(struct super_block *s,
                                struct reiserfs_journal_list *jl,
                                struct reiserfs_journal_list **next_jl,
                                unsigned long *next_trans_id,
                                int num_blocks,
                                int num_trans) {
  int ret = 0;
  int written = 0 ;
  int transactions_flushed = 0;
  unsigned long orig_trans_id = jl->j_trans_id;
  struct buffer_chunk chunk;
  struct list_head *entry;
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  chunk.nr = 0;

  down(&journal->j_flush_sem);
  if (!journal_list_still_alive(s, orig_trans_id)) {
    goto done;
  }

  /* we've got j_flush_sem held, nobody is going to delete any
   * of these lists out from underneath us
   */
  while((num_trans && transactions_flushed < num_trans) ||
        (!num_trans && written < num_blocks)) {

    if (jl->j_len == 0 || (jl->j_state & LIST_TOUCHED) ||
        atomic_read(&jl->j_commit_left) || !(jl->j_state & LIST_DIRTY))
    {
      del_from_work_list(s, jl);
      break;
    }
    ret = write_one_transaction(s, jl, &chunk);

    if (ret < 0)
      goto done;
    transactions_flushed++;
    written += ret;
    entry = jl->j_list.next;

    /* did we wrap? */
    if (entry == &journal->j_journal_list) {
      break;
    }
    jl = JOURNAL_LIST_ENTRY(entry);

    /* don't bother with older transactions */
    if (jl->j_trans_id <= orig_trans_id)
      break;
  }
  if (chunk.nr) {
    write_chunk(&chunk);
  }

done:
  up(&journal->j_flush_sem);
  return ret;
}
/* O_SYNC- and fsync-heavy applications tend to use up
** all the journal list slots with tiny transactions. These
** trigger lots and lots of calls to update the header block, which
** adds seeks and slows things down.
**
** This function tries to clear out a large chunk of the journal lists
** at once, which makes everything faster since only the newest journal
** list updates the header block
*/
static int flush_used_journal_lists(struct super_block *s,
                                    struct reiserfs_journal_list *jl) {
  unsigned long len = 0;
  unsigned long cur_len;
  int ret;
  int i;
  int limit = 256;
  struct reiserfs_journal_list *tjl;
  struct reiserfs_journal_list *flush_jl;
  unsigned long trans_id;
  struct reiserfs_journal *journal = SB_JOURNAL (s);

  flush_jl = tjl = jl;

  /* in data logging mode, try harder to flush a lot of blocks */
  if (reiserfs_data_log(s))
    limit = 1024;
  /* flush for 256 transactions or limit blocks, whichever comes first */
  for(i = 0 ; i < 256 && len < limit ; i++) {
    if (atomic_read(&tjl->j_commit_left) ||
        tjl->j_trans_id < jl->j_trans_id) {
      break;
    }
    cur_len = atomic_read(&tjl->j_nonzerolen);
    if (cur_len > 0) {
      tjl->j_state &= ~LIST_TOUCHED;
    }
    len += cur_len;
    flush_jl = tjl;
    if (tjl->j_list.next == &journal->j_journal_list)
      break;
    tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
  }

  /* try to find a group of blocks we can flush across all the
  ** transactions, but only bother if we've actually spanned
  ** across multiple lists
  */
  if (flush_jl != jl) {
    ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
  }
  flush_journal_list(s, flush_jl, 1);
  return 0;
}
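/* rough illustration of the payoff (hypothetical numbers): if an
 * fsync-heavy load left 64 tiny transactions of ~4 blocks each, one
 * pass here can retire all 64 with a single journal header update
 * instead of 64; the real bounds are 256 transactions or `limit'
 * blocks, whichever is hit first
 */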
/*
** removes any nodes in table with name block and dev as bh.
** only touches the hnext and hprev pointers.
*/
void remove_journal_hash(struct super_block *sb,
                         struct reiserfs_journal_cnode **table,
                         struct reiserfs_journal_list *jl,
                         unsigned long block, int remove_freed)
{
  struct reiserfs_journal_cnode *cur ;
  struct reiserfs_journal_cnode **head ;

  head = &(journal_hash(table, sb, block)) ;
  if (!head) {
    return ;
  }
  cur = *head ;
  while(cur) {
    if (cur->blocknr == block && cur->sb == sb && (jl == NULL || jl == cur->jlist) &&
        (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) {
      if (cur->hnext) {
        cur->hnext->hprev = cur->hprev ;
      }
      if (cur->hprev) {
        cur->hprev->hnext = cur->hnext ;
      } else {
        *head = cur->hnext ;
      }
      cur->blocknr = 0 ;
      cur->sb = NULL ;
      cur->state = 0 ;
      if (cur->bh && cur->jlist) /* anybody who clears the cur->bh will also dec the nonzerolen */
        atomic_dec(&(cur->jlist->j_nonzerolen)) ;
      cur->bh = NULL ;
      cur->jlist = NULL ;
    }
    cur = cur->hnext ;
  }
}
static void free_journal_ram(struct super_block *p_s_sb) {
  struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
  reiserfs_kfree(journal->j_current_jl,
                 sizeof(struct reiserfs_journal_list), p_s_sb);
  journal->j_num_lists--;

  vfree(journal->j_cnode_free_orig) ;
  free_list_bitmaps(p_s_sb, journal->j_list_bitmap) ;
  free_bitmap_nodes(p_s_sb) ; /* must be after free_list_bitmaps */
  if (journal->j_header_bh) {
    brelse(journal->j_header_bh) ;
  }
  /* j_header_bh is on the journal dev, make sure not to release the journal
   * dev until we brelse j_header_bh
   */
  release_journal_dev(p_s_sb, journal);
  vfree(journal) ;
}
/*
** call on unmount. Only set error to 1 if you haven't made your way out
** of read_super() yet. Any other caller must keep error at 0.
*/
static int do_journal_release(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, int error) {
  struct reiserfs_transaction_handle myth ;
  int flushed = 0;
  struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

  /* we only want to flush out transactions if we were called with error == 0
  */
  if (!error && !(p_s_sb->s_flags & MS_RDONLY)) {
    /* end the current trans */
    BUG_ON (!th->t_trans_id);
    do_journal_end(th, p_s_sb, 10, FLUSH_ALL) ;

    /* make sure something gets logged to force our way into the flush code */
    if (!journal_join(&myth, p_s_sb, 1)) {
      reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
      journal_mark_dirty(&myth, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
      do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL) ;
      flushed = 1;
    }
  }

  /* this also catches errors during the do_journal_end above */
  if (!error && reiserfs_is_journal_aborted(journal)) {
    memset(&myth, 0, sizeof(myth));
    if (!journal_join_abort(&myth, p_s_sb, 1)) {
      reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
      journal_mark_dirty(&myth, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
      do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL) ;
    }
  }

  reiserfs_mounted_fs_count-- ;
  /* wait for all commits to finish */
  cancel_delayed_work(&SB_JOURNAL(p_s_sb)->j_work);
  flush_workqueue(commit_wq);
  if (!reiserfs_mounted_fs_count) {
    destroy_workqueue(commit_wq);
    commit_wq = NULL;
  }
  free_journal_ram(p_s_sb) ;
  return 0 ;
}
/*
** call on unmount. flush all journal trans, release all alloc'd ram
*/
int journal_release(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb) {
  return do_journal_release(th, p_s_sb, 0) ;
}

/*
** only call from an error condition inside reiserfs_read_super!
*/
int journal_release_error(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb) {
  return do_journal_release(th, p_s_sb, 1) ;
}
/* compares description block with commit block. returns 1 if they differ, 0 if they are the same */
static int journal_compare_desc_commit(struct super_block *p_s_sb, struct reiserfs_journal_desc *desc,
                                       struct reiserfs_journal_commit *commit) {
  if (get_commit_trans_id (commit) != get_desc_trans_id (desc) ||
      get_commit_trans_len (commit) != get_desc_trans_len (desc) ||
      get_commit_trans_len (commit) > SB_JOURNAL(p_s_sb)->j_trans_max ||
      get_commit_trans_len (commit) <= 0
     ) {
    return 1 ;
  }
  return 0 ;
}
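/* informal sketch of the on-disk transaction layout implied by the
** offset math in this file (desc offset d, transaction length n, all
** taken modulo the on-disk journal size):
**
**   d: description block | d+1 .. d+n: logged data blocks | d+n+1: commit block
**
** so the commit block is read from (d + n + 1) and replay of the next
** transaction resumes at (d + n + 2)
*/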
/* returns 0 if it did not find a description block
** returns -1 if it found a corrupt commit block
** returns 1 if both desc and commit were valid
*/
static int journal_transaction_is_valid(struct super_block *p_s_sb, struct buffer_head *d_bh, unsigned long *oldest_invalid_trans_id, unsigned long *newest_mount_id) {
  struct reiserfs_journal_desc *desc ;
  struct reiserfs_journal_commit *commit ;
  struct buffer_head *c_bh ;
  unsigned long offset ;

  if (!d_bh)
    return 0 ;

  desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
  if (get_desc_trans_len(desc) > 0 && !memcmp(get_journal_desc_magic (d_bh), JOURNAL_DESC_MAGIC, 8)) {
    if (oldest_invalid_trans_id && *oldest_invalid_trans_id && get_desc_trans_id(desc) > *oldest_invalid_trans_id) {
      reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-986: transaction "
                     "is valid returning because trans_id %d is greater than "
                     "oldest_invalid %lu", get_desc_trans_id(desc),
                     *oldest_invalid_trans_id);
      return 0 ;
    }
    if (newest_mount_id && *newest_mount_id > get_desc_mount_id (desc)) {
      reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1087: transaction "
                     "is valid returning because mount_id %d is less than "
                     "newest_mount_id %lu", get_desc_mount_id (desc),
                     *newest_mount_id) ;
      return -1 ;
    }
    if ( get_desc_trans_len(desc) > SB_JOURNAL(p_s_sb)->j_trans_max ) {
      reiserfs_warning(p_s_sb, "journal-2018: Bad transaction length %d encountered, ignoring transaction", get_desc_trans_len(desc));
      return -1 ;
    }
    offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;

    /* ok, we have a journal description block, let's see if the transaction was valid */
    c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                         ((offset + get_desc_trans_len(desc) + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
    if (!c_bh)
      return 0 ;
    commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
    if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
      reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
                     "journal_transaction_is_valid, commit offset %ld had bad "
                     "time %d or length %d",
                     c_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                     get_commit_trans_id (commit),
                     get_commit_trans_len(commit));
      brelse(c_bh) ;
      if (oldest_invalid_trans_id) {
        *oldest_invalid_trans_id = get_desc_trans_id(desc) ;
        reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1004: "
                       "transaction_is_valid setting oldest invalid trans_id "
                       "to %d", get_desc_trans_id(desc)) ;
      }
      return -1;
    }
    brelse(c_bh) ;
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1006: found valid "
                   "transaction start offset %llu, len %d id %d",
                   d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                   get_desc_trans_len(desc), get_desc_trans_id(desc)) ;
    return 1 ;
  } else {
    return 0 ;
  }
}
static void brelse_array(struct buffer_head **heads, int num) {
  int i ;
  for (i = 0 ; i < num ; i++) {
    brelse(heads[i]) ;
  }
}
/*
** given the start, and values for the oldest acceptable transactions,
** this either reads in and replays a transaction, or returns because the transaction
** is invalid, or too old.
*/
static int journal_read_transaction(struct super_block *p_s_sb, unsigned long cur_dblock, unsigned long oldest_start,
                                    unsigned long oldest_trans_id, unsigned long newest_mount_id) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_journal_desc *desc ;
  struct reiserfs_journal_commit *commit ;
  unsigned long trans_id = 0 ;
  struct buffer_head *c_bh ;
  struct buffer_head *d_bh ;
  struct buffer_head **log_blocks = NULL ;
  struct buffer_head **real_blocks = NULL ;
  unsigned long trans_offset ;
  int i;
  int trans_half;

  d_bh = journal_bread(p_s_sb, cur_dblock) ;
  if (!d_bh)
    return 1 ;
  desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
  trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
  reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1037: "
                 "journal_read_transaction, offset %llu, len %d mount_id %d",
                 d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                 get_desc_trans_len(desc), get_desc_mount_id(desc)) ;
  if (get_desc_trans_id(desc) < oldest_trans_id) {
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1039: "
                   "journal_read_trans skipping because %lu is too old",
                   cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)) ;
    brelse(d_bh) ;
    return 1 ;
  }
  if (get_desc_mount_id(desc) != newest_mount_id) {
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1146: "
                   "journal_read_trans skipping because %d is != "
                   "newest_mount_id %lu", get_desc_mount_id(desc),
                   newest_mount_id) ;
    brelse(d_bh) ;
    return 1 ;
  }
  c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                       ((trans_offset + get_desc_trans_len(desc) + 1) %
                        SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
  if (!c_bh) {
    brelse(d_bh) ;
    return 1 ;
  }
  commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
  if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal_read_transaction, "
                   "commit offset %llu had bad time %d or length %d",
                   c_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                   get_commit_trans_id(commit), get_commit_trans_len(commit));
    brelse(c_bh) ;
    brelse(d_bh) ;
    return 1;
  }
  trans_id = get_desc_trans_id(desc) ;
  /* now we know we've got a good transaction, and it was inside the valid time ranges */
  log_blocks = reiserfs_kmalloc(get_desc_trans_len(desc) * sizeof(struct buffer_head *), GFP_NOFS, p_s_sb) ;
  real_blocks = reiserfs_kmalloc(get_desc_trans_len(desc) * sizeof(struct buffer_head *), GFP_NOFS, p_s_sb) ;
  if (!log_blocks || !real_blocks) {
    brelse(c_bh) ;
    brelse(d_bh) ;
    reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
    reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
    reiserfs_warning(p_s_sb, "journal-1169: kmalloc failed, unable to mount FS") ;
    return -1 ;
  }
  /* get all the buffer heads */
  trans_half = journal_trans_half (p_s_sb->s_blocksize) ;
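  /* the real (home) block numbers of a transaction are split across the
   * description and commit blocks: entries 0..trans_half-1 live in
   * desc->j_realblock and the rest in commit->j_realblock, as the loop
   * below shows. journal_trans_half is roughly the number of 32-bit
   * block numbers that fit in one block after the header fields.
   */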
  for(i = 0 ; i < get_desc_trans_len(desc) ; i++) {
    log_blocks[i] = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + (trans_offset + 1 + i) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
    if (i < trans_half) {
      real_blocks[i] = sb_getblk(p_s_sb, le32_to_cpu(desc->j_realblock[i])) ;
    } else {
      real_blocks[i] = sb_getblk(p_s_sb, le32_to_cpu(commit->j_realblock[i - trans_half])) ;
    }
    if ( real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(p_s_sb) ) {
      reiserfs_warning(p_s_sb, "journal-1207: REPLAY FAILURE fsck required! Block to replay is outside of filesystem");
      goto abort_replay;
    }
    /* make sure we don't try to replay onto log or reserved area */
    if (is_block_in_log_or_reserved_area(p_s_sb, real_blocks[i]->b_blocknr)) {
      reiserfs_warning(p_s_sb, "journal-1204: REPLAY FAILURE fsck required! Trying to replay onto a log block") ;
abort_replay:
      brelse_array(log_blocks, i) ;
      brelse_array(real_blocks, i) ;
      brelse(c_bh) ;
      brelse(d_bh) ;
      reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      return -1 ;
    }
  }
  /* read in the log blocks, memcpy to the corresponding real block */
  ll_rw_block(READ, get_desc_trans_len(desc), log_blocks) ;
  for (i = 0 ; i < get_desc_trans_len(desc) ; i++) {
    wait_on_buffer(log_blocks[i]) ;
    if (!buffer_uptodate(log_blocks[i])) {
      reiserfs_warning(p_s_sb, "journal-1212: REPLAY FAILURE fsck required! buffer write failed") ;
      brelse_array(log_blocks + i, get_desc_trans_len(desc) - i) ;
      brelse_array(real_blocks, get_desc_trans_len(desc)) ;
      brelse(c_bh) ;
      brelse(d_bh) ;
      reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      return -1 ;
    }
    memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data, real_blocks[i]->b_size) ;
    set_buffer_uptodate(real_blocks[i]) ;
    brelse(log_blocks[i]) ;
  }
  /* flush out the real blocks */
  for (i = 0 ; i < get_desc_trans_len(desc) ; i++) {
    set_buffer_dirty(real_blocks[i]) ;
    ll_rw_block(WRITE, 1, real_blocks + i) ;
  }
  for (i = 0 ; i < get_desc_trans_len(desc) ; i++) {
    wait_on_buffer(real_blocks[i]) ;
    if (!buffer_uptodate(real_blocks[i])) {
      reiserfs_warning(p_s_sb, "journal-1226: REPLAY FAILURE, fsck required! buffer write failed") ;
      brelse_array(real_blocks + i, get_desc_trans_len(desc) - i) ;
      brelse(c_bh) ;
      brelse(d_bh) ;
      reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      return -1 ;
    }
    brelse(real_blocks[i]) ;
  }
  cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + ((trans_offset + get_desc_trans_len(desc) + 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)) ;
  reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1095: setting journal "
                 "start to offset %ld",
                 cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)) ;

  /* init starting values for the first transaction, in case this is the last transaction to be replayed. */
  journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
  journal->j_last_flush_trans_id = trans_id ;
  journal->j_trans_id = trans_id + 1;
  /* free the block arrays before releasing d_bh: desc points into
   * d_bh->b_data, and we still read the transaction length from it
   */
  reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
  reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
  brelse(c_bh) ;
  brelse(d_bh) ;
  return 0 ;
}
/* This function reads blocks starting from block and to max_block of bufsize
   size (but no more than BUFNR blocks at a time). This proved to improve
   mounting speed on self-rebuilding raid5 arrays at least.
   Right now it is only used from journal code. But later we might use it
   from other places.
   Note: Do not use journal_getblk/sb_getblk functions here! */
static struct buffer_head * reiserfs_breada (struct block_device *dev, int block, int bufsize,
                                             unsigned int max_block)
{
  struct buffer_head * bhlist[BUFNR];
  unsigned int blocks = BUFNR;
  struct buffer_head * bh;
  int i, j;

  bh = __getblk (dev, block, bufsize);
  if (buffer_uptodate (bh))
    return (bh);

  if (block + BUFNR > max_block) {
    blocks = max_block - block;
  }
  bhlist[0] = bh;
  j = 1;
  for (i = 1; i < blocks; i++) {
    bh = __getblk (dev, block + i, bufsize);
    if (buffer_uptodate (bh)) {
      brelse (bh);
      break;
    }
    else bhlist[j++] = bh;
  }
  ll_rw_block (READ, j, bhlist);
  for (i = 1; i < j; i++)
    brelse (bhlist[i]);
  bh = bhlist[0];
  wait_on_buffer (bh);
  if (buffer_uptodate (bh))
    return bh;
  brelse (bh);
  return NULL;
}
/*
** read and replay the log
** on a clean unmount, the journal header's next unflushed pointer will point to an
** invalid transaction. This tests for that before finding all the transactions in
** the log, which keeps normal mount times fast.
**
** After a crash, this starts with the next unflushed transaction, and replays until it finds one too old, or invalid.
**
** On exit, it sets things up so the first transaction will work correctly.
*/
static int journal_read(struct super_block *p_s_sb) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_journal_desc *desc ;
  unsigned long oldest_trans_id = 0;
  unsigned long oldest_invalid_trans_id = 0 ;
  time_t start ;
  unsigned long oldest_start = 0;
  unsigned long cur_dblock = 0 ;
  unsigned long newest_mount_id = 9 ;
  struct buffer_head *d_bh ;
  struct reiserfs_journal_header *jh ;
  int valid_journal_header = 0 ;
  int replay_count = 0 ;
  int continue_replay = 1 ;
  int ret ;
  char b[BDEVNAME_SIZE];

  cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
  reiserfs_info (p_s_sb, "checking transaction log (%s)\n",
                 bdevname(journal->j_dev_bd, b));
  start = get_seconds();

  /* step 1, read in the journal header block. Check the transaction it says
  ** is the first unflushed, and if that transaction is not valid,
  ** replay is done
  */
  journal->j_header_bh = journal_bread(p_s_sb,
                                       SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                                       SB_ONDISK_JOURNAL_SIZE(p_s_sb));
  if (!journal->j_header_bh) {
    return 1 ;
  }
  jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data) ;
  if (le32_to_cpu(jh->j_first_unflushed_offset) >= 0 &&
      le32_to_cpu(jh->j_first_unflushed_offset) < SB_ONDISK_JOURNAL_SIZE(p_s_sb) &&
      le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
    oldest_start = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                   le32_to_cpu(jh->j_first_unflushed_offset) ;
    oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
    newest_mount_id = le32_to_cpu(jh->j_mount_id);
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1153: found in "
                   "header: first_unflushed_offset %d, last_flushed_trans_id "
                   "%lu", le32_to_cpu(jh->j_first_unflushed_offset),
                   le32_to_cpu(jh->j_last_flush_trans_id)) ;
    valid_journal_header = 1 ;

    /* now, we try to read the first unflushed offset. If it is not valid,
    ** there is nothing more we can do, and it makes no sense to read
    ** through the whole log.
    */
    d_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + le32_to_cpu(jh->j_first_unflushed_offset)) ;
    ret = journal_transaction_is_valid(p_s_sb, d_bh, NULL, NULL) ;
    if (!ret) {
      continue_replay = 0 ;
    }
    brelse(d_bh) ;
    goto start_log_replay;
  }

  if (continue_replay && bdev_read_only(p_s_sb->s_bdev)) {
    reiserfs_warning (p_s_sb,
                      "clm-2076: device is readonly, unable to replay log") ;
    return -1 ;
  }

  /* ok, there are transactions that need to be replayed. start with the first log block, find
  ** all the valid transactions, and pick out the oldest.
  */
  while(continue_replay && cur_dblock < (SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb))) {
    /* Note that it is required for the blocksize of the primary fs device and the
       journal device to be the same */
    d_bh = reiserfs_breada(journal->j_dev_bd, cur_dblock, p_s_sb->s_blocksize,
                           SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb)) ;
    ret = journal_transaction_is_valid(p_s_sb, d_bh, &oldest_invalid_trans_id, &newest_mount_id) ;
    if (ret == 1) {
      desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
      if (oldest_start == 0) { /* init all oldest_ values */
        oldest_trans_id = get_desc_trans_id(desc) ;
        oldest_start = d_bh->b_blocknr ;
        newest_mount_id = get_desc_mount_id(desc) ;
        reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1179: Setting "
                       "oldest_start to offset %llu, trans_id %lu",
                       oldest_start - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                       oldest_trans_id) ;
      } else if (oldest_trans_id > get_desc_trans_id(desc)) {
        /* one we just read was older */
        oldest_trans_id = get_desc_trans_id(desc) ;
        oldest_start = d_bh->b_blocknr ;
        reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1180: Resetting "
                       "oldest_start to offset %lu, trans_id %lu",
                       oldest_start - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                       oldest_trans_id) ;
      }
      if (newest_mount_id < get_desc_mount_id(desc)) {
        newest_mount_id = get_desc_mount_id(desc) ;
        reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
                       "newest_mount_id to %d", get_desc_mount_id(desc));
      }
      cur_dblock += get_desc_trans_len(desc) + 2 ;
    } else {
      cur_dblock++ ;
    }
    brelse(d_bh) ;
  }

start_log_replay:
  cur_dblock = oldest_start ;
  if (oldest_trans_id) {
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1206: Starting replay "
                   "from offset %llu, trans_id %lu",
                   cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                   oldest_trans_id) ;
  }
  replay_count = 0 ;
  while(continue_replay && oldest_trans_id > 0) {
    ret = journal_read_transaction(p_s_sb, cur_dblock, oldest_start, oldest_trans_id, newest_mount_id) ;
    if (ret < 0) {
      return ret ;
    } else if (ret != 0) {
      break ;
    }
    cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + journal->j_start ;
    replay_count++ ;
    if (cur_dblock == oldest_start)
      break;
  }

  if (oldest_trans_id == 0) {
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1225: No valid "
                   "transactions found") ;
  }
  /* j_start does not get set correctly if we don't replay any transactions.
  ** if we had a valid journal_header, set j_start to the first unflushed transaction value,
  ** copy the trans_id from the header
  */
  if (valid_journal_header && replay_count == 0) {
    journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset) ;
    journal->j_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
    journal->j_last_flush_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) ;
    journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
  } else {
    journal->j_mount_id = newest_mount_id + 1 ;
  }
  reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
                 "newest_mount_id to %lu", journal->j_mount_id) ;
  journal->j_first_unflushed_offset = journal->j_start ;
  if (replay_count > 0) {
    reiserfs_info (p_s_sb, "replayed %d transactions in %lu seconds\n",
                   replay_count, get_seconds() - start) ;
  }
  if (!bdev_read_only(p_s_sb->s_bdev) &&
      _update_journal_header_block(p_s_sb, journal->j_start,
                                   journal->j_last_flush_trans_id))
  {
    /* replay failed, caller must call free_journal_ram and abort
    ** the mount
    */
    return -1 ;
  }
  return 0 ;
}
static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s)
{
  struct reiserfs_journal_list *jl;
retry:
  jl = reiserfs_kmalloc(sizeof(struct reiserfs_journal_list), GFP_NOFS, s);
  if (!jl) {
    yield();
    goto retry;
  }
  memset(jl, 0, sizeof(*jl));
  INIT_LIST_HEAD(&jl->j_list);
  INIT_LIST_HEAD(&jl->j_working_list);
  INIT_LIST_HEAD(&jl->j_tail_bh_list);
  INIT_LIST_HEAD(&jl->j_bh_list);
  sema_init(&jl->j_commit_lock, 1);
  SB_JOURNAL(s)->j_num_lists++;
  get_journal_list(jl);
  return jl;
}
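/* note: the retry/yield loop above means alloc_journal_list never fails;
 * under memory pressure it simply yields and retries until reiserfs_kmalloc
 * succeeds, presumably because its callers have no way to back out of
 * needing a new list
 */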
static void journal_list_init(struct super_block *p_s_sb) {
  SB_JOURNAL(p_s_sb)->j_current_jl = alloc_journal_list(p_s_sb);
}
static int release_journal_dev(struct super_block *super,
                               struct reiserfs_journal *journal)
{
  int result;

  result = 0;

  if (journal->j_dev_file != NULL) {
    result = filp_close(journal->j_dev_file, NULL);
    journal->j_dev_file = NULL;
    journal->j_dev_bd = NULL;
  } else if (journal->j_dev_bd != NULL) {
    result = blkdev_put(journal->j_dev_bd);
    journal->j_dev_bd = NULL;
  }

  if (result != 0) {
    reiserfs_warning(super, "sh-457: release_journal_dev: Cannot release journal device: %i", result);
  }
  return result;
}
static int journal_init_dev(struct super_block *super,
                            struct reiserfs_journal *journal,
                            const char *jdev_name)
{
  int result;
  dev_t jdev;
  int blkdev_mode = FMODE_READ | FMODE_WRITE;
  char b[BDEVNAME_SIZE];

  result = 0;

  journal->j_dev_bd = NULL;
  journal->j_dev_file = NULL;
  jdev = SB_ONDISK_JOURNAL_DEVICE(super) ?
         new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;

  if (bdev_read_only(super->s_bdev))
    blkdev_mode = FMODE_READ;

  /* there is no "jdev" option and journal is on separate device */
  if ((!jdev_name || !jdev_name[0])) {
    journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode);
    if (IS_ERR(journal->j_dev_bd)) {
      result = PTR_ERR(journal->j_dev_bd);
      journal->j_dev_bd = NULL;
      reiserfs_warning (super, "sh-458: journal_init_dev: "
                        "cannot init journal device '%s': %i",
                        __bdevname(jdev, b), result);
      return result;
    } else if (jdev != super->s_dev)
      set_blocksize(journal->j_dev_bd, super->s_blocksize);
    return 0;
  }

  journal->j_dev_file = filp_open(jdev_name, 0, 0);
  if (!IS_ERR(journal->j_dev_file)) {
    struct inode *jdev_inode = journal->j_dev_file->f_mapping->host;
    if (!S_ISBLK(jdev_inode->i_mode)) {
      reiserfs_warning(super, "journal_init_dev: '%s' is "
                       "not a block device", jdev_name);
      result = -ENOTBLK;
      release_journal_dev(super, journal);
    } else {
      /* ok */
      journal->j_dev_bd = I_BDEV(jdev_inode);
      set_blocksize(journal->j_dev_bd, super->s_blocksize);
      reiserfs_info(super, "journal_init_dev: journal device: %s\n",
                    bdevname(journal->j_dev_bd, b));
    }
  } else {
    result = PTR_ERR(journal->j_dev_file);
    journal->j_dev_file = NULL;
    reiserfs_warning(super,
                     "journal_init_dev: Cannot open '%s': %i",
                     jdev_name, result);
  }
  return result;
}
/*
** must be called once on fs mount. calls journal_read for you
*/
int journal_init(struct super_block *p_s_sb, const char * j_dev_name, int old_format, unsigned int commit_max_age) {
  int num_cnodes = SB_ONDISK_JOURNAL_SIZE(p_s_sb) * 2 ;
  struct buffer_head *bhjh;
  struct reiserfs_super_block * rs;
  struct reiserfs_journal_header *jh;
  struct reiserfs_journal *journal;
  struct reiserfs_journal_list *jl;
  char b[BDEVNAME_SIZE];

  journal = SB_JOURNAL(p_s_sb) = vmalloc(sizeof (struct reiserfs_journal)) ;
  if (!journal) {
    reiserfs_warning (p_s_sb, "journal-1256: unable to get memory for journal structure") ;
    return 1 ;
  }
  memset(journal, 0, sizeof(struct reiserfs_journal)) ;
  INIT_LIST_HEAD(&journal->j_bitmap_nodes) ;
  INIT_LIST_HEAD (&journal->j_prealloc_list);
  INIT_LIST_HEAD(&journal->j_working_list);
  INIT_LIST_HEAD(&journal->j_journal_list);
  journal->j_persistent_trans = 0;
  if (reiserfs_allocate_list_bitmaps(p_s_sb,
                                     journal->j_list_bitmap,
                                     SB_BMAP_NR(p_s_sb)))
    goto free_and_return ;
  allocate_bitmap_nodes(p_s_sb) ;

  /* reserved for journal area support */
  SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) = (old_format ?
                                           REISERFS_OLD_DISK_OFFSET_IN_BYTES / p_s_sb->s_blocksize +
                                           SB_BMAP_NR(p_s_sb) + 1 :
                                           REISERFS_DISK_OFFSET_IN_BYTES / p_s_sb->s_blocksize + 2);

  /* Sanity check to see if the standard journal fits within the area
     addressed by the first bitmap block (relevant for small block sizes) */
  if ( !SB_ONDISK_JOURNAL_DEVICE( p_s_sb ) &&
       (SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb) > p_s_sb->s_blocksize * 8) ) {
    reiserfs_warning (p_s_sb, "journal-1393: journal does not fit for area "
                      "addressed by first of bitmap blocks. It starts at "
                      "%u and its size is %u. Block size %ld",
                      SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb),
                      SB_ONDISK_JOURNAL_SIZE(p_s_sb), p_s_sb->s_blocksize);
    goto free_and_return;
  }

  if( journal_init_dev( p_s_sb, journal, j_dev_name ) != 0 ) {
    reiserfs_warning (p_s_sb, "sh-462: unable to initialize journal device");
    goto free_and_return;
  }

  rs = SB_DISK_SUPER_BLOCK(p_s_sb);

  /* read journal header */
  bhjh = journal_bread(p_s_sb,
                       SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb));
  if (!bhjh) {
    reiserfs_warning (p_s_sb, "sh-459: unable to read journal header");
    goto free_and_return;
  }
  jh = (struct reiserfs_journal_header *)(bhjh->b_data);

  /* make sure the journal matches the super block */
  if (is_reiserfs_jr(rs) && (le32_to_cpu(jh->jh_journal.jp_journal_magic) != sb_jp_journal_magic(rs))) {
    reiserfs_warning (p_s_sb, "sh-460: journal header magic %x "
                      "(device %s) does not match the magic found in the "
                      "super block %x",
                      jh->jh_journal.jp_journal_magic,
                      bdevname( journal->j_dev_bd, b),
                      sb_jp_journal_magic(rs));
    brelse (bhjh);
    goto free_and_return;
  }

  journal->j_trans_max = le32_to_cpu (jh->jh_journal.jp_journal_trans_max);
  journal->j_max_batch = le32_to_cpu (jh->jh_journal.jp_journal_max_batch);
  journal->j_max_commit_age = le32_to_cpu (jh->jh_journal.jp_journal_max_commit_age);
  journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE;
  if (journal->j_trans_max) {
    /* make sure these parameters are sane; clamp them if they are not */
    __u32 initial = journal->j_trans_max;
    __u32 ratio = 1;

    if (p_s_sb->s_blocksize < 4096)
      ratio = 4096 / p_s_sb->s_blocksize;

    if (SB_ONDISK_JOURNAL_SIZE(p_s_sb)/journal->j_trans_max < JOURNAL_MIN_RATIO)
      journal->j_trans_max = SB_ONDISK_JOURNAL_SIZE(p_s_sb) / JOURNAL_MIN_RATIO;
    if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio)
      journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT / ratio;
    if (journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio)
      journal->j_trans_max = JOURNAL_TRANS_MIN_DEFAULT / ratio;

    if (journal->j_trans_max != initial)
      reiserfs_warning (p_s_sb, "sh-461: journal_init: wrong transaction max size (%u). Changed to %u",
                        initial, journal->j_trans_max);

    journal->j_max_batch = journal->j_trans_max*
                           JOURNAL_MAX_BATCH_DEFAULT/JOURNAL_TRANS_MAX_DEFAULT;
  }

  if (!journal->j_trans_max) {
    /* the filesystem was created by an old version of mkreiserfs,
       so this field contains a zero value */
    journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT ;
    journal->j_max_batch = JOURNAL_MAX_BATCH_DEFAULT ;
    journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE ;

    /* for blocksize >= 4096 - max transaction size is 1024. For block size < 4096
       trans max size is decreased proportionally */
    if (p_s_sb->s_blocksize < 4096) {
      journal->j_trans_max /= (4096 / p_s_sb->s_blocksize) ;
      journal->j_max_batch = (journal->j_trans_max) * 9 / 10 ;
    }
  }
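  /* worked example (illustrative only): with a 1024-byte block size the
   * ratio above is 4, so an on-disk j_trans_max is clamped into the range
   * [JOURNAL_TRANS_MIN_DEFAULT / 4, JOURNAL_TRANS_MAX_DEFAULT / 4], and
   * the old-format default path yields j_trans_max = 1024 / 4 = 256 with
   * j_max_batch at 9/10 of that
   */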
  journal->j_default_max_commit_age = journal->j_max_commit_age;

  if (commit_max_age != 0) {
    journal->j_max_commit_age = commit_max_age;
    journal->j_max_trans_age = commit_max_age;
  }

  reiserfs_info (p_s_sb, "journal params: device %s, size %u, "
                 "journal first block %u, max trans len %u, max batch %u, "
                 "max commit age %u, max trans age %u\n",
                 bdevname( journal->j_dev_bd, b),
                 SB_ONDISK_JOURNAL_SIZE(p_s_sb),
                 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                 journal->j_trans_max,
                 journal->j_max_batch,
                 journal->j_max_commit_age,
                 journal->j_max_trans_age);

  brelse (bhjh);

  journal->j_list_bitmap_index = 0 ;
  journal_list_init(p_s_sb) ;

  memset(journal->j_list_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)) ;

  INIT_LIST_HEAD(&journal->j_dirty_buffers) ;
  spin_lock_init(&journal->j_dirty_buffers_lock) ;

  journal->j_start = 0 ;
  journal->j_len = 0 ;
  journal->j_len_alloc = 0 ;
  atomic_set(&(journal->j_wcount), 0) ;
  atomic_set(&(journal->j_async_throttle), 0) ;
  journal->j_bcount = 0 ;
  journal->j_trans_start_time = 0 ;
  journal->j_last = NULL ;
  journal->j_first = NULL ;
  init_waitqueue_head(&(journal->j_join_wait)) ;
  sema_init(&journal->j_lock, 1);
  sema_init(&journal->j_flush_sem, 1);

  journal->j_trans_id = 10 ;
  journal->j_mount_id = 10 ;
  journal->j_state = 0 ;
  atomic_set(&(journal->j_jlock), 0) ;
  journal->j_cnode_free_list = allocate_cnodes(num_cnodes) ;
  journal->j_cnode_free_orig = journal->j_cnode_free_list ;
  journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0 ;
  journal->j_cnode_used = 0 ;
  journal->j_must_wait = 0 ;
  init_journal_hash(p_s_sb) ;
  jl = journal->j_current_jl;
  jl->j_list_bitmap = get_list_bitmap(p_s_sb, jl);
  if (!jl->j_list_bitmap) {
    reiserfs_warning(p_s_sb, "journal-2005, get_list_bitmap failed for journal list 0") ;
    goto free_and_return;
  }
  if (journal_read(p_s_sb) < 0) {
    reiserfs_warning(p_s_sb, "Replay Failure, unable to mount") ;
    goto free_and_return;
  }

  reiserfs_mounted_fs_count++ ;
  if (reiserfs_mounted_fs_count <= 1)
    commit_wq = create_workqueue("reiserfs");

  INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb);
  return 0 ;
free_and_return:
  free_journal_ram(p_s_sb);
  return 1;
}
/*
** test for a polite end of the current transaction. Used by file_write, and should
** be used by delete to make sure they don't write more than can fit inside a single
** transaction
*/
int journal_transaction_should_end(struct reiserfs_transaction_handle *th, int new_alloc) {
  struct reiserfs_journal *journal = SB_JOURNAL (th->t_super);
  time_t now = get_seconds() ;
  /* cannot restart while nested */
  BUG_ON (!th->t_trans_id);
  if (th->t_refcount > 1)
    return 0 ;
  if ( journal->j_must_wait > 0 ||
       (journal->j_len_alloc + new_alloc) >= journal->j_max_batch ||
       atomic_read(&(journal->j_jlock)) ||
       (now - journal->j_trans_start_time) > journal->j_max_trans_age ||
       journal->j_cnode_free < (journal->j_trans_max * 3)) {
    return 1 ;
  }
  return 0 ;
}
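/* hypothetical caller pattern, sketched here for illustration only:
 *
 *   if (journal_transaction_should_end(th, blocks_for_next_item)) {
 *     ... end the handle and begin a fresh one before logging more ...
 *   }
 *
 * write loops poll this between items so that a single handle never
 * outgrows j_max_batch or j_max_trans_age
 */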
/* this must be called inside a transaction, and requires the
** kernel_lock to be held
*/
void reiserfs_block_writes(struct reiserfs_transaction_handle *th) {
  struct reiserfs_journal *journal = SB_JOURNAL (th->t_super);
  BUG_ON (!th->t_trans_id);
  journal->j_must_wait = 1 ;
  set_bit(J_WRITERS_BLOCKED, &journal->j_state) ;
  return ;
}

/* this must be called without a transaction started, and does not
** require BKL
*/
void reiserfs_allow_writes(struct super_block *s) {
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  clear_bit(J_WRITERS_BLOCKED, &journal->j_state) ;
  wake_up(&journal->j_join_wait) ;
}

/* this must be called without a transaction started, and does not
** require BKL
*/
void reiserfs_wait_on_write_block(struct super_block *s) {
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  wait_event(journal->j_join_wait,
             !test_bit(J_WRITERS_BLOCKED, &journal->j_state)) ;
}
static void queue_log_writer(struct super_block *s) {
  wait_queue_t wait;
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  set_bit(J_WRITERS_QUEUED, &journal->j_state);

  /*
   * we don't want to use wait_event here because
   * we only want to wait once.
   */
  init_waitqueue_entry(&wait, current);
  add_wait_queue(&journal->j_join_wait, &wait);
  set_current_state(TASK_UNINTERRUPTIBLE);
  if (test_bit(J_WRITERS_QUEUED, &journal->j_state))
    schedule();
  current->state = TASK_RUNNING;
  remove_wait_queue(&journal->j_join_wait, &wait);
}

static void wake_queued_writers(struct super_block *s) {
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state))
    wake_up(&journal->j_join_wait);
}
static void let_transaction_grow(struct super_block *sb,
                                 unsigned long trans_id)
{
  struct reiserfs_journal *journal = SB_JOURNAL (sb);
  unsigned long bcount = journal->j_bcount;
  while(1) {
    set_current_state(TASK_UNINTERRUPTIBLE);
    schedule_timeout(1);
    journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
    while ((atomic_read(&journal->j_wcount) > 0 ||
            atomic_read(&journal->j_jlock)) &&
           journal->j_trans_id == trans_id) {
      queue_log_writer(sb);
    }
    if (journal->j_trans_id != trans_id)
      break;
    if (bcount == journal->j_bcount)
      break;
    bcount = journal->j_bcount;
  }
}
  2372. /* join == true if you must join an existing transaction.
  2373. ** join == false if you can deal with waiting for others to finish
  2374. **
  2375. ** this will block until the transaction is joinable. send the number of blocks you
** expect to use in nblocks.
*/
static int do_journal_begin_r(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks, int join) {
  time_t now = get_seconds() ;
  int old_trans_id ;
  struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
  struct reiserfs_transaction_handle myth;
  int sched_count = 0;
  int retval;

  reiserfs_check_lock_depth(p_s_sb, "journal_begin") ;
  PROC_INFO_INC( p_s_sb, journal.journal_being );

  /* set here for journal_join */
  th->t_refcount = 1;
  th->t_super = p_s_sb ;

relock:
  lock_journal(p_s_sb) ;
  if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted (journal)) {
    unlock_journal (p_s_sb);
    retval = journal->j_errno;
    goto out_fail;
  }
  journal->j_bcount++;

  if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
    unlock_journal(p_s_sb) ;
    reiserfs_wait_on_write_block(p_s_sb) ;
    PROC_INFO_INC( p_s_sb, journal.journal_relock_writers );
    goto relock ;
  }
  now = get_seconds();

  /* if there is no room in the journal, OR
  ** if this transaction is too old and we weren't called joinable,
  ** wait for it to finish before beginning. We don't sleep if there
  ** aren't other writers.
  */
  if ( (!join && journal->j_must_wait > 0) ||
       (!join && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch) ||
       (!join && atomic_read(&journal->j_wcount) > 0 && journal->j_trans_start_time > 0 &&
        (now - journal->j_trans_start_time) > journal->j_max_trans_age) ||
       (!join && atomic_read(&journal->j_jlock)) ||
       (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {

    old_trans_id = journal->j_trans_id;
    unlock_journal(p_s_sb) ; /* allow others to finish this transaction */

    if (!join && (journal->j_len_alloc + nblocks + 2) >=
        journal->j_max_batch &&
        ((journal->j_len + nblocks + 2) * 100) < (journal->j_len_alloc * 75))
    {
      if (atomic_read(&journal->j_wcount) > 10) {
        sched_count++;
        queue_log_writer(p_s_sb);
        goto relock;
      }
    }
    /* don't mess with joining the transaction if all we have to do is
     * wait for someone else to do a commit
     */
    if (atomic_read(&journal->j_jlock)) {
      while (journal->j_trans_id == old_trans_id &&
             atomic_read(&journal->j_jlock)) {
        queue_log_writer(p_s_sb);
      }
      goto relock;
    }
    retval = journal_join(&myth, p_s_sb, 1) ;
    if (retval)
      goto out_fail;

    /* someone might have ended the transaction while we joined */
    if (old_trans_id != journal->j_trans_id) {
      retval = do_journal_end(&myth, p_s_sb, 1, 0) ;
    } else {
      retval = do_journal_end(&myth, p_s_sb, 1, COMMIT_NOW) ;
    }
    if (retval)
      goto out_fail;

    PROC_INFO_INC( p_s_sb, journal.journal_relock_wcount );
    goto relock ;
  }
  /* we are the first writer, set trans_id */
  if (journal->j_trans_start_time == 0) {
    journal->j_trans_start_time = get_seconds();
  }
  atomic_inc(&(journal->j_wcount)) ;
  journal->j_len_alloc += nblocks ;
  th->t_blocks_logged = 0 ;
  th->t_blocks_allocated = nblocks ;
  th->t_trans_id = journal->j_trans_id ;
  unlock_journal(p_s_sb) ;
  INIT_LIST_HEAD (&th->t_list);
  return 0 ;

out_fail:
  memset (th, 0, sizeof (*th));
  /* Re-set th->t_super, so we can properly keep track of how many
   * persistent transactions there are. We need to do this so if this
   * call is part of a failed restart_transaction, we can free it later */
  th->t_super = p_s_sb;
  return retval;
}
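
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the caller-side pairing that do_journal_begin_r() sets up. journal_begin()
 * reserves nblocks log blocks on the handle and journal_end() releases them.
 * The helper name and the single-block reservation are assumptions for
 * illustration only.
 */
static int __attribute__ ((unused))
example_begin_end(struct super_block *sb)
{
  struct reiserfs_transaction_handle th;
  int err;

  err = journal_begin(&th, sb, 1);  /* reserve 1 block in the log */
  if (err)
    return err;
  /* ... journal_mark_dirty() any modified buffers here ... */
  return journal_end(&th, sb, 1);   /* drops the handle's reservation */
}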

struct reiserfs_transaction_handle *
reiserfs_persistent_transaction(struct super_block *s, int nblocks) {
  int ret ;
  struct reiserfs_transaction_handle *th ;

  /* if we're nesting into an existing transaction, it will be
  ** persistent on its own
  */
  if (reiserfs_transaction_running(s)) {
    th = current->journal_info ;
    th->t_refcount++ ;
    if (th->t_refcount < 2) {
      BUG() ;
    }
    return th ;
  }
  th = reiserfs_kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS, s) ;
  if (!th)
    return NULL;
  ret = journal_begin(th, s, nblocks) ;
  if (ret) {
    reiserfs_kfree(th, sizeof(struct reiserfs_transaction_handle), s) ;
    return NULL;
  }
  SB_JOURNAL(s)->j_persistent_trans++;
  return th ;
}

int
reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th) {
  struct super_block *s = th->t_super;
  int ret = 0;

  if (th->t_trans_id)
    ret = journal_end(th, th->t_super, th->t_blocks_allocated);
  else
    ret = -EIO;

  if (th->t_refcount == 0) {
    SB_JOURNAL(s)->j_persistent_trans--;
    reiserfs_kfree(th, sizeof(struct reiserfs_transaction_handle), s) ;
  }
  return ret;
}
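
/*
 * Illustrative sketch (editorial addition): a persistent handle lets a
 * transaction outlive the stack frame that opened it. The function below
 * is hypothetical and only shows the alloc/teardown pairing.
 */
static int __attribute__ ((unused))
example_persistent(struct super_block *s)
{
  struct reiserfs_transaction_handle *th;

  th = reiserfs_persistent_transaction(s, 1);
  if (!th)
    return -ENOMEM;
  /* ... the handle can be stashed and used after this frame returns ... */
  return reiserfs_end_persistent_transaction(th);
}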

static int journal_join(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
  struct reiserfs_transaction_handle *cur_th = current->journal_info;

  /* this keeps do_journal_end from NULLing out the current->journal_info
  ** pointer
  */
  th->t_handle_save = cur_th ;
  if (cur_th && cur_th->t_refcount > 1) {
    BUG() ;
  }
  return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_JOIN) ;
}

int journal_join_abort(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
  struct reiserfs_transaction_handle *cur_th = current->journal_info;

  /* this keeps do_journal_end from NULLing out the current->journal_info
  ** pointer
  */
  th->t_handle_save = cur_th ;
  if (cur_th && cur_th->t_refcount > 1) {
    BUG() ;
  }
  return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_ABORT) ;
}

int journal_begin(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
  struct reiserfs_transaction_handle *cur_th = current->journal_info ;
  int ret ;

  th->t_handle_save = NULL ;
  if (cur_th) {
    /* we are nesting into the current transaction */
    if (cur_th->t_super == p_s_sb) {
      BUG_ON (!cur_th->t_refcount);
      cur_th->t_refcount++ ;
      memcpy(th, cur_th, sizeof(*th));
      if (th->t_refcount <= 1)
        reiserfs_warning (p_s_sb, "BAD: refcount <= 1, but journal_info != 0");
      return 0;
    } else {
      /* we've ended up with a handle from a different filesystem.
      ** save it and restore on journal_end. This should never
      ** really happen...
      */
      reiserfs_warning(p_s_sb, "clm-2100: nesting into a different FS") ;
      th->t_handle_save = current->journal_info ;
      current->journal_info = th;
    }
  } else {
    current->journal_info = th;
  }
  ret = do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_REG) ;
  if (current->journal_info != th)
    BUG() ;

  /* I guess this boils down to being the reciprocal of clm-2100 above.
   * If do_journal_begin_r fails, we need to put it back, since journal_end
   * won't be called to do it. */
  if (ret)
    current->journal_info = th->t_handle_save;
  else
    BUG_ON (!th->t_refcount);

  return ret ;
}
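
/*
 * Illustrative sketch (editorial addition): nested journal_begin() calls on
 * the same filesystem just bump t_refcount on the running handle, so each
 * inner journal_end() unwinds one level and only the outermost end can
 * really commit. Hypothetical helper, shown for the refcount semantics only.
 */
static int __attribute__ ((unused))
example_nested(struct super_block *sb)
{
  struct reiserfs_transaction_handle outer, inner;
  int err;

  err = journal_begin(&outer, sb, 2);
  if (err)
    return err;
  /* an inner begin on the same sb piggybacks on the outer handle */
  err = journal_begin(&inner, sb, 1);
  if (!err)
    err = journal_end(&inner, sb, 1); /* refcount back to 1, no commit */
  return journal_end(&outer, sb, 2);  /* this one can really end the trans */
}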

/*
** puts bh into the current transaction. If it was already there, it
** reorders: removes the old pointers from the hash, and puts new ones in
** (to make sure replay happens in the right order).
**
** if it was dirty, cleans and files onto the clean list. I can't let it be
** dirty again until the transaction is committed.
**
** if j_len is bigger than j_len_alloc, it pushes j_len_alloc to
** j_len + JOURNAL_PER_BALANCE_CNT.
*/
int journal_mark_dirty(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, struct buffer_head *bh) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_journal_cnode *cn = NULL;
  int count_already_incd = 0 ;
  int prepared = 0 ;

  BUG_ON (!th->t_trans_id);

  PROC_INFO_INC( p_s_sb, journal.mark_dirty );
  if (th->t_trans_id != journal->j_trans_id) {
    reiserfs_panic(th->t_super, "journal-1577: handle trans id %ld != current trans id %ld\n",
                   th->t_trans_id, journal->j_trans_id);
  }
  p_s_sb->s_dirt = 1;

  prepared = test_clear_buffer_journal_prepared (bh);
  clear_buffer_journal_restore_dirty (bh);
  /* already in this transaction, we are done */
  if (buffer_journaled(bh)) {
    PROC_INFO_INC( p_s_sb, journal.mark_dirty_already );
    return 0 ;
  }

  /* this must be turned into a panic instead of a warning. We can't allow
  ** a dirty or journal_dirty or locked buffer to be logged, as some changes
  ** could get to disk too early. NOT GOOD.
  */
  if (!prepared || buffer_dirty(bh)) {
    reiserfs_warning (p_s_sb, "journal-1777: buffer %llu bad state "
                      "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
                      (unsigned long long)bh->b_blocknr, prepared ? ' ' : '!',
                      buffer_locked(bh) ? ' ' : '!',
                      buffer_dirty(bh) ? ' ' : '!',
                      buffer_journal_dirty(bh) ? ' ' : '!') ;
  }

  if (atomic_read(&(journal->j_wcount)) <= 0) {
    reiserfs_warning (p_s_sb, "journal-1409: journal_mark_dirty returning because j_wcount was %d", atomic_read(&(journal->j_wcount))) ;
    return 1 ;
  }
  /* this error means I've screwed up, and we've overflowed the transaction.
  ** Nothing can be done here, except make the FS readonly or panic.
  */
  if (journal->j_len >= journal->j_trans_max) {
    reiserfs_panic(th->t_super, "journal-1413: journal_mark_dirty: j_len (%lu) is too big\n", journal->j_len) ;
  }

  if (buffer_journal_dirty(bh)) {
    count_already_incd = 1 ;
    PROC_INFO_INC( p_s_sb, journal.mark_dirty_notjournal );
    clear_buffer_journal_dirty (bh);
  }

  if (journal->j_len > journal->j_len_alloc) {
    journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT ;
  }

  set_buffer_journaled (bh);

  /* now put this guy on the end */
  if (!cn) {
    cn = get_cnode(p_s_sb) ;
    if (!cn) {
      reiserfs_panic(p_s_sb, "get_cnode failed!\n");
    }

    if (th->t_blocks_logged == th->t_blocks_allocated) {
      th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT ;
      journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT ;
    }
    th->t_blocks_logged++ ;
    journal->j_len++ ;

    cn->bh = bh ;
    cn->blocknr = bh->b_blocknr ;
    cn->sb = p_s_sb;
    cn->jlist = NULL ;
    insert_journal_hash(journal->j_hash_table, cn) ;
    if (!count_already_incd) {
      get_bh(bh) ;
    }
  }
  cn->next = NULL ;
  cn->prev = journal->j_last ;
  cn->bh = bh ;
  if (journal->j_last) {
    journal->j_last->next = cn ;
    journal->j_last = cn ;
  } else {
    journal->j_first = cn ;
    journal->j_last = cn ;
  }
  return 0 ;
}
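
/*
 * Illustrative sketch (editorial addition): the per-buffer protocol that
 * journal_mark_dirty() expects. A buffer must be prepared (cleaned and
 * locked against writeback) before it is modified and logged; the helper
 * and its arguments are hypothetical.
 */
static int __attribute__ ((unused))
example_log_buffer(struct reiserfs_transaction_handle *th,
                   struct super_block *sb, struct buffer_head *bh)
{
  /* waits for I/O and sets the journal_prepared bit */
  reiserfs_prepare_for_journal(sb, bh, 1);
  /* ... modify bh->b_data here; the change stays out of the real
     location until the transaction commits ... */
  return journal_mark_dirty(th, sb, bh);
}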

int journal_end(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
  if (!current->journal_info && th->t_refcount > 1)
    reiserfs_warning (p_s_sb, "REISER-NESTING: th NULL, refcount %d",
                      th->t_refcount);

  if (!th->t_trans_id) {
    WARN_ON (1);
    return -EIO;
  }

  th->t_refcount--;
  if (th->t_refcount > 0) {
    struct reiserfs_transaction_handle *cur_th = current->journal_info ;

    /* we aren't allowed to close a nested transaction on a different
    ** filesystem from the one in the task struct
    */
    if (cur_th->t_super != th->t_super)
      BUG() ;

    if (th != cur_th) {
      memcpy(current->journal_info, th, sizeof(*th));
      th->t_trans_id = 0;
    }
    return 0;
  } else {
    return do_journal_end(th, p_s_sb, nblocks, 0) ;
  }
}

/* removes from the current transaction, releasing and decrementing any counters.
** also files the removed buffer directly onto the clean list
**
** called by journal_mark_freed when a block has been deleted
**
** returns 1 if it cleaned and released the buffer. 0 otherwise
*/
static int remove_from_transaction(struct super_block *p_s_sb, b_blocknr_t blocknr, int already_cleaned) {
  struct buffer_head *bh ;
  struct reiserfs_journal_cnode *cn ;
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  int ret = 0;

  cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr) ;
  if (!cn || !cn->bh) {
    return ret ;
  }
  bh = cn->bh ;
  if (cn->prev) {
    cn->prev->next = cn->next ;
  }
  if (cn->next) {
    cn->next->prev = cn->prev ;
  }
  if (cn == journal->j_first) {
    journal->j_first = cn->next ;
  }
  if (cn == journal->j_last) {
    journal->j_last = cn->prev ;
  }
  if (bh)
    remove_journal_hash(p_s_sb, journal->j_hash_table, NULL, bh->b_blocknr, 0) ;
  clear_buffer_journaled (bh); /* don't log this one */

  if (!already_cleaned) {
    clear_buffer_journal_dirty (bh);
    clear_buffer_dirty(bh);
    clear_buffer_journal_test (bh);
    put_bh(bh) ;
    if (atomic_read(&(bh->b_count)) < 0) {
      reiserfs_warning (p_s_sb, "journal-1752: remove from trans, b_count < 0");
    }
    ret = 1 ;
  }
  journal->j_len-- ;
  journal->j_len_alloc-- ;
  free_cnode(p_s_sb, cn) ;
  return ret ;
}

/*
** for any cnode in a journal list, it can only be dirtied if all the
** transactions that include it are committed to disk.
** this checks through each transaction, and returns 1 if you are allowed
** to dirty, and 0 if you aren't
**
** it is called by dirty_journal_list, which is called after
** flush_commit_list has gotten all the log blocks for a given
** transaction on disk
*/
static int can_dirty(struct reiserfs_journal_cnode *cn) {
  struct super_block *sb = cn->sb;
  b_blocknr_t blocknr = cn->blocknr ;
  struct reiserfs_journal_cnode *cur = cn->hprev ;
  int can_dirty = 1 ;

  /* first test hprev. These are all newer than cn, so any node here
  ** with the same block number and dev means this node can't be sent
  ** to disk right now.
  */
  while(cur && can_dirty) {
    if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
        cur->blocknr == blocknr) {
      can_dirty = 0 ;
    }
    cur = cur->hprev ;
  }
  /* then test hnext. These are all older than cn. As long as they
  ** are committed to the log, it is safe to write cn to disk
  */
  cur = cn->hnext ;
  while(cur && can_dirty) {
    if (cur->jlist && cur->jlist->j_len > 0 &&
        atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh &&
        cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
      can_dirty = 0 ;
    }
    cur = cur->hnext ;
  }
  return can_dirty ;
}

/* syncs the commit blocks, but does not force the real buffers to disk
** will wait until the current transaction is done/committed before returning
*/
int journal_end_sync(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);

  BUG_ON (!th->t_trans_id);
  /* you must not sync while nested, very, very bad */
  if (th->t_refcount > 1) {
    BUG() ;
  }
  if (journal->j_len == 0) {
    reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
    journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
  }
  return do_journal_end(th, p_s_sb, nblocks, COMMIT_NOW | WAIT) ;
}
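
/*
 * Illustrative sketch (editorial addition): an fsync-style forced commit.
 * journal_end_sync() pushes the commit block for the running transaction
 * and waits for it; the wrapper below is hypothetical.
 */
static int __attribute__ ((unused))
example_force_commit(struct super_block *sb)
{
  struct reiserfs_transaction_handle th;
  int err;

  err = journal_begin(&th, sb, 1);
  if (err)
    return err;
  /* ends the handle and waits until the commit block is on disk */
  return journal_end_sync(&th, sb, 1);
}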

/*
** writeback the pending async commits to disk
*/
static void flush_async_commits(void *p) {
  struct super_block *p_s_sb = p;
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_journal_list *jl;
  struct list_head *entry;

  lock_kernel();
  if (!list_empty(&journal->j_journal_list)) {
    /* last entry is the youngest, commit it and you get everything */
    entry = journal->j_journal_list.prev;
    jl = JOURNAL_LIST_ENTRY(entry);
    flush_commit_list(p_s_sb, jl, 1);
  }
  unlock_kernel();

  /*
   * this is a little racy, but there's no harm in missing
   * the filemap_fdatawrite
   */
  if (!atomic_read(&journal->j_async_throttle) && !reiserfs_is_journal_aborted (journal)) {
    atomic_inc(&journal->j_async_throttle);
    filemap_fdatawrite(p_s_sb->s_bdev->bd_inode->i_mapping);
    atomic_dec(&journal->j_async_throttle);
  }
}

/*
** flushes any old transactions to disk
** ends the current transaction if it is too old
*/
int reiserfs_flush_old_commits(struct super_block *p_s_sb) {
  time_t now ;
  struct reiserfs_transaction_handle th ;
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);

  now = get_seconds();
  /* safety check so we don't flush while we are replaying the log during
   * mount
   */
  if (list_empty(&journal->j_journal_list)) {
    return 0 ;
  }

  /* check the current transaction. If there are no writers, and it is
   * too old, finish it, and force the commit blocks to disk
   */
  if (atomic_read(&journal->j_wcount) <= 0 &&
      journal->j_trans_start_time > 0 &&
      journal->j_len > 0 &&
      (now - journal->j_trans_start_time) > journal->j_max_trans_age)
  {
    if (!journal_join(&th, p_s_sb, 1)) {
      reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
      journal_mark_dirty(&th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;

      /* we're only being called from kreiserfsd, it makes no sense to do
      ** an async commit just so kreiserfsd can do it later
      */
      do_journal_end(&th, p_s_sb, 1, COMMIT_NOW | WAIT) ;
    }
  }
  return p_s_sb->s_dirt;
}

/*
** returns 0 if do_journal_end should return right away, returns 1 if
** do_journal_end should finish the commit
**
** if the current transaction is too old, but still has writers, this will
** wait on j_join_wait until all the writers are done. By the time it wakes
** up, the transaction it was called with has already ended, so it just
** flushes the commit list and returns 0.
**
** Won't batch when flush or commit_now is set. Also won't batch when
** others are waiting on j_join_wait.
**
** Note, we can't allow the journal_end to proceed while there are still
** writers in the log.
*/
static int check_journal_end(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb,
                             unsigned long nblocks, int flags) {
  time_t now ;
  int flush = flags & FLUSH_ALL ;
  int commit_now = flags & COMMIT_NOW ;
  int wait_on_commit = flags & WAIT ;
  struct reiserfs_journal_list *jl;
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);

  BUG_ON (!th->t_trans_id);

  if (th->t_trans_id != journal->j_trans_id) {
    reiserfs_panic(th->t_super, "journal-1577: handle trans id %ld != current trans id %ld\n",
                   th->t_trans_id, journal->j_trans_id);
  }

  journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged) ;
  if (atomic_read(&(journal->j_wcount)) > 0) { /* <= 0 is allowed. unmounting might not call begin */
    atomic_dec(&(journal->j_wcount)) ;
  }

  /* BUG: deal with the case where j_len is 0, but people previously freed
  ** blocks that need to be released. That will be dealt with by the next
  ** transaction that actually writes something, but should be taken care
  ** of in this trans
  */
  if (journal->j_len == 0) {
    BUG();
  }
  /* if wcount > 0, and we are called with flush or commit_now,
  ** we wait on j_join_wait. We will wake up when the last writer has
  ** finished the transaction, and started it on its way to the disk.
  ** Then, we flush the commit or journal list, and just return 0
  ** because the rest of journal end was already done for this transaction.
  */
  if (atomic_read(&(journal->j_wcount)) > 0) {
    if (flush || commit_now) {
      unsigned trans_id ;

      jl = journal->j_current_jl;
      trans_id = jl->j_trans_id;
      if (wait_on_commit)
        jl->j_state |= LIST_COMMIT_PENDING;
      atomic_set(&(journal->j_jlock), 1) ;
      if (flush) {
        journal->j_next_full_flush = 1 ;
      }
      unlock_journal(p_s_sb) ;

      /* sleep while the current transaction is still j_jlocked */
      while(journal->j_trans_id == trans_id) {
        if (atomic_read(&journal->j_jlock)) {
          queue_log_writer(p_s_sb);
        } else {
          lock_journal(p_s_sb);
          if (journal->j_trans_id == trans_id) {
            atomic_set(&(journal->j_jlock), 1) ;
          }
          unlock_journal(p_s_sb);
        }
      }
      if (journal->j_trans_id == trans_id) {
        BUG();
      }
      if (commit_now && journal_list_still_alive(p_s_sb, trans_id) &&
          wait_on_commit)
      {
        flush_commit_list(p_s_sb, jl, 1) ;
      }
      return 0 ;
    }
    unlock_journal(p_s_sb) ;
    return 0 ;
  }

  /* deal with old transactions where we are the last writers */
  now = get_seconds();
  if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) {
    commit_now = 1 ;
    journal->j_next_async_flush = 1 ;
  }
  /* don't batch when someone is waiting on j_join_wait */
  /* don't batch when syncing the commit or flushing the whole trans */
  if (!(journal->j_must_wait > 0) && !(atomic_read(&(journal->j_jlock))) && !flush && !commit_now &&
      (journal->j_len < journal->j_max_batch) &&
      journal->j_len_alloc < journal->j_max_batch && journal->j_cnode_free > (journal->j_trans_max * 3)) {
    journal->j_bcount++ ;
    unlock_journal(p_s_sb) ;
    return 0 ;
  }

  if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
    reiserfs_panic(p_s_sb, "journal-003: journal_end: j_start (%ld) is too high\n", journal->j_start) ;
  }
  return 1 ;
}
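
/*
 * Illustrative sketch (editorial addition): the batching decision above,
 * restated as a standalone predicate. A journal_end is allowed to batch
 * (leave the transaction open for later writers) only when nothing is
 * forcing a commit and the transaction still has headroom. Hypothetical
 * helper; it mirrors the test in check_journal_end() without the locking.
 */
static int __attribute__ ((unused))
example_would_batch(struct reiserfs_journal *journal, int flush, int commit_now)
{
  return !(journal->j_must_wait > 0) &&
         !atomic_read(&journal->j_jlock) &&
         !flush && !commit_now &&
         journal->j_len < journal->j_max_batch &&
         journal->j_len_alloc < journal->j_max_batch &&
         journal->j_cnode_free > (journal->j_trans_max * 3);
}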

/*
** Does all the work that makes deleting blocks safe.
** when deleting a block marked BH_JNew, just remove it from the current
** transaction, clean its buffer_head, and move on.
**
** otherwise:
** set a bit for the block in the journal bitmap. That will prevent it from
** being allocated for unformatted nodes before this transaction has finished.
**
** mark any cnodes for this block as BLOCK_FREED, and clear their bh
** pointers. That will prevent any old transactions with this block from
** trying to flush to the real location. Since we aren't removing the cnode
** from the journal_list_hash, the block can't be reallocated yet.
**
** Then remove it from the current transaction, decrementing any counters
** and filing it on the clean list.
*/
int journal_mark_freed(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, b_blocknr_t blocknr) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_journal_cnode *cn = NULL ;
  struct buffer_head *bh = NULL ;
  struct reiserfs_list_bitmap *jb = NULL ;
  int cleaned = 0 ;

  BUG_ON (!th->t_trans_id);

  cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr);
  if (cn && cn->bh) {
    bh = cn->bh ;
    get_bh(bh) ;
  }
  /* if it is journal new, we just remove it from this transaction */
  if (bh && buffer_journal_new(bh)) {
    clear_buffer_journal_new (bh);
    clear_prepared_bits(bh) ;
    reiserfs_clean_and_file_buffer(bh) ;
    cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned) ;
  } else {
    /* set the bit for this block in the journal bitmap for this transaction */
    jb = journal->j_current_jl->j_list_bitmap;
    if (!jb) {
      reiserfs_panic(p_s_sb, "journal-1702: journal_mark_freed, journal_list_bitmap is NULL\n") ;
    }
    set_bit_in_list_bitmap(p_s_sb, blocknr, jb) ;

    /* Note, the entire while loop is not allowed to schedule. */
    if (bh) {
      clear_prepared_bits(bh) ;
      reiserfs_clean_and_file_buffer(bh) ;
    }
    cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned) ;

    /* find all older transactions with this block, make sure they don't try to write it out */
    cn = get_journal_hash_dev(p_s_sb, journal->j_list_hash_table, blocknr) ;
    while (cn) {
      if (p_s_sb == cn->sb && blocknr == cn->blocknr) {
        set_bit(BLOCK_FREED, &cn->state) ;
        if (cn->bh) {
          if (!cleaned) {
            /* remove_from_transaction will brelse the buffer if it was
            ** in the current trans
            */
            clear_buffer_journal_dirty (cn->bh);
            clear_buffer_dirty(cn->bh);
            clear_buffer_journal_test(cn->bh);
            cleaned = 1 ;
            put_bh(cn->bh) ;
            if (atomic_read(&(cn->bh->b_count)) < 0) {
              reiserfs_warning (p_s_sb, "journal-2138: cn->bh->b_count < 0");
            }
          }
          if (cn->jlist) { /* since we are clearing the bh, we MUST dec nonzerolen */
            atomic_dec(&(cn->jlist->j_nonzerolen)) ;
          }
          cn->bh = NULL ;
        }
      }
      cn = cn->hnext ;
    }
  }

  if (bh) {
    put_bh(bh) ; /* get_hash grabs the buffer */
    if (atomic_read(&(bh->b_count)) < 0) {
      reiserfs_warning (p_s_sb, "journal-2165: bh->b_count < 0");
    }
  }
  return 0 ;
}
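
/*
 * Illustrative sketch (editorial addition): freeing a block inside a
 * transaction. journal_mark_freed() pins the block number in the journal
 * list bitmap so it cannot be handed out again until the transaction is
 * safely on disk; the wrapper is hypothetical.
 */
static int __attribute__ ((unused))
example_free_block(struct reiserfs_transaction_handle *th,
                   struct super_block *sb, b_blocknr_t blocknr)
{
  /* ... clear the block's bit in the on-disk block bitmap here ... */
  return journal_mark_freed(th, sb, blocknr);
}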

void reiserfs_update_inode_transaction(struct inode *inode) {
  struct reiserfs_journal *journal = SB_JOURNAL (inode->i_sb);

  REISERFS_I(inode)->i_jl = journal->j_current_jl;
  REISERFS_I(inode)->i_trans_id = journal->j_trans_id ;
}

/*
 * returns -1 on error, 0 if no commits/barriers were done and 1
 * if a transaction was actually committed and the barrier was done
 */
static int __commit_trans_jl(struct inode *inode, unsigned long id,
                             struct reiserfs_journal_list *jl)
{
  struct reiserfs_transaction_handle th ;
  struct super_block *sb = inode->i_sb ;
  struct reiserfs_journal *journal = SB_JOURNAL (sb);
  int ret = 0;

  /* is it from the current transaction, or from an unknown transaction? */
  if (id == journal->j_trans_id) {
    jl = journal->j_current_jl;
    /* try to let other writers come in and grow this transaction */
    let_transaction_grow(sb, id);
    if (journal->j_trans_id != id) {
      goto flush_commit_only;
    }

    ret = journal_begin(&th, sb, 1) ;
    if (ret)
      return ret;

    /* someone might have ended this transaction while we joined */
    if (journal->j_trans_id != id) {
      reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1) ;
      journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb)) ;
      ret = journal_end(&th, sb, 1) ;
      goto flush_commit_only;
    }

    ret = journal_end_sync(&th, sb, 1) ;
    if (!ret)
      ret = 1;

  } else {
    /* this gets tricky, we have to make sure the journal list in
     * the inode still exists. We know the list is still around
     * if we've got a larger transaction id than the oldest list
     */
flush_commit_only:
    if (journal_list_still_alive(inode->i_sb, id)) {
      /*
       * we only set ret to 1 when we know for sure
       * the barrier hasn't been started yet on the commit
       * block.
       */
      if (atomic_read(&jl->j_commit_left) > 1)
        ret = 1;
      flush_commit_list(sb, jl, 1) ;
      if (journal->j_errno)
        ret = journal->j_errno;
    }
  }
  /* otherwise the list is gone, and long since committed */
  return ret;
}

int reiserfs_commit_for_inode(struct inode *inode) {
  unsigned long id = REISERFS_I(inode)->i_trans_id;
  struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl;

  /* for the whole inode, assume an unset id means it was
   * changed in the current transaction. More conservative
   */
  if (!id || !jl) {
    reiserfs_update_inode_transaction(inode) ;
    id = REISERFS_I(inode)->i_trans_id;
    /* jl will be updated in __commit_trans_jl */
  }
  return __commit_trans_jl(inode, id, jl);
}
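
/*
 * Illustrative sketch (editorial addition): how the two halves fit together
 * on the fsync path. The write side tags the inode with the running
 * transaction; fsync later commits exactly that transaction. Hypothetical
 * helper name.
 */
static int __attribute__ ((unused))
example_fsync_path(struct inode *inode)
{
  /* at modify time, remember which transaction touched the inode */
  reiserfs_update_inode_transaction(inode);

  /* ... later, from fsync: commit that transaction, or just flush its
     commit block if it has already ended ... */
  return reiserfs_commit_for_inode(inode);
}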

void reiserfs_restore_prepared_buffer(struct super_block *p_s_sb,
                                      struct buffer_head *bh) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);

  PROC_INFO_INC( p_s_sb, journal.restore_prepared );
  if (!bh) {
    return ;
  }
  if (test_clear_buffer_journal_restore_dirty (bh) &&
      buffer_journal_dirty(bh)) {
    struct reiserfs_journal_cnode *cn;
    cn = get_journal_hash_dev(p_s_sb,
                              journal->j_list_hash_table,
                              bh->b_blocknr);
    if (cn && can_dirty(cn)) {
      set_buffer_journal_test (bh);
      mark_buffer_dirty(bh);
    }
  }
  clear_buffer_journal_prepared (bh);
}

extern struct tree_balance *cur_tb ;
/*
** before we can change a metadata block, we have to make sure it won't
** be written to disk while we are altering it. So, we must:
**   clean it
**   wait on it.
*/
int reiserfs_prepare_for_journal(struct super_block *p_s_sb,
                                 struct buffer_head *bh, int wait) {
  PROC_INFO_INC( p_s_sb, journal.prepare );

  if (test_set_buffer_locked(bh)) {
    if (!wait)
      return 0;
    lock_buffer(bh);
  }
  set_buffer_journal_prepared (bh);
  if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) {
    clear_buffer_journal_test (bh);
    set_buffer_journal_restore_dirty (bh);
  }
  unlock_buffer(bh);
  return 1;
}
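
/*
 * Illustrative sketch (editorial addition): the abort path for a prepared
 * buffer. If a buffer was prepared but the caller decides not to log it,
 * reiserfs_restore_prepared_buffer() undoes the preparation and re-dirties
 * the buffer when that is safe; hypothetical wrapper.
 */
static void __attribute__ ((unused))
example_prepare_abort(struct super_block *sb, struct buffer_head *bh)
{
  if (!reiserfs_prepare_for_journal(sb, bh, 0)) /* non-blocking try */
    return;  /* buffer was busy, nothing was prepared */
  /* ... decide the change is not needed after all ... */
  reiserfs_restore_prepared_buffer(sb, bh);
}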

static void flush_old_journal_lists(struct super_block *s) {
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  struct reiserfs_journal_list *jl;
  struct list_head *entry;
  time_t now = get_seconds();

  while(!list_empty(&journal->j_journal_list)) {
    entry = journal->j_journal_list.next;
    jl = JOURNAL_LIST_ENTRY(entry);
    /* this check should always be run, to send old lists to disk */
    if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4))) {
      flush_used_journal_lists(s, jl);
    } else {
      break;
    }
  }
}

/*
** long and ugly. If flush, will not return until all commit
** blocks and all real buffers in the trans are on disk.
** If no_async, won't return until all commit blocks are on disk.
**
** keep reading, there are comments as you go along
**
** If the journal is aborted, we just clean up. Things like flushing
** journal lists, etc just won't happen.
*/
static int do_journal_end(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks,
                          int flags) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_journal_cnode *cn, *next, *jl_cn;
  struct reiserfs_journal_cnode *last_cn = NULL;
  struct reiserfs_journal_desc *desc ;
  struct reiserfs_journal_commit *commit ;
  struct buffer_head *c_bh ; /* commit bh */
  struct buffer_head *d_bh ; /* desc bh */
  int cur_write_start = 0 ; /* start index of current log write */
  int old_start ;
  int i ;
  int flush = flags & FLUSH_ALL ;
  int wait_on_commit = flags & WAIT ;
  struct reiserfs_journal_list *jl, *temp_jl;
  struct list_head *entry, *safe;
  unsigned long jindex;
  unsigned long commit_trans_id;
  int trans_half;

  BUG_ON (th->t_refcount > 1);
  BUG_ON (!th->t_trans_id);

  current->journal_info = th->t_handle_save;
  reiserfs_check_lock_depth(p_s_sb, "journal end");
  if (journal->j_len == 0) {
    reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
    journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
  }

  lock_journal(p_s_sb) ;
  if (journal->j_next_full_flush) {
    flags |= FLUSH_ALL ;
    flush = 1 ;
  }
  if (journal->j_next_async_flush) {
    flags |= COMMIT_NOW | WAIT;
    wait_on_commit = 1;
  }

  /* check_journal_end locks the journal, and unlocks if it does not return 1
  ** it tells us if we should continue with the journal_end, or just return
  */
  if (!check_journal_end(th, p_s_sb, nblocks, flags)) {
    p_s_sb->s_dirt = 1;
    wake_queued_writers(p_s_sb);
    reiserfs_async_progress_wait(p_s_sb);
    goto out ;
  }

  /* check_journal_end might set these, check again */
  if (journal->j_next_full_flush) {
    flush = 1 ;
  }

  /*
  ** j_must_wait means we have to flush the log blocks, and the real blocks
  ** for this transaction
  */
  if (journal->j_must_wait > 0) {
    flush = 1 ;
  }

#ifdef REISERFS_PREALLOCATE
  /* quota ops might need to nest, setup the journal_info pointer for them */
  current->journal_info = th ;
  reiserfs_discard_all_prealloc(th); /* it should not involve new blocks into
                                      * the transaction */
  current->journal_info = th->t_handle_save ;
#endif

  /* setup description block */
  d_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + journal->j_start) ;
  set_buffer_uptodate(d_bh);
  desc = (struct reiserfs_journal_desc *)(d_bh)->b_data ;
  memset(d_bh->b_data, 0, d_bh->b_size) ;
  memcpy(get_journal_desc_magic (d_bh), JOURNAL_DESC_MAGIC, 8) ;
  set_desc_trans_id(desc, journal->j_trans_id) ;

  /* setup commit block. Don't write (keep it clean too) this one until after everyone else is written */
  c_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                        ((journal->j_start + journal->j_len + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
  commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
  memset(c_bh->b_data, 0, c_bh->b_size) ;
  set_commit_trans_id(commit, journal->j_trans_id) ;
  set_buffer_uptodate(c_bh) ;

  /* init this journal list */
  jl = journal->j_current_jl;

  /* we lock the commit before doing anything because
   * we want to make sure nobody tries to run flush_commit_list until
   * the new transaction is fully setup, and we've already flushed the
   * ordered bh list
   */
  down(&jl->j_commit_lock);

  /* save the transaction id in case we need to commit it later */
  commit_trans_id = jl->j_trans_id;

  atomic_set(&jl->j_older_commits_done, 0) ;
  jl->j_trans_id = journal->j_trans_id ;
  jl->j_timestamp = journal->j_trans_start_time ;
  jl->j_commit_bh = c_bh ;
  jl->j_start = journal->j_start ;
  jl->j_len = journal->j_len ;
  atomic_set(&jl->j_nonzerolen, journal->j_len) ;
  atomic_set(&jl->j_commit_left, journal->j_len + 2);
  jl->j_realblock = NULL ;

  /* The ENTIRE FOR LOOP MUST not cause schedule to occur.
  ** for each real block, add it to the journal list hash,
  ** copy into real block index array in the commit or desc block
  */
  trans_half = journal_trans_half(p_s_sb->s_blocksize);
  for (i = 0, cn = journal->j_first ; cn ; cn = cn->next, i++) {
    if (buffer_journaled (cn->bh)) {
      jl_cn = get_cnode(p_s_sb) ;
      if (!jl_cn) {
        reiserfs_panic(p_s_sb, "journal-1676, get_cnode returned NULL\n") ;
      }
      if (i == 0) {
        jl->j_realblock = jl_cn ;
      }
      jl_cn->prev = last_cn ;
      jl_cn->next = NULL ;
      if (last_cn) {
        last_cn->next = jl_cn ;
      }
      last_cn = jl_cn ;
      /* make sure the block we are trying to log is not a block
         of journal or reserved area */
      if (is_block_in_log_or_reserved_area(p_s_sb, cn->bh->b_blocknr)) {
        reiserfs_panic(p_s_sb, "journal-2332: Trying to log block %lu, which is a log block\n", cn->bh->b_blocknr) ;
      }
      jl_cn->blocknr = cn->bh->b_blocknr ;
      jl_cn->state = 0 ;
      jl_cn->sb = p_s_sb;
      jl_cn->bh = cn->bh ;
      jl_cn->jlist = jl;
      insert_journal_hash(journal->j_list_hash_table, jl_cn) ;
      if (i < trans_half) {
        desc->j_realblock[i] = cpu_to_le32(cn->bh->b_blocknr) ;
      } else {
        commit->j_realblock[i - trans_half] = cpu_to_le32(cn->bh->b_blocknr) ;
      }
    } else {
      i-- ;
    }
  }
  set_desc_trans_len(desc, journal->j_len) ;
  set_desc_mount_id(desc, journal->j_mount_id) ;
  set_desc_trans_id(desc, journal->j_trans_id) ;
  set_commit_trans_len(commit, journal->j_len);

  /* special check in case all buffers in the journal were marked for not logging */
  if (journal->j_len == 0) {
    BUG();
  }

  /* we're about to dirty all the log blocks, mark the description block
   * dirty now too. Don't mark the commit block dirty until all the
   * others are on disk
   */
  mark_buffer_dirty(d_bh);

  /* first data block is j_start + 1, so add one to cur_write_start wherever you use it */
  cur_write_start = journal->j_start ;
  cn = journal->j_first ;
  jindex = 1 ; /* start at one so we don't get the desc again */
  while(cn) {
    clear_buffer_journal_new (cn->bh);
    /* copy all the real blocks into log area. dirty log blocks */
    if (buffer_journaled (cn->bh)) {
      struct buffer_head *tmp_bh ;
      char *addr;
      struct page *page;
      tmp_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                              ((cur_write_start + jindex) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
      set_buffer_uptodate(tmp_bh);
      page = cn->bh->b_page;
      addr = kmap(page);
      memcpy(tmp_bh->b_data, addr + offset_in_page(cn->bh->b_data),
             cn->bh->b_size);
      kunmap(page);
      mark_buffer_dirty(tmp_bh);
      jindex++ ;
      set_buffer_journal_dirty (cn->bh);
      clear_buffer_journaled (cn->bh);
    } else {
      /* JDirty cleared sometime during transaction. don't log this one */
      reiserfs_warning(p_s_sb, "journal-2048: do_journal_end: BAD, buffer in journal hash, but not JDirty!") ;
      brelse(cn->bh) ;
    }
    next = cn->next ;
    free_cnode(p_s_sb, cn) ;
    cn = next ;
    cond_resched();
  }

  /* we are done with both the c_bh and d_bh, but
  ** c_bh must be written after all other commit blocks,
  ** so we dirty/release c_bh in flush_commit_list, with commit_left <= 1.
  */

  journal->j_current_jl = alloc_journal_list(p_s_sb);

  /* now it is safe to insert this transaction on the main list */
  list_add_tail(&jl->j_list, &journal->j_journal_list);
  list_add_tail(&jl->j_working_list, &journal->j_working_list);
  journal->j_num_work_lists++;

  /* reset journal values for the next transaction */
  old_start = journal->j_start ;
  journal->j_start = (journal->j_start + journal->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb);
  atomic_set(&(journal->j_wcount), 0) ;
  journal->j_bcount = 0 ;
  journal->j_last = NULL ;
  journal->j_first = NULL ;
  journal->j_len = 0 ;
  journal->j_trans_start_time = 0 ;
  journal->j_trans_id++ ;
  journal->j_current_jl->j_trans_id = journal->j_trans_id;
  journal->j_must_wait = 0 ;
  journal->j_len_alloc = 0 ;
  journal->j_next_full_flush = 0 ;
  journal->j_next_async_flush = 0 ;
  init_journal_hash(p_s_sb) ;

  // make sure reiserfs_add_jh sees the new current_jl before we
  // write out the tails
  smp_mb();

  /* tail conversion targets have to hit the disk before we end the
   * transaction. Otherwise a later transaction might repack the tail
   * before this transaction commits, leaving the data block unflushed and
   * clean; if we crash before the later transaction commits, the data
   * block is lost.
   */
  if (!list_empty(&jl->j_tail_bh_list)) {
    unlock_kernel();
    write_ordered_buffers(&journal->j_dirty_buffers_lock,
                          journal, jl, &jl->j_tail_bh_list);
    lock_kernel();
  }
  if (!list_empty(&jl->j_tail_bh_list))
    BUG();
  up(&jl->j_commit_lock);

  /* honor the flush wishes from the caller, simple commits can
  ** be done outside the journal lock, they are done below
  **
  ** if we don't flush the commit list right now, we put it into
  ** the work queue so the people waiting on the async progress work
  ** queue don't wait for this proc to flush journal lists and such.
  */
  if (flush) {
    flush_commit_list(p_s_sb, jl, 1) ;
    flush_journal_list(p_s_sb, jl, 1) ;
  } else if (!(jl->j_state & LIST_COMMIT_PENDING))
    queue_delayed_work(commit_wq, &journal->j_work, HZ/10);

  /* if the next transaction has any chance of wrapping, flush
  ** transactions that might get overwritten. If any journal lists are very
  ** old, flush them as well.
  */
first_jl:
  list_for_each_safe(entry, safe, &journal->j_journal_list) {
    temp_jl = JOURNAL_LIST_ENTRY(entry);
    if (journal->j_start <= temp_jl->j_start) {
      if ((journal->j_start + journal->j_trans_max + 1) >=
          temp_jl->j_start)
      {
        flush_used_journal_lists(p_s_sb, temp_jl);
        goto first_jl;
      } else if ((journal->j_start +
                  journal->j_trans_max + 1) <
                 SB_ONDISK_JOURNAL_SIZE(p_s_sb))
      {
        /* if we don't cross into the next transaction and we don't
         * wrap, there is no way we can overlap any later transactions
         * break now
         */
        break;
      }
    } else if ((journal->j_start +
                journal->j_trans_max + 1) >
               SB_ONDISK_JOURNAL_SIZE(p_s_sb))
    {
      if (((journal->j_start + journal->j_trans_max + 1) %
           SB_ONDISK_JOURNAL_SIZE(p_s_sb)) >= temp_jl->j_start)
      {
        flush_used_journal_lists(p_s_sb, temp_jl);
        goto first_jl;
      } else {
        /* we don't overlap anything from our start to the end of the
         * log, and our wrapped portion doesn't overlap anything at
         * the start of the log. We can break
         */
        break;
      }
    }
  }
  flush_old_journal_lists(p_s_sb);

  journal->j_current_jl->j_list_bitmap = get_list_bitmap(p_s_sb, journal->j_current_jl) ;
  if (!(journal->j_current_jl->j_list_bitmap)) {
    reiserfs_panic(p_s_sb, "journal-1996: do_journal_end, could not get a list bitmap\n") ;
  }

  atomic_set(&(journal->j_jlock), 0) ;
  unlock_journal(p_s_sb) ;
  /* wake up anybody waiting to join. */
  clear_bit(J_WRITERS_QUEUED, &journal->j_state);
  wake_up(&(journal->j_join_wait)) ;

  if (!flush && wait_on_commit &&
      journal_list_still_alive(p_s_sb, commit_trans_id)) {
    flush_commit_list(p_s_sb, jl, 1) ;
  }
out:
  reiserfs_check_lock_depth(p_s_sb, "journal end2");
  memset (th, 0, sizeof (*th));
  /* Re-set th->t_super, so we can properly keep track of how many
   * persistent transactions there are. We need to do this so if this
   * call is part of a failed restart_transaction, we can free it later */
  th->t_super = p_s_sb;

  return journal->j_errno;
}
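
/*
 * Illustrative sketch (editorial addition): the circular-log overlap test
 * used by the wrap-flush loop in do_journal_end() above. Returns nonzero
 * when a transaction starting at new_start could overwrite a list that
 * begins at list_start; hypothetical helper, unsigned arithmetic assumed.
 */
static int __attribute__ ((unused))
example_log_overlaps(unsigned long new_start, unsigned long trans_max,
                     unsigned long log_size, unsigned long list_start)
{
  unsigned long end = new_start + trans_max + 1;

  if (new_start <= list_start)  /* list lies ahead of us in the log */
    return end >= list_start;
  if (end > log_size)           /* we wrap past the end of the log */
    return (end % log_size) >= list_start;
  return 0;
}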

static void
__reiserfs_journal_abort_hard (struct super_block *sb)
{
  struct reiserfs_journal *journal = SB_JOURNAL (sb);
  if (test_bit (J_ABORTED, &journal->j_state))
    return;

  printk (KERN_CRIT "REISERFS: Aborting journal for filesystem on %s\n",
          reiserfs_bdevname (sb));

  sb->s_flags |= MS_RDONLY;
  set_bit (J_ABORTED, &journal->j_state);

#ifdef CONFIG_REISERFS_CHECK
  dump_stack();
#endif
}

static void
__reiserfs_journal_abort_soft (struct super_block *sb, int errno)
{
  struct reiserfs_journal *journal = SB_JOURNAL (sb);
  if (test_bit (J_ABORTED, &journal->j_state))
    return;

  if (!journal->j_errno)
    journal->j_errno = errno;

  __reiserfs_journal_abort_hard (sb);
}

void
reiserfs_journal_abort (struct super_block *sb, int errno)
{
  __reiserfs_journal_abort_soft (sb, errno);
}