xfs_log_recover.c

  1. /*
  2. * Copyright (c) 2000-2006 Silicon Graphics, Inc.
  3. * All Rights Reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it would be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write the Free Software Foundation,
  16. * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  17. */
  18. #include "xfs.h"
  19. #include "xfs_fs.h"
  20. #include "xfs_types.h"
  21. #include "xfs_bit.h"
  22. #include "xfs_log.h"
  23. #include "xfs_inum.h"
  24. #include "xfs_trans.h"
  25. #include "xfs_sb.h"
  26. #include "xfs_ag.h"
  27. #include "xfs_dir2.h"
  28. #include "xfs_dmapi.h"
  29. #include "xfs_mount.h"
  30. #include "xfs_error.h"
  31. #include "xfs_bmap_btree.h"
  32. #include "xfs_alloc_btree.h"
  33. #include "xfs_ialloc_btree.h"
  34. #include "xfs_dir2_sf.h"
  35. #include "xfs_attr_sf.h"
  36. #include "xfs_dinode.h"
  37. #include "xfs_inode.h"
  38. #include "xfs_inode_item.h"
  39. #include "xfs_imap.h"
  40. #include "xfs_alloc.h"
  41. #include "xfs_ialloc.h"
  42. #include "xfs_log_priv.h"
  43. #include "xfs_buf_item.h"
  44. #include "xfs_log_recover.h"
  45. #include "xfs_extfree_item.h"
  46. #include "xfs_trans_priv.h"
  47. #include "xfs_quota.h"
  48. #include "xfs_rw.h"
  49. STATIC int xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
  50. STATIC int xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
  51. STATIC void xlog_recover_insert_item_backq(xlog_recover_item_t **q,
  52. xlog_recover_item_t *item);
  53. #if defined(DEBUG)
  54. STATIC void xlog_recover_check_summary(xlog_t *);
  55. STATIC void xlog_recover_check_ail(xfs_mount_t *, xfs_log_item_t *, int);
  56. #else
  57. #define xlog_recover_check_summary(log)
  58. #define xlog_recover_check_ail(mp, lip, gen)
  59. #endif
  60. /*
  61. * Sector aligned buffer routines for buffer create/read/write/access
  62. */
  63. #define XLOG_SECTOR_ROUNDUP_BBCOUNT(log, bbs) \
  64. ( ((log)->l_sectbb_mask && (bbs & (log)->l_sectbb_mask)) ? \
  65. ((bbs + (log)->l_sectbb_mask + 1) & ~(log)->l_sectbb_mask) : (bbs) )
  66. #define XLOG_SECTOR_ROUNDDOWN_BLKNO(log, bno) ((bno) & ~(log)->l_sectbb_mask)
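/*
 * Illustration (assumed geometry, not taken from this file): with 512-byte
 * basic blocks and 2 KB log-device sectors, l_sectbb_mask == 3, so
 * XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 5) == 8 ((5 + 3 + 1) & ~3) and
 * XLOG_SECTOR_ROUNDDOWN_BLKNO(log, 5) == 4 (5 & ~3); already-aligned
 * values are returned unchanged.
 */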
  67. xfs_buf_t *
  68. xlog_get_bp(
  69. xlog_t *log,
  70. int num_bblks)
  71. {
  72. ASSERT(num_bblks > 0);
  73. if (log->l_sectbb_log) {
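/*
 * xlog_bread()/xlog_bwrite() round blk_no down and nbblks up to sector
 * boundaries, so a multi-block buffer may need room for up to one
 * extra sector of data.
 */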
  74. if (num_bblks > 1)
  75. num_bblks += XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
  76. num_bblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, num_bblks);
  77. }
  78. return xfs_buf_get_noaddr(BBTOB(num_bblks), log->l_mp->m_logdev_targp);
  79. }
  80. void
  81. xlog_put_bp(
  82. xfs_buf_t *bp)
  83. {
  84. xfs_buf_free(bp);
  85. }
  86. /*
  87. * nbblks should be uint, but oh well. Just want to catch that 32-bit length.
  88. */
  89. int
  90. xlog_bread(
  91. xlog_t *log,
  92. xfs_daddr_t blk_no,
  93. int nbblks,
  94. xfs_buf_t *bp)
  95. {
  96. int error;
  97. if (log->l_sectbb_log) {
  98. blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
  99. nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
  100. }
  101. ASSERT(nbblks > 0);
  102. ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));
  103. ASSERT(bp);
  104. XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
  105. XFS_BUF_READ(bp);
  106. XFS_BUF_BUSY(bp);
  107. XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
  108. XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);
  109. xfsbdstrat(log->l_mp, bp);
  110. if ((error = xfs_iowait(bp)))
  111. xfs_ioerror_alert("xlog_bread", log->l_mp,
  112. bp, XFS_BUF_ADDR(bp));
  113. return error;
  114. }
  115. /*
  116. * Write out the buffer at the given block for the given number of blocks.
  117. * The buffer is kept locked across the write and is returned locked.
  118. * This can only be used for synchronous log writes.
  119. */
  120. STATIC int
  121. xlog_bwrite(
  122. xlog_t *log,
  123. xfs_daddr_t blk_no,
  124. int nbblks,
  125. xfs_buf_t *bp)
  126. {
  127. int error;
  128. if (log->l_sectbb_log) {
  129. blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
  130. nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
  131. }
  132. ASSERT(nbblks > 0);
  133. ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));
  134. XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
  135. XFS_BUF_ZEROFLAGS(bp);
  136. XFS_BUF_BUSY(bp);
  137. XFS_BUF_HOLD(bp);
  138. XFS_BUF_PSEMA(bp, PRIBIO);
  139. XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
  140. XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);
  141. if ((error = xfs_bwrite(log->l_mp, bp)))
  142. xfs_ioerror_alert("xlog_bwrite", log->l_mp,
  143. bp, XFS_BUF_ADDR(bp));
  144. return error;
  145. }
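/*
 * Return a pointer into the buffer for the block that was actually
 * requested: on sector-aligned log devices xlog_bread() rounds blk_no
 * down, so the wanted data starts (blk_no & l_sectbb_mask) blocks into
 * the buffer.
 */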
  146. STATIC xfs_caddr_t
  147. xlog_align(
  148. xlog_t *log,
  149. xfs_daddr_t blk_no,
  150. int nbblks,
  151. xfs_buf_t *bp)
  152. {
  153. xfs_caddr_t ptr;
  154. if (!log->l_sectbb_log)
  155. return XFS_BUF_PTR(bp);
  156. ptr = XFS_BUF_PTR(bp) + BBTOB((int)blk_no & log->l_sectbb_mask);
  157. ASSERT(XFS_BUF_SIZE(bp) >=
  158. BBTOB(nbblks + (blk_no & log->l_sectbb_mask)));
  159. return ptr;
  160. }
  161. #ifdef DEBUG
  162. /*
  163. * dump debug superblock and log record information
  164. */
  165. STATIC void
  166. xlog_header_check_dump(
  167. xfs_mount_t *mp,
  168. xlog_rec_header_t *head)
  169. {
  170. int b;
  171. cmn_err(CE_DEBUG, "%s: SB : uuid = ", __FUNCTION__);
  172. for (b = 0; b < 16; b++)
  173. cmn_err(CE_DEBUG, "%02x", ((uchar_t *)&mp->m_sb.sb_uuid)[b]);
  174. cmn_err(CE_DEBUG, ", fmt = %d\n", XLOG_FMT);
  175. cmn_err(CE_DEBUG, " log : uuid = ");
  176. for (b = 0; b < 16; b++)
  177. cmn_err(CE_DEBUG, "%02x",((uchar_t *)&head->h_fs_uuid)[b]);
  178. cmn_err(CE_DEBUG, ", fmt = %d\n", be32_to_cpu(head->h_fmt));
  179. }
  180. #else
  181. #define xlog_header_check_dump(mp, head)
  182. #endif
  183. /*
  184. * check log record header for recovery
  185. */
  186. STATIC int
  187. xlog_header_check_recover(
  188. xfs_mount_t *mp,
  189. xlog_rec_header_t *head)
  190. {
  191. ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);
  192. /*
  193. * IRIX doesn't write the h_fmt field and leaves it zeroed
  194. * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
  195. * a dirty log created in IRIX.
  196. */
  197. if (unlikely(be32_to_cpu(head->h_fmt) != XLOG_FMT)) {
  198. xlog_warn(
  199. "XFS: dirty log written in incompatible format - can't recover");
  200. xlog_header_check_dump(mp, head);
  201. XFS_ERROR_REPORT("xlog_header_check_recover(1)",
  202. XFS_ERRLEVEL_HIGH, mp);
  203. return XFS_ERROR(EFSCORRUPTED);
  204. } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
  205. xlog_warn(
  206. "XFS: dirty log entry has mismatched uuid - can't recover");
  207. xlog_header_check_dump(mp, head);
  208. XFS_ERROR_REPORT("xlog_header_check_recover(2)",
  209. XFS_ERRLEVEL_HIGH, mp);
  210. return XFS_ERROR(EFSCORRUPTED);
  211. }
  212. return 0;
  213. }
  214. /*
  215. * read the head block of the log and check the header
  216. */
  217. STATIC int
  218. xlog_header_check_mount(
  219. xfs_mount_t *mp,
  220. xlog_rec_header_t *head)
  221. {
  222. ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);
  223. if (uuid_is_nil(&head->h_fs_uuid)) {
  224. /*
  225. * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
  226. * h_fs_uuid is nil, we assume this log was last mounted
  227. * by IRIX and continue.
  228. */
  229. xlog_warn("XFS: nil uuid in log - IRIX style log");
  230. } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
  231. xlog_warn("XFS: log has mismatched uuid - can't recover");
  232. xlog_header_check_dump(mp, head);
  233. XFS_ERROR_REPORT("xlog_header_check_mount",
  234. XFS_ERRLEVEL_HIGH, mp);
  235. return XFS_ERROR(EFSCORRUPTED);
  236. }
  237. return 0;
  238. }
  239. STATIC void
  240. xlog_recover_iodone(
  241. struct xfs_buf *bp)
  242. {
  243. xfs_mount_t *mp;
  244. ASSERT(XFS_BUF_FSPRIVATE(bp, void *));
  245. if (XFS_BUF_GETERROR(bp)) {
  246. /*
  247. * We're not going to bother about retrying
  248. * this during recovery. One strike!
  249. */
  250. mp = XFS_BUF_FSPRIVATE(bp, xfs_mount_t *);
  251. xfs_ioerror_alert("xlog_recover_iodone",
  252. mp, bp, XFS_BUF_ADDR(bp));
  253. xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
  254. }
  255. XFS_BUF_SET_FSPRIVATE(bp, NULL);
  256. XFS_BUF_CLR_IODONE_FUNC(bp);
  257. xfs_biodone(bp);
  258. }
  259. /*
  260. * This routine finds (to an approximation) the first block in the physical
  261. * log which contains the given cycle. It uses a binary search algorithm.
  262. * Note that the algorithm can not be perfect because the disk will not
  263. * necessarily be perfect.
  264. */
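/*
 * Illustration (hypothetical layout): searching for cycle 11 in a log
 * stamped | 12 12 12 | 11 11 11 |, with first_blk on a cycle-12 block
 * and *last_blk on a cycle-11 block, the loop below narrows the two
 * until they are adjacent, leaving *last_blk at (approximately) the
 * first block stamped with cycle 11.
 */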
  265. STATIC int
  266. xlog_find_cycle_start(
  267. xlog_t *log,
  268. xfs_buf_t *bp,
  269. xfs_daddr_t first_blk,
  270. xfs_daddr_t *last_blk,
  271. uint cycle)
  272. {
  273. xfs_caddr_t offset;
  274. xfs_daddr_t mid_blk;
  275. uint mid_cycle;
  276. int error;
  277. mid_blk = BLK_AVG(first_blk, *last_blk);
  278. while (mid_blk != first_blk && mid_blk != *last_blk) {
  279. if ((error = xlog_bread(log, mid_blk, 1, bp)))
  280. return error;
  281. offset = xlog_align(log, mid_blk, 1, bp);
  282. mid_cycle = xlog_get_cycle(offset);
  283. if (mid_cycle == cycle) {
  284. *last_blk = mid_blk;
  285. /* last_half_cycle == mid_cycle */
  286. } else {
  287. first_blk = mid_blk;
  288. /* first_half_cycle == mid_cycle */
  289. }
  290. mid_blk = BLK_AVG(first_blk, *last_blk);
  291. }
  292. ASSERT((mid_blk == first_blk && mid_blk+1 == *last_blk) ||
  293. (mid_blk == *last_blk && mid_blk-1 == first_blk));
  294. return 0;
  295. }
  296. /*
  297. * Check that the range of blocks does not contain the cycle number
  298. * given. The scan needs to occur from front to back and the ptr into the
  299. * region must be updated since a later routine will need to perform another
  300. * test. If the region is completely good, we end up returning the same
  301. * last block number.
  302. *
  303. * Set blkno to -1 if we encounter no errors. This is an invalid block number
  304. * since we don't ever expect logs to get this large.
  305. */
  306. STATIC int
  307. xlog_find_verify_cycle(
  308. xlog_t *log,
  309. xfs_daddr_t start_blk,
  310. int nbblks,
  311. uint stop_on_cycle_no,
  312. xfs_daddr_t *new_blk)
  313. {
  314. xfs_daddr_t i, j;
  315. uint cycle;
  316. xfs_buf_t *bp;
  317. xfs_daddr_t bufblks;
  318. xfs_caddr_t buf = NULL;
  319. int error = 0;
  320. bufblks = 1 << ffs(nbblks);
  321. while (!(bp = xlog_get_bp(log, bufblks))) {
  322. /* can't get enough memory to do everything in one big buffer */
  323. bufblks >>= 1;
  324. if (bufblks <= log->l_sectbb_log)
  325. return ENOMEM;
  326. }
  327. for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
  328. int bcount;
  329. bcount = min(bufblks, (start_blk + nbblks - i));
  330. if ((error = xlog_bread(log, i, bcount, bp)))
  331. goto out;
  332. buf = xlog_align(log, i, bcount, bp);
  333. for (j = 0; j < bcount; j++) {
  334. cycle = xlog_get_cycle(buf);
  335. if (cycle == stop_on_cycle_no) {
  336. *new_blk = i+j;
  337. goto out;
  338. }
  339. buf += BBSIZE;
  340. }
  341. }
  342. *new_blk = -1;
  343. out:
  344. xlog_put_bp(bp);
  345. return error;
  346. }
  347. /*
  348. * Potentially backup over partial log record write.
  349. *
  350. * In the typical case, last_blk is the number of the block directly after
  351. * a good log record. Therefore, we subtract one to get the block number
  352. * of the last block in the given buffer. extra_bblks contains the number
  353. * of blocks we would have read on a previous read. This happens when the
  354. * last log record is split over the end of the physical log.
  355. *
  356. * extra_bblks is the number of blocks potentially verified on a previous
  357. * call to this routine.
  358. */
  359. STATIC int
  360. xlog_find_verify_log_record(
  361. xlog_t *log,
  362. xfs_daddr_t start_blk,
  363. xfs_daddr_t *last_blk,
  364. int extra_bblks)
  365. {
  366. xfs_daddr_t i;
  367. xfs_buf_t *bp;
  368. xfs_caddr_t offset = NULL;
  369. xlog_rec_header_t *head = NULL;
  370. int error = 0;
  371. int smallmem = 0;
  372. int num_blks = *last_blk - start_blk;
  373. int xhdrs;
  374. ASSERT(start_blk != 0 || *last_blk != start_blk);
  375. if (!(bp = xlog_get_bp(log, num_blks))) {
  376. if (!(bp = xlog_get_bp(log, 1)))
  377. return ENOMEM;
  378. smallmem = 1;
  379. } else {
  380. if ((error = xlog_bread(log, start_blk, num_blks, bp)))
  381. goto out;
  382. offset = xlog_align(log, start_blk, num_blks, bp);
  383. offset += ((num_blks - 1) << BBSHIFT);
  384. }
  385. for (i = (*last_blk) - 1; i >= 0; i--) {
  386. if (i < start_blk) {
  387. /* valid log record not found */
  388. xlog_warn(
  389. "XFS: Log inconsistent (didn't find previous header)");
  390. ASSERT(0);
  391. error = XFS_ERROR(EIO);
  392. goto out;
  393. }
  394. if (smallmem) {
  395. if ((error = xlog_bread(log, i, 1, bp)))
  396. goto out;
  397. offset = xlog_align(log, i, 1, bp);
  398. }
  399. head = (xlog_rec_header_t *)offset;
  400. if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(head->h_magicno))
  401. break;
  402. if (!smallmem)
  403. offset -= BBSIZE;
  404. }
  405. /*
  406. * We hit the beginning of the physical log & still no header. Return
  407. * to caller. If caller can handle a return of -1, then this routine
  408. * will be called again for the end of the physical log.
  409. */
  410. if (i == -1) {
  411. error = -1;
  412. goto out;
  413. }
  414. /*
  415. * We have the final block of the good log (the first block
  416. * of the log record _before_ the head). So we check the uuid.
  417. */
  418. if ((error = xlog_header_check_mount(log->l_mp, head)))
  419. goto out;
  420. /*
  421. * We may have found a log record header before we expected one.
  422. * last_blk will be the 1st block # with a given cycle #. We may end
  423. * up reading an entire log record. In this case, we don't want to
  424. * reset last_blk. Only when last_blk points in the middle of a log
  425. * record do we update last_blk.
  426. */
  427. if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
  428. uint h_size = be32_to_cpu(head->h_size);
  429. xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
  430. if (h_size % XLOG_HEADER_CYCLE_SIZE)
  431. xhdrs++;
  432. } else {
  433. xhdrs = 1;
  434. }
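/*
 * The blocks from the header at 'i' up to *last_blk (plus any blocks
 * verified on a previous call) should add up to exactly one record:
 * xhdrs header blocks plus BTOBB(h_len) data blocks.  If they don't,
 * *last_blk points into the middle of a log record, so pull it back
 * to the header block.
 */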
  435. if (*last_blk - i + extra_bblks !=
  436. BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
  437. *last_blk = i;
  438. out:
  439. xlog_put_bp(bp);
  440. return error;
  441. }
  442. /*
  443. * Head is defined to be the point of the log where the next log write
  444. * could go. This means that incomplete LR writes at the end are
  445. * eliminated when calculating the head. We aren't guaranteed that previous
  446. * LRs have complete transactions. We only know that a cycle number of
  447. * current cycle number -1 won't be present in the log if we start writing
  448. * from our current block number.
  449. *
  450. * last_blk contains the block number of the first block with a given
  451. * cycle number.
  452. *
  453. * Return: zero if normal, non-zero if error.
  454. */
  455. STATIC int
  456. xlog_find_head(
  457. xlog_t *log,
  458. xfs_daddr_t *return_head_blk)
  459. {
  460. xfs_buf_t *bp;
  461. xfs_caddr_t offset;
  462. xfs_daddr_t new_blk, first_blk, start_blk, last_blk, head_blk;
  463. int num_scan_bblks;
  464. uint first_half_cycle, last_half_cycle;
  465. uint stop_on_cycle;
  466. int error, log_bbnum = log->l_logBBsize;
  467. /* Is the end of the log device zeroed? */
  468. if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
  469. *return_head_blk = first_blk;
  470. /* Is the whole lot zeroed? */
  471. if (!first_blk) {
  472. /* Linux XFS shouldn't generate totally zeroed logs -
  473. * mkfs etc write a dummy unmount record to a fresh
  474. * log so we can store the uuid in there
  475. */
  476. xlog_warn("XFS: totally zeroed log");
  477. }
  478. return 0;
  479. } else if (error) {
  480. xlog_warn("XFS: empty log check failed");
  481. return error;
  482. }
  483. first_blk = 0; /* get cycle # of 1st block */
  484. bp = xlog_get_bp(log, 1);
  485. if (!bp)
  486. return ENOMEM;
  487. if ((error = xlog_bread(log, 0, 1, bp)))
  488. goto bp_err;
  489. offset = xlog_align(log, 0, 1, bp);
  490. first_half_cycle = xlog_get_cycle(offset);
  491. last_blk = head_blk = log_bbnum - 1; /* get cycle # of last block */
  492. if ((error = xlog_bread(log, last_blk, 1, bp)))
  493. goto bp_err;
  494. offset = xlog_align(log, last_blk, 1, bp);
  495. last_half_cycle = xlog_get_cycle(offset);
  496. ASSERT(last_half_cycle != 0);
  497. /*
  498. * If the 1st half cycle number is equal to the last half cycle number,
  499. * then the entire log is stamped with the same cycle number. In this
  500. * case, head_blk can't be set to zero (which makes sense). The below
  501. * math doesn't work out properly with head_blk equal to zero. Instead,
  502. * we set it to log_bbnum which is an invalid block number, but this
  503. * value makes the math correct. If head_blk doesn't change through
  504. * all the tests below, *head_blk is set to zero at the very end rather
  505. * than log_bbnum. In a sense, log_bbnum and zero are the same block
  506. * in a circular file.
  507. */
  508. if (first_half_cycle == last_half_cycle) {
  509. /*
  510. * In this case we believe that the entire log should have
  511. * cycle number last_half_cycle. We need to scan backwards
  512. * from the end verifying that there are no holes still
  513. * containing last_half_cycle - 1. If we find such a hole,
  514. * then the start of that hole will be the new head. The
  515. * simple case looks like
  516. * x | x ... | x - 1 | x
  517. * Another case that fits this picture would be
  518. * x | x + 1 | x ... | x
  519. * In this case the head really is somewhere at the end of the
  520. * log, as one of the latest writes at the beginning was
  521. * incomplete.
  522. * One more case is
  523. * x | x + 1 | x ... | x - 1 | x
  524. * This is really the combination of the above two cases, and
  525. * the head has to end up at the start of the x-1 hole at the
  526. * end of the log.
  527. *
  528. * In the 256k log case, we will read from the beginning to the
  529. * end of the log and search for cycle numbers equal to x-1.
  530. * We don't worry about the x+1 blocks that we encounter,
  531. * because we know that they cannot be the head since the log
  532. * started with x.
  533. */
  534. head_blk = log_bbnum;
  535. stop_on_cycle = last_half_cycle - 1;
  536. } else {
  537. /*
  538. * In this case we want to find the first block with cycle
  539. * number matching last_half_cycle. We expect the log to be
  540. * some variation on
  541. * x + 1 ... | x ...
  542. * The first block with cycle number x (last_half_cycle) will
  543. * be where the new head belongs. First we do a binary search
  544. * for the first occurrence of last_half_cycle. The binary
  545. * search may not be totally accurate, so then we scan back
  546. * from there looking for occurrences of last_half_cycle before
  547. * us. If that backwards scan wraps around the beginning of
  548. * the log, then we look for occurrences of last_half_cycle - 1
  549. * at the end of the log. The cases we're looking for look
  550. * like
  551. * x + 1 ... | x | x + 1 | x ...
  552. * ^ binary search stopped here
  553. * or
  554. * x + 1 ... | x ... | x - 1 | x
  555. * <---------> less than scan distance
  556. */
  557. stop_on_cycle = last_half_cycle;
  558. if ((error = xlog_find_cycle_start(log, bp, first_blk,
  559. &head_blk, last_half_cycle)))
  560. goto bp_err;
  561. }
  562. /*
  563. * Now validate the answer. Scan back some number of maximum possible
  564. * blocks and make sure each one has the expected cycle number. The
  565. * maximum is determined by the total possible amount of buffering
  566. * in the in-core log. The following number can be made tighter if
  567. * we actually look at the block size of the filesystem.
  568. */
  569. num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
  570. if (head_blk >= num_scan_bblks) {
  571. /*
  572. * We are guaranteed that the entire check can be performed
  573. * in one buffer.
  574. */
  575. start_blk = head_blk - num_scan_bblks;
  576. if ((error = xlog_find_verify_cycle(log,
  577. start_blk, num_scan_bblks,
  578. stop_on_cycle, &new_blk)))
  579. goto bp_err;
  580. if (new_blk != -1)
  581. head_blk = new_blk;
  582. } else { /* need to read 2 parts of log */
  583. /*
  584. * We are going to scan backwards in the log in two parts.
  585. * First we scan the physical end of the log. In this part
  586. * of the log, we are looking for blocks with cycle number
  587. * last_half_cycle - 1.
  588. * If we find one, then we know that the log starts there, as
  589. * we've found a hole that didn't get written in going around
  590. * the end of the physical log. The simple case for this is
  591. * x + 1 ... | x ... | x - 1 | x
  592. * <---------> less than scan distance
  593. * If all of the blocks at the end of the log have cycle number
  594. * last_half_cycle, then we check the blocks at the start of
  595. * the log looking for occurrences of last_half_cycle. If we
  596. * find one, then our current estimate for the location of the
  597. * first occurrence of last_half_cycle is wrong and we move
  598. * back to the hole we've found. This case looks like
  599. * x + 1 ... | x | x + 1 | x ...
  600. * ^ binary search stopped here
  601. * Another case we need to handle that only occurs in 256k
  602. * logs is
  603. * x + 1 ... | x ... | x+1 | x ...
  604. * ^ binary search stops here
  605. * In a 256k log, the scan at the end of the log will see the
  606. * x + 1 blocks. We need to skip past those since that is
  607. * certainly not the head of the log. By searching for
  608. * last_half_cycle-1 we accomplish that.
  609. */
  610. start_blk = log_bbnum - num_scan_bblks + head_blk;
  611. ASSERT(head_blk <= INT_MAX &&
  612. (xfs_daddr_t) num_scan_bblks - head_blk >= 0);
  613. if ((error = xlog_find_verify_cycle(log, start_blk,
  614. num_scan_bblks - (int)head_blk,
  615. (stop_on_cycle - 1), &new_blk)))
  616. goto bp_err;
  617. if (new_blk != -1) {
  618. head_blk = new_blk;
  619. goto bad_blk;
  620. }
  621. /*
  622. * Scan beginning of log now. The last part of the physical
  623. * log is good. This scan needs to verify that it doesn't find
  624. * the last_half_cycle.
  625. */
  626. start_blk = 0;
  627. ASSERT(head_blk <= INT_MAX);
  628. if ((error = xlog_find_verify_cycle(log,
  629. start_blk, (int)head_blk,
  630. stop_on_cycle, &new_blk)))
  631. goto bp_err;
  632. if (new_blk != -1)
  633. head_blk = new_blk;
  634. }
  635. bad_blk:
  636. /*
  637. * Now we need to make sure head_blk is not pointing to a block in
  638. * the middle of a log record.
  639. */
  640. num_scan_bblks = XLOG_REC_SHIFT(log);
  641. if (head_blk >= num_scan_bblks) {
  642. start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
  643. /* start ptr at last block ptr before head_blk */
  644. if ((error = xlog_find_verify_log_record(log, start_blk,
  645. &head_blk, 0)) == -1) {
  646. error = XFS_ERROR(EIO);
  647. goto bp_err;
  648. } else if (error)
  649. goto bp_err;
  650. } else {
  651. start_blk = 0;
  652. ASSERT(head_blk <= INT_MAX);
  653. if ((error = xlog_find_verify_log_record(log, start_blk,
  654. &head_blk, 0)) == -1) {
  655. /* We hit the beginning of the log during our search */
  656. start_blk = log_bbnum - num_scan_bblks + head_blk;
  657. new_blk = log_bbnum;
  658. ASSERT(start_blk <= INT_MAX &&
  659. (xfs_daddr_t) log_bbnum-start_blk >= 0);
  660. ASSERT(head_blk <= INT_MAX);
  661. if ((error = xlog_find_verify_log_record(log,
  662. start_blk, &new_blk,
  663. (int)head_blk)) == -1) {
  664. error = XFS_ERROR(EIO);
  665. goto bp_err;
  666. } else if (error)
  667. goto bp_err;
  668. if (new_blk != log_bbnum)
  669. head_blk = new_blk;
  670. } else if (error)
  671. goto bp_err;
  672. }
  673. xlog_put_bp(bp);
  674. if (head_blk == log_bbnum)
  675. *return_head_blk = 0;
  676. else
  677. *return_head_blk = head_blk;
  678. /*
  679. * When returning here, we have a good block number. Bad block
  680. * means that during a previous crash, we didn't have a clean break
  681. * from cycle number N to cycle number N-1. In this case, we need
  682. * to find the first block with cycle number N-1.
  683. */
  684. return 0;
  685. bp_err:
  686. xlog_put_bp(bp);
  687. if (error)
  688. xlog_warn("XFS: failed to find log head");
  689. return error;
  690. }
  691. /*
  692. * Find the sync block number or the tail of the log.
  693. *
  694. * This will be the block number of the last record to have its
  695. * associated buffers synced to disk. Every log record header has
  696. * a sync lsn embedded in it. LSNs hold block numbers, so it is easy
  697. * to get a sync block number. The only concern is to figure out which
  698. * log record header to believe.
  699. *
  700. * The following algorithm uses the log record header with the largest
  701. * lsn. The entire log record does not need to be valid. We only care
  702. * that the header is valid.
  703. *
  704. * We could speed up search by using current head_blk buffer, but it is not
  705. * available.
  706. */
  707. int
  708. xlog_find_tail(
  709. xlog_t *log,
  710. xfs_daddr_t *head_blk,
  711. xfs_daddr_t *tail_blk)
  712. {
  713. xlog_rec_header_t *rhead;
  714. xlog_op_header_t *op_head;
  715. xfs_caddr_t offset = NULL;
  716. xfs_buf_t *bp;
  717. int error, i, found;
  718. xfs_daddr_t umount_data_blk;
  719. xfs_daddr_t after_umount_blk;
  720. xfs_lsn_t tail_lsn;
  721. int hblks;
  722. found = 0;
  723. /*
  724. * Find previous log record
  725. */
  726. if ((error = xlog_find_head(log, head_blk)))
  727. return error;
  728. bp = xlog_get_bp(log, 1);
  729. if (!bp)
  730. return ENOMEM;
  731. if (*head_blk == 0) { /* special case */
  732. if ((error = xlog_bread(log, 0, 1, bp)))
  733. goto bread_err;
  734. offset = xlog_align(log, 0, 1, bp);
  735. if (xlog_get_cycle(offset) == 0) {
  736. *tail_blk = 0;
  737. /* leave all other log inited values alone */
  738. goto exit;
  739. }
  740. }
  741. /*
  742. * Search backwards looking for log record header block
  743. */
  744. ASSERT(*head_blk < INT_MAX);
  745. for (i = (int)(*head_blk) - 1; i >= 0; i--) {
  746. if ((error = xlog_bread(log, i, 1, bp)))
  747. goto bread_err;
  748. offset = xlog_align(log, i, 1, bp);
  749. if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) {
  750. found = 1;
  751. break;
  752. }
  753. }
  754. /*
  755. * If we haven't found the log record header block, start looking
  756. * again from the end of the physical log. XXXmiken: There should be
  757. * a check here to make sure we didn't search more than N blocks in
  758. * the previous code.
  759. */
  760. if (!found) {
  761. for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
  762. if ((error = xlog_bread(log, i, 1, bp)))
  763. goto bread_err;
  764. offset = xlog_align(log, i, 1, bp);
  765. if (XLOG_HEADER_MAGIC_NUM ==
  766. be32_to_cpu(*(__be32 *)offset)) {
  767. found = 2;
  768. break;
  769. }
  770. }
  771. }
  772. if (!found) {
  773. xlog_warn("XFS: xlog_find_tail: couldn't find sync record");
  774. ASSERT(0);
  775. return XFS_ERROR(EIO);
  776. }
  777. /* find blk_no of tail of log */
  778. rhead = (xlog_rec_header_t *)offset;
  779. *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
  780. /*
  781. * Reset log values according to the state of the log when we
  782. * crashed. In the case where head_blk == 0, we bump curr_cycle
  783. * one because the next write starts a new cycle rather than
  784. * continuing the cycle of the last good log record. At this
  785. * point we have guaranteed that all partial log records have been
  786. * accounted for. Therefore, we know that the last good log record
  787. * written was complete and ended exactly on the end boundary
  788. * of the physical log.
  789. */
  790. log->l_prev_block = i;
  791. log->l_curr_block = (int)*head_blk;
  792. log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
  793. if (found == 2)
  794. log->l_curr_cycle++;
  795. log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn);
  796. log->l_last_sync_lsn = be64_to_cpu(rhead->h_lsn);
  797. log->l_grant_reserve_cycle = log->l_curr_cycle;
  798. log->l_grant_reserve_bytes = BBTOB(log->l_curr_block);
  799. log->l_grant_write_cycle = log->l_curr_cycle;
  800. log->l_grant_write_bytes = BBTOB(log->l_curr_block);
  801. /*
  802. * Look for unmount record. If we find it, then we know there
  803. * was a clean unmount. Since 'i' could be the last block in
  804. * the physical log, we convert to a log block before comparing
  805. * to the head_blk.
  806. *
  807. * Save the current tail lsn to use to pass to
  808. * xlog_clear_stale_blocks() below. We won't want to clear the
  809. * unmount record if there is one, so we pass the lsn of the
  810. * unmount record rather than the block after it.
  811. */
  812. if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
  813. int h_size = be32_to_cpu(rhead->h_size);
  814. int h_version = be32_to_cpu(rhead->h_version);
  815. if ((h_version & XLOG_VERSION_2) &&
  816. (h_size > XLOG_HEADER_CYCLE_SIZE)) {
  817. hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
  818. if (h_size % XLOG_HEADER_CYCLE_SIZE)
  819. hblks++;
  820. } else {
  821. hblks = 1;
  822. }
  823. } else {
  824. hblks = 1;
  825. }
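/*
 * after_umount_blk is the block immediately following the record we
 * found: the record's header block(s) plus its data length, wrapped
 * around the end of the physical log.
 */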
  826. after_umount_blk = (i + hblks + (int)
  827. BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
  828. tail_lsn = log->l_tail_lsn;
  829. if (*head_blk == after_umount_blk &&
  830. be32_to_cpu(rhead->h_num_logops) == 1) {
  831. umount_data_blk = (i + hblks) % log->l_logBBsize;
  832. if ((error = xlog_bread(log, umount_data_blk, 1, bp))) {
  833. goto bread_err;
  834. }
  835. offset = xlog_align(log, umount_data_blk, 1, bp);
  836. op_head = (xlog_op_header_t *)offset;
  837. if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
  838. /*
  839. * Set tail and last sync so that newly written
  840. * log records will point recovery to after the
  841. * current unmount record.
  842. */
  843. log->l_tail_lsn =
  844. xlog_assign_lsn(log->l_curr_cycle,
  845. after_umount_blk);
  846. log->l_last_sync_lsn =
  847. xlog_assign_lsn(log->l_curr_cycle,
  848. after_umount_blk);
  849. *tail_blk = after_umount_blk;
  850. /*
  851. * Note that the unmount was clean. If the unmount
  852. * was not clean, we need to know this to rebuild the
  853. * superblock counters from the perag headers if we
  854. * have a filesystem using non-persistent counters.
  855. */
  856. log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
  857. }
  858. }
  859. /*
  860. * Make sure that there are no blocks in front of the head
  861. * with the same cycle number as the head. This can happen
  862. * because we allow multiple outstanding log writes concurrently,
  863. * and the later writes might make it out before earlier ones.
  864. *
  865. * We use the lsn from before modifying it so that we'll never
  866. * overwrite the unmount record after a clean unmount.
  867. *
  868. * Do this only if we are going to recover the filesystem
  869. *
  870. * NOTE: This used to say "if (!readonly)"
  871. * However on Linux, we can & do recover a read-only filesystem.
  872. * We only skip recovery if NORECOVERY is specified on mount,
  873. * in which case we would not be here.
  874. *
  875. * But... if the -device- itself is readonly, just skip this.
  876. * We can't recover this device anyway, so it won't matter.
  877. */
  878. if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp)) {
  879. error = xlog_clear_stale_blocks(log, tail_lsn);
  880. }
  881. bread_err:
  882. exit:
  883. xlog_put_bp(bp);
  884. if (error)
  885. xlog_warn("XFS: failed to locate log tail");
  886. return error;
  887. }
  888. /*
  889. * Is the log zeroed at all?
  890. *
  891. * The last binary search should be changed to perform an X block read
  892. * once X becomes small enough. You can then search linearly through
  893. * the X blocks. This will cut down on the number of reads we need to do.
  894. *
  895. * If the log is partially zeroed, this routine will pass back the blkno
  896. * of the first block with cycle number 0. It won't have a complete LR
  897. * preceding it.
  898. *
  899. * Return:
  900. * 0 => the log is completely written to
  901. * -1 => use *blk_no as the first block of the log
  902. * >0 => error has occurred
  903. */
  904. STATIC int
  905. xlog_find_zeroed(
  906. xlog_t *log,
  907. xfs_daddr_t *blk_no)
  908. {
  909. xfs_buf_t *bp;
  910. xfs_caddr_t offset;
  911. uint first_cycle, last_cycle;
  912. xfs_daddr_t new_blk, last_blk, start_blk;
  913. xfs_daddr_t num_scan_bblks;
  914. int error, log_bbnum = log->l_logBBsize;
  915. *blk_no = 0;
  916. /* check totally zeroed log */
  917. bp = xlog_get_bp(log, 1);
  918. if (!bp)
  919. return ENOMEM;
  920. if ((error = xlog_bread(log, 0, 1, bp)))
  921. goto bp_err;
  922. offset = xlog_align(log, 0, 1, bp);
  923. first_cycle = xlog_get_cycle(offset);
  924. if (first_cycle == 0) { /* completely zeroed log */
  925. *blk_no = 0;
  926. xlog_put_bp(bp);
  927. return -1;
  928. }
  929. /* check partially zeroed log */
  930. if ((error = xlog_bread(log, log_bbnum-1, 1, bp)))
  931. goto bp_err;
  932. offset = xlog_align(log, log_bbnum-1, 1, bp);
  933. last_cycle = xlog_get_cycle(offset);
  934. if (last_cycle != 0) { /* log completely written to */
  935. xlog_put_bp(bp);
  936. return 0;
  937. } else if (first_cycle != 1) {
  938. /*
  939. * If the cycle of the last block is zero, the cycle of
  940. * the first block must be 1. If it's not, maybe we're
  941. * not looking at a log... Bail out.
  942. */
  943. xlog_warn("XFS: Log inconsistent or not a log (last==0, first!=1)");
  944. return XFS_ERROR(EINVAL);
  945. }
  946. /* we have a partially zeroed log */
  947. last_blk = log_bbnum-1;
  948. if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
  949. goto bp_err;
  950. /*
  951. * Validate the answer. Because there is no way to guarantee that
  952. * the entire log is made up of log records which are the same size,
  953. * we scan over the defined maximum blocks. At this point, the maximum
  954. * is not chosen to mean anything special. XXXmiken
  955. */
  956. num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
  957. ASSERT(num_scan_bblks <= INT_MAX);
  958. if (last_blk < num_scan_bblks)
  959. num_scan_bblks = last_blk;
  960. start_blk = last_blk - num_scan_bblks;
  961. /*
  962. * We search for any instances of cycle number 0 that occur before
  963. * our current estimate of the head. What we're trying to detect is
  964. * 1 ... | 0 | 1 | 0...
  965. * ^ binary search ends here
  966. */
  967. if ((error = xlog_find_verify_cycle(log, start_blk,
  968. (int)num_scan_bblks, 0, &new_blk)))
  969. goto bp_err;
  970. if (new_blk != -1)
  971. last_blk = new_blk;
  972. /*
  973. * Potentially backup over partial log record write. We don't need
  974. * to search the end of the log because we know it is zero.
  975. */
  976. if ((error = xlog_find_verify_log_record(log, start_blk,
  977. &last_blk, 0)) == -1) {
  978. error = XFS_ERROR(EIO);
  979. goto bp_err;
  980. } else if (error)
  981. goto bp_err;
  982. *blk_no = last_blk;
  983. bp_err:
  984. xlog_put_bp(bp);
  985. if (error)
  986. return error;
  987. return -1;
  988. }
  989. /*
  990. * These are simple subroutines used by xlog_clear_stale_blocks() below
  991. * to initialize a buffer full of empty log record headers and write
  992. * them into the log.
  993. */
  994. STATIC void
  995. xlog_add_record(
  996. xlog_t *log,
  997. xfs_caddr_t buf,
  998. int cycle,
  999. int block,
  1000. int tail_cycle,
  1001. int tail_block)
  1002. {
  1003. xlog_rec_header_t *recp = (xlog_rec_header_t *)buf;
  1004. memset(buf, 0, BBSIZE);
  1005. recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
  1006. recp->h_cycle = cpu_to_be32(cycle);
  1007. recp->h_version = cpu_to_be32(
  1008. XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1);
  1009. recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
  1010. recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
  1011. recp->h_fmt = cpu_to_be32(XLOG_FMT);
  1012. memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
  1013. }
  1014. STATIC int
  1015. xlog_write_log_records(
  1016. xlog_t *log,
  1017. int cycle,
  1018. int start_block,
  1019. int blocks,
  1020. int tail_cycle,
  1021. int tail_block)
  1022. {
  1023. xfs_caddr_t offset;
  1024. xfs_buf_t *bp;
  1025. int balign, ealign;
  1026. int sectbb = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
  1027. int end_block = start_block + blocks;
  1028. int bufblks;
  1029. int error = 0;
  1030. int i, j = 0;
  1031. bufblks = 1 << ffs(blocks);
  1032. while (!(bp = xlog_get_bp(log, bufblks))) {
  1033. bufblks >>= 1;
  1034. if (bufblks <= log->l_sectbb_log)
  1035. return ENOMEM;
  1036. }
  1037. /* We may need to do a read at the start to fill in part of
  1038. * the buffer in the starting sector not covered by the first
  1039. * write below.
  1040. */
  1041. balign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, start_block);
  1042. if (balign != start_block) {
  1043. if ((error = xlog_bread(log, start_block, 1, bp))) {
  1044. xlog_put_bp(bp);
  1045. return error;
  1046. }
  1047. j = start_block - balign;
  1048. }
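/*
 * j is the offset of start_block within its sector; the first j blocks
 * of the buffer hold the data pre-read above, and the record-stamping
 * loop below starts at index j so those blocks keep their pre-read
 * contents when the sector is written back.
 */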
  1049. for (i = start_block; i < end_block; i += bufblks) {
  1050. int bcount, endcount;
  1051. bcount = min(bufblks, end_block - start_block);
  1052. endcount = bcount - j;
  1053. /* We may need to do a read at the end to fill in part of
  1054. * the buffer in the final sector not covered by the write.
  1055. * If this is the same sector as the above read, skip it.
  1056. */
  1057. ealign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, end_block);
  1058. if (j == 0 && (start_block + endcount > ealign)) {
  1059. offset = XFS_BUF_PTR(bp);
  1060. balign = BBTOB(ealign - start_block);
  1061. XFS_BUF_SET_PTR(bp, offset + balign, BBTOB(sectbb));
  1062. if ((error = xlog_bread(log, ealign, sectbb, bp)))
  1063. break;
  1064. XFS_BUF_SET_PTR(bp, offset, bufblks);
  1065. }
  1066. offset = xlog_align(log, start_block, endcount, bp);
  1067. for (; j < endcount; j++) {
  1068. xlog_add_record(log, offset, cycle, i+j,
  1069. tail_cycle, tail_block);
  1070. offset += BBSIZE;
  1071. }
  1072. error = xlog_bwrite(log, start_block, endcount, bp);
  1073. if (error)
  1074. break;
  1075. start_block += endcount;
  1076. j = 0;
  1077. }
  1078. xlog_put_bp(bp);
  1079. return error;
  1080. }
  1081. /*
  1082. * This routine is called to blow away any incomplete log writes out
  1083. * in front of the log head. We do this so that we won't become confused
  1084. * if we come up, write only a little bit more, and then crash again.
  1085. * If we leave the partial log records out there, this situation could
  1086. * cause us to think those partial writes are valid blocks since they
  1087. * have the current cycle number. We get rid of them by overwriting them
  1088. * with empty log records with the old cycle number rather than the
  1089. * current one.
  1090. *
  1091. * The tail lsn is passed in rather than taken from
  1092. * the log so that we will not write over the unmount record after a
  1093. * clean unmount in a 512 block log. Doing so would leave the log without
  1094. * any valid log records in it until a new one was written. If we crashed
  1095. * during that time we would not be able to recover.
  1096. */
  1097. STATIC int
  1098. xlog_clear_stale_blocks(
  1099. xlog_t *log,
  1100. xfs_lsn_t tail_lsn)
  1101. {
  1102. int tail_cycle, head_cycle;
  1103. int tail_block, head_block;
  1104. int tail_distance, max_distance;
  1105. int distance;
  1106. int error;
  1107. tail_cycle = CYCLE_LSN(tail_lsn);
  1108. tail_block = BLOCK_LSN(tail_lsn);
  1109. head_cycle = log->l_curr_cycle;
  1110. head_block = log->l_curr_block;
  1111. /*
  1112. * Figure out the distance between the new head of the log
  1113. * and the tail. We want to write over any blocks beyond the
  1114. * head that we may have written just before the crash, but
  1115. * we don't want to overwrite the tail of the log.
  1116. */
  1117. if (head_cycle == tail_cycle) {
  1118. /*
  1119. * The tail is behind the head in the physical log,
  1120. * so the distance from the head to the tail is the
  1121. * distance from the head to the end of the log plus
  1122. * the distance from the beginning of the log to the
  1123. * tail.
  1124. */
  1125. if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
  1126. XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
  1127. XFS_ERRLEVEL_LOW, log->l_mp);
  1128. return XFS_ERROR(EFSCORRUPTED);
  1129. }
  1130. tail_distance = tail_block + (log->l_logBBsize - head_block);
  1131. } else {
  1132. /*
  1133. * The head is behind the tail in the physical log,
  1134. * so the distance from the head to the tail is just
  1135. * the tail block minus the head block.
  1136. */
  1137. if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
  1138. XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
  1139. XFS_ERRLEVEL_LOW, log->l_mp);
  1140. return XFS_ERROR(EFSCORRUPTED);
  1141. }
  1142. tail_distance = tail_block - head_block;
  1143. }
  1144. /*
  1145. * If the head is right up against the tail, we can't clear
  1146. * anything.
  1147. */
  1148. if (tail_distance <= 0) {
  1149. ASSERT(tail_distance == 0);
  1150. return 0;
  1151. }
  1152. max_distance = XLOG_TOTAL_REC_SHIFT(log);
  1153. /*
  1154. * Take the smaller of the maximum amount of outstanding I/O
  1155. * we could have and the distance to the tail to clear out.
  1156. * We take the smaller so that we don't overwrite the tail and
  1157. * we don't waste all day writing from the head to the tail
  1158. * for no reason.
  1159. */
  1160. max_distance = MIN(max_distance, tail_distance);
  1161. if ((head_block + max_distance) <= log->l_logBBsize) {
  1162. /*
  1163. * We can stomp all the blocks we need to without
  1164. * wrapping around the end of the log. Just do it
  1165. * in a single write. Use the cycle number of the
  1166. * current cycle minus one so that the log will look like:
  1167. * n ... | n - 1 ...
  1168. */
  1169. error = xlog_write_log_records(log, (head_cycle - 1),
  1170. head_block, max_distance, tail_cycle,
  1171. tail_block);
  1172. if (error)
  1173. return error;
  1174. } else {
  1175. /*
  1176. * We need to wrap around the end of the physical log in
  1177. * order to clear all the blocks. Do it in two separate
  1178. * I/Os. The first write should be from the head to the
  1179. * end of the physical log, and it should use the current
  1180. * cycle number minus one just like above.
  1181. */
  1182. distance = log->l_logBBsize - head_block;
  1183. error = xlog_write_log_records(log, (head_cycle - 1),
  1184. head_block, distance, tail_cycle,
  1185. tail_block);
  1186. if (error)
  1187. return error;
  1188. /*
  1189. * Now write the blocks at the start of the physical log.
  1190. * This writes the remainder of the blocks we want to clear.
  1191. * It uses the current cycle number since we're now on the
  1192. * same cycle as the head so that we get:
  1193. * n ... n ... | n - 1 ...
  1194. * ^^^^^ blocks we're writing
  1195. */
  1196. distance = max_distance - (log->l_logBBsize - head_block);
  1197. error = xlog_write_log_records(log, head_cycle, 0, distance,
  1198. tail_cycle, tail_block);
  1199. if (error)
  1200. return error;
  1201. }
  1202. return 0;
  1203. }
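/*
 * Worked example (illustration only, not part of the recovery code):
 * with a hypothetical 1000-block log, head_block = 900 and
 * max_distance = 250, the wrap-around branch above issues two writes:
 * blocks 900..999 (distance 100) stamped with cycle n - 1, then blocks
 * 0..149 (distance 150) stamped with cycle n, which produces the
 * "n ... n ... | n - 1 ..." layout described in the comments. The
 * helper below is a minimal sketch of that split; all names here are
 * hypothetical.
 */
static inline void
example_split_clear_range(
	int	log_blocks,	/* size of the physical log in blocks */
	int	head_block,	/* first block to clear */
	int	max_distance,	/* total number of blocks to clear */
	int	*first_len,	/* blocks written with cycle n - 1 */
	int	*second_len)	/* blocks written with cycle n after the wrap */
{
	if (head_block + max_distance <= log_blocks) {
		*first_len = max_distance;	/* single write, no wrap */
		*second_len = 0;
	} else {
		*first_len = log_blocks - head_block;
		*second_len = max_distance - *first_len;
	}
}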
  1204. /******************************************************************************
  1205. *
  1206. * Log recover routines
  1207. *
  1208. ******************************************************************************
  1209. */
  1210. STATIC xlog_recover_t *
  1211. xlog_recover_find_tid(
  1212. xlog_recover_t *q,
  1213. xlog_tid_t tid)
  1214. {
  1215. xlog_recover_t *p = q;
  1216. while (p != NULL) {
  1217. if (p->r_log_tid == tid)
  1218. break;
  1219. p = p->r_next;
  1220. }
  1221. return p;
  1222. }
  1223. STATIC void
  1224. xlog_recover_put_hashq(
  1225. xlog_recover_t **q,
  1226. xlog_recover_t *trans)
  1227. {
  1228. trans->r_next = *q;
  1229. *q = trans;
  1230. }
  1231. STATIC void
  1232. xlog_recover_add_item(
  1233. xlog_recover_item_t **itemq)
  1234. {
  1235. xlog_recover_item_t *item;
  1236. item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
  1237. xlog_recover_insert_item_backq(itemq, item);
  1238. }
  1239. STATIC int
  1240. xlog_recover_add_to_cont_trans(
  1241. xlog_recover_t *trans,
  1242. xfs_caddr_t dp,
  1243. int len)
  1244. {
  1245. xlog_recover_item_t *item;
  1246. xfs_caddr_t ptr, old_ptr;
  1247. int old_len;
  1248. item = trans->r_itemq;
  1249. if (item == NULL) {
  1250. /* finish copying rest of trans header */
  1251. xlog_recover_add_item(&trans->r_itemq);
  1252. ptr = (xfs_caddr_t) &trans->r_theader +
  1253. sizeof(xfs_trans_header_t) - len;
  1254. memcpy(ptr, dp, len); /* d, s, l */
  1255. return 0;
  1256. }
  1257. item = item->ri_prev;
  1258. old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
  1259. old_len = item->ri_buf[item->ri_cnt-1].i_len;
  1260. ptr = kmem_realloc(old_ptr, len+old_len, old_len, 0u);
  1261. memcpy(&ptr[old_len], dp, len); /* d, s, l */
  1262. item->ri_buf[item->ri_cnt-1].i_len += len;
  1263. item->ri_buf[item->ri_cnt-1].i_addr = ptr;
  1264. return 0;
  1265. }
  1266. /*
  1267. * The next region to add is the start of a new region. It could be
  1268. * a whole region or it could be the first part of a new region. Because
  1269. * of this, the assumption here is that the type and size fields of all
  1270. * format structures fit into the first 32 bits of the structure.
  1271. *
  1272. * This works because all regions must be 32 bit aligned. Therefore, we
  1273. * either have both fields or we have neither field. In the case we have
  1274. * neither field, the data part of the region is zero length. We only have
  1275. * a log_op_header and can throw away the header since a new one will appear
  1276. * later. If we have at least 4 bytes, then we can determine how many regions
  1277. * will appear in the current log item.
  1278. */
  1279. STATIC int
  1280. xlog_recover_add_to_trans(
  1281. xlog_recover_t *trans,
  1282. xfs_caddr_t dp,
  1283. int len)
  1284. {
  1285. xfs_inode_log_format_t *in_f; /* any will do */
  1286. xlog_recover_item_t *item;
  1287. xfs_caddr_t ptr;
  1288. if (!len)
  1289. return 0;
  1290. item = trans->r_itemq;
  1291. if (item == NULL) {
  1292. ASSERT(*(uint *)dp == XFS_TRANS_HEADER_MAGIC);
  1293. if (len == sizeof(xfs_trans_header_t))
  1294. xlog_recover_add_item(&trans->r_itemq);
  1295. memcpy(&trans->r_theader, dp, len); /* d, s, l */
  1296. return 0;
  1297. }
  1298. ptr = kmem_alloc(len, KM_SLEEP);
  1299. memcpy(ptr, dp, len);
  1300. in_f = (xfs_inode_log_format_t *)ptr;
  1301. if (item->ri_prev->ri_total != 0 &&
  1302. item->ri_prev->ri_total == item->ri_prev->ri_cnt) {
  1303. xlog_recover_add_item(&trans->r_itemq);
  1304. }
  1305. item = trans->r_itemq;
  1306. item = item->ri_prev;
  1307. if (item->ri_total == 0) { /* first region to be added */
  1308. item->ri_total = in_f->ilf_size;
  1309. ASSERT(item->ri_total <= XLOG_MAX_REGIONS_IN_ITEM);
  1310. item->ri_buf = kmem_zalloc((item->ri_total *
  1311. sizeof(xfs_log_iovec_t)), KM_SLEEP);
  1312. }
  1313. ASSERT(item->ri_total > item->ri_cnt);
  1314. /* Description region is ri_buf[0] */
  1315. item->ri_buf[item->ri_cnt].i_addr = ptr;
  1316. item->ri_buf[item->ri_cnt].i_len = len;
  1317. item->ri_cnt++;
  1318. return 0;
  1319. }
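/*
 * Illustration only: the "first 32 bits" assumption above means every
 * region that opens a new log item begins with a 16-bit type followed
 * by a 16-bit region count, whichever *_log_format structure it really
 * is. The struct below is a hypothetical mirror of just that common
 * prefix; the real code simply casts to xfs_inode_log_format_t and
 * reads ilf_size.
 */
struct example_region_prefix {
	unsigned short	type;	/* log item type, e.g. XFS_LI_INODE */
	unsigned short	size;	/* number of regions making up the item */
};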
  1320. STATIC void
  1321. xlog_recover_new_tid(
  1322. xlog_recover_t **q,
  1323. xlog_tid_t tid,
  1324. xfs_lsn_t lsn)
  1325. {
  1326. xlog_recover_t *trans;
  1327. trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
  1328. trans->r_log_tid = tid;
  1329. trans->r_lsn = lsn;
  1330. xlog_recover_put_hashq(q, trans);
  1331. }
  1332. STATIC int
  1333. xlog_recover_unlink_tid(
  1334. xlog_recover_t **q,
  1335. xlog_recover_t *trans)
  1336. {
  1337. xlog_recover_t *tp;
  1338. int found = 0;
  1339. ASSERT(trans != NULL);
  1340. if (trans == *q) {
  1341. *q = (*q)->r_next;
  1342. } else {
  1343. tp = *q;
  1344. while (tp) {
  1345. if (tp->r_next == trans) {
  1346. found = 1;
  1347. break;
  1348. }
  1349. tp = tp->r_next;
  1350. }
  1351. if (!found) {
  1352. xlog_warn(
  1353. "XFS: xlog_recover_unlink_tid: trans not found");
  1354. ASSERT(0);
  1355. return XFS_ERROR(EIO);
  1356. }
  1357. tp->r_next = tp->r_next->r_next;
  1358. }
  1359. return 0;
  1360. }
  1361. STATIC void
  1362. xlog_recover_insert_item_backq(
  1363. xlog_recover_item_t **q,
  1364. xlog_recover_item_t *item)
  1365. {
  1366. if (*q == NULL) {
  1367. item->ri_prev = item->ri_next = item;
  1368. *q = item;
  1369. } else {
  1370. item->ri_next = *q;
  1371. item->ri_prev = (*q)->ri_prev;
  1372. (*q)->ri_prev = item;
  1373. item->ri_prev->ri_next = item;
  1374. }
  1375. }
  1376. STATIC void
  1377. xlog_recover_insert_item_frontq(
  1378. xlog_recover_item_t **q,
  1379. xlog_recover_item_t *item)
  1380. {
  1381. xlog_recover_insert_item_backq(q, item);
  1382. *q = item;
  1383. }
  1384. STATIC int
  1385. xlog_recover_reorder_trans(
  1386. xlog_recover_t *trans)
  1387. {
  1388. xlog_recover_item_t *first_item, *itemq, *itemq_next;
  1389. xfs_buf_log_format_t *buf_f;
  1390. ushort flags = 0;
  1391. first_item = itemq = trans->r_itemq;
  1392. trans->r_itemq = NULL;
  1393. do {
  1394. itemq_next = itemq->ri_next;
  1395. buf_f = (xfs_buf_log_format_t *)itemq->ri_buf[0].i_addr;
  1396. switch (ITEM_TYPE(itemq)) {
  1397. case XFS_LI_BUF:
  1398. flags = buf_f->blf_flags;
  1399. if (!(flags & XFS_BLI_CANCEL)) {
  1400. xlog_recover_insert_item_frontq(&trans->r_itemq,
  1401. itemq);
  1402. break;
  1403. }
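/* fall through: cancelled buffer items are queued at the back like the other item types */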
  1404. case XFS_LI_INODE:
  1405. case XFS_LI_DQUOT:
  1406. case XFS_LI_QUOTAOFF:
  1407. case XFS_LI_EFD:
  1408. case XFS_LI_EFI:
  1409. xlog_recover_insert_item_backq(&trans->r_itemq, itemq);
  1410. break;
  1411. default:
  1412. xlog_warn(
  1413. "XFS: xlog_recover_reorder_trans: unrecognized type of log operation");
  1414. ASSERT(0);
  1415. return XFS_ERROR(EIO);
  1416. }
  1417. itemq = itemq_next;
  1418. } while (first_item != itemq);
  1419. return 0;
  1420. }
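/*
 * Example (illustration only): an item queue of
 *	INODE, BUF, BUF(cancel), DQUOT
 * comes out of the reorder above as
 *	BUF, INODE, BUF(cancel), DQUOT
 * i.e. plain buffer items are moved to the front so they are replayed
 * first, while everything else keeps its relative order behind them.
 */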
  1421. /*
  1422. * Build up the table of buf cancel records so that we don't replay
  1423. * cancelled data in the second pass. For buffer records that are
  1424. * not cancel records, there is nothing to do here so we just return.
  1425. *
  1426. * If we get a cancel record which is already in the table, this indicates
  1427. * that the buffer was cancelled multiple times. In order to ensure
  1428. * that during pass 2 we keep the record in the table until we reach its
  1429. * last occurrence in the log, we keep a reference count in the cancel
  1430. * record in the table to tell us how many times we expect to see this
  1431. * record during the second pass.
  1432. */
  1433. STATIC void
  1434. xlog_recover_do_buffer_pass1(
  1435. xlog_t *log,
  1436. xfs_buf_log_format_t *buf_f)
  1437. {
  1438. xfs_buf_cancel_t *bcp;
  1439. xfs_buf_cancel_t *nextp;
  1440. xfs_buf_cancel_t *prevp;
  1441. xfs_buf_cancel_t **bucket;
  1442. xfs_daddr_t blkno = 0;
  1443. uint len = 0;
  1444. ushort flags = 0;
  1445. switch (buf_f->blf_type) {
  1446. case XFS_LI_BUF:
  1447. blkno = buf_f->blf_blkno;
  1448. len = buf_f->blf_len;
  1449. flags = buf_f->blf_flags;
  1450. break;
  1451. }
  1452. /*
  1453. * If this isn't a cancel buffer item, then just return.
  1454. */
  1455. if (!(flags & XFS_BLI_CANCEL))
  1456. return;
  1457. /*
  1458. * Insert an xfs_buf_cancel record into the hash table of
  1459. * them. If there is already an identical record, bump
  1460. * its reference count.
  1461. */
  1462. bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
  1463. XLOG_BC_TABLE_SIZE];
  1464. /*
  1465. * If the hash bucket is empty then just insert a new record into
  1466. * the bucket.
  1467. */
  1468. if (*bucket == NULL) {
  1469. bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
  1470. KM_SLEEP);
  1471. bcp->bc_blkno = blkno;
  1472. bcp->bc_len = len;
  1473. bcp->bc_refcount = 1;
  1474. bcp->bc_next = NULL;
  1475. *bucket = bcp;
  1476. return;
  1477. }
  1478. /*
  1479. * The hash bucket is not empty, so search for duplicates of our
1480. * record. If we find one, just bump its refcount. If not,
  1481. * then add us at the end of the list.
  1482. */
  1483. prevp = NULL;
  1484. nextp = *bucket;
  1485. while (nextp != NULL) {
  1486. if (nextp->bc_blkno == blkno && nextp->bc_len == len) {
  1487. nextp->bc_refcount++;
  1488. return;
  1489. }
  1490. prevp = nextp;
  1491. nextp = nextp->bc_next;
  1492. }
  1493. ASSERT(prevp != NULL);
  1494. bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
  1495. KM_SLEEP);
  1496. bcp->bc_blkno = blkno;
  1497. bcp->bc_len = len;
  1498. bcp->bc_refcount = 1;
  1499. bcp->bc_next = NULL;
  1500. prevp->bc_next = bcp;
  1501. }
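/*
 * Illustration only: the bucket chosen above is simply blkno modulo the
 * table size, so (assuming the usual 64-entry table) a cancel record
 * for block 4099 lands in bucket 3. Seeing the same (blkno, len) cancel
 * again during pass 1 bumps bc_refcount instead of adding a second
 * entry, which is how pass 2 knows how many occurrences to expect.
 */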
  1502. /*
  1503. * Check to see whether the buffer being recovered has a corresponding
  1504. * entry in the buffer cancel record table. If it does then return 1
  1505. * so that it will be cancelled, otherwise return 0. If the buffer is
  1506. * actually a buffer cancel item (XFS_BLI_CANCEL is set), then decrement
  1507. * the refcount on the entry in the table and remove it from the table
  1508. * if this is the last reference.
  1509. *
  1510. * We remove the cancel record from the table when we encounter its
  1511. * last occurrence in the log so that if the same buffer is re-used
  1512. * again after its last cancellation we actually replay the changes
  1513. * made at that point.
  1514. */
  1515. STATIC int
  1516. xlog_check_buffer_cancelled(
  1517. xlog_t *log,
  1518. xfs_daddr_t blkno,
  1519. uint len,
  1520. ushort flags)
  1521. {
  1522. xfs_buf_cancel_t *bcp;
  1523. xfs_buf_cancel_t *prevp;
  1524. xfs_buf_cancel_t **bucket;
  1525. if (log->l_buf_cancel_table == NULL) {
  1526. /*
  1527. * There is nothing in the table built in pass one,
  1528. * so this buffer must not be cancelled.
  1529. */
  1530. ASSERT(!(flags & XFS_BLI_CANCEL));
  1531. return 0;
  1532. }
  1533. bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
  1534. XLOG_BC_TABLE_SIZE];
  1535. bcp = *bucket;
  1536. if (bcp == NULL) {
  1537. /*
  1538. * There is no corresponding entry in the table built
  1539. * in pass one, so this buffer has not been cancelled.
  1540. */
  1541. ASSERT(!(flags & XFS_BLI_CANCEL));
  1542. return 0;
  1543. }
  1544. /*
  1545. * Search for an entry in the buffer cancel table that
  1546. * matches our buffer.
  1547. */
  1548. prevp = NULL;
  1549. while (bcp != NULL) {
  1550. if (bcp->bc_blkno == blkno && bcp->bc_len == len) {
  1551. /*
1552. * We've got a match, so return 1 so that the
  1553. * recovery of this buffer is cancelled.
  1554. * If this buffer is actually a buffer cancel
  1555. * log item, then decrement the refcount on the
  1556. * one in the table and remove it if this is the
  1557. * last reference.
  1558. */
  1559. if (flags & XFS_BLI_CANCEL) {
  1560. bcp->bc_refcount--;
  1561. if (bcp->bc_refcount == 0) {
  1562. if (prevp == NULL) {
  1563. *bucket = bcp->bc_next;
  1564. } else {
  1565. prevp->bc_next = bcp->bc_next;
  1566. }
  1567. kmem_free(bcp,
  1568. sizeof(xfs_buf_cancel_t));
  1569. }
  1570. }
  1571. return 1;
  1572. }
  1573. prevp = bcp;
  1574. bcp = bcp->bc_next;
  1575. }
  1576. /*
  1577. * We didn't find a corresponding entry in the table, so
  1578. * return 0 so that the buffer is NOT cancelled.
  1579. */
  1580. ASSERT(!(flags & XFS_BLI_CANCEL));
  1581. return 0;
  1582. }
  1583. STATIC int
  1584. xlog_recover_do_buffer_pass2(
  1585. xlog_t *log,
  1586. xfs_buf_log_format_t *buf_f)
  1587. {
  1588. xfs_daddr_t blkno = 0;
  1589. ushort flags = 0;
  1590. uint len = 0;
  1591. switch (buf_f->blf_type) {
  1592. case XFS_LI_BUF:
  1593. blkno = buf_f->blf_blkno;
  1594. flags = buf_f->blf_flags;
  1595. len = buf_f->blf_len;
  1596. break;
  1597. }
  1598. return xlog_check_buffer_cancelled(log, blkno, len, flags);
  1599. }
  1600. /*
  1601. * Perform recovery for a buffer full of inodes. In these buffers,
  1602. * the only data which should be recovered is that which corresponds
  1603. * to the di_next_unlinked pointers in the on disk inode structures.
  1604. * The rest of the data for the inodes is always logged through the
  1605. * inodes themselves rather than the inode buffer and is recovered
  1606. * in xlog_recover_do_inode_trans().
  1607. *
  1608. * The only time when buffers full of inodes are fully recovered is
  1609. * when the buffer is full of newly allocated inodes. In this case
  1610. * the buffer will not be marked as an inode buffer and so will be
  1611. * sent to xlog_recover_do_reg_buffer() below during recovery.
  1612. */
  1613. STATIC int
  1614. xlog_recover_do_inode_buffer(
  1615. xfs_mount_t *mp,
  1616. xlog_recover_item_t *item,
  1617. xfs_buf_t *bp,
  1618. xfs_buf_log_format_t *buf_f)
  1619. {
  1620. int i;
  1621. int item_index;
  1622. int bit;
  1623. int nbits;
  1624. int reg_buf_offset;
  1625. int reg_buf_bytes;
  1626. int next_unlinked_offset;
  1627. int inodes_per_buf;
  1628. xfs_agino_t *logged_nextp;
  1629. xfs_agino_t *buffer_nextp;
  1630. unsigned int *data_map = NULL;
  1631. unsigned int map_size = 0;
  1632. switch (buf_f->blf_type) {
  1633. case XFS_LI_BUF:
  1634. data_map = buf_f->blf_data_map;
  1635. map_size = buf_f->blf_map_size;
  1636. break;
  1637. }
  1638. /*
  1639. * Set the variables corresponding to the current region to
  1640. * 0 so that we'll initialize them on the first pass through
  1641. * the loop.
  1642. */
  1643. reg_buf_offset = 0;
  1644. reg_buf_bytes = 0;
  1645. bit = 0;
  1646. nbits = 0;
  1647. item_index = 0;
  1648. inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
  1649. for (i = 0; i < inodes_per_buf; i++) {
  1650. next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
  1651. offsetof(xfs_dinode_t, di_next_unlinked);
  1652. while (next_unlinked_offset >=
  1653. (reg_buf_offset + reg_buf_bytes)) {
  1654. /*
  1655. * The next di_next_unlinked field is beyond
  1656. * the current logged region. Find the next
  1657. * logged region that contains or is beyond
  1658. * the current di_next_unlinked field.
  1659. */
  1660. bit += nbits;
  1661. bit = xfs_next_bit(data_map, map_size, bit);
  1662. /*
  1663. * If there are no more logged regions in the
  1664. * buffer, then we're done.
  1665. */
  1666. if (bit == -1) {
  1667. return 0;
  1668. }
  1669. nbits = xfs_contig_bits(data_map, map_size,
  1670. bit);
  1671. ASSERT(nbits > 0);
  1672. reg_buf_offset = bit << XFS_BLI_SHIFT;
  1673. reg_buf_bytes = nbits << XFS_BLI_SHIFT;
  1674. item_index++;
  1675. }
  1676. /*
  1677. * If the current logged region starts after the current
  1678. * di_next_unlinked field, then move on to the next
  1679. * di_next_unlinked field.
  1680. */
  1681. if (next_unlinked_offset < reg_buf_offset) {
  1682. continue;
  1683. }
  1684. ASSERT(item->ri_buf[item_index].i_addr != NULL);
  1685. ASSERT((item->ri_buf[item_index].i_len % XFS_BLI_CHUNK) == 0);
  1686. ASSERT((reg_buf_offset + reg_buf_bytes) <= XFS_BUF_COUNT(bp));
  1687. /*
  1688. * The current logged region contains a copy of the
  1689. * current di_next_unlinked field. Extract its value
  1690. * and copy it to the buffer copy.
  1691. */
  1692. logged_nextp = (xfs_agino_t *)
  1693. ((char *)(item->ri_buf[item_index].i_addr) +
  1694. (next_unlinked_offset - reg_buf_offset));
  1695. if (unlikely(*logged_nextp == 0)) {
  1696. xfs_fs_cmn_err(CE_ALERT, mp,
  1697. "bad inode buffer log record (ptr = 0x%p, bp = 0x%p). XFS trying to replay bad (0) inode di_next_unlinked field",
  1698. item, bp);
  1699. XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
  1700. XFS_ERRLEVEL_LOW, mp);
  1701. return XFS_ERROR(EFSCORRUPTED);
  1702. }
  1703. buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
  1704. next_unlinked_offset);
  1705. *buffer_nextp = *logged_nextp;
  1706. }
  1707. return 0;
  1708. }
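/*
 * Illustration only: data_map is a bitmap of logged chunks, one bit per
 * XFS_BLI_CHUNK bytes of the buffer, so bit i covers the byte range
 * [i << XFS_BLI_SHIFT, (i + 1) << XFS_BLI_SHIFT). Assuming the usual
 * 128-byte chunk, a hypothetical di_next_unlinked offset of 448 falls
 * in the chunk for bit 3 (bytes 384..511), so that field is only
 * copied if bit 3 was logged.
 */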
  1709. /*
  1710. * Perform a 'normal' buffer recovery. Each logged region of the
  1711. * buffer should be copied over the corresponding region in the
  1712. * given buffer. The bitmap in the buf log format structure indicates
  1713. * where to place the logged data.
  1714. */
  1715. /*ARGSUSED*/
  1716. STATIC void
  1717. xlog_recover_do_reg_buffer(
  1718. xlog_recover_item_t *item,
  1719. xfs_buf_t *bp,
  1720. xfs_buf_log_format_t *buf_f)
  1721. {
  1722. int i;
  1723. int bit;
  1724. int nbits;
  1725. unsigned int *data_map = NULL;
  1726. unsigned int map_size = 0;
  1727. int error;
  1728. switch (buf_f->blf_type) {
  1729. case XFS_LI_BUF:
  1730. data_map = buf_f->blf_data_map;
  1731. map_size = buf_f->blf_map_size;
  1732. break;
  1733. }
  1734. bit = 0;
  1735. i = 1; /* 0 is the buf format structure */
  1736. while (1) {
  1737. bit = xfs_next_bit(data_map, map_size, bit);
  1738. if (bit == -1)
  1739. break;
  1740. nbits = xfs_contig_bits(data_map, map_size, bit);
  1741. ASSERT(nbits > 0);
  1742. ASSERT(item->ri_buf[i].i_addr != NULL);
  1743. ASSERT(item->ri_buf[i].i_len % XFS_BLI_CHUNK == 0);
  1744. ASSERT(XFS_BUF_COUNT(bp) >=
  1745. ((uint)bit << XFS_BLI_SHIFT)+(nbits<<XFS_BLI_SHIFT));
  1746. /*
  1747. * Do a sanity check if this is a dquot buffer. Just checking
1748. * the first dquot in the buffer should do. XXX This is
  1749. * probably a good thing to do for other buf types also.
  1750. */
  1751. error = 0;
  1752. if (buf_f->blf_flags &
  1753. (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
  1754. error = xfs_qm_dqcheck((xfs_disk_dquot_t *)
  1755. item->ri_buf[i].i_addr,
  1756. -1, 0, XFS_QMOPT_DOWARN,
  1757. "dquot_buf_recover");
  1758. }
  1759. if (!error)
  1760. memcpy(xfs_buf_offset(bp,
  1761. (uint)bit << XFS_BLI_SHIFT), /* dest */
  1762. item->ri_buf[i].i_addr, /* source */
  1763. nbits<<XFS_BLI_SHIFT); /* length */
  1764. i++;
  1765. bit += nbits;
  1766. }
  1767. /* Shouldn't be any more regions */
  1768. ASSERT(i == item->ri_total);
  1769. }
  1770. /*
  1771. * Do some primitive error checking on ondisk dquot data structures.
  1772. */
  1773. int
  1774. xfs_qm_dqcheck(
  1775. xfs_disk_dquot_t *ddq,
  1776. xfs_dqid_t id,
  1777. uint type, /* used only when IO_dorepair is true */
  1778. uint flags,
  1779. char *str)
  1780. {
  1781. xfs_dqblk_t *d = (xfs_dqblk_t *)ddq;
  1782. int errs = 0;
  1783. /*
  1784. * We can encounter an uninitialized dquot buffer for 2 reasons:
  1785. * 1. If we crash while deleting the quotainode(s), and those blks got
  1786. * used for user data. This is because we take the path of regular
  1787. * file deletion; however, the size field of quotainodes is never
  1788. * updated, so all the tricks that we play in itruncate_finish
  1789. * don't quite matter.
  1790. *
  1791. * 2. We don't play the quota buffers when there's a quotaoff logitem.
  1792. * But the allocation will be replayed so we'll end up with an
  1793. * uninitialized quota block.
  1794. *
  1795. * This is all fine; things are still consistent, and we haven't lost
  1796. * any quota information. Just don't complain about bad dquot blks.
  1797. */
  1798. if (be16_to_cpu(ddq->d_magic) != XFS_DQUOT_MAGIC) {
  1799. if (flags & XFS_QMOPT_DOWARN)
  1800. cmn_err(CE_ALERT,
  1801. "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
  1802. str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
  1803. errs++;
  1804. }
  1805. if (ddq->d_version != XFS_DQUOT_VERSION) {
  1806. if (flags & XFS_QMOPT_DOWARN)
  1807. cmn_err(CE_ALERT,
  1808. "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
  1809. str, id, ddq->d_version, XFS_DQUOT_VERSION);
  1810. errs++;
  1811. }
  1812. if (ddq->d_flags != XFS_DQ_USER &&
  1813. ddq->d_flags != XFS_DQ_PROJ &&
  1814. ddq->d_flags != XFS_DQ_GROUP) {
  1815. if (flags & XFS_QMOPT_DOWARN)
  1816. cmn_err(CE_ALERT,
  1817. "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
  1818. str, id, ddq->d_flags);
  1819. errs++;
  1820. }
  1821. if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
  1822. if (flags & XFS_QMOPT_DOWARN)
  1823. cmn_err(CE_ALERT,
  1824. "%s : ondisk-dquot 0x%p, ID mismatch: "
  1825. "0x%x expected, found id 0x%x",
  1826. str, ddq, id, be32_to_cpu(ddq->d_id));
  1827. errs++;
  1828. }
  1829. if (!errs && ddq->d_id) {
  1830. if (ddq->d_blk_softlimit &&
  1831. be64_to_cpu(ddq->d_bcount) >=
  1832. be64_to_cpu(ddq->d_blk_softlimit)) {
  1833. if (!ddq->d_btimer) {
  1834. if (flags & XFS_QMOPT_DOWARN)
  1835. cmn_err(CE_ALERT,
  1836. "%s : Dquot ID 0x%x (0x%p) "
  1837. "BLK TIMER NOT STARTED",
  1838. str, (int)be32_to_cpu(ddq->d_id), ddq);
  1839. errs++;
  1840. }
  1841. }
  1842. if (ddq->d_ino_softlimit &&
  1843. be64_to_cpu(ddq->d_icount) >=
  1844. be64_to_cpu(ddq->d_ino_softlimit)) {
  1845. if (!ddq->d_itimer) {
  1846. if (flags & XFS_QMOPT_DOWARN)
  1847. cmn_err(CE_ALERT,
  1848. "%s : Dquot ID 0x%x (0x%p) "
  1849. "INODE TIMER NOT STARTED",
  1850. str, (int)be32_to_cpu(ddq->d_id), ddq);
  1851. errs++;
  1852. }
  1853. }
  1854. if (ddq->d_rtb_softlimit &&
  1855. be64_to_cpu(ddq->d_rtbcount) >=
  1856. be64_to_cpu(ddq->d_rtb_softlimit)) {
  1857. if (!ddq->d_rtbtimer) {
  1858. if (flags & XFS_QMOPT_DOWARN)
  1859. cmn_err(CE_ALERT,
  1860. "%s : Dquot ID 0x%x (0x%p) "
  1861. "RTBLK TIMER NOT STARTED",
  1862. str, (int)be32_to_cpu(ddq->d_id), ddq);
  1863. errs++;
  1864. }
  1865. }
  1866. }
  1867. if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
  1868. return errs;
  1869. if (flags & XFS_QMOPT_DOWARN)
  1870. cmn_err(CE_NOTE, "Re-initializing dquot ID 0x%x", id);
  1871. /*
  1872. * Typically, a repair is only requested by quotacheck.
  1873. */
  1874. ASSERT(id != -1);
  1875. ASSERT(flags & XFS_QMOPT_DQREPAIR);
  1876. memset(d, 0, sizeof(xfs_dqblk_t));
  1877. d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
  1878. d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
  1879. d->dd_diskdq.d_flags = type;
  1880. d->dd_diskdq.d_id = cpu_to_be32(id);
  1881. return errs;
  1882. }
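/*
 * Usage note (illustration only): during log recovery this is called
 * with XFS_QMOPT_DOWARN and never XFS_QMOPT_DQREPAIR, and the buffer
 * recovery path above passes id == -1 so the ID comparison is skipped;
 * a failing check just means the dquot region or record is not
 * replayed. The repair path at the end, which re-initializes the
 * block, is normally requested only by quotacheck.
 */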
  1883. /*
  1884. * Perform a dquot buffer recovery.
  1885. * Simple algorithm: if we have found a QUOTAOFF logitem of the same type
  1886. * (ie. USR or GRP), then just toss this buffer away; don't recover it.
  1887. * Else, treat it as a regular buffer and do recovery.
  1888. */
  1889. STATIC void
  1890. xlog_recover_do_dquot_buffer(
  1891. xfs_mount_t *mp,
  1892. xlog_t *log,
  1893. xlog_recover_item_t *item,
  1894. xfs_buf_t *bp,
  1895. xfs_buf_log_format_t *buf_f)
  1896. {
  1897. uint type;
  1898. /*
  1899. * Filesystems are required to send in quota flags at mount time.
  1900. */
  1901. if (mp->m_qflags == 0) {
  1902. return;
  1903. }
  1904. type = 0;
  1905. if (buf_f->blf_flags & XFS_BLI_UDQUOT_BUF)
  1906. type |= XFS_DQ_USER;
  1907. if (buf_f->blf_flags & XFS_BLI_PDQUOT_BUF)
  1908. type |= XFS_DQ_PROJ;
  1909. if (buf_f->blf_flags & XFS_BLI_GDQUOT_BUF)
  1910. type |= XFS_DQ_GROUP;
  1911. /*
1912. * This type of quota was turned off, so ignore this buffer.
  1913. */
  1914. if (log->l_quotaoffs_flag & type)
  1915. return;
  1916. xlog_recover_do_reg_buffer(item, bp, buf_f);
  1917. }
  1918. /*
  1919. * This routine replays a modification made to a buffer at runtime.
  1920. * There are actually two types of buffer, regular and inode, which
  1921. * are handled differently. Inode buffers are handled differently
  1922. * in that we only recover a specific set of data from them, namely
  1923. * the inode di_next_unlinked fields. This is because all other inode
  1924. * data is actually logged via inode records and any data we replay
  1925. * here which overlaps that may be stale.
  1926. *
  1927. * When meta-data buffers are freed at run time we log a buffer item
  1928. * with the XFS_BLI_CANCEL bit set to indicate that previous copies
  1929. * of the buffer in the log should not be replayed at recovery time.
  1930. * This is so that if the blocks covered by the buffer are reused for
  1931. * file data before we crash we don't end up replaying old, freed
  1932. * meta-data into a user's file.
  1933. *
  1934. * To handle the cancellation of buffer log items, we make two passes
  1935. * over the log during recovery. During the first we build a table of
  1936. * those buffers which have been cancelled, and during the second we
  1937. * only replay those buffers which do not have corresponding cancel
  1938. * records in the table. See xlog_recover_do_buffer_pass[1,2] above
  1939. * for more details on the implementation of the table of cancel records.
  1940. */
  1941. STATIC int
  1942. xlog_recover_do_buffer_trans(
  1943. xlog_t *log,
  1944. xlog_recover_item_t *item,
  1945. int pass)
  1946. {
  1947. xfs_buf_log_format_t *buf_f;
  1948. xfs_mount_t *mp;
  1949. xfs_buf_t *bp;
  1950. int error;
  1951. int cancel;
  1952. xfs_daddr_t blkno;
  1953. int len;
  1954. ushort flags;
  1955. buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;
  1956. if (pass == XLOG_RECOVER_PASS1) {
  1957. /*
  1958. * In this pass we're only looking for buf items
  1959. * with the XFS_BLI_CANCEL bit set.
  1960. */
  1961. xlog_recover_do_buffer_pass1(log, buf_f);
  1962. return 0;
  1963. } else {
  1964. /*
  1965. * In this pass we want to recover all the buffers
  1966. * which have not been cancelled and are not
  1967. * cancellation buffers themselves. The routine
  1968. * we call here will tell us whether or not to
  1969. * continue with the replay of this buffer.
  1970. */
  1971. cancel = xlog_recover_do_buffer_pass2(log, buf_f);
  1972. if (cancel) {
  1973. return 0;
  1974. }
  1975. }
  1976. switch (buf_f->blf_type) {
  1977. case XFS_LI_BUF:
  1978. blkno = buf_f->blf_blkno;
  1979. len = buf_f->blf_len;
  1980. flags = buf_f->blf_flags;
  1981. break;
  1982. default:
  1983. xfs_fs_cmn_err(CE_ALERT, log->l_mp,
  1984. "xfs_log_recover: unknown buffer type 0x%x, logdev %s",
  1985. buf_f->blf_type, log->l_mp->m_logname ?
  1986. log->l_mp->m_logname : "internal");
  1987. XFS_ERROR_REPORT("xlog_recover_do_buffer_trans",
  1988. XFS_ERRLEVEL_LOW, log->l_mp);
  1989. return XFS_ERROR(EFSCORRUPTED);
  1990. }
  1991. mp = log->l_mp;
  1992. if (flags & XFS_BLI_INODE_BUF) {
  1993. bp = xfs_buf_read_flags(mp->m_ddev_targp, blkno, len,
  1994. XFS_BUF_LOCK);
  1995. } else {
  1996. bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, 0);
  1997. }
  1998. if (XFS_BUF_ISERROR(bp)) {
  1999. xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp,
  2000. bp, blkno);
  2001. error = XFS_BUF_GETERROR(bp);
  2002. xfs_buf_relse(bp);
  2003. return error;
  2004. }
  2005. error = 0;
  2006. if (flags & XFS_BLI_INODE_BUF) {
  2007. error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
  2008. } else if (flags &
  2009. (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
  2010. xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
  2011. } else {
  2012. xlog_recover_do_reg_buffer(item, bp, buf_f);
  2013. }
  2014. if (error)
  2015. return XFS_ERROR(error);
  2016. /*
  2017. * Perform delayed write on the buffer. Asynchronous writes will be
  2018. * slower when taking into account all the buffers to be flushed.
  2019. *
  2020. * Also make sure that only inode buffers with good sizes stay in
  2021. * the buffer cache. The kernel moves inodes in buffers of 1 block
  2022. * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger. The inode
  2023. * buffers in the log can be a different size if the log was generated
  2024. * by an older kernel using unclustered inode buffers or a newer kernel
2025. * running with a different inode cluster size. Regardless, if
  2026. * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
  2027. * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
  2028. * the buffer out of the buffer cache so that the buffer won't
  2029. * overlap with future reads of those inodes.
  2030. */
  2031. if (XFS_DINODE_MAGIC ==
  2032. be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
  2033. (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize,
  2034. (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
  2035. XFS_BUF_STALE(bp);
  2036. error = xfs_bwrite(mp, bp);
  2037. } else {
  2038. ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL ||
  2039. XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp);
  2040. XFS_BUF_SET_FSPRIVATE(bp, mp);
  2041. XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
  2042. xfs_bdwrite(mp, bp);
  2043. }
  2044. return (error);
  2045. }
  2046. STATIC int
  2047. xlog_recover_do_inode_trans(
  2048. xlog_t *log,
  2049. xlog_recover_item_t *item,
  2050. int pass)
  2051. {
  2052. xfs_inode_log_format_t *in_f;
  2053. xfs_mount_t *mp;
  2054. xfs_buf_t *bp;
  2055. xfs_imap_t imap;
  2056. xfs_dinode_t *dip;
  2057. xfs_ino_t ino;
  2058. int len;
  2059. xfs_caddr_t src;
  2060. xfs_caddr_t dest;
  2061. int error;
  2062. int attr_index;
  2063. uint fields;
  2064. xfs_icdinode_t *dicp;
  2065. int need_free = 0;
  2066. if (pass == XLOG_RECOVER_PASS1) {
  2067. return 0;
  2068. }
  2069. if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
  2070. in_f = (xfs_inode_log_format_t *)item->ri_buf[0].i_addr;
  2071. } else {
  2072. in_f = (xfs_inode_log_format_t *)kmem_alloc(
  2073. sizeof(xfs_inode_log_format_t), KM_SLEEP);
  2074. need_free = 1;
  2075. error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
  2076. if (error)
  2077. goto error;
  2078. }
  2079. ino = in_f->ilf_ino;
  2080. mp = log->l_mp;
  2081. if (ITEM_TYPE(item) == XFS_LI_INODE) {
  2082. imap.im_blkno = (xfs_daddr_t)in_f->ilf_blkno;
  2083. imap.im_len = in_f->ilf_len;
  2084. imap.im_boffset = in_f->ilf_boffset;
  2085. } else {
  2086. /*
  2087. * It's an old inode format record. We don't know where
  2088. * its cluster is located on disk, and we can't allow
  2089. * xfs_imap() to figure it out because the inode btrees
  2090. * are not ready to be used. Therefore do not pass the
  2091. * XFS_IMAP_LOOKUP flag to xfs_imap(). This will give
  2092. * us only the single block in which the inode lives
  2093. * rather than its cluster, so we must make sure to
  2094. * invalidate the buffer when we write it out below.
  2095. */
  2096. imap.im_blkno = 0;
  2097. xfs_imap(log->l_mp, NULL, ino, &imap, 0);
  2098. }
  2099. /*
  2100. * Inode buffers can be freed, look out for it,
  2101. * and do not replay the inode.
  2102. */
  2103. if (xlog_check_buffer_cancelled(log, imap.im_blkno, imap.im_len, 0)) {
  2104. error = 0;
  2105. goto error;
  2106. }
  2107. bp = xfs_buf_read_flags(mp->m_ddev_targp, imap.im_blkno, imap.im_len,
  2108. XFS_BUF_LOCK);
  2109. if (XFS_BUF_ISERROR(bp)) {
  2110. xfs_ioerror_alert("xlog_recover_do..(read#2)", mp,
  2111. bp, imap.im_blkno);
  2112. error = XFS_BUF_GETERROR(bp);
  2113. xfs_buf_relse(bp);
  2114. goto error;
  2115. }
  2116. error = 0;
  2117. ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
  2118. dip = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
  2119. /*
  2120. * Make sure the place we're flushing out to really looks
  2121. * like an inode!
  2122. */
  2123. if (unlikely(be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC)) {
  2124. xfs_buf_relse(bp);
  2125. xfs_fs_cmn_err(CE_ALERT, mp,
  2126. "xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld",
  2127. dip, bp, ino);
  2128. XFS_ERROR_REPORT("xlog_recover_do_inode_trans(1)",
  2129. XFS_ERRLEVEL_LOW, mp);
  2130. error = EFSCORRUPTED;
  2131. goto error;
  2132. }
  2133. dicp = (xfs_icdinode_t *)(item->ri_buf[1].i_addr);
  2134. if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
  2135. xfs_buf_relse(bp);
  2136. xfs_fs_cmn_err(CE_ALERT, mp,
  2137. "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld",
  2138. item, ino);
  2139. XFS_ERROR_REPORT("xlog_recover_do_inode_trans(2)",
  2140. XFS_ERRLEVEL_LOW, mp);
  2141. error = EFSCORRUPTED;
  2142. goto error;
  2143. }
  2144. /* Skip replay when the on disk inode is newer than the log one */
  2145. if (dicp->di_flushiter < be16_to_cpu(dip->di_core.di_flushiter)) {
  2146. /*
  2147. * Deal with the wrap case, DI_MAX_FLUSH is less
  2148. * than smaller numbers
  2149. */
  2150. if (be16_to_cpu(dip->di_core.di_flushiter) == DI_MAX_FLUSH &&
  2151. dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
  2152. /* do nothing */
  2153. } else {
  2154. xfs_buf_relse(bp);
  2155. error = 0;
  2156. goto error;
  2157. }
  2158. }
  2159. /* Take the opportunity to reset the flush iteration count */
  2160. dicp->di_flushiter = 0;
  2161. if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) {
  2162. if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
  2163. (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
  2164. XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(3)",
  2165. XFS_ERRLEVEL_LOW, mp, dicp);
  2166. xfs_buf_relse(bp);
  2167. xfs_fs_cmn_err(CE_ALERT, mp,
  2168. "xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
  2169. item, dip, bp, ino);
  2170. error = EFSCORRUPTED;
  2171. goto error;
  2172. }
  2173. } else if (unlikely((dicp->di_mode & S_IFMT) == S_IFDIR)) {
  2174. if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
  2175. (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
  2176. (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
  2177. XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(4)",
  2178. XFS_ERRLEVEL_LOW, mp, dicp);
  2179. xfs_buf_relse(bp);
  2180. xfs_fs_cmn_err(CE_ALERT, mp,
  2181. "xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
  2182. item, dip, bp, ino);
  2183. error = EFSCORRUPTED;
  2184. goto error;
  2185. }
  2186. }
  2187. if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
  2188. XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(5)",
  2189. XFS_ERRLEVEL_LOW, mp, dicp);
  2190. xfs_buf_relse(bp);
  2191. xfs_fs_cmn_err(CE_ALERT, mp,
  2192. "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
  2193. item, dip, bp, ino,
  2194. dicp->di_nextents + dicp->di_anextents,
  2195. dicp->di_nblocks);
  2196. error = EFSCORRUPTED;
  2197. goto error;
  2198. }
  2199. if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
  2200. XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(6)",
  2201. XFS_ERRLEVEL_LOW, mp, dicp);
  2202. xfs_buf_relse(bp);
  2203. xfs_fs_cmn_err(CE_ALERT, mp,
  2204. "xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x",
  2205. item, dip, bp, ino, dicp->di_forkoff);
  2206. error = EFSCORRUPTED;
  2207. goto error;
  2208. }
  2209. if (unlikely(item->ri_buf[1].i_len > sizeof(xfs_dinode_core_t))) {
  2210. XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(7)",
  2211. XFS_ERRLEVEL_LOW, mp, dicp);
  2212. xfs_buf_relse(bp);
  2213. xfs_fs_cmn_err(CE_ALERT, mp,
  2214. "xfs_inode_recover: Bad inode log record length %d, rec ptr 0x%p",
  2215. item->ri_buf[1].i_len, item);
  2216. error = EFSCORRUPTED;
  2217. goto error;
  2218. }
  2219. /* The core is in in-core format */
  2220. xfs_dinode_to_disk(&dip->di_core,
  2221. (xfs_icdinode_t *)item->ri_buf[1].i_addr);
  2222. /* the rest is in on-disk format */
  2223. if (item->ri_buf[1].i_len > sizeof(xfs_dinode_core_t)) {
  2224. memcpy((xfs_caddr_t) dip + sizeof(xfs_dinode_core_t),
  2225. item->ri_buf[1].i_addr + sizeof(xfs_dinode_core_t),
  2226. item->ri_buf[1].i_len - sizeof(xfs_dinode_core_t));
  2227. }
  2228. fields = in_f->ilf_fields;
  2229. switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
  2230. case XFS_ILOG_DEV:
  2231. dip->di_u.di_dev = cpu_to_be32(in_f->ilf_u.ilfu_rdev);
  2232. break;
  2233. case XFS_ILOG_UUID:
  2234. dip->di_u.di_muuid = in_f->ilf_u.ilfu_uuid;
  2235. break;
  2236. }
  2237. if (in_f->ilf_size == 2)
  2238. goto write_inode_buffer;
  2239. len = item->ri_buf[2].i_len;
  2240. src = item->ri_buf[2].i_addr;
  2241. ASSERT(in_f->ilf_size <= 4);
  2242. ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
  2243. ASSERT(!(fields & XFS_ILOG_DFORK) ||
  2244. (len == in_f->ilf_dsize));
  2245. switch (fields & XFS_ILOG_DFORK) {
  2246. case XFS_ILOG_DDATA:
  2247. case XFS_ILOG_DEXT:
  2248. memcpy(&dip->di_u, src, len);
  2249. break;
  2250. case XFS_ILOG_DBROOT:
  2251. xfs_bmbt_to_bmdr((xfs_bmbt_block_t *)src, len,
  2252. &(dip->di_u.di_bmbt),
  2253. XFS_DFORK_DSIZE(dip, mp));
  2254. break;
  2255. default:
  2256. /*
  2257. * There are no data fork flags set.
  2258. */
  2259. ASSERT((fields & XFS_ILOG_DFORK) == 0);
  2260. break;
  2261. }
  2262. /*
  2263. * If we logged any attribute data, recover it. There may or
  2264. * may not have been any other non-core data logged in this
  2265. * transaction.
  2266. */
  2267. if (in_f->ilf_fields & XFS_ILOG_AFORK) {
  2268. if (in_f->ilf_fields & XFS_ILOG_DFORK) {
  2269. attr_index = 3;
  2270. } else {
  2271. attr_index = 2;
  2272. }
  2273. len = item->ri_buf[attr_index].i_len;
  2274. src = item->ri_buf[attr_index].i_addr;
  2275. ASSERT(len == in_f->ilf_asize);
  2276. switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
  2277. case XFS_ILOG_ADATA:
  2278. case XFS_ILOG_AEXT:
  2279. dest = XFS_DFORK_APTR(dip);
  2280. ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
  2281. memcpy(dest, src, len);
  2282. break;
  2283. case XFS_ILOG_ABROOT:
  2284. dest = XFS_DFORK_APTR(dip);
  2285. xfs_bmbt_to_bmdr((xfs_bmbt_block_t *)src, len,
  2286. (xfs_bmdr_block_t*)dest,
  2287. XFS_DFORK_ASIZE(dip, mp));
  2288. break;
  2289. default:
  2290. xlog_warn("XFS: xlog_recover_do_inode_trans: Invalid flag");
  2291. ASSERT(0);
  2292. xfs_buf_relse(bp);
  2293. error = EIO;
  2294. goto error;
  2295. }
  2296. }
  2297. write_inode_buffer:
  2298. if (ITEM_TYPE(item) == XFS_LI_INODE) {
  2299. ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL ||
  2300. XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp);
  2301. XFS_BUF_SET_FSPRIVATE(bp, mp);
  2302. XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
  2303. xfs_bdwrite(mp, bp);
  2304. } else {
  2305. XFS_BUF_STALE(bp);
  2306. error = xfs_bwrite(mp, bp);
  2307. }
  2308. error:
  2309. if (need_free)
  2310. kmem_free(in_f, sizeof(*in_f));
  2311. return XFS_ERROR(error);
  2312. }
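/*
 * Region layout assumed by the code above (illustration only):
 *	ri_buf[0]	inode log format structure
 *	ri_buf[1]	inode core (logged in in-core format)
 *	ri_buf[2]	data fork, if XFS_ILOG_DFORK was logged
 *	ri_buf[2 or 3]	attribute fork, if XFS_ILOG_AFORK was logged
 * which is why ilf_size is at most 4 and attr_index is 2 when no data
 * fork region is present.
 */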
  2313. /*
  2314. * Recover QUOTAOFF records. We simply make a note of it in the xlog_t
  2315. * structure, so that we know not to do any dquot item or dquot buffer recovery,
  2316. * of that type.
  2317. */
  2318. STATIC int
  2319. xlog_recover_do_quotaoff_trans(
  2320. xlog_t *log,
  2321. xlog_recover_item_t *item,
  2322. int pass)
  2323. {
  2324. xfs_qoff_logformat_t *qoff_f;
  2325. if (pass == XLOG_RECOVER_PASS2) {
  2326. return (0);
  2327. }
  2328. qoff_f = (xfs_qoff_logformat_t *)item->ri_buf[0].i_addr;
  2329. ASSERT(qoff_f);
  2330. /*
  2331. * The logitem format's flag tells us if this was user quotaoff,
  2332. * group/project quotaoff or both.
  2333. */
  2334. if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
  2335. log->l_quotaoffs_flag |= XFS_DQ_USER;
  2336. if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
  2337. log->l_quotaoffs_flag |= XFS_DQ_PROJ;
  2338. if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
  2339. log->l_quotaoffs_flag |= XFS_DQ_GROUP;
  2340. return (0);
  2341. }
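/*
 * Note that, unlike most replay routines, quotaoff records are handled
 * during pass 1 (and skipped in pass 2) so that l_quotaoffs_flag is
 * already populated by the time pass 2 decides whether to replay
 * individual dquot items and dquot buffers.
 */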
  2342. /*
  2343. * Recover a dquot record
  2344. */
  2345. STATIC int
  2346. xlog_recover_do_dquot_trans(
  2347. xlog_t *log,
  2348. xlog_recover_item_t *item,
  2349. int pass)
  2350. {
  2351. xfs_mount_t *mp;
  2352. xfs_buf_t *bp;
  2353. struct xfs_disk_dquot *ddq, *recddq;
  2354. int error;
  2355. xfs_dq_logformat_t *dq_f;
  2356. uint type;
  2357. if (pass == XLOG_RECOVER_PASS1) {
  2358. return 0;
  2359. }
  2360. mp = log->l_mp;
  2361. /*
  2362. * Filesystems are required to send in quota flags at mount time.
  2363. */
  2364. if (mp->m_qflags == 0)
  2365. return (0);
  2366. recddq = (xfs_disk_dquot_t *)item->ri_buf[1].i_addr;
  2367. ASSERT(recddq);
  2368. /*
2369. * This type of quota was turned off, so ignore this record.
  2370. */
  2371. type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
  2372. ASSERT(type);
  2373. if (log->l_quotaoffs_flag & type)
  2374. return (0);
  2375. /*
  2376. * At this point we know that quota was _not_ turned off.
2377. * Since the mount flags do not indicate otherwise, this
  2378. * must mean that quota is on, and the dquot needs to be replayed.
  2379. * Remember that we may not have fully recovered the superblock yet,
  2380. * so we can't do the usual trick of looking at the SB quota bits.
  2381. *
  2382. * The other possibility, of course, is that the quota subsystem was
  2383. * removed since the last mount - ENOSYS.
  2384. */
  2385. dq_f = (xfs_dq_logformat_t *)item->ri_buf[0].i_addr;
  2386. ASSERT(dq_f);
  2387. if ((error = xfs_qm_dqcheck(recddq,
  2388. dq_f->qlf_id,
  2389. 0, XFS_QMOPT_DOWARN,
  2390. "xlog_recover_do_dquot_trans (log copy)"))) {
  2391. return XFS_ERROR(EIO);
  2392. }
  2393. ASSERT(dq_f->qlf_len == 1);
  2394. error = xfs_read_buf(mp, mp->m_ddev_targp,
  2395. dq_f->qlf_blkno,
  2396. XFS_FSB_TO_BB(mp, dq_f->qlf_len),
  2397. 0, &bp);
  2398. if (error) {
  2399. xfs_ioerror_alert("xlog_recover_do..(read#3)", mp,
  2400. bp, dq_f->qlf_blkno);
  2401. return error;
  2402. }
  2403. ASSERT(bp);
  2404. ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
  2405. /*
  2406. * At least the magic num portion should be on disk because this
  2407. * was among a chunk of dquots created earlier, and we did some
  2408. * minimal initialization then.
  2409. */
  2410. if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
  2411. "xlog_recover_do_dquot_trans")) {
  2412. xfs_buf_relse(bp);
  2413. return XFS_ERROR(EIO);
  2414. }
  2415. memcpy(ddq, recddq, item->ri_buf[1].i_len);
  2416. ASSERT(dq_f->qlf_size == 2);
  2417. ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL ||
  2418. XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp);
  2419. XFS_BUF_SET_FSPRIVATE(bp, mp);
  2420. XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
  2421. xfs_bdwrite(mp, bp);
  2422. return (0);
  2423. }
  2424. /*
  2425. * This routine is called to create an in-core extent free intent
  2426. * item from the efi format structure which was logged on disk.
  2427. * It allocates an in-core efi, copies the extents from the format
  2428. * structure into it, and adds the efi to the AIL with the given
  2429. * LSN.
  2430. */
  2431. STATIC int
  2432. xlog_recover_do_efi_trans(
  2433. xlog_t *log,
  2434. xlog_recover_item_t *item,
  2435. xfs_lsn_t lsn,
  2436. int pass)
  2437. {
  2438. int error;
  2439. xfs_mount_t *mp;
  2440. xfs_efi_log_item_t *efip;
  2441. xfs_efi_log_format_t *efi_formatp;
  2442. if (pass == XLOG_RECOVER_PASS1) {
  2443. return 0;
  2444. }
  2445. efi_formatp = (xfs_efi_log_format_t *)item->ri_buf[0].i_addr;
  2446. mp = log->l_mp;
  2447. efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
  2448. if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
  2449. &(efip->efi_format)))) {
  2450. xfs_efi_item_free(efip);
  2451. return error;
  2452. }
  2453. efip->efi_next_extent = efi_formatp->efi_nextents;
  2454. efip->efi_flags |= XFS_EFI_COMMITTED;
  2455. spin_lock(&mp->m_ail_lock);
  2456. /*
  2457. * xfs_trans_update_ail() drops the AIL lock.
  2458. */
  2459. xfs_trans_update_ail(mp, (xfs_log_item_t *)efip, lsn);
  2460. return 0;
  2461. }
  2462. /*
  2463. * This routine is called when an efd format structure is found in
2464. * a committed transaction in the log. Its purpose is to cancel
  2465. * the corresponding efi if it was still in the log. To do this
  2466. * it searches the AIL for the efi with an id equal to that in the
  2467. * efd format structure. If we find it, we remove the efi from the
  2468. * AIL and free it.
  2469. */
  2470. STATIC void
  2471. xlog_recover_do_efd_trans(
  2472. xlog_t *log,
  2473. xlog_recover_item_t *item,
  2474. int pass)
  2475. {
  2476. xfs_mount_t *mp;
  2477. xfs_efd_log_format_t *efd_formatp;
  2478. xfs_efi_log_item_t *efip = NULL;
  2479. xfs_log_item_t *lip;
  2480. int gen;
  2481. __uint64_t efi_id;
  2482. if (pass == XLOG_RECOVER_PASS1) {
  2483. return;
  2484. }
  2485. efd_formatp = (xfs_efd_log_format_t *)item->ri_buf[0].i_addr;
  2486. ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
  2487. ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
  2488. (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
  2489. ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
  2490. efi_id = efd_formatp->efd_efi_id;
  2491. /*
  2492. * Search for the efi with the id in the efd format structure
  2493. * in the AIL.
  2494. */
  2495. mp = log->l_mp;
  2496. spin_lock(&mp->m_ail_lock);
  2497. lip = xfs_trans_first_ail(mp, &gen);
  2498. while (lip != NULL) {
  2499. if (lip->li_type == XFS_LI_EFI) {
  2500. efip = (xfs_efi_log_item_t *)lip;
  2501. if (efip->efi_format.efi_id == efi_id) {
  2502. /*
  2503. * xfs_trans_delete_ail() drops the
  2504. * AIL lock.
  2505. */
  2506. xfs_trans_delete_ail(mp, lip);
  2507. xfs_efi_item_free(efip);
  2508. return;
  2509. }
  2510. }
  2511. lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
  2512. }
  2513. spin_unlock(&mp->m_ail_lock);
  2514. }
  2515. /*
  2516. * Perform the transaction
  2517. *
  2518. * If the transaction modifies a buffer or inode, do it now. Otherwise,
  2519. * EFIs and EFDs get queued up by adding entries into the AIL for them.
  2520. */
  2521. STATIC int
  2522. xlog_recover_do_trans(
  2523. xlog_t *log,
  2524. xlog_recover_t *trans,
  2525. int pass)
  2526. {
  2527. int error = 0;
  2528. xlog_recover_item_t *item, *first_item;
  2529. if ((error = xlog_recover_reorder_trans(trans)))
  2530. return error;
  2531. first_item = item = trans->r_itemq;
  2532. do {
  2533. /*
  2534. * we don't need to worry about the block number being
  2535. * truncated in > 1 TB buffers because in user-land,
2536. * we're now n32 or 64-bit, so xfs_daddr_t is 64 bits and
  2537. * the blknos will get through the user-mode buffer
  2538. * cache properly. The only bad case is o32 kernels
  2539. * where xfs_daddr_t is 32-bits but mount will warn us
  2540. * off a > 1 TB filesystem before we get here.
  2541. */
  2542. if ((ITEM_TYPE(item) == XFS_LI_BUF)) {
  2543. if ((error = xlog_recover_do_buffer_trans(log, item,
  2544. pass)))
  2545. break;
  2546. } else if ((ITEM_TYPE(item) == XFS_LI_INODE)) {
  2547. if ((error = xlog_recover_do_inode_trans(log, item,
  2548. pass)))
  2549. break;
  2550. } else if (ITEM_TYPE(item) == XFS_LI_EFI) {
  2551. if ((error = xlog_recover_do_efi_trans(log, item, trans->r_lsn,
  2552. pass)))
  2553. break;
  2554. } else if (ITEM_TYPE(item) == XFS_LI_EFD) {
  2555. xlog_recover_do_efd_trans(log, item, pass);
  2556. } else if (ITEM_TYPE(item) == XFS_LI_DQUOT) {
  2557. if ((error = xlog_recover_do_dquot_trans(log, item,
  2558. pass)))
  2559. break;
  2560. } else if ((ITEM_TYPE(item) == XFS_LI_QUOTAOFF)) {
  2561. if ((error = xlog_recover_do_quotaoff_trans(log, item,
  2562. pass)))
  2563. break;
  2564. } else {
  2565. xlog_warn("XFS: xlog_recover_do_trans");
  2566. ASSERT(0);
  2567. error = XFS_ERROR(EIO);
  2568. break;
  2569. }
  2570. item = item->ri_next;
  2571. } while (first_item != item);
  2572. return error;
  2573. }
  2574. /*
  2575. * Free up any resources allocated by the transaction
  2576. *
  2577. * Remember that EFIs, EFDs, and IUNLINKs are handled later.
  2578. */
  2579. STATIC void
  2580. xlog_recover_free_trans(
  2581. xlog_recover_t *trans)
  2582. {
  2583. xlog_recover_item_t *first_item, *item, *free_item;
  2584. int i;
  2585. item = first_item = trans->r_itemq;
  2586. do {
  2587. free_item = item;
  2588. item = item->ri_next;
  2589. /* Free the regions in the item. */
  2590. for (i = 0; i < free_item->ri_cnt; i++) {
  2591. kmem_free(free_item->ri_buf[i].i_addr,
  2592. free_item->ri_buf[i].i_len);
  2593. }
  2594. /* Free the item itself */
  2595. kmem_free(free_item->ri_buf,
  2596. (free_item->ri_total * sizeof(xfs_log_iovec_t)));
  2597. kmem_free(free_item, sizeof(xlog_recover_item_t));
  2598. } while (first_item != item);
  2599. /* Free the transaction recover structure */
  2600. kmem_free(trans, sizeof(xlog_recover_t));
  2601. }
  2602. STATIC int
  2603. xlog_recover_commit_trans(
  2604. xlog_t *log,
  2605. xlog_recover_t **q,
  2606. xlog_recover_t *trans,
  2607. int pass)
  2608. {
  2609. int error;
  2610. if ((error = xlog_recover_unlink_tid(q, trans)))
  2611. return error;
  2612. if ((error = xlog_recover_do_trans(log, trans, pass)))
  2613. return error;
  2614. xlog_recover_free_trans(trans); /* no error */
  2615. return 0;
  2616. }
  2617. STATIC int
  2618. xlog_recover_unmount_trans(
  2619. xlog_recover_t *trans)
  2620. {
  2621. /* Do nothing now */
  2622. xlog_warn("XFS: xlog_recover_unmount_trans: Unmount LR");
  2623. return 0;
  2624. }
  2625. /*
  2626. * There are two valid states of the r_state field. 0 indicates that the
  2627. * transaction structure is in a normal state. We have either seen the
  2628. * start of the transaction or the last operation we added was not a partial
  2629. * operation. If the last operation we added to the transaction was a
  2630. * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
  2631. *
  2632. * NOTE: skip LRs with 0 data length.
  2633. */
  2634. STATIC int
  2635. xlog_recover_process_data(
  2636. xlog_t *log,
  2637. xlog_recover_t *rhash[],
  2638. xlog_rec_header_t *rhead,
  2639. xfs_caddr_t dp,
  2640. int pass)
  2641. {
  2642. xfs_caddr_t lp;
  2643. int num_logops;
  2644. xlog_op_header_t *ohead;
  2645. xlog_recover_t *trans;
  2646. xlog_tid_t tid;
  2647. int error;
  2648. unsigned long hash;
  2649. uint flags;
  2650. lp = dp + be32_to_cpu(rhead->h_len);
  2651. num_logops = be32_to_cpu(rhead->h_num_logops);
  2652. /* check the log format matches our own - else we can't recover */
  2653. if (xlog_header_check_recover(log->l_mp, rhead))
  2654. return (XFS_ERROR(EIO));
  2655. while ((dp < lp) && num_logops) {
  2656. ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
  2657. ohead = (xlog_op_header_t *)dp;
  2658. dp += sizeof(xlog_op_header_t);
  2659. if (ohead->oh_clientid != XFS_TRANSACTION &&
  2660. ohead->oh_clientid != XFS_LOG) {
  2661. xlog_warn(
  2662. "XFS: xlog_recover_process_data: bad clientid");
  2663. ASSERT(0);
  2664. return (XFS_ERROR(EIO));
  2665. }
  2666. tid = be32_to_cpu(ohead->oh_tid);
  2667. hash = XLOG_RHASH(tid);
  2668. trans = xlog_recover_find_tid(rhash[hash], tid);
  2669. if (trans == NULL) { /* not found; add new tid */
  2670. if (ohead->oh_flags & XLOG_START_TRANS)
  2671. xlog_recover_new_tid(&rhash[hash], tid,
  2672. be64_to_cpu(rhead->h_lsn));
  2673. } else {
  2674. if (dp + be32_to_cpu(ohead->oh_len) > lp) {
  2675. xlog_warn(
  2676. "XFS: xlog_recover_process_data: bad length");
  2677. WARN_ON(1);
  2678. return (XFS_ERROR(EIO));
  2679. }
  2680. flags = ohead->oh_flags & ~XLOG_END_TRANS;
  2681. if (flags & XLOG_WAS_CONT_TRANS)
  2682. flags &= ~XLOG_CONTINUE_TRANS;
  2683. switch (flags) {
  2684. case XLOG_COMMIT_TRANS:
  2685. error = xlog_recover_commit_trans(log,
  2686. &rhash[hash], trans, pass);
  2687. break;
  2688. case XLOG_UNMOUNT_TRANS:
  2689. error = xlog_recover_unmount_trans(trans);
  2690. break;
  2691. case XLOG_WAS_CONT_TRANS:
  2692. error = xlog_recover_add_to_cont_trans(trans,
  2693. dp, be32_to_cpu(ohead->oh_len));
  2694. break;
  2695. case XLOG_START_TRANS:
  2696. xlog_warn(
  2697. "XFS: xlog_recover_process_data: bad transaction");
  2698. ASSERT(0);
  2699. error = XFS_ERROR(EIO);
  2700. break;
  2701. case 0:
  2702. case XLOG_CONTINUE_TRANS:
  2703. error = xlog_recover_add_to_trans(trans,
  2704. dp, be32_to_cpu(ohead->oh_len));
  2705. break;
  2706. default:
  2707. xlog_warn(
  2708. "XFS: xlog_recover_process_data: bad flag");
  2709. ASSERT(0);
  2710. error = XFS_ERROR(EIO);
  2711. break;
  2712. }
  2713. if (error)
  2714. return error;
  2715. }
  2716. dp += be32_to_cpu(ohead->oh_len);
  2717. num_logops--;
  2718. }
  2719. return 0;
  2720. }
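/*
 * Summary of the op header flag handling above (illustration only):
 *	XLOG_START_TRANS	first op of a tid, allocates the xlog_recover_t
 *	0 / XLOG_CONTINUE_TRANS	adds a new region to the transaction
 *	XLOG_WAS_CONT_TRANS	appends data to the region that was split
 *				across the previous log record
 *	XLOG_COMMIT_TRANS	unlinks the tid and replays the transaction
 *	XLOG_UNMOUNT_TRANS	noted and otherwise ignored
 * XLOG_END_TRANS is masked off before the switch, so an op that also
 * ends a log record is handled the same way.
 */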

/*
 * Process an extent free intent item that was recovered from
 * the log.  We need to free the extents that it describes.
 */
STATIC void
xlog_recover_process_efi(
	xfs_mount_t		*mp,
	xfs_efi_log_item_t	*efip)
{
	xfs_efd_log_item_t	*efdp;
	xfs_trans_t		*tp;
	int			i;
	xfs_extent_t		*extp;
	xfs_fsblock_t		startblock_fsb;

	ASSERT(!(efip->efi_flags & XFS_EFI_RECOVERED));

	/*
	 * First check the validity of the extents described by the
	 * EFI.  If any are bad, then assume that all are bad and
	 * just toss the EFI.
	 */
	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
		extp = &(efip->efi_format.efi_extents[i]);
		startblock_fsb = XFS_BB_TO_FSB(mp,
				   XFS_FSB_TO_DADDR(mp, extp->ext_start));
		if ((startblock_fsb == 0) ||
		    (extp->ext_len == 0) ||
		    (startblock_fsb >= mp->m_sb.sb_dblocks) ||
		    (extp->ext_len >= mp->m_sb.sb_agblocks)) {
			/*
			 * This will pull the EFI from the AIL and
			 * free the memory associated with it.
			 */
			xfs_efi_release(efip, efip->efi_format.efi_nextents);
			return;
		}
	}

	tp = xfs_trans_alloc(mp, 0);
	xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
	efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);

	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
		extp = &(efip->efi_format.efi_extents[i]);
		xfs_free_extent(tp, extp->ext_start, extp->ext_len);
		xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
					 extp->ext_len);
	}

	efip->efi_flags |= XFS_EFI_RECOVERED;
	xfs_trans_commit(tp, 0);
}
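
/*
 * Example of the sanity screen above (hypothetical numbers): on a
 * filesystem with sb_dblocks == 1000000 and sb_agblocks == 250000, an
 * EFI extent whose ext_start maps to startblock_fsb == 1200000, or one
 * with ext_len == 300000, fails the range checks, so the whole EFI is
 * released via xfs_efi_release() and nothing is freed.  Only when every
 * extent passes do we build the EFD transaction and call
 * xfs_free_extent() for each extent.
 */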

/*
 * Verify that once we've encountered something other than an EFI
 * in the AIL that there are no more EFIs in the AIL.
 */
#if defined(DEBUG)
STATIC void
xlog_recover_check_ail(
	xfs_mount_t		*mp,
	xfs_log_item_t		*lip,
	int			gen)
{
	int			orig_gen = gen;

	do {
		ASSERT(lip->li_type != XFS_LI_EFI);
		lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
		/*
		 * The check will be bogus if we restart from the
		 * beginning of the AIL, so ASSERT that we don't.
		 * We never should since we're holding the AIL lock
		 * the entire time.
		 */
		ASSERT(gen == orig_gen);
	} while (lip != NULL);
}
#endif	/* DEBUG */

/*
 * When this is called, all of the EFIs which did not have
 * corresponding EFDs should be in the AIL.  What we do now
 * is free the extents associated with each one.
 *
 * Since we process the EFIs in normal transactions, they
 * will be removed at some point after the commit.  This prevents
 * us from just walking down the list processing each one.
 * We'll use a flag in the EFI to skip those that we've already
 * processed and use the AIL iteration mechanism's generation
 * count to try to speed this up at least a bit.
 *
 * When we start, we know that the EFIs are the only things in
 * the AIL.  As we process them, however, other items are added
 * to the AIL.  Since everything added to the AIL must come after
 * everything already in the AIL, we stop processing as soon as
 * we see something other than an EFI in the AIL.
 */
STATIC void
xlog_recover_process_efis(
	xlog_t			*log)
{
	xfs_log_item_t		*lip;
	xfs_efi_log_item_t	*efip;
	int			gen;
	xfs_mount_t		*mp;

	mp = log->l_mp;
	spin_lock(&mp->m_ail_lock);

	lip = xfs_trans_first_ail(mp, &gen);
	while (lip != NULL) {
		/*
		 * We're done when we see something other than an EFI.
		 */
		if (lip->li_type != XFS_LI_EFI) {
			xlog_recover_check_ail(mp, lip, gen);
			break;
		}

		/*
		 * Skip EFIs that we've already processed.
		 */
		efip = (xfs_efi_log_item_t *)lip;
		if (efip->efi_flags & XFS_EFI_RECOVERED) {
			lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
			continue;
		}

		spin_unlock(&mp->m_ail_lock);
		xlog_recover_process_efi(mp, efip);
		spin_lock(&mp->m_ail_lock);
		lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
	}
	spin_unlock(&mp->m_ail_lock);
}

/*
 * This routine performs a transaction to null out a bad inode pointer
 * in an agi unlinked inode hash bucket.
 */
STATIC void
xlog_recover_clear_agi_bucket(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agno,
	int		bucket)
{
	xfs_trans_t	*tp;
	xfs_agi_t	*agi;
	xfs_buf_t	*agibp;
	int		offset;
	int		error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
	xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp), 0, 0, 0);

	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
				   XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
				   XFS_FSS_TO_BB(mp, 1), 0, &agibp);
	if (error) {
		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
		return;
	}

	agi = XFS_BUF_TO_AGI(agibp);
	if (be32_to_cpu(agi->agi_magicnum) != XFS_AGI_MAGIC) {
		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
		return;
	}

	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		 (sizeof(xfs_agino_t) * bucket);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));

	(void) xfs_trans_commit(tp, 0);
}
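
/*
 * Logging granularity example (illustrative): agi_unlinked[] is an array
 * of 32-bit xfs_agino_t entries, so for bucket 5 the logged byte range
 * runs from offsetof(xfs_agi_t, agi_unlinked) + 20 to that value + 3 -
 * exactly the four bytes holding the cleared bucket - rather than the
 * whole AGI buffer.
 */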

/*
 * xlog_iunlink_recover
 *
 * This is called during recovery to process any inodes which
 * we unlinked but not freed when the system crashed.  These
 * inodes will be on the lists in the AGI blocks.  What we do
 * here is scan all the AGIs and fully truncate and free any
 * inodes found on the lists.  Each inode is removed from the
 * lists when it has been fully truncated and is freed.  The
 * freeing of the inode and its removal from the list must be
 * atomic.
 */
void
xlog_recover_process_iunlinks(
	xlog_t		*log)
{
	xfs_mount_t	*mp;
	xfs_agnumber_t	agno;
	xfs_agi_t	*agi;
	xfs_buf_t	*agibp;
	xfs_buf_t	*ibp;
	xfs_dinode_t	*dip;
	xfs_inode_t	*ip;
	xfs_agino_t	agino;
	xfs_ino_t	ino;
	int		bucket;
	int		error;
	uint		mp_dmevmask;

	mp = log->l_mp;

	/*
	 * Prevent any DMAPI event from being sent while in this function.
	 */
	mp_dmevmask = mp->m_dmevmask;
	mp->m_dmevmask = 0;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		/*
		 * Find the agi for this ag.
		 */
		agibp = xfs_buf_read(mp->m_ddev_targp,
				XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
				XFS_FSS_TO_BB(mp, 1), 0);
		if (XFS_BUF_ISERROR(agibp)) {
			xfs_ioerror_alert("xlog_recover_process_iunlinks(#1)",
				log->l_mp, agibp,
				XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)));
		}
		agi = XFS_BUF_TO_AGI(agibp);
		ASSERT(XFS_AGI_MAGIC == be32_to_cpu(agi->agi_magicnum));

		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {

			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
			while (agino != NULLAGINO) {

				/*
				 * Release the agi buffer so that it can
				 * be acquired in the normal course of the
				 * transaction to truncate and free the inode.
				 */
				xfs_buf_relse(agibp);

				ino = XFS_AGINO_TO_INO(mp, agno, agino);
				error = xfs_iget(mp, NULL, ino, 0, 0, &ip, 0);
				ASSERT(error || (ip != NULL));

				if (!error) {
					/*
					 * Get the on disk inode to find the
					 * next inode in the bucket.
					 */
					error = xfs_itobp(mp, NULL, ip, &dip,
							&ibp, 0, 0);
					ASSERT(error || (dip != NULL));
				}

				if (!error) {
					ASSERT(ip->i_d.di_nlink == 0);

					/* setup for the next pass */
					agino = be32_to_cpu(
							dip->di_next_unlinked);
					xfs_buf_relse(ibp);

					/*
					 * Prevent any DMAPI event from
					 * being sent when the
					 * reference on the inode is
					 * dropped.
					 */
					ip->i_d.di_dmevmask = 0;

					/*
					 * If this is a new inode, handle
					 * it specially.  Otherwise,
					 * just drop our reference to the
					 * inode.  If there are no
					 * other references, this will
					 * send the inode to
					 * xfs_inactive() which will
					 * truncate the file and free
					 * the inode.
					 */
					if (ip->i_d.di_mode == 0)
						xfs_iput_new(ip, 0);
					else
						VN_RELE(XFS_ITOV(ip));
				} else {
					/*
					 * We can't read in the inode
					 * this bucket points to, or
					 * this inode is messed up.  Just
					 * ditch this bucket of inodes.  We
					 * will lose some inodes and space,
					 * but at least we won't hang.  Call
					 * xlog_recover_clear_agi_bucket()
					 * to perform a transaction to clear
					 * the inode pointer in the bucket.
					 */
					xlog_recover_clear_agi_bucket(mp, agno,
							bucket);
					agino = NULLAGINO;
				}

				/*
				 * Reacquire the agibuffer and continue around
				 * the loop.
				 */
				agibp = xfs_buf_read(mp->m_ddev_targp,
						XFS_AG_DADDR(mp, agno,
							XFS_AGI_DADDR(mp)),
						XFS_FSS_TO_BB(mp, 1), 0);
				if (XFS_BUF_ISERROR(agibp)) {
					xfs_ioerror_alert(
				"xlog_recover_process_iunlinks(#2)",
						log->l_mp, agibp,
						XFS_AG_DADDR(mp, agno,
							XFS_AGI_DADDR(mp)));
				}
				agi = XFS_BUF_TO_AGI(agibp);
				ASSERT(XFS_AGI_MAGIC == be32_to_cpu(
					agi->agi_magicnum));
			}
		}

		/*
		 * Release the buffer for the current agi so we can
		 * go on to the next one.
		 */
		xfs_buf_relse(agibp);
	}

	mp->m_dmevmask = mp_dmevmask;
}
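
/*
 * Illustrative walk of one bucket: if agi_unlinked[0] holds agino 17,
 * the loop reads inode 17, notes dip->di_next_unlinked (say 42), drops
 * its reference so xfs_inactive() can truncate and free it, then moves
 * on to 42, and so on until a link of NULLAGINO terminates the chain.
 * The AGI buffer is released before each inode is processed and
 * re-read afterwards, since freeing the inode must itself lock and
 * update that same AGI.
 */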

#ifdef DEBUG
STATIC void
xlog_pack_data_checksum(
	xlog_t		*log,
	xlog_in_core_t	*iclog,
	int		size)
{
	int		i;
	__be32		*up;
	uint		chksum = 0;

	up = (__be32 *)iclog->ic_datap;
	/* divide length by 4 to get # words */
	for (i = 0; i < (size >> 2); i++) {
		chksum ^= be32_to_cpu(*up);
		up++;
	}
	iclog->ic_header.h_chksum = cpu_to_be32(chksum);
}
#else
#define xlog_pack_data_checksum(log, iclog, size)
#endif
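
/*
 * The DEBUG checksum above is a plain XOR over the 32-bit words of the
 * iclog data; e.g. words 0x1, 0x2, 0x3 give h_chksum = 0x1 ^ 0x2 ^ 0x3
 * = 0x0 (example values only).  xlog_unpack_data_checksum() below
 * recomputes the same XOR at recovery time and only warns on a mismatch.
 */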

/*
 * Stamp cycle number in every block
 */
void
xlog_pack_data(
	xlog_t			*log,
	xlog_in_core_t		*iclog,
	int			roundoff)
{
	int			i, j, k;
	int			size = iclog->ic_offset + roundoff;
	__be32			cycle_lsn;
	xfs_caddr_t		dp;
	xlog_in_core_2_t	*xhdr;

	xlog_pack_data_checksum(log, iclog, size);

	cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);

	dp = iclog->ic_datap;
	for (i = 0; i < BTOBB(size) &&
		i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
		iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
		*(__be32 *)dp = cycle_lsn;
		dp += BBSIZE;
	}

	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
		xhdr = (xlog_in_core_2_t *)&iclog->ic_header;
		for ( ; i < BTOBB(size); i++) {
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
			*(__be32 *)dp = cycle_lsn;
			dp += BBSIZE;
		}

		for (i = 1; i < log->l_iclog_heads; i++) {
			xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
		}
	}
}
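
/*
 * Index arithmetic example for the v2 case above: with the usual 32 KB
 * XLOG_HEADER_CYCLE_SIZE and 512-byte basic blocks there are 64
 * cycle-data slots per header, so block i = 100 of a large record is
 * saved in extended header j = 100 / 64 = 1 at slot k = 100 % 64 = 36
 * before the block's first word is overwritten with the cycle number.
 * xlog_unpack_data() below reverses exactly this mapping at recovery.
 */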

#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
STATIC void
xlog_unpack_data_checksum(
	xlog_rec_header_t	*rhead,
	xfs_caddr_t		dp,
	xlog_t			*log)
{
	__be32			*up = (__be32 *)dp;
	uint			chksum = 0;
	int			i;

	/* divide length by 4 to get # words */
	for (i = 0; i < be32_to_cpu(rhead->h_len) >> 2; i++) {
		chksum ^= be32_to_cpu(*up);
		up++;
	}
	if (chksum != be32_to_cpu(rhead->h_chksum)) {
		if (rhead->h_chksum ||
		    ((log->l_flags & XLOG_CHKSUM_MISMATCH) == 0)) {
			cmn_err(CE_DEBUG,
			"XFS: LogR chksum mismatch: was (0x%x) is (0x%x)\n",
				be32_to_cpu(rhead->h_chksum), chksum);
			cmn_err(CE_DEBUG,
"XFS: Disregard message if filesystem was created with non-DEBUG kernel");
			if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
				cmn_err(CE_DEBUG,
					"XFS: LogR this is a LogV2 filesystem\n");
			}
			log->l_flags |= XLOG_CHKSUM_MISMATCH;
		}
	}
}
#else
#define xlog_unpack_data_checksum(rhead, dp, log)
#endif

STATIC void
xlog_unpack_data(
	xlog_rec_header_t	*rhead,
	xfs_caddr_t		dp,
	xlog_t			*log)
{
	int			i, j, k;
	xlog_in_core_2_t	*xhdr;

	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
		dp += BBSIZE;
	}

	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
		xhdr = (xlog_in_core_2_t *)rhead;
		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
			dp += BBSIZE;
		}
	}

	xlog_unpack_data_checksum(rhead, dp, log);
}

STATIC int
xlog_valid_rec_header(
	xlog_t			*log,
	xlog_rec_header_t	*rhead,
	xfs_daddr_t		blkno)
{
	int			hlen;

	if (unlikely(be32_to_cpu(rhead->h_magicno) != XLOG_HEADER_MAGIC_NUM)) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
				XFS_ERRLEVEL_LOW, log->l_mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	if (unlikely(
	    (!rhead->h_version ||
	    (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
		xlog_warn("XFS: %s: unrecognised log version (%d).",
			__FUNCTION__, be32_to_cpu(rhead->h_version));
		return XFS_ERROR(EIO);
	}

	/* LR body must have data or it wouldn't have been written */
	hlen = be32_to_cpu(rhead->h_len);
	if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
				XFS_ERRLEVEL_LOW, log->l_mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
				XFS_ERRLEVEL_LOW, log->l_mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}

/*
 * Read the log from tail to head and process the log records found.
 * Handle the two cases where the tail and head are in the same cycle
 * and where the active portion of the log wraps around the end of
 * the physical log separately.  The pass parameter is passed through
 * to the routines called to process the data and is not looked at
 * here.
 */
STATIC int
xlog_do_recovery_pass(
	xlog_t			*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			pass)
{
	xlog_rec_header_t	*rhead;
	xfs_daddr_t		blk_no;
	xfs_caddr_t		bufaddr, offset;
	xfs_buf_t		*hbp, *dbp;
	int			error = 0, h_size;
	int			bblks, split_bblks;
	int			hblks, split_hblks, wrapped_hblks;
	xlog_recover_t		*rhash[XLOG_RHASH_SIZE];

	ASSERT(head_blk != tail_blk);

	/*
	 * Read the header of the tail block and get the iclog buffer size from
	 * h_size.  Use this to tell how many sectors make up the log header.
	 */
	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
		/*
		 * When using variable length iclogs, read first sector of
		 * iclog header and extract the header size from it.  Get a
		 * new hbp that is the correct size.
		 */
		hbp = xlog_get_bp(log, 1);
		if (!hbp)
			return ENOMEM;
		if ((error = xlog_bread(log, tail_blk, 1, hbp)))
			goto bread_err1;
		offset = xlog_align(log, tail_blk, 1, hbp);
		rhead = (xlog_rec_header_t *)offset;
		error = xlog_valid_rec_header(log, rhead, tail_blk);
		if (error)
			goto bread_err1;
		h_size = be32_to_cpu(rhead->h_size);
		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
			xlog_put_bp(hbp);
			hbp = xlog_get_bp(log, hblks);
		} else {
			hblks = 1;
		}
	} else {
		ASSERT(log->l_sectbb_log == 0);
		hblks = 1;
		hbp = xlog_get_bp(log, 1);
		h_size = XLOG_BIG_RECORD_BSIZE;
	}

	if (!hbp)
		return ENOMEM;
	dbp = xlog_get_bp(log, BTOBB(h_size));
	if (!dbp) {
		xlog_put_bp(hbp);
		return ENOMEM;
	}

	memset(rhash, 0, sizeof(rhash));
	if (tail_blk <= head_blk) {
		for (blk_no = tail_blk; blk_no < head_blk; ) {
			if ((error = xlog_bread(log, blk_no, hblks, hbp)))
				goto bread_err2;
			offset = xlog_align(log, blk_no, hblks, hbp);
			rhead = (xlog_rec_header_t *)offset;
			error = xlog_valid_rec_header(log, rhead, blk_no);
			if (error)
				goto bread_err2;

			/* blocks in data section */
			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			error = xlog_bread(log, blk_no + hblks, bblks, dbp);
			if (error)
				goto bread_err2;
			offset = xlog_align(log, blk_no + hblks, bblks, dbp);
			xlog_unpack_data(rhead, offset, log);
			if ((error = xlog_recover_process_data(log,
						rhash, rhead, offset, pass)))
				goto bread_err2;
			blk_no += bblks + hblks;
		}
	} else {
		/*
		 * Perform recovery around the end of the physical log.
		 * When the head is not on the same cycle number as the tail,
		 * we can't do a sequential recovery as above.
		 */
		blk_no = tail_blk;
		while (blk_no < log->l_logBBsize) {
			/*
			 * Check for header wrapping around physical end-of-log
			 */
			offset = NULL;
			split_hblks = 0;
			wrapped_hblks = 0;
			if (blk_no + hblks <= log->l_logBBsize) {
				/* Read header in one read */
				error = xlog_bread(log, blk_no, hblks, hbp);
				if (error)
					goto bread_err2;
				offset = xlog_align(log, blk_no, hblks, hbp);
			} else {
				/* This LR is split across physical log end */
				if (blk_no != log->l_logBBsize) {
					/* some data before physical log end */
					ASSERT(blk_no <= INT_MAX);
					split_hblks = log->l_logBBsize - (int)blk_no;
					ASSERT(split_hblks > 0);
					if ((error = xlog_bread(log, blk_no,
							split_hblks, hbp)))
						goto bread_err2;
					offset = xlog_align(log, blk_no,
							split_hblks, hbp);
				}
				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header end)
				 *   - order is important.
				 */
				bufaddr = XFS_BUF_PTR(hbp);
				XFS_BUF_SET_PTR(hbp,
						bufaddr + BBTOB(split_hblks),
						BBTOB(hblks - split_hblks));
				wrapped_hblks = hblks - split_hblks;
				error = xlog_bread(log, 0, wrapped_hblks, hbp);
				if (error)
					goto bread_err2;
				XFS_BUF_SET_PTR(hbp, bufaddr, BBTOB(hblks));
				if (!offset)
					offset = xlog_align(log, 0,
							wrapped_hblks, hbp);
			}
			rhead = (xlog_rec_header_t *)offset;
			error = xlog_valid_rec_header(log, rhead,
						split_hblks ? blk_no : 0);
			if (error)
				goto bread_err2;

			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			blk_no += hblks;

			/* Read in data for log record */
			if (blk_no + bblks <= log->l_logBBsize) {
				error = xlog_bread(log, blk_no, bblks, dbp);
				if (error)
					goto bread_err2;
				offset = xlog_align(log, blk_no, bblks, dbp);
			} else {
				/* This log record is split across the
				 * physical end of log */
				offset = NULL;
				split_bblks = 0;
				if (blk_no != log->l_logBBsize) {
					/* some data is before the physical
					 * end of log */
					ASSERT(!wrapped_hblks);
					ASSERT(blk_no <= INT_MAX);
					split_bblks =
						log->l_logBBsize - (int)blk_no;
					ASSERT(split_bblks > 0);
					if ((error = xlog_bread(log, blk_no,
							split_bblks, dbp)))
						goto bread_err2;
					offset = xlog_align(log, blk_no,
							split_bblks, dbp);
				}
				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header end)
				 *   - order is important.
				 */
				bufaddr = XFS_BUF_PTR(dbp);
				XFS_BUF_SET_PTR(dbp,
						bufaddr + BBTOB(split_bblks),
						BBTOB(bblks - split_bblks));
				if ((error = xlog_bread(log, wrapped_hblks,
						bblks - split_bblks, dbp)))
					goto bread_err2;
				XFS_BUF_SET_PTR(dbp, bufaddr, h_size);
				if (!offset)
					offset = xlog_align(log, wrapped_hblks,
						bblks - split_bblks, dbp);
			}
			xlog_unpack_data(rhead, offset, log);
			if ((error = xlog_recover_process_data(log, rhash,
							rhead, offset, pass)))
				goto bread_err2;
			blk_no += bblks;
		}

		ASSERT(blk_no >= log->l_logBBsize);
		blk_no -= log->l_logBBsize;

		/* read first part of physical log */
		while (blk_no < head_blk) {
			if ((error = xlog_bread(log, blk_no, hblks, hbp)))
				goto bread_err2;
			offset = xlog_align(log, blk_no, hblks, hbp);
			rhead = (xlog_rec_header_t *)offset;
			error = xlog_valid_rec_header(log, rhead, blk_no);
			if (error)
				goto bread_err2;
			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			if ((error = xlog_bread(log, blk_no + hblks, bblks, dbp)))
				goto bread_err2;
			offset = xlog_align(log, blk_no + hblks, bblks, dbp);
			xlog_unpack_data(rhead, offset, log);
			if ((error = xlog_recover_process_data(log, rhash,
							rhead, offset, pass)))
				goto bread_err2;
			blk_no += bblks + hblks;
		}
	}

 bread_err2:
	xlog_put_bp(dbp);
 bread_err1:
	xlog_put_bp(hbp);
	return error;
}
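
/*
 * Wrap-around example for the split reads above (made-up geometry):
 * with l_logBBsize = 1000, hblks = 1, a record header at block 996 and
 * h_len of 4 KB (bblks = 8), the data blocks 997..1004 cross the
 * physical end of the log.  split_bblks = 3 blocks are read from
 * 997..999, and the remaining 5 blocks are read from the start of the
 * log into the tail of the same data buffer, so the record can be
 * unpacked and processed as one contiguous piece.
 */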

/*
 * Do the recovery of the log.  We actually do this in two phases.
 * The two passes are necessary in order to implement the function
 * of cancelling a record written into the log.  The first pass
 * determines those things which have been cancelled, and the
 * second pass replays log items normally except for those which
 * have been cancelled.  The handling of the replay and cancellations
 * takes place in the log item type specific routines.
 *
 * The table of items which have cancel records in the log is allocated
 * and freed at this level, since only here do we know when all of
 * the log recovery has been completed.
 */
STATIC int
xlog_do_log_recovery(
	xlog_t		*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	int		error;

	ASSERT(head_blk != tail_blk);

	/*
	 * First do a pass to find all of the cancelled buf log items.
	 * Store them in the buf_cancel_table for use in the second pass.
	 */
	log->l_buf_cancel_table =
		(xfs_buf_cancel_t **)kmem_zalloc(XLOG_BC_TABLE_SIZE *
						 sizeof(xfs_buf_cancel_t*),
						 KM_SLEEP);
	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
				      XLOG_RECOVER_PASS1);
	if (error != 0) {
		kmem_free(log->l_buf_cancel_table,
			  XLOG_BC_TABLE_SIZE * sizeof(xfs_buf_cancel_t*));
		log->l_buf_cancel_table = NULL;
		return error;
	}
	/*
	 * Then do a second pass to actually recover the items in the log.
	 * When it is complete free the table of buf cancel items.
	 */
	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
				      XLOG_RECOVER_PASS2);
#ifdef DEBUG
	if (!error) {
		int	i;

		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
			ASSERT(log->l_buf_cancel_table[i] == NULL);
	}
#endif	/* DEBUG */

	kmem_free(log->l_buf_cancel_table,
		  XLOG_BC_TABLE_SIZE * sizeof(xfs_buf_cancel_t*));
	log->l_buf_cancel_table = NULL;

	return error;
}
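
/*
 * Two-pass sketch: if the log contains a buf log item for a buffer and
 * a later cancel record for the same buffer, pass 1
 * (XLOG_RECOVER_PASS1) notes the cancellation in l_buf_cancel_table
 * (entries bucketed by disk block number), and pass 2
 * (XLOG_RECOVER_PASS2) consults that table and skips replaying the
 * stale buffer image.  The table is empty again once the second pass
 * has consumed every cancellation, which the DEBUG loop above checks.
 */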

/*
 * Do the actual recovery
 */
STATIC int
xlog_do_recover(
	xlog_t		*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	int		error;
	xfs_buf_t	*bp;
	xfs_sb_t	*sbp;

	/*
	 * First replay the images in the log.
	 */
	error = xlog_do_log_recovery(log, head_blk, tail_blk);
	if (error) {
		return error;
	}

	XFS_bflush(log->l_mp->m_ddev_targp);

	/*
	 * If IO errors happened during recovery, bail out.
	 */
	if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
		return (EIO);
	}

	/*
	 * We now update the tail_lsn since much of the recovery has completed
	 * and there may be space available to use.  If there were no extent
	 * or iunlinks, we can free up the entire log and set the tail_lsn to
	 * be the last_sync_lsn.  This was set in xlog_find_tail to be the
	 * lsn of the last known good LR on disk.  If there are extent frees
	 * or iunlinks they will have some entries in the AIL; so we look at
	 * the AIL to determine how to set the tail_lsn.
	 */
	xlog_assign_tail_lsn(log->l_mp);

	/*
	 * Now that we've finished replaying all buffer and inode
	 * updates, re-read in the superblock.
	 */
	bp = xfs_getsb(log->l_mp, 0);
	XFS_BUF_UNDONE(bp);
	ASSERT(!(XFS_BUF_ISWRITE(bp)));
	ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
	XFS_BUF_READ(bp);
	XFS_BUF_UNASYNC(bp);
	xfsbdstrat(log->l_mp, bp);
	if ((error = xfs_iowait(bp))) {
		xfs_ioerror_alert("xlog_do_recover",
				  log->l_mp, bp, XFS_BUF_ADDR(bp));
		ASSERT(0);
		xfs_buf_relse(bp);
		return error;
	}

	/* Convert superblock from on-disk format */
	sbp = &log->l_mp->m_sb;
	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
	ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
	ASSERT(XFS_SB_GOOD_VERSION(sbp));
	xfs_buf_relse(bp);

	/* We've re-read the superblock so re-initialize per-cpu counters */
	xfs_icsb_reinit_counters(log->l_mp);

	xlog_recover_check_summary(log);

	/* Normal transactions can now occur */
	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
	return 0;
}

/*
 * Perform recovery and re-initialize some log variables in xlog_find_tail.
 *
 * Return error or zero.
 */
int
xlog_recover(
	xlog_t		*log)
{
	xfs_daddr_t	head_blk, tail_blk;
	int		error;

	/* find the tail of the log */
	if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
		return error;

	if (tail_blk != head_blk) {
		/* There used to be a comment here:
		 *
		 * disallow recovery on read-only mounts.  note -- mount
		 * checks for ENOSPC and turns it into an intelligent
		 * error message.
		 * ...but this is no longer true.  Now, unless you specify
		 * NORECOVERY (in which case this function would never be
		 * called), we just go ahead and recover.  We do this all
		 * under the vfs layer, so we can get away with it unless
		 * the device itself is read-only, in which case we fail.
		 */
		if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
			return error;
		}

		cmn_err(CE_NOTE,
			"Starting XFS recovery on filesystem: %s (logdev: %s)",
			log->l_mp->m_fsname, log->l_mp->m_logname ?
			log->l_mp->m_logname : "internal");

		error = xlog_do_recover(log, head_blk, tail_blk);
		log->l_flags |= XLOG_RECOVERY_NEEDED;
	}
	return error;
}

/*
 * In the first part of recovery we replay inodes and buffers and build
 * up the list of extent free items which need to be processed.  Here
 * we process the extent free items and clean up the on disk unlinked
 * inode lists.  This is separated from the first part of recovery so
 * that the root and real-time bitmap inodes can be read in from disk in
 * between the two stages.  This is necessary so that we can free space
 * in the real-time portion of the file system.
 */
int
xlog_recover_finish(
	xlog_t		*log,
	int		mfsi_flags)
{
	/*
	 * Now we're ready to do the transactions needed for the
	 * rest of recovery.  Start with completing all the extent
	 * free intent records and then process the unlinked inode
	 * lists.  At this point, we essentially run in normal mode
	 * except that we're still performing recovery actions
	 * rather than accepting new requests.
	 */
	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
		xlog_recover_process_efis(log);
		/*
		 * Sync the log to get all the EFIs out of the AIL.
		 * This isn't absolutely necessary, but it helps in
		 * case the unlink transactions would have problems
		 * pushing the EFIs out of the way.
		 */
		xfs_log_force(log->l_mp, (xfs_lsn_t)0,
			      (XFS_LOG_FORCE | XFS_LOG_SYNC));

		if ((mfsi_flags & XFS_MFSI_NOUNLINK) == 0) {
			xlog_recover_process_iunlinks(log);
		}

		xlog_recover_check_summary(log);

		cmn_err(CE_NOTE,
			"Ending XFS recovery on filesystem: %s (logdev: %s)",
			log->l_mp->m_fsname, log->l_mp->m_logname ?
			log->l_mp->m_logname : "internal");
		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
	} else {
		cmn_err(CE_DEBUG,
			"!Ending clean XFS mount for filesystem: %s\n",
			log->l_mp->m_fsname);
	}
	return 0;
}

#if defined(DEBUG)
/*
 * Read all of the agf and agi counters and check that they
 * are consistent with the superblock counters.
 */
void
xlog_recover_check_summary(
	xlog_t		*log)
{
	xfs_mount_t	*mp;
	xfs_agf_t	*agfp;
	xfs_agi_t	*agip;
	xfs_buf_t	*agfbp;
	xfs_buf_t	*agibp;
	xfs_daddr_t	agfdaddr;
	xfs_daddr_t	agidaddr;
	xfs_buf_t	*sbbp;
#ifdef XFS_LOUD_RECOVERY
	xfs_sb_t	*sbp;
#endif
	xfs_agnumber_t	agno;
	__uint64_t	freeblks;
	__uint64_t	itotal;
	__uint64_t	ifree;

	mp = log->l_mp;

	freeblks = 0LL;
	itotal = 0LL;
	ifree = 0LL;
	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		agfdaddr = XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp));
		agfbp = xfs_buf_read(mp->m_ddev_targp, agfdaddr,
				XFS_FSS_TO_BB(mp, 1), 0);
		if (XFS_BUF_ISERROR(agfbp)) {
			xfs_ioerror_alert("xlog_recover_check_summary(agf)",
					  mp, agfbp, agfdaddr);
		}
		agfp = XFS_BUF_TO_AGF(agfbp);
		ASSERT(XFS_AGF_MAGIC == be32_to_cpu(agfp->agf_magicnum));
		ASSERT(XFS_AGF_GOOD_VERSION(be32_to_cpu(agfp->agf_versionnum)));
		ASSERT(be32_to_cpu(agfp->agf_seqno) == agno);

		freeblks += be32_to_cpu(agfp->agf_freeblks) +
			    be32_to_cpu(agfp->agf_flcount);
		xfs_buf_relse(agfbp);

		agidaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
		agibp = xfs_buf_read(mp->m_ddev_targp, agidaddr,
				XFS_FSS_TO_BB(mp, 1), 0);
		if (XFS_BUF_ISERROR(agibp)) {
			xfs_ioerror_alert("xlog_recover_check_summary(agi)",
					  mp, agibp, agidaddr);
		}
		agip = XFS_BUF_TO_AGI(agibp);
		ASSERT(XFS_AGI_MAGIC == be32_to_cpu(agip->agi_magicnum));
		ASSERT(XFS_AGI_GOOD_VERSION(be32_to_cpu(agip->agi_versionnum)));
		ASSERT(be32_to_cpu(agip->agi_seqno) == agno);

		itotal += be32_to_cpu(agip->agi_count);
		ifree += be32_to_cpu(agip->agi_freecount);
		xfs_buf_relse(agibp);
	}

	sbbp = xfs_getsb(mp, 0);
#ifdef XFS_LOUD_RECOVERY
	sbp = &mp->m_sb;
	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(sbbp));
	cmn_err(CE_NOTE,
		"xlog_recover_check_summary: sb_icount %Lu itotal %Lu",
		sbp->sb_icount, itotal);
	cmn_err(CE_NOTE,
		"xlog_recover_check_summary: sb_ifree %Lu ifree %Lu",
		sbp->sb_ifree, ifree);
	cmn_err(CE_NOTE,
		"xlog_recover_check_summary: sb_fdblocks %Lu freeblks %Lu",
		sbp->sb_fdblocks, freeblks);
#if 0
	/*
	 * This is turned off until I account for the allocation
	 * btree blocks which live in free space.
	 */
	ASSERT(sbp->sb_icount == itotal);
	ASSERT(sbp->sb_ifree == ifree);
	ASSERT(sbp->sb_fdblocks == freeblks);
#endif
#endif
	xfs_buf_relse(sbbp);
}
#endif /* DEBUG */