xfs_log_recover.c

/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_imap.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
#include "xfs_log_recover.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_rw.h"

STATIC int	xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
STATIC int	xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
STATIC void	xlog_recover_insert_item_backq(xlog_recover_item_t **q,
					       xlog_recover_item_t *item);
#if defined(DEBUG)
STATIC void	xlog_recover_check_summary(xlog_t *);
STATIC void	xlog_recover_check_ail(xfs_mount_t *, xfs_log_item_t *, int);
#else
#define	xlog_recover_check_summary(log)
#define	xlog_recover_check_ail(mp, lip, gen)
#endif

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */
#define XLOG_SECTOR_ROUNDUP_BBCOUNT(log, bbs)	\
	( ((log)->l_sectbb_mask && (bbs & (log)->l_sectbb_mask)) ? \
	((bbs + (log)->l_sectbb_mask + 1) & ~(log)->l_sectbb_mask) : (bbs) )
#define XLOG_SECTOR_ROUNDDOWN_BLKNO(log, bno)	((bno) & ~(log)->l_sectbb_mask)
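
/*
 * Worked example of the rounding macros above (illustrative values only,
 * not taken from this file): on a log device with 2KB sectors,
 * l_sectbb_mask is 3, i.e. a sector spans 4 basic blocks.  Then
 * XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 5) evaluates (5 + 3 + 1) & ~3 == 8,
 * so a 5-block request is padded out to two full sectors, while an
 * already aligned count such as 4 is returned unchanged.  Similarly
 * XLOG_SECTOR_ROUNDDOWN_BLKNO(log, 6) gives 6 & ~3 == 4, the first
 * basic block of the sector containing block 6.
 */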
xfs_buf_t *
xlog_get_bp(
	xlog_t		*log,
	int		num_bblks)
{
	ASSERT(num_bblks > 0);

	if (log->l_sectbb_log) {
		if (num_bblks > 1)
			num_bblks += XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
		num_bblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, num_bblks);
	}
	return xfs_buf_get_noaddr(BBTOB(num_bblks), log->l_mp->m_logdev_targp);
}

void
xlog_put_bp(
	xfs_buf_t	*bp)
{
	xfs_buf_free(bp);
}

/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
int
xlog_bread(
	xlog_t		*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	xfs_buf_t	*bp)
{
	int		error;

	if (log->l_sectbb_log) {
		blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
		nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
	}

	ASSERT(nbblks > 0);
	ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));
	ASSERT(bp);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_READ(bp);
	XFS_BUF_BUSY(bp);
	XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
	XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);

	xfsbdstrat(log->l_mp, bp);
	if ((error = xfs_iowait(bp)))
		xfs_ioerror_alert("xlog_bread", log->l_mp,
				  bp, XFS_BUF_ADDR(bp));
	return error;
}

/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
	xlog_t		*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	xfs_buf_t	*bp)
{
	int		error;

	if (log->l_sectbb_log) {
		blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
		nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
	}

	ASSERT(nbblks > 0);
	ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_ZEROFLAGS(bp);
	XFS_BUF_BUSY(bp);
	XFS_BUF_HOLD(bp);
	XFS_BUF_PSEMA(bp, PRIBIO);
	XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
	XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);

	if ((error = xfs_bwrite(log->l_mp, bp)))
		xfs_ioerror_alert("xlog_bwrite", log->l_mp,
				  bp, XFS_BUF_ADDR(bp));
	return error;
}

STATIC xfs_caddr_t
xlog_align(
	xlog_t		*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	xfs_buf_t	*bp)
{
	xfs_caddr_t	ptr;

	if (!log->l_sectbb_log)
		return XFS_BUF_PTR(bp);

	ptr = XFS_BUF_PTR(bp) + BBTOB((int)blk_no & log->l_sectbb_mask);
	ASSERT(XFS_BUF_SIZE(bp) >=
		BBTOB(nbblks + (blk_no & log->l_sectbb_mask)));
	return ptr;
}
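
/*
 * Illustrative note (values are hypothetical): with l_sectbb_mask == 3,
 * a request to xlog_bread() for block 6 is rounded down to block 4 by
 * XLOG_SECTOR_ROUNDDOWN_BLKNO, so the sector-aligned read starts two
 * basic blocks before the data that was asked for.  xlog_align() then
 * returns XFS_BUF_PTR(bp) + BBTOB(6 & 3), i.e. 1024 bytes into the
 * buffer, which is where block 6 actually lives.
 */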
#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	int			b;

	printk("%s: SB : uuid = ", __FUNCTION__);
	for (b = 0; b < 16; b++)
		printk("%02x", ((unsigned char *)&mp->m_sb.sb_uuid)[b]);
	printk(", fmt = %d\n", XLOG_FMT);
	printk(" log : uuid = ");
	for (b = 0; b < 16; b++)
		printk("%02x", ((unsigned char *)&head->h_fs_uuid)[b]);
	printk(", fmt = %d\n", INT_GET(head->h_fmt, ARCH_CONVERT));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM);

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN).  This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (unlikely(INT_GET(head->h_fmt, ARCH_CONVERT) != XLOG_FMT)) {
		xlog_warn(
	"XFS: dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xlog_warn(
	"XFS: dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM);

	if (uuid_is_nil(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is nil, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xlog_warn("XFS: nil uuid in log - IRIX style log");
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xlog_warn("XFS: log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_mount",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}

STATIC void
xlog_recover_iodone(
	struct xfs_buf	*bp)
{
	xfs_mount_t	*mp;

	ASSERT(XFS_BUF_FSPRIVATE(bp, void *));

	if (XFS_BUF_GETERROR(bp)) {
		/*
		 * We're not going to bother about retrying
		 * this during recovery. One strike!
		 */
		mp = XFS_BUF_FSPRIVATE(bp, xfs_mount_t *);
		xfs_ioerror_alert("xlog_recover_iodone",
				  mp, bp, XFS_BUF_ADDR(bp));
		xfs_force_shutdown(mp, XFS_METADATA_IO_ERROR);
	}
	XFS_BUF_SET_FSPRIVATE(bp, NULL);
	XFS_BUF_CLR_IODONE_FUNC(bp);
	xfs_biodone(bp);
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm can not be perfect because the disk will not
 * necessarily be perfect.
 */
int
xlog_find_cycle_start(
	xlog_t		*log,
	xfs_buf_t	*bp,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{
	xfs_caddr_t	offset;
	xfs_daddr_t	mid_blk;
	uint		mid_cycle;
	int		error;

	mid_blk = BLK_AVG(first_blk, *last_blk);
	while (mid_blk != first_blk && mid_blk != *last_blk) {
		if ((error = xlog_bread(log, mid_blk, 1, bp)))
			return error;
		offset = xlog_align(log, mid_blk, 1, bp);
		mid_cycle = GET_CYCLE(offset, ARCH_CONVERT);
		if (mid_cycle == cycle) {
			*last_blk = mid_blk;
			/* last_half_cycle == mid_cycle */
		} else {
			first_blk = mid_blk;
			/* first_half_cycle == mid_cycle */
		}
		mid_blk = BLK_AVG(first_blk, *last_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == *last_blk) ||
	       (mid_blk == *last_blk && mid_blk-1 == first_blk));

	return 0;
}
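
/*
 * Illustrative trace of the binary search above (block and cycle values
 * are made up): suppose blocks 0-7 carry cycle numbers 2 2 2 1 1 1 1 1
 * and xlog_find_cycle_start() is called with first_blk = 0, *last_blk = 7
 * and cycle = 1.  The midpoints probed (integer averages) are 3 (cycle 1,
 * so *last_blk becomes 3), then 1 (cycle 2, so first_blk becomes 1), then
 * 2 (cycle 2, so first_blk becomes 2); the next midpoint equals first_blk
 * and the loop stops with *last_blk == 3, the first block stamped with
 * the requested cycle.
 */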
/*
 * Check that the range of blocks does not contain the cycle number
 * given.  The scan needs to occur from front to back and the ptr into the
 * region must be updated since a later routine will need to perform another
 * test.  If the region is completely good, we end up returning the same
 * last block number.
 *
 * Set blkno to -1 if we encounter no errors.  This is an invalid block number
 * since we don't ever expect logs to get this large.
 */
STATIC int
xlog_find_verify_cycle(
	xlog_t		*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	xfs_buf_t	*bp;
	xfs_daddr_t	bufblks;
	xfs_caddr_t	buf = NULL;
	int		error = 0;

	bufblks = 1 << ffs(nbblks);

	while (!(bp = xlog_get_bp(log, bufblks))) {
		/* can't get enough memory to do everything in one big buffer */
		bufblks >>= 1;
		if (bufblks <= log->l_sectbb_log)
			return ENOMEM;
	}

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		if ((error = xlog_bread(log, i, bcount, bp)))
			goto out;

		buf = xlog_align(log, i, bcount, bp);
		for (j = 0; j < bcount; j++) {
			cycle = GET_CYCLE(buf, ARCH_CONVERT);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	xlog_put_bp(bp);
	return error;
}

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
	xlog_t			*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	xfs_buf_t		*bp;
	xfs_caddr_t		offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	if (!(bp = xlog_get_bp(log, num_blks))) {
		if (!(bp = xlog_get_bp(log, 1)))
			return ENOMEM;
		smallmem = 1;
	} else {
		if ((error = xlog_bread(log, start_blk, num_blks, bp)))
			goto out;
		offset = xlog_align(log, start_blk, num_blks, bp);
		offset += ((num_blks - 1) << BBSHIFT);
	}

	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xlog_warn(
		"XFS: Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = XFS_ERROR(EIO);
			goto out;
		}

		if (smallmem) {
			if ((error = xlog_bread(log, i, 1, bp)))
				goto out;
			offset = xlog_align(log, i, 1, bp);
		}

		head = (xlog_rec_header_t *)offset;

		if (XLOG_HEADER_MAGIC_NUM ==
		    INT_GET(head->h_magicno, ARCH_CONVERT))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header.  Return
	 * to caller.  If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = -1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head), so we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
		uint	h_size = INT_GET(head->h_size, ARCH_CONVERT);

		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
			xhdrs++;
	} else {
		xhdrs = 1;
	}

	if (*last_blk - i + extra_bblks
			!= BTOBB(INT_GET(head->h_len, ARCH_CONVERT)) + xhdrs)
		*last_blk = i;

out:
	xlog_put_bp(bp);
	return error;
}
/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LRs have complete transactions.  We only know that a cycle number of
 * current cycle number - 1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	xlog_t		*log,
	xfs_daddr_t	*return_head_blk)
{
	xfs_buf_t	*bp;
	xfs_caddr_t	offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xlog_warn("XFS: totally zeroed log");
		}

		return 0;
	} else if (error) {
		xlog_warn("XFS: empty log check failed");
		return error;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;
	if ((error = xlog_bread(log, 0, 1, bp)))
		goto bp_err;
	offset = xlog_align(log, 0, 1, bp);
	first_half_cycle = GET_CYCLE(offset, ARCH_CONVERT);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	if ((error = xlog_bread(log, last_blk, 1, bp)))
		goto bp_err;
	offset = xlog_align(log, last_blk, 1, bp);
	last_half_cycle = GET_CYCLE(offset, ARCH_CONVERT);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number.  In this
	 * case, head_blk can't be set to zero (which makes sense).  The below
	 * math doesn't work out properly with head_blk equal to zero.  Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct.  If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle.  We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1.  If we find such a hole,
		 * then the start of that hole will be the new head.  The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * started with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle.  We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ...
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs.  First we do a binary search
		 * for the first occurrence of last_half_cycle.  The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us.  If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log.  The cases we're looking for look
		 * like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                               ^ binary search stopped here
		 * or
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 */
		stop_on_cycle = last_half_cycle;
		if ((error = xlog_find_cycle_start(log, bp, first_blk,
						&head_blk, last_half_cycle)))
			goto bp_err;
	}

	/*
	 * Now validate the answer.  Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.  The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log.  The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {		/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log.  In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log.  The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle.  If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found.  This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                               ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks.  We need to skip past those since that is
		 * certainly not the head of the log.  By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		start_blk = log_bbnum - num_scan_bblks + head_blk;
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks - head_blk >= 0);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto bp_err;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto bad_blk;
		}

		/*
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	}

bad_blk:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		if ((error = xlog_find_verify_log_record(log, start_blk,
							&head_blk, 0)) == -1) {
			error = XFS_ERROR(EIO);
			goto bp_err;
		} else if (error)
			goto bp_err;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_log_record(log, start_blk,
							&head_blk, 0)) == -1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - num_scan_bblks + head_blk;
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			if ((error = xlog_find_verify_log_record(log,
							start_blk, &new_blk,
							(int)head_blk)) == -1) {
				error = XFS_ERROR(EIO);
				goto bp_err;
			} else if (error)
				goto bp_err;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto bp_err;
	}

	xlog_put_bp(bp);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number.  Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1.  In this case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

bp_err:
	xlog_put_bp(bp);

	if (error)
		xlog_warn("XFS: failed to find log head");
	return error;
}
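
/*
 * Concrete illustration of the search above (all numbers hypothetical):
 * in a log of log_bbnum = 1000 basic blocks, suppose block 0 holds cycle 8
 * and block 999 holds cycle 7.  The half cycles differ, so the binary
 * search looks for the first block stamped with cycle 7; if it stops at,
 * say, block 612, the code then re-scans up to num_scan_bblks blocks
 * before 612 for stray cycle-7 blocks (moving head_blk back if one is
 * found) and finally backs head_blk up over any partially written log
 * record via xlog_find_verify_log_record().
 */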
/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
int
xlog_find_tail(
	xlog_t			*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk)
{
	xlog_rec_header_t	*rhead;
	xlog_op_header_t	*op_head;
	xfs_caddr_t		offset = NULL;
	xfs_buf_t		*bp;
	int			error, i, found;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	xfs_lsn_t		tail_lsn;
	int			hblks;

	found = 0;

	/*
	 * Find previous log record
	 */
	if ((error = xlog_find_head(log, head_blk)))
		return error;

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;
	if (*head_blk == 0) {				/* special case */
		if ((error = xlog_bread(log, 0, 1, bp)))
			goto bread_err;
		offset = xlog_align(log, 0, 1, bp);
		if (GET_CYCLE(offset, ARCH_CONVERT) == 0) {
			*tail_blk = 0;
			/* leave all other log inited values alone */
			goto exit;
		}
	}

	/*
	 * Search backwards looking for log record header block
	 */
	ASSERT(*head_blk < INT_MAX);
	for (i = (int)(*head_blk) - 1; i >= 0; i--) {
		if ((error = xlog_bread(log, i, 1, bp)))
			goto bread_err;
		offset = xlog_align(log, i, 1, bp);
		if (XLOG_HEADER_MAGIC_NUM ==
		    INT_GET(*(uint *)offset, ARCH_CONVERT)) {
			found = 1;
			break;
		}
	}
	/*
	 * If we haven't found the log record header block, start looking
	 * again from the end of the physical log.  XXXmiken: There should be
	 * a check here to make sure we didn't search more than N blocks in
	 * the previous code.
	 */
	if (!found) {
		for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
			if ((error = xlog_bread(log, i, 1, bp)))
				goto bread_err;
			offset = xlog_align(log, i, 1, bp);
			if (XLOG_HEADER_MAGIC_NUM ==
			    INT_GET(*(uint*)offset, ARCH_CONVERT)) {
				found = 2;
				break;
			}
		}
	}
	if (!found) {
		xlog_warn("XFS: xlog_find_tail: couldn't find sync record");
		ASSERT(0);
		return XFS_ERROR(EIO);
	}

	/* find blk_no of tail of log */
	rhead = (xlog_rec_header_t *)offset;
	*tail_blk = BLOCK_LSN(INT_GET(rhead->h_tail_lsn, ARCH_CONVERT));

	/*
	 * Reset log values according to the state of the log when we
	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
	 * one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record.  At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for.  Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	 */
	log->l_prev_block = i;
	log->l_curr_block = (int)*head_blk;
	log->l_curr_cycle = INT_GET(rhead->h_cycle, ARCH_CONVERT);
	if (found == 2)
		log->l_curr_cycle++;
	log->l_tail_lsn = INT_GET(rhead->h_tail_lsn, ARCH_CONVERT);
	log->l_last_sync_lsn = INT_GET(rhead->h_lsn, ARCH_CONVERT);
	log->l_grant_reserve_cycle = log->l_curr_cycle;
	log->l_grant_reserve_bytes = BBTOB(log->l_curr_block);
	log->l_grant_write_cycle = log->l_curr_cycle;
	log->l_grant_write_bytes = BBTOB(log->l_curr_block);

	/*
	 * Look for unmount record.  If we find it, then we know there
	 * was a clean unmount.  Since 'i' could be the last block in
	 * the physical log, we convert to a log block before comparing
	 * to the head_blk.
	 *
	 * Save the current tail lsn to use to pass to
	 * xlog_clear_stale_blocks() below.  We won't want to clear the
	 * unmount record if there is one, so we pass the lsn of the
	 * unmount record rather than the block after it.
	 */
	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
		int	h_size = INT_GET(rhead->h_size, ARCH_CONVERT);
		int	h_version = INT_GET(rhead->h_version, ARCH_CONVERT);

		if ((h_version & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
		} else {
			hblks = 1;
		}
	} else {
		hblks = 1;
	}
	after_umount_blk = (i + hblks + (int)
		BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT))) % log->l_logBBsize;
	tail_lsn = log->l_tail_lsn;
	if (*head_blk == after_umount_blk &&
	    INT_GET(rhead->h_num_logops, ARCH_CONVERT) == 1) {
		umount_data_blk = (i + hblks) % log->l_logBBsize;
		if ((error = xlog_bread(log, umount_data_blk, 1, bp))) {
			goto bread_err;
		}
		offset = xlog_align(log, umount_data_blk, 1, bp);
		op_head = (xlog_op_header_t *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written
			 * log records will point recovery to after the
			 * current unmount record.
			 */
			ASSIGN_ANY_LSN_HOST(log->l_tail_lsn, log->l_curr_cycle,
					after_umount_blk);
			ASSIGN_ANY_LSN_HOST(log->l_last_sync_lsn, log->l_curr_cycle,
					after_umount_blk);
			*tail_blk = after_umount_blk;
		}
	}

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head.  This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 *
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 *
	 * Do this only if we are going to recover the filesystem
	 *
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 *
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	 */
	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp)) {
		error = xlog_clear_stale_blocks(log, tail_lsn);
	}

bread_err:
exit:
	xlog_put_bp(bp);

	if (error)
		xlog_warn("XFS: failed to locate log tail");
	return error;
}
/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *	0  => the log is completely written to
 *	-1 => use *blk_no as the first block of the log
 *	>0 => error has occurred
 */
int
xlog_find_zeroed(
	xlog_t		*log,
	xfs_daddr_t	*blk_no)
{
	xfs_buf_t	*bp;
	xfs_caddr_t	offset;
	uint		first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t	num_scan_bblks;
	int		error, log_bbnum = log->l_logBBsize;

	/* check totally zeroed log */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;
	if ((error = xlog_bread(log, 0, 1, bp)))
		goto bp_err;
	offset = xlog_align(log, 0, 1, bp);
	first_cycle = GET_CYCLE(offset, ARCH_CONVERT);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		xlog_put_bp(bp);
		return -1;
	}

	/* check partially zeroed log */
	if ((error = xlog_bread(log, log_bbnum-1, 1, bp)))
		goto bp_err;
	offset = xlog_align(log, log_bbnum-1, 1, bp);
	last_cycle = GET_CYCLE(offset, ARCH_CONVERT);
	if (last_cycle != 0) {		/* log completely written to */
		xlog_put_bp(bp);
		return 0;
	} else if (first_cycle != 1) {
		/*
		 * If the cycle of the last block is zero, the cycle of
		 * the first block must be 1. If it's not, maybe we're
		 * not looking at a log... Bail out.
		 */
		xlog_warn("XFS: Log inconsistent or not a log (last==0, first!=1)");
		return XFS_ERROR(EINVAL);
	}

	/* we have a partially zeroed log */
	last_blk = log_bbnum-1;
	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
		goto bp_err;

	/*
	 * Validate the answer.  Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks.  At this point, the maximum
	 * is not chosen to mean anything special.  XXXmiken
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);

	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;

	/*
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head.  What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *                       ^ binary search ends here
	 */
	if ((error = xlog_find_verify_cycle(log, start_blk,
					(int)num_scan_bblks, 0, &new_blk)))
		goto bp_err;
	if (new_blk != -1)
		last_blk = new_blk;

	/*
	 * Potentially backup over partial log record write.  We don't need
	 * to search the end of the log because we know it is zero.
	 */
	if ((error = xlog_find_verify_log_record(log, start_blk,
				&last_blk, 0)) == -1) {
		error = XFS_ERROR(EIO);
		goto bp_err;
	} else if (error)
		goto bp_err;

	*blk_no = last_blk;
bp_err:
	xlog_put_bp(bp);
	if (error)
		return error;
	return -1;
}
/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	xlog_t			*log,
	xfs_caddr_t		buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	INT_SET(recp->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM);
	INT_SET(recp->h_cycle, ARCH_CONVERT, cycle);
	INT_SET(recp->h_version, ARCH_CONVERT,
			XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1);
	ASSIGN_ANY_LSN_DISK(recp->h_lsn, cycle, block);
	ASSIGN_ANY_LSN_DISK(recp->h_tail_lsn, tail_cycle, tail_block);
	INT_SET(recp->h_fmt, ARCH_CONVERT, XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}

STATIC int
xlog_write_log_records(
	xlog_t		*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	xfs_caddr_t	offset;
	xfs_buf_t	*bp;
	int		balign, ealign;
	int		sectbb = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	bufblks = 1 << ffs(blocks);
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks <= log->l_sectbb_log)
			return ENOMEM;
	}

	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, start_block);
	if (balign != start_block) {
		if ((error = xlog_bread(log, start_block, 1, bp))) {
			xlog_put_bp(bp);
			return error;
		}
		j = start_block - balign;
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, end_block);
		if (j == 0 && (start_block + endcount > ealign)) {
			offset = XFS_BUF_PTR(bp);
			balign = BBTOB(ealign - start_block);
			XFS_BUF_SET_PTR(bp, offset + balign, BBTOB(sectbb));
			if ((error = xlog_bread(log, ealign, sectbb, bp)))
				break;
			XFS_BUF_SET_PTR(bp, offset, bufblks);
		}

		offset = xlog_align(log, start_block, endcount, bp);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, bp);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}
	xlog_put_bp(bp);
	return error;
}
/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
	xlog_t		*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail.  We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		tail_distance = tail_block - head_block;
	}

	/*
	 * If the head is right up against the tail, we can't clear
	 * anything.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = MIN(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log.  Just do it
		 * in a single write.  Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *     n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks.  Do it in two separate
		 * I/Os.  The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);

		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *    n ... n ... | n - 1 ...
		 *    ^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}
  1198. /******************************************************************************
  1199. *
  1200. * Log recover routines
  1201. *
  1202. ******************************************************************************
  1203. */
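/*
 * Walk the given hash chain looking for the recovery transaction with
 * the given transaction id. Returns NULL if no matching transaction
 * is found.
 */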
  1204. STATIC xlog_recover_t *
  1205. xlog_recover_find_tid(
  1206. xlog_recover_t *q,
  1207. xlog_tid_t tid)
  1208. {
  1209. xlog_recover_t *p = q;
  1210. while (p != NULL) {
  1211. if (p->r_log_tid == tid)
  1212. break;
  1213. p = p->r_next;
  1214. }
  1215. return p;
  1216. }
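/*
 * Add a new recovery transaction to the head of the given hash chain.
 */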
  1217. STATIC void
  1218. xlog_recover_put_hashq(
  1219. xlog_recover_t **q,
  1220. xlog_recover_t *trans)
  1221. {
  1222. trans->r_next = *q;
  1223. *q = trans;
  1224. }
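/*
 * Allocate a new, zeroed recovery item and append it to the tail of
 * the item queue.
 */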
  1225. STATIC void
  1226. xlog_recover_add_item(
  1227. xlog_recover_item_t **itemq)
  1228. {
  1229. xlog_recover_item_t *item;
  1230. item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
  1231. xlog_recover_insert_item_backq(itemq, item);
  1232. }
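/*
 * A continuation operation carries the remainder of a region that was
 * split across log records. Either finish copying the partially read
 * transaction header, or append the data to the last region of the
 * last item on the queue.
 */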
  1233. STATIC int
  1234. xlog_recover_add_to_cont_trans(
  1235. xlog_recover_t *trans,
  1236. xfs_caddr_t dp,
  1237. int len)
  1238. {
  1239. xlog_recover_item_t *item;
  1240. xfs_caddr_t ptr, old_ptr;
  1241. int old_len;
  1242. item = trans->r_itemq;
  1243. if (item == 0) {
  1244. /* finish copying rest of trans header */
  1245. xlog_recover_add_item(&trans->r_itemq);
  1246. ptr = (xfs_caddr_t) &trans->r_theader +
  1247. sizeof(xfs_trans_header_t) - len;
  1248. memcpy(ptr, dp, len); /* d, s, l */
  1249. return 0;
  1250. }
  1251. item = item->ri_prev;
  1252. old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
  1253. old_len = item->ri_buf[item->ri_cnt-1].i_len;
  1254. ptr = kmem_realloc(old_ptr, len+old_len, old_len, 0u);
  1255. memcpy(&ptr[old_len], dp, len); /* d, s, l */
  1256. item->ri_buf[item->ri_cnt-1].i_len += len;
  1257. item->ri_buf[item->ri_cnt-1].i_addr = ptr;
  1258. return 0;
  1259. }
  1260. /*
  1261. * The next region to add is the start of a new region. It could be
  1262. * a whole region or it could be the first part of a new region. Because
  1263. * of this, the assumption here is that the type and size fields of all
  1264. * format structures fit into the first 32 bits of the structure.
  1265. *
  1266. * This works because all regions must be 32 bit aligned. Therefore, we
  1267. * either have both fields or we have neither field. In the case we have
  1268. * neither field, the data part of the region is zero length. We only have
  1269. * a log_op_header and can throw away the header since a new one will appear
  1270. * later. If we have at least 4 bytes, then we can determine how many regions
  1271. * will appear in the current log item.
  1272. */
  1273. STATIC int
  1274. xlog_recover_add_to_trans(
  1275. xlog_recover_t *trans,
  1276. xfs_caddr_t dp,
  1277. int len)
  1278. {
  1279. xfs_inode_log_format_t *in_f; /* any will do */
  1280. xlog_recover_item_t *item;
  1281. xfs_caddr_t ptr;
  1282. if (!len)
  1283. return 0;
  1284. item = trans->r_itemq;
  1285. if (item == 0) {
  1286. ASSERT(*(uint *)dp == XFS_TRANS_HEADER_MAGIC);
  1287. if (len == sizeof(xfs_trans_header_t))
  1288. xlog_recover_add_item(&trans->r_itemq);
  1289. memcpy(&trans->r_theader, dp, len); /* d, s, l */
  1290. return 0;
  1291. }
  1292. ptr = kmem_alloc(len, KM_SLEEP);
  1293. memcpy(ptr, dp, len);
  1294. in_f = (xfs_inode_log_format_t *)ptr;
  1295. if (item->ri_prev->ri_total != 0 &&
  1296. item->ri_prev->ri_total == item->ri_prev->ri_cnt) {
  1297. xlog_recover_add_item(&trans->r_itemq);
  1298. }
  1299. item = trans->r_itemq;
  1300. item = item->ri_prev;
  1301. if (item->ri_total == 0) { /* first region to be added */
  1302. item->ri_total = in_f->ilf_size;
  1303. ASSERT(item->ri_total <= XLOG_MAX_REGIONS_IN_ITEM);
  1304. item->ri_buf = kmem_zalloc((item->ri_total *
  1305. sizeof(xfs_log_iovec_t)), KM_SLEEP);
  1306. }
  1307. ASSERT(item->ri_total > item->ri_cnt);
  1308. /* Description region is ri_buf[0] */
  1309. item->ri_buf[item->ri_cnt].i_addr = ptr;
  1310. item->ri_buf[item->ri_cnt].i_len = len;
  1311. item->ri_cnt++;
  1312. return 0;
  1313. }
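/*
 * Allocate a new recovery transaction for the given transaction id and
 * lsn, and add it to the head of the hash chain.
 */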
  1314. STATIC void
  1315. xlog_recover_new_tid(
  1316. xlog_recover_t **q,
  1317. xlog_tid_t tid,
  1318. xfs_lsn_t lsn)
  1319. {
  1320. xlog_recover_t *trans;
  1321. trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
  1322. trans->r_log_tid = tid;
  1323. trans->r_lsn = lsn;
  1324. xlog_recover_put_hashq(q, trans);
  1325. }
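/*
 * Remove the given transaction from its hash chain. Returns EIO if the
 * transaction is not found on the chain, which should never happen.
 */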
  1326. STATIC int
  1327. xlog_recover_unlink_tid(
  1328. xlog_recover_t **q,
  1329. xlog_recover_t *trans)
  1330. {
  1331. xlog_recover_t *tp;
  1332. int found = 0;
  1333. ASSERT(trans != 0);
  1334. if (trans == *q) {
  1335. *q = (*q)->r_next;
  1336. } else {
  1337. tp = *q;
  1338. while (tp != 0) {
  1339. if (tp->r_next == trans) {
  1340. found = 1;
  1341. break;
  1342. }
  1343. tp = tp->r_next;
  1344. }
  1345. if (!found) {
  1346. xlog_warn(
  1347. "XFS: xlog_recover_unlink_tid: trans not found");
  1348. ASSERT(0);
  1349. return XFS_ERROR(EIO);
  1350. }
  1351. tp->r_next = tp->r_next->r_next;
  1352. }
  1353. return 0;
  1354. }
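/*
 * The item queue is a circular, doubly linked list. Insert the item at
 * the tail of the queue, i.e. just before the current head.
 * (xlog_recover_insert_item_frontq() below reuses this and then moves
 * the head pointer so the item ends up at the front.)
 */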
  1355. STATIC void
  1356. xlog_recover_insert_item_backq(
  1357. xlog_recover_item_t **q,
  1358. xlog_recover_item_t *item)
  1359. {
  1360. if (*q == 0) {
  1361. item->ri_prev = item->ri_next = item;
  1362. *q = item;
  1363. } else {
  1364. item->ri_next = *q;
  1365. item->ri_prev = (*q)->ri_prev;
  1366. (*q)->ri_prev = item;
  1367. item->ri_prev->ri_next = item;
  1368. }
  1369. }
  1370. STATIC void
  1371. xlog_recover_insert_item_frontq(
  1372. xlog_recover_item_t **q,
  1373. xlog_recover_item_t *item)
  1374. {
  1375. xlog_recover_insert_item_backq(q, item);
  1376. *q = item;
  1377. }
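/*
 * Reorder the items in the transaction so that non-cancelled buffer
 * items sit at the front of the queue and are replayed first.
 * Cancelled buffers and all other item types (inodes, dquots,
 * quotaoffs, EFIs and EFDs) are appended to the back.
 */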
  1378. STATIC int
  1379. xlog_recover_reorder_trans(
  1380. xlog_t *log,
  1381. xlog_recover_t *trans)
  1382. {
  1383. xlog_recover_item_t *first_item, *itemq, *itemq_next;
  1384. xfs_buf_log_format_t *buf_f;
  1385. xfs_buf_log_format_v1_t *obuf_f;
  1386. ushort flags = 0;
  1387. first_item = itemq = trans->r_itemq;
  1388. trans->r_itemq = NULL;
  1389. do {
  1390. itemq_next = itemq->ri_next;
  1391. buf_f = (xfs_buf_log_format_t *)itemq->ri_buf[0].i_addr;
  1392. switch (ITEM_TYPE(itemq)) {
  1393. case XFS_LI_BUF:
  1394. flags = buf_f->blf_flags;
  1395. break;
  1396. case XFS_LI_6_1_BUF:
  1397. case XFS_LI_5_3_BUF:
  1398. obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
  1399. flags = obuf_f->blf_flags;
  1400. break;
  1401. }
  1402. switch (ITEM_TYPE(itemq)) {
  1403. case XFS_LI_BUF:
  1404. case XFS_LI_6_1_BUF:
  1405. case XFS_LI_5_3_BUF:
  1406. if (!(flags & XFS_BLI_CANCEL)) {
  1407. xlog_recover_insert_item_frontq(&trans->r_itemq,
  1408. itemq);
  1409. break;
  1410. }
  1411. case XFS_LI_INODE:
  1412. case XFS_LI_6_1_INODE:
  1413. case XFS_LI_5_3_INODE:
  1414. case XFS_LI_DQUOT:
  1415. case XFS_LI_QUOTAOFF:
  1416. case XFS_LI_EFD:
  1417. case XFS_LI_EFI:
  1418. xlog_recover_insert_item_backq(&trans->r_itemq, itemq);
  1419. break;
  1420. default:
  1421. xlog_warn(
  1422. "XFS: xlog_recover_reorder_trans: unrecognized type of log operation");
  1423. ASSERT(0);
  1424. return XFS_ERROR(EIO);
  1425. }
  1426. itemq = itemq_next;
  1427. } while (first_item != itemq);
  1428. return 0;
  1429. }
  1430. /*
  1431. * Build up the table of buf cancel records so that we don't replay
  1432. * cancelled data in the second pass. For buffer records that are
  1433. * not cancel records, there is nothing to do here so we just return.
  1434. *
  1435. * If we get a cancel record which is already in the table, this indicates
  1436. * that the buffer was cancelled multiple times. In order to ensure
  1437. * that during pass 2 we keep the record in the table until we reach its
  1438. * last occurrence in the log, we keep a reference count in the cancel
  1439. * record in the table to tell us how many times we expect to see this
  1440. * record during the second pass.
  1441. */
  1442. STATIC void
  1443. xlog_recover_do_buffer_pass1(
  1444. xlog_t *log,
  1445. xfs_buf_log_format_t *buf_f)
  1446. {
  1447. xfs_buf_cancel_t *bcp;
  1448. xfs_buf_cancel_t *nextp;
  1449. xfs_buf_cancel_t *prevp;
  1450. xfs_buf_cancel_t **bucket;
  1451. xfs_buf_log_format_v1_t *obuf_f;
  1452. xfs_daddr_t blkno = 0;
  1453. uint len = 0;
  1454. ushort flags = 0;
  1455. switch (buf_f->blf_type) {
  1456. case XFS_LI_BUF:
  1457. blkno = buf_f->blf_blkno;
  1458. len = buf_f->blf_len;
  1459. flags = buf_f->blf_flags;
  1460. break;
  1461. case XFS_LI_6_1_BUF:
  1462. case XFS_LI_5_3_BUF:
  1463. obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
  1464. blkno = (xfs_daddr_t) obuf_f->blf_blkno;
  1465. len = obuf_f->blf_len;
  1466. flags = obuf_f->blf_flags;
  1467. break;
  1468. }
  1469. /*
  1470. * If this isn't a cancel buffer item, then just return.
  1471. */
  1472. if (!(flags & XFS_BLI_CANCEL))
  1473. return;
  1474. /*
  1475. * Insert an xfs_buf_cancel record into the hash table of
  1476. * them. If there is already an identical record, bump
  1477. * its reference count.
  1478. */
  1479. bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
  1480. XLOG_BC_TABLE_SIZE];
  1481. /*
  1482. * If the hash bucket is empty then just insert a new record into
  1483. * the bucket.
  1484. */
  1485. if (*bucket == NULL) {
  1486. bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
  1487. KM_SLEEP);
  1488. bcp->bc_blkno = blkno;
  1489. bcp->bc_len = len;
  1490. bcp->bc_refcount = 1;
  1491. bcp->bc_next = NULL;
  1492. *bucket = bcp;
  1493. return;
  1494. }
  1495. /*
  1496. * The hash bucket is not empty, so search for duplicates of our
1497. * record. If we find one then just bump its refcount. If not
  1498. * then add us at the end of the list.
  1499. */
  1500. prevp = NULL;
  1501. nextp = *bucket;
  1502. while (nextp != NULL) {
  1503. if (nextp->bc_blkno == blkno && nextp->bc_len == len) {
  1504. nextp->bc_refcount++;
  1505. return;
  1506. }
  1507. prevp = nextp;
  1508. nextp = nextp->bc_next;
  1509. }
  1510. ASSERT(prevp != NULL);
  1511. bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
  1512. KM_SLEEP);
  1513. bcp->bc_blkno = blkno;
  1514. bcp->bc_len = len;
  1515. bcp->bc_refcount = 1;
  1516. bcp->bc_next = NULL;
  1517. prevp->bc_next = bcp;
  1518. }
  1519. /*
  1520. * Check to see whether the buffer being recovered has a corresponding
  1521. * entry in the buffer cancel record table. If it does then return 1
  1522. * so that it will be cancelled, otherwise return 0. If the buffer is
  1523. * actually a buffer cancel item (XFS_BLI_CANCEL is set), then decrement
  1524. * the refcount on the entry in the table and remove it from the table
  1525. * if this is the last reference.
  1526. *
  1527. * We remove the cancel record from the table when we encounter its
  1528. * last occurrence in the log so that if the same buffer is re-used
  1529. * again after its last cancellation we actually replay the changes
  1530. * made at that point.
  1531. */
  1532. STATIC int
  1533. xlog_check_buffer_cancelled(
  1534. xlog_t *log,
  1535. xfs_daddr_t blkno,
  1536. uint len,
  1537. ushort flags)
  1538. {
  1539. xfs_buf_cancel_t *bcp;
  1540. xfs_buf_cancel_t *prevp;
  1541. xfs_buf_cancel_t **bucket;
  1542. if (log->l_buf_cancel_table == NULL) {
  1543. /*
  1544. * There is nothing in the table built in pass one,
  1545. * so this buffer must not be cancelled.
  1546. */
  1547. ASSERT(!(flags & XFS_BLI_CANCEL));
  1548. return 0;
  1549. }
  1550. bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
  1551. XLOG_BC_TABLE_SIZE];
  1552. bcp = *bucket;
  1553. if (bcp == NULL) {
  1554. /*
  1555. * There is no corresponding entry in the table built
  1556. * in pass one, so this buffer has not been cancelled.
  1557. */
  1558. ASSERT(!(flags & XFS_BLI_CANCEL));
  1559. return 0;
  1560. }
  1561. /*
  1562. * Search for an entry in the buffer cancel table that
  1563. * matches our buffer.
  1564. */
  1565. prevp = NULL;
  1566. while (bcp != NULL) {
  1567. if (bcp->bc_blkno == blkno && bcp->bc_len == len) {
  1568. /*
1569. * We've got a match, so return 1 so that the
  1570. * recovery of this buffer is cancelled.
  1571. * If this buffer is actually a buffer cancel
  1572. * log item, then decrement the refcount on the
  1573. * one in the table and remove it if this is the
  1574. * last reference.
  1575. */
  1576. if (flags & XFS_BLI_CANCEL) {
  1577. bcp->bc_refcount--;
  1578. if (bcp->bc_refcount == 0) {
  1579. if (prevp == NULL) {
  1580. *bucket = bcp->bc_next;
  1581. } else {
  1582. prevp->bc_next = bcp->bc_next;
  1583. }
  1584. kmem_free(bcp,
  1585. sizeof(xfs_buf_cancel_t));
  1586. }
  1587. }
  1588. return 1;
  1589. }
  1590. prevp = bcp;
  1591. bcp = bcp->bc_next;
  1592. }
  1593. /*
  1594. * We didn't find a corresponding entry in the table, so
  1595. * return 0 so that the buffer is NOT cancelled.
  1596. */
  1597. ASSERT(!(flags & XFS_BLI_CANCEL));
  1598. return 0;
  1599. }
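/*
 * Pass 2 handling for a buffer log item: extract the block number,
 * length and flags from the (old or new format) buf log format
 * structure and check the cancel table to decide whether replay of
 * this buffer should be skipped.
 */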
  1600. STATIC int
  1601. xlog_recover_do_buffer_pass2(
  1602. xlog_t *log,
  1603. xfs_buf_log_format_t *buf_f)
  1604. {
  1605. xfs_buf_log_format_v1_t *obuf_f;
  1606. xfs_daddr_t blkno = 0;
  1607. ushort flags = 0;
  1608. uint len = 0;
  1609. switch (buf_f->blf_type) {
  1610. case XFS_LI_BUF:
  1611. blkno = buf_f->blf_blkno;
  1612. flags = buf_f->blf_flags;
  1613. len = buf_f->blf_len;
  1614. break;
  1615. case XFS_LI_6_1_BUF:
  1616. case XFS_LI_5_3_BUF:
  1617. obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
  1618. blkno = (xfs_daddr_t) obuf_f->blf_blkno;
  1619. flags = obuf_f->blf_flags;
  1620. len = (xfs_daddr_t) obuf_f->blf_len;
  1621. break;
  1622. }
  1623. return xlog_check_buffer_cancelled(log, blkno, len, flags);
  1624. }
  1625. /*
  1626. * Perform recovery for a buffer full of inodes. In these buffers,
  1627. * the only data which should be recovered is that which corresponds
  1628. * to the di_next_unlinked pointers in the on disk inode structures.
  1629. * The rest of the data for the inodes is always logged through the
  1630. * inodes themselves rather than the inode buffer and is recovered
  1631. * in xlog_recover_do_inode_trans().
  1632. *
  1633. * The only time when buffers full of inodes are fully recovered is
  1634. * when the buffer is full of newly allocated inodes. In this case
  1635. * the buffer will not be marked as an inode buffer and so will be
  1636. * sent to xlog_recover_do_reg_buffer() below during recovery.
  1637. */
  1638. STATIC int
  1639. xlog_recover_do_inode_buffer(
  1640. xfs_mount_t *mp,
  1641. xlog_recover_item_t *item,
  1642. xfs_buf_t *bp,
  1643. xfs_buf_log_format_t *buf_f)
  1644. {
  1645. int i;
  1646. int item_index;
  1647. int bit;
  1648. int nbits;
  1649. int reg_buf_offset;
  1650. int reg_buf_bytes;
  1651. int next_unlinked_offset;
  1652. int inodes_per_buf;
  1653. xfs_agino_t *logged_nextp;
  1654. xfs_agino_t *buffer_nextp;
  1655. xfs_buf_log_format_v1_t *obuf_f;
  1656. unsigned int *data_map = NULL;
  1657. unsigned int map_size = 0;
  1658. switch (buf_f->blf_type) {
  1659. case XFS_LI_BUF:
  1660. data_map = buf_f->blf_data_map;
  1661. map_size = buf_f->blf_map_size;
  1662. break;
  1663. case XFS_LI_6_1_BUF:
  1664. case XFS_LI_5_3_BUF:
  1665. obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
  1666. data_map = obuf_f->blf_data_map;
  1667. map_size = obuf_f->blf_map_size;
  1668. break;
  1669. }
  1670. /*
  1671. * Set the variables corresponding to the current region to
  1672. * 0 so that we'll initialize them on the first pass through
  1673. * the loop.
  1674. */
  1675. reg_buf_offset = 0;
  1676. reg_buf_bytes = 0;
  1677. bit = 0;
  1678. nbits = 0;
  1679. item_index = 0;
  1680. inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
  1681. for (i = 0; i < inodes_per_buf; i++) {
  1682. next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
  1683. offsetof(xfs_dinode_t, di_next_unlinked);
  1684. while (next_unlinked_offset >=
  1685. (reg_buf_offset + reg_buf_bytes)) {
  1686. /*
  1687. * The next di_next_unlinked field is beyond
  1688. * the current logged region. Find the next
  1689. * logged region that contains or is beyond
  1690. * the current di_next_unlinked field.
  1691. */
  1692. bit += nbits;
  1693. bit = xfs_next_bit(data_map, map_size, bit);
  1694. /*
  1695. * If there are no more logged regions in the
  1696. * buffer, then we're done.
  1697. */
  1698. if (bit == -1) {
  1699. return 0;
  1700. }
  1701. nbits = xfs_contig_bits(data_map, map_size,
  1702. bit);
  1703. ASSERT(nbits > 0);
  1704. reg_buf_offset = bit << XFS_BLI_SHIFT;
  1705. reg_buf_bytes = nbits << XFS_BLI_SHIFT;
  1706. item_index++;
  1707. }
  1708. /*
  1709. * If the current logged region starts after the current
  1710. * di_next_unlinked field, then move on to the next
  1711. * di_next_unlinked field.
  1712. */
  1713. if (next_unlinked_offset < reg_buf_offset) {
  1714. continue;
  1715. }
  1716. ASSERT(item->ri_buf[item_index].i_addr != NULL);
  1717. ASSERT((item->ri_buf[item_index].i_len % XFS_BLI_CHUNK) == 0);
  1718. ASSERT((reg_buf_offset + reg_buf_bytes) <= XFS_BUF_COUNT(bp));
  1719. /*
  1720. * The current logged region contains a copy of the
  1721. * current di_next_unlinked field. Extract its value
  1722. * and copy it to the buffer copy.
  1723. */
  1724. logged_nextp = (xfs_agino_t *)
  1725. ((char *)(item->ri_buf[item_index].i_addr) +
  1726. (next_unlinked_offset - reg_buf_offset));
  1727. if (unlikely(*logged_nextp == 0)) {
  1728. xfs_fs_cmn_err(CE_ALERT, mp,
  1729. "bad inode buffer log record (ptr = 0x%p, bp = 0x%p). XFS trying to replay bad (0) inode di_next_unlinked field",
  1730. item, bp);
  1731. XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
  1732. XFS_ERRLEVEL_LOW, mp);
  1733. return XFS_ERROR(EFSCORRUPTED);
  1734. }
  1735. buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
  1736. next_unlinked_offset);
  1737. INT_SET(*buffer_nextp, ARCH_CONVERT, *logged_nextp);
  1738. }
  1739. return 0;
  1740. }
  1741. /*
  1742. * Perform a 'normal' buffer recovery. Each logged region of the
  1743. * buffer should be copied over the corresponding region in the
  1744. * given buffer. The bitmap in the buf log format structure indicates
  1745. * where to place the logged data.
  1746. */
  1747. /*ARGSUSED*/
  1748. STATIC void
  1749. xlog_recover_do_reg_buffer(
  1750. xfs_mount_t *mp,
  1751. xlog_recover_item_t *item,
  1752. xfs_buf_t *bp,
  1753. xfs_buf_log_format_t *buf_f)
  1754. {
  1755. int i;
  1756. int bit;
  1757. int nbits;
  1758. xfs_buf_log_format_v1_t *obuf_f;
  1759. unsigned int *data_map = NULL;
  1760. unsigned int map_size = 0;
  1761. int error;
  1762. switch (buf_f->blf_type) {
  1763. case XFS_LI_BUF:
  1764. data_map = buf_f->blf_data_map;
  1765. map_size = buf_f->blf_map_size;
  1766. break;
  1767. case XFS_LI_6_1_BUF:
  1768. case XFS_LI_5_3_BUF:
  1769. obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
  1770. data_map = obuf_f->blf_data_map;
  1771. map_size = obuf_f->blf_map_size;
  1772. break;
  1773. }
  1774. bit = 0;
  1775. i = 1; /* 0 is the buf format structure */
  1776. while (1) {
  1777. bit = xfs_next_bit(data_map, map_size, bit);
  1778. if (bit == -1)
  1779. break;
  1780. nbits = xfs_contig_bits(data_map, map_size, bit);
  1781. ASSERT(nbits > 0);
  1782. ASSERT(item->ri_buf[i].i_addr != 0);
  1783. ASSERT(item->ri_buf[i].i_len % XFS_BLI_CHUNK == 0);
  1784. ASSERT(XFS_BUF_COUNT(bp) >=
  1785. ((uint)bit << XFS_BLI_SHIFT)+(nbits<<XFS_BLI_SHIFT));
  1786. /*
  1787. * Do a sanity check if this is a dquot buffer. Just checking
  1788. * the first dquot in the buffer should do. XXXThis is
  1789. * probably a good thing to do for other buf types also.
  1790. */
  1791. error = 0;
  1792. if (buf_f->blf_flags &
  1793. (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
  1794. error = xfs_qm_dqcheck((xfs_disk_dquot_t *)
  1795. item->ri_buf[i].i_addr,
  1796. -1, 0, XFS_QMOPT_DOWARN,
  1797. "dquot_buf_recover");
  1798. }
  1799. if (!error)
  1800. memcpy(xfs_buf_offset(bp,
  1801. (uint)bit << XFS_BLI_SHIFT), /* dest */
  1802. item->ri_buf[i].i_addr, /* source */
  1803. nbits<<XFS_BLI_SHIFT); /* length */
  1804. i++;
  1805. bit += nbits;
  1806. }
  1807. /* Shouldn't be any more regions */
  1808. ASSERT(i == item->ri_total);
  1809. }
  1810. /*
  1811. * Do some primitive error checking on ondisk dquot data structures.
  1812. */
  1813. int
  1814. xfs_qm_dqcheck(
  1815. xfs_disk_dquot_t *ddq,
  1816. xfs_dqid_t id,
  1817. uint type, /* used only when IO_dorepair is true */
  1818. uint flags,
  1819. char *str)
  1820. {
  1821. xfs_dqblk_t *d = (xfs_dqblk_t *)ddq;
  1822. int errs = 0;
  1823. /*
  1824. * We can encounter an uninitialized dquot buffer for 2 reasons:
  1825. * 1. If we crash while deleting the quotainode(s), and those blks got
  1826. * used for user data. This is because we take the path of regular
  1827. * file deletion; however, the size field of quotainodes is never
  1828. * updated, so all the tricks that we play in itruncate_finish
  1829. * don't quite matter.
  1830. *
  1831. * 2. We don't play the quota buffers when there's a quotaoff logitem.
  1832. * But the allocation will be replayed so we'll end up with an
  1833. * uninitialized quota block.
  1834. *
  1835. * This is all fine; things are still consistent, and we haven't lost
  1836. * any quota information. Just don't complain about bad dquot blks.
  1837. */
  1838. if (be16_to_cpu(ddq->d_magic) != XFS_DQUOT_MAGIC) {
  1839. if (flags & XFS_QMOPT_DOWARN)
  1840. cmn_err(CE_ALERT,
  1841. "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
  1842. str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
  1843. errs++;
  1844. }
  1845. if (ddq->d_version != XFS_DQUOT_VERSION) {
  1846. if (flags & XFS_QMOPT_DOWARN)
  1847. cmn_err(CE_ALERT,
  1848. "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
  1849. str, id, ddq->d_version, XFS_DQUOT_VERSION);
  1850. errs++;
  1851. }
  1852. if (ddq->d_flags != XFS_DQ_USER &&
  1853. ddq->d_flags != XFS_DQ_PROJ &&
  1854. ddq->d_flags != XFS_DQ_GROUP) {
  1855. if (flags & XFS_QMOPT_DOWARN)
  1856. cmn_err(CE_ALERT,
  1857. "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
  1858. str, id, ddq->d_flags);
  1859. errs++;
  1860. }
  1861. if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
  1862. if (flags & XFS_QMOPT_DOWARN)
  1863. cmn_err(CE_ALERT,
  1864. "%s : ondisk-dquot 0x%p, ID mismatch: "
  1865. "0x%x expected, found id 0x%x",
  1866. str, ddq, id, be32_to_cpu(ddq->d_id));
  1867. errs++;
  1868. }
  1869. if (!errs && ddq->d_id) {
  1870. if (ddq->d_blk_softlimit &&
  1871. be64_to_cpu(ddq->d_bcount) >=
  1872. be64_to_cpu(ddq->d_blk_softlimit)) {
  1873. if (!ddq->d_btimer) {
  1874. if (flags & XFS_QMOPT_DOWARN)
  1875. cmn_err(CE_ALERT,
  1876. "%s : Dquot ID 0x%x (0x%p) "
  1877. "BLK TIMER NOT STARTED",
  1878. str, (int)be32_to_cpu(ddq->d_id), ddq);
  1879. errs++;
  1880. }
  1881. }
  1882. if (ddq->d_ino_softlimit &&
  1883. be64_to_cpu(ddq->d_icount) >=
  1884. be64_to_cpu(ddq->d_ino_softlimit)) {
  1885. if (!ddq->d_itimer) {
  1886. if (flags & XFS_QMOPT_DOWARN)
  1887. cmn_err(CE_ALERT,
  1888. "%s : Dquot ID 0x%x (0x%p) "
  1889. "INODE TIMER NOT STARTED",
  1890. str, (int)be32_to_cpu(ddq->d_id), ddq);
  1891. errs++;
  1892. }
  1893. }
  1894. if (ddq->d_rtb_softlimit &&
  1895. be64_to_cpu(ddq->d_rtbcount) >=
  1896. be64_to_cpu(ddq->d_rtb_softlimit)) {
  1897. if (!ddq->d_rtbtimer) {
  1898. if (flags & XFS_QMOPT_DOWARN)
  1899. cmn_err(CE_ALERT,
  1900. "%s : Dquot ID 0x%x (0x%p) "
  1901. "RTBLK TIMER NOT STARTED",
  1902. str, (int)be32_to_cpu(ddq->d_id), ddq);
  1903. errs++;
  1904. }
  1905. }
  1906. }
  1907. if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
  1908. return errs;
  1909. if (flags & XFS_QMOPT_DOWARN)
  1910. cmn_err(CE_NOTE, "Re-initializing dquot ID 0x%x", id);
  1911. /*
  1912. * Typically, a repair is only requested by quotacheck.
  1913. */
  1914. ASSERT(id != -1);
  1915. ASSERT(flags & XFS_QMOPT_DQREPAIR);
  1916. memset(d, 0, sizeof(xfs_dqblk_t));
  1917. d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
  1918. d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
  1919. d->dd_diskdq.d_flags = type;
  1920. d->dd_diskdq.d_id = cpu_to_be32(id);
  1921. return errs;
  1922. }
  1923. /*
  1924. * Perform a dquot buffer recovery.
  1925. * Simple algorithm: if we have found a QUOTAOFF logitem of the same type
  1926. * (ie. USR or GRP), then just toss this buffer away; don't recover it.
  1927. * Else, treat it as a regular buffer and do recovery.
  1928. */
  1929. STATIC void
  1930. xlog_recover_do_dquot_buffer(
  1931. xfs_mount_t *mp,
  1932. xlog_t *log,
  1933. xlog_recover_item_t *item,
  1934. xfs_buf_t *bp,
  1935. xfs_buf_log_format_t *buf_f)
  1936. {
  1937. uint type;
  1938. /*
  1939. * Filesystems are required to send in quota flags at mount time.
  1940. */
  1941. if (mp->m_qflags == 0) {
  1942. return;
  1943. }
  1944. type = 0;
  1945. if (buf_f->blf_flags & XFS_BLI_UDQUOT_BUF)
  1946. type |= XFS_DQ_USER;
  1947. if (buf_f->blf_flags & XFS_BLI_PDQUOT_BUF)
  1948. type |= XFS_DQ_PROJ;
  1949. if (buf_f->blf_flags & XFS_BLI_GDQUOT_BUF)
  1950. type |= XFS_DQ_GROUP;
  1951. /*
1952. * This type of quota was turned off, so ignore this buffer
  1953. */
  1954. if (log->l_quotaoffs_flag & type)
  1955. return;
  1956. xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
  1957. }
  1958. /*
  1959. * This routine replays a modification made to a buffer at runtime.
  1960. * There are actually two types of buffer, regular and inode, which
  1961. * are handled differently. Inode buffers are handled differently
  1962. * in that we only recover a specific set of data from them, namely
  1963. * the inode di_next_unlinked fields. This is because all other inode
  1964. * data is actually logged via inode records and any data we replay
  1965. * here which overlaps that may be stale.
  1966. *
  1967. * When meta-data buffers are freed at run time we log a buffer item
  1968. * with the XFS_BLI_CANCEL bit set to indicate that previous copies
  1969. * of the buffer in the log should not be replayed at recovery time.
  1970. * This is so that if the blocks covered by the buffer are reused for
  1971. * file data before we crash we don't end up replaying old, freed
  1972. * meta-data into a user's file.
  1973. *
  1974. * To handle the cancellation of buffer log items, we make two passes
  1975. * over the log during recovery. During the first we build a table of
  1976. * those buffers which have been cancelled, and during the second we
  1977. * only replay those buffers which do not have corresponding cancel
  1978. * records in the table. See xlog_recover_do_buffer_pass[1,2] above
  1979. * for more details on the implementation of the table of cancel records.
  1980. */
  1981. STATIC int
  1982. xlog_recover_do_buffer_trans(
  1983. xlog_t *log,
  1984. xlog_recover_item_t *item,
  1985. int pass)
  1986. {
  1987. xfs_buf_log_format_t *buf_f;
  1988. xfs_buf_log_format_v1_t *obuf_f;
  1989. xfs_mount_t *mp;
  1990. xfs_buf_t *bp;
  1991. int error;
  1992. int cancel;
  1993. xfs_daddr_t blkno;
  1994. int len;
  1995. ushort flags;
  1996. buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;
  1997. if (pass == XLOG_RECOVER_PASS1) {
  1998. /*
  1999. * In this pass we're only looking for buf items
  2000. * with the XFS_BLI_CANCEL bit set.
  2001. */
  2002. xlog_recover_do_buffer_pass1(log, buf_f);
  2003. return 0;
  2004. } else {
  2005. /*
  2006. * In this pass we want to recover all the buffers
  2007. * which have not been cancelled and are not
  2008. * cancellation buffers themselves. The routine
  2009. * we call here will tell us whether or not to
  2010. * continue with the replay of this buffer.
  2011. */
  2012. cancel = xlog_recover_do_buffer_pass2(log, buf_f);
  2013. if (cancel) {
  2014. return 0;
  2015. }
  2016. }
  2017. switch (buf_f->blf_type) {
  2018. case XFS_LI_BUF:
  2019. blkno = buf_f->blf_blkno;
  2020. len = buf_f->blf_len;
  2021. flags = buf_f->blf_flags;
  2022. break;
  2023. case XFS_LI_6_1_BUF:
  2024. case XFS_LI_5_3_BUF:
  2025. obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
  2026. blkno = obuf_f->blf_blkno;
  2027. len = obuf_f->blf_len;
  2028. flags = obuf_f->blf_flags;
  2029. break;
  2030. default:
  2031. xfs_fs_cmn_err(CE_ALERT, log->l_mp,
  2032. "xfs_log_recover: unknown buffer type 0x%x, logdev %s",
  2033. buf_f->blf_type, log->l_mp->m_logname ?
  2034. log->l_mp->m_logname : "internal");
  2035. XFS_ERROR_REPORT("xlog_recover_do_buffer_trans",
  2036. XFS_ERRLEVEL_LOW, log->l_mp);
  2037. return XFS_ERROR(EFSCORRUPTED);
  2038. }
  2039. mp = log->l_mp;
  2040. if (flags & XFS_BLI_INODE_BUF) {
  2041. bp = xfs_buf_read_flags(mp->m_ddev_targp, blkno, len,
  2042. XFS_BUF_LOCK);
  2043. } else {
  2044. bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, 0);
  2045. }
  2046. if (XFS_BUF_ISERROR(bp)) {
  2047. xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp,
  2048. bp, blkno);
  2049. error = XFS_BUF_GETERROR(bp);
  2050. xfs_buf_relse(bp);
  2051. return error;
  2052. }
  2053. error = 0;
  2054. if (flags & XFS_BLI_INODE_BUF) {
  2055. error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
  2056. } else if (flags &
  2057. (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
  2058. xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
  2059. } else {
  2060. xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
  2061. }
  2062. if (error)
  2063. return XFS_ERROR(error);
  2064. /*
  2065. * Perform delayed write on the buffer. Asynchronous writes will be
  2066. * slower when taking into account all the buffers to be flushed.
  2067. *
  2068. * Also make sure that only inode buffers with good sizes stay in
  2069. * the buffer cache. The kernel moves inodes in buffers of 1 block
  2070. * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger. The inode
  2071. * buffers in the log can be a different size if the log was generated
  2072. * by an older kernel using unclustered inode buffers or a newer kernel
  2073. * running with a different inode cluster size. Regardless, if the
2074. * inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
  2075. * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
  2076. * the buffer out of the buffer cache so that the buffer won't
  2077. * overlap with future reads of those inodes.
  2078. */
  2079. if (XFS_DINODE_MAGIC ==
  2080. INT_GET(*((__uint16_t *)(xfs_buf_offset(bp, 0))), ARCH_CONVERT) &&
  2081. (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize,
  2082. (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
  2083. XFS_BUF_STALE(bp);
  2084. error = xfs_bwrite(mp, bp);
  2085. } else {
  2086. ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL ||
  2087. XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp);
  2088. XFS_BUF_SET_FSPRIVATE(bp, mp);
  2089. XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
  2090. xfs_bdwrite(mp, bp);
  2091. }
  2092. return (error);
  2093. }
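/*
 * Replay an inode log item. Nothing is done in pass 1. In pass 2 we
 * read in the buffer holding the inode, sanity check both the on-disk
 * inode and the logged copy, then copy the logged inode core and any
 * data/attr fork regions into the buffer and write it back out.
 */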
  2094. STATIC int
  2095. xlog_recover_do_inode_trans(
  2096. xlog_t *log,
  2097. xlog_recover_item_t *item,
  2098. int pass)
  2099. {
  2100. xfs_inode_log_format_t *in_f;
  2101. xfs_mount_t *mp;
  2102. xfs_buf_t *bp;
  2103. xfs_imap_t imap;
  2104. xfs_dinode_t *dip;
  2105. xfs_ino_t ino;
  2106. int len;
  2107. xfs_caddr_t src;
  2108. xfs_caddr_t dest;
  2109. int error;
  2110. int attr_index;
  2111. uint fields;
  2112. xfs_dinode_core_t *dicp;
  2113. if (pass == XLOG_RECOVER_PASS1) {
  2114. return 0;
  2115. }
  2116. in_f = (xfs_inode_log_format_t *)item->ri_buf[0].i_addr;
  2117. ino = in_f->ilf_ino;
  2118. mp = log->l_mp;
  2119. if (ITEM_TYPE(item) == XFS_LI_INODE) {
  2120. imap.im_blkno = (xfs_daddr_t)in_f->ilf_blkno;
  2121. imap.im_len = in_f->ilf_len;
  2122. imap.im_boffset = in_f->ilf_boffset;
  2123. } else {
  2124. /*
  2125. * It's an old inode format record. We don't know where
  2126. * its cluster is located on disk, and we can't allow
  2127. * xfs_imap() to figure it out because the inode btrees
  2128. * are not ready to be used. Therefore do not pass the
  2129. * XFS_IMAP_LOOKUP flag to xfs_imap(). This will give
  2130. * us only the single block in which the inode lives
  2131. * rather than its cluster, so we must make sure to
  2132. * invalidate the buffer when we write it out below.
  2133. */
  2134. imap.im_blkno = 0;
  2135. xfs_imap(log->l_mp, NULL, ino, &imap, 0);
  2136. }
  2137. /*
  2138. * Inode buffers can be freed, look out for it,
  2139. * and do not replay the inode.
  2140. */
  2141. if (xlog_check_buffer_cancelled(log, imap.im_blkno, imap.im_len, 0))
  2142. return 0;
  2143. bp = xfs_buf_read_flags(mp->m_ddev_targp, imap.im_blkno, imap.im_len,
  2144. XFS_BUF_LOCK);
  2145. if (XFS_BUF_ISERROR(bp)) {
  2146. xfs_ioerror_alert("xlog_recover_do..(read#2)", mp,
  2147. bp, imap.im_blkno);
  2148. error = XFS_BUF_GETERROR(bp);
  2149. xfs_buf_relse(bp);
  2150. return error;
  2151. }
  2152. error = 0;
  2153. ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
  2154. dip = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
  2155. /*
  2156. * Make sure the place we're flushing out to really looks
  2157. * like an inode!
  2158. */
  2159. if (unlikely(INT_GET(dip->di_core.di_magic, ARCH_CONVERT) != XFS_DINODE_MAGIC)) {
  2160. xfs_buf_relse(bp);
  2161. xfs_fs_cmn_err(CE_ALERT, mp,
  2162. "xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld",
  2163. dip, bp, ino);
  2164. XFS_ERROR_REPORT("xlog_recover_do_inode_trans(1)",
  2165. XFS_ERRLEVEL_LOW, mp);
  2166. return XFS_ERROR(EFSCORRUPTED);
  2167. }
  2168. dicp = (xfs_dinode_core_t*)(item->ri_buf[1].i_addr);
  2169. if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
  2170. xfs_buf_relse(bp);
  2171. xfs_fs_cmn_err(CE_ALERT, mp,
  2172. "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld",
  2173. item, ino);
  2174. XFS_ERROR_REPORT("xlog_recover_do_inode_trans(2)",
  2175. XFS_ERRLEVEL_LOW, mp);
  2176. return XFS_ERROR(EFSCORRUPTED);
  2177. }
  2178. /* Skip replay when the on disk inode is newer than the log one */
  2179. if (dicp->di_flushiter <
  2180. INT_GET(dip->di_core.di_flushiter, ARCH_CONVERT)) {
  2181. /*
  2182. * Deal with the wrap case, DI_MAX_FLUSH is less
  2183. * than smaller numbers
  2184. */
  2185. if ((INT_GET(dip->di_core.di_flushiter, ARCH_CONVERT)
  2186. == DI_MAX_FLUSH) &&
  2187. (dicp->di_flushiter < (DI_MAX_FLUSH>>1))) {
  2188. /* do nothing */
  2189. } else {
  2190. xfs_buf_relse(bp);
  2191. return 0;
  2192. }
  2193. }
  2194. /* Take the opportunity to reset the flush iteration count */
  2195. dicp->di_flushiter = 0;
  2196. if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) {
  2197. if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
  2198. (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
  2199. XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(3)",
  2200. XFS_ERRLEVEL_LOW, mp, dicp);
  2201. xfs_buf_relse(bp);
  2202. xfs_fs_cmn_err(CE_ALERT, mp,
  2203. "xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
  2204. item, dip, bp, ino);
  2205. return XFS_ERROR(EFSCORRUPTED);
  2206. }
  2207. } else if (unlikely((dicp->di_mode & S_IFMT) == S_IFDIR)) {
  2208. if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
  2209. (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
  2210. (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
  2211. XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(4)",
  2212. XFS_ERRLEVEL_LOW, mp, dicp);
  2213. xfs_buf_relse(bp);
  2214. xfs_fs_cmn_err(CE_ALERT, mp,
  2215. "xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
  2216. item, dip, bp, ino);
  2217. return XFS_ERROR(EFSCORRUPTED);
  2218. }
  2219. }
  2220. if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
  2221. XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(5)",
  2222. XFS_ERRLEVEL_LOW, mp, dicp);
  2223. xfs_buf_relse(bp);
  2224. xfs_fs_cmn_err(CE_ALERT, mp,
  2225. "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
  2226. item, dip, bp, ino,
  2227. dicp->di_nextents + dicp->di_anextents,
  2228. dicp->di_nblocks);
  2229. return XFS_ERROR(EFSCORRUPTED);
  2230. }
  2231. if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
  2232. XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(6)",
  2233. XFS_ERRLEVEL_LOW, mp, dicp);
  2234. xfs_buf_relse(bp);
  2235. xfs_fs_cmn_err(CE_ALERT, mp,
  2236. "xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x",
  2237. item, dip, bp, ino, dicp->di_forkoff);
  2238. return XFS_ERROR(EFSCORRUPTED);
  2239. }
  2240. if (unlikely(item->ri_buf[1].i_len > sizeof(xfs_dinode_core_t))) {
  2241. XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(7)",
  2242. XFS_ERRLEVEL_LOW, mp, dicp);
  2243. xfs_buf_relse(bp);
  2244. xfs_fs_cmn_err(CE_ALERT, mp,
  2245. "xfs_inode_recover: Bad inode log record length %d, rec ptr 0x%p",
  2246. item->ri_buf[1].i_len, item);
  2247. return XFS_ERROR(EFSCORRUPTED);
  2248. }
  2249. /* The core is in in-core format */
  2250. xfs_xlate_dinode_core((xfs_caddr_t)&dip->di_core,
  2251. (xfs_dinode_core_t*)item->ri_buf[1].i_addr, -1);
  2252. /* the rest is in on-disk format */
  2253. if (item->ri_buf[1].i_len > sizeof(xfs_dinode_core_t)) {
  2254. memcpy((xfs_caddr_t) dip + sizeof(xfs_dinode_core_t),
  2255. item->ri_buf[1].i_addr + sizeof(xfs_dinode_core_t),
  2256. item->ri_buf[1].i_len - sizeof(xfs_dinode_core_t));
  2257. }
  2258. fields = in_f->ilf_fields;
  2259. switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
  2260. case XFS_ILOG_DEV:
  2261. INT_SET(dip->di_u.di_dev, ARCH_CONVERT, in_f->ilf_u.ilfu_rdev);
  2262. break;
  2263. case XFS_ILOG_UUID:
  2264. dip->di_u.di_muuid = in_f->ilf_u.ilfu_uuid;
  2265. break;
  2266. }
  2267. if (in_f->ilf_size == 2)
  2268. goto write_inode_buffer;
  2269. len = item->ri_buf[2].i_len;
  2270. src = item->ri_buf[2].i_addr;
  2271. ASSERT(in_f->ilf_size <= 4);
  2272. ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
  2273. ASSERT(!(fields & XFS_ILOG_DFORK) ||
  2274. (len == in_f->ilf_dsize));
  2275. switch (fields & XFS_ILOG_DFORK) {
  2276. case XFS_ILOG_DDATA:
  2277. case XFS_ILOG_DEXT:
  2278. memcpy(&dip->di_u, src, len);
  2279. break;
  2280. case XFS_ILOG_DBROOT:
  2281. xfs_bmbt_to_bmdr((xfs_bmbt_block_t *)src, len,
  2282. &(dip->di_u.di_bmbt),
  2283. XFS_DFORK_DSIZE(dip, mp));
  2284. break;
  2285. default:
  2286. /*
  2287. * There are no data fork flags set.
  2288. */
  2289. ASSERT((fields & XFS_ILOG_DFORK) == 0);
  2290. break;
  2291. }
  2292. /*
  2293. * If we logged any attribute data, recover it. There may or
  2294. * may not have been any other non-core data logged in this
  2295. * transaction.
  2296. */
  2297. if (in_f->ilf_fields & XFS_ILOG_AFORK) {
  2298. if (in_f->ilf_fields & XFS_ILOG_DFORK) {
  2299. attr_index = 3;
  2300. } else {
  2301. attr_index = 2;
  2302. }
  2303. len = item->ri_buf[attr_index].i_len;
  2304. src = item->ri_buf[attr_index].i_addr;
  2305. ASSERT(len == in_f->ilf_asize);
  2306. switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
  2307. case XFS_ILOG_ADATA:
  2308. case XFS_ILOG_AEXT:
  2309. dest = XFS_DFORK_APTR(dip);
  2310. ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
  2311. memcpy(dest, src, len);
  2312. break;
  2313. case XFS_ILOG_ABROOT:
  2314. dest = XFS_DFORK_APTR(dip);
  2315. xfs_bmbt_to_bmdr((xfs_bmbt_block_t *)src, len,
  2316. (xfs_bmdr_block_t*)dest,
  2317. XFS_DFORK_ASIZE(dip, mp));
  2318. break;
  2319. default:
  2320. xlog_warn("XFS: xlog_recover_do_inode_trans: Invalid flag");
  2321. ASSERT(0);
  2322. xfs_buf_relse(bp);
  2323. return XFS_ERROR(EIO);
  2324. }
  2325. }
  2326. write_inode_buffer:
  2327. if (ITEM_TYPE(item) == XFS_LI_INODE) {
  2328. ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL ||
  2329. XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp);
  2330. XFS_BUF_SET_FSPRIVATE(bp, mp);
  2331. XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
  2332. xfs_bdwrite(mp, bp);
  2333. } else {
  2334. XFS_BUF_STALE(bp);
  2335. error = xfs_bwrite(mp, bp);
  2336. }
  2337. return (error);
  2338. }
  2339. /*
  2340. * Recover QUOTAOFF records. We simply make a note of it in the xlog_t
2341. * structure, so that we know not to do any dquot item or dquot buffer recovery
  2342. * of that type.
  2343. */
  2344. STATIC int
  2345. xlog_recover_do_quotaoff_trans(
  2346. xlog_t *log,
  2347. xlog_recover_item_t *item,
  2348. int pass)
  2349. {
  2350. xfs_qoff_logformat_t *qoff_f;
  2351. if (pass == XLOG_RECOVER_PASS2) {
  2352. return (0);
  2353. }
  2354. qoff_f = (xfs_qoff_logformat_t *)item->ri_buf[0].i_addr;
  2355. ASSERT(qoff_f);
  2356. /*
  2357. * The logitem format's flag tells us if this was user quotaoff,
  2358. * group/project quotaoff or both.
  2359. */
  2360. if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
  2361. log->l_quotaoffs_flag |= XFS_DQ_USER;
  2362. if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
  2363. log->l_quotaoffs_flag |= XFS_DQ_PROJ;
  2364. if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
  2365. log->l_quotaoffs_flag |= XFS_DQ_GROUP;
  2366. return (0);
  2367. }
  2368. /*
  2369. * Recover a dquot record
  2370. */
  2371. STATIC int
  2372. xlog_recover_do_dquot_trans(
  2373. xlog_t *log,
  2374. xlog_recover_item_t *item,
  2375. int pass)
  2376. {
  2377. xfs_mount_t *mp;
  2378. xfs_buf_t *bp;
  2379. struct xfs_disk_dquot *ddq, *recddq;
  2380. int error;
  2381. xfs_dq_logformat_t *dq_f;
  2382. uint type;
  2383. if (pass == XLOG_RECOVER_PASS1) {
  2384. return 0;
  2385. }
  2386. mp = log->l_mp;
  2387. /*
  2388. * Filesystems are required to send in quota flags at mount time.
  2389. */
  2390. if (mp->m_qflags == 0)
  2391. return (0);
  2392. recddq = (xfs_disk_dquot_t *)item->ri_buf[1].i_addr;
  2393. ASSERT(recddq);
  2394. /*
2395. * This type of quota was turned off, so ignore this record.
  2396. */
  2397. type = INT_GET(recddq->d_flags, ARCH_CONVERT) &
  2398. (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
  2399. ASSERT(type);
  2400. if (log->l_quotaoffs_flag & type)
  2401. return (0);
  2402. /*
  2403. * At this point we know that quota was _not_ turned off.
  2404. * Since the mount flags are not indicating to us otherwise, this
  2405. * must mean that quota is on, and the dquot needs to be replayed.
  2406. * Remember that we may not have fully recovered the superblock yet,
  2407. * so we can't do the usual trick of looking at the SB quota bits.
  2408. *
  2409. * The other possibility, of course, is that the quota subsystem was
  2410. * removed since the last mount - ENOSYS.
  2411. */
  2412. dq_f = (xfs_dq_logformat_t *)item->ri_buf[0].i_addr;
  2413. ASSERT(dq_f);
  2414. if ((error = xfs_qm_dqcheck(recddq,
  2415. dq_f->qlf_id,
  2416. 0, XFS_QMOPT_DOWARN,
  2417. "xlog_recover_do_dquot_trans (log copy)"))) {
  2418. return XFS_ERROR(EIO);
  2419. }
  2420. ASSERT(dq_f->qlf_len == 1);
  2421. error = xfs_read_buf(mp, mp->m_ddev_targp,
  2422. dq_f->qlf_blkno,
  2423. XFS_FSB_TO_BB(mp, dq_f->qlf_len),
  2424. 0, &bp);
  2425. if (error) {
  2426. xfs_ioerror_alert("xlog_recover_do..(read#3)", mp,
  2427. bp, dq_f->qlf_blkno);
  2428. return error;
  2429. }
  2430. ASSERT(bp);
  2431. ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
  2432. /*
  2433. * At least the magic num portion should be on disk because this
  2434. * was among a chunk of dquots created earlier, and we did some
  2435. * minimal initialization then.
  2436. */
  2437. if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
  2438. "xlog_recover_do_dquot_trans")) {
  2439. xfs_buf_relse(bp);
  2440. return XFS_ERROR(EIO);
  2441. }
  2442. memcpy(ddq, recddq, item->ri_buf[1].i_len);
  2443. ASSERT(dq_f->qlf_size == 2);
  2444. ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL ||
  2445. XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp);
  2446. XFS_BUF_SET_FSPRIVATE(bp, mp);
  2447. XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
  2448. xfs_bdwrite(mp, bp);
  2449. return (0);
  2450. }
  2451. /*
  2452. * This routine is called to create an in-core extent free intent
  2453. * item from the efi format structure which was logged on disk.
  2454. * It allocates an in-core efi, copies the extents from the format
  2455. * structure into it, and adds the efi to the AIL with the given
  2456. * LSN.
  2457. */
  2458. STATIC void
  2459. xlog_recover_do_efi_trans(
  2460. xlog_t *log,
  2461. xlog_recover_item_t *item,
  2462. xfs_lsn_t lsn,
  2463. int pass)
  2464. {
  2465. xfs_mount_t *mp;
  2466. xfs_efi_log_item_t *efip;
  2467. xfs_efi_log_format_t *efi_formatp;
  2468. SPLDECL(s);
  2469. if (pass == XLOG_RECOVER_PASS1) {
  2470. return;
  2471. }
  2472. efi_formatp = (xfs_efi_log_format_t *)item->ri_buf[0].i_addr;
  2473. ASSERT(item->ri_buf[0].i_len ==
  2474. (sizeof(xfs_efi_log_format_t) +
  2475. ((efi_formatp->efi_nextents - 1) * sizeof(xfs_extent_t))));
  2476. mp = log->l_mp;
  2477. efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
  2478. memcpy((char *)&(efip->efi_format), (char *)efi_formatp,
  2479. sizeof(xfs_efi_log_format_t) +
  2480. ((efi_formatp->efi_nextents - 1) * sizeof(xfs_extent_t)));
  2481. efip->efi_next_extent = efi_formatp->efi_nextents;
  2482. efip->efi_flags |= XFS_EFI_COMMITTED;
  2483. AIL_LOCK(mp,s);
  2484. /*
  2485. * xfs_trans_update_ail() drops the AIL lock.
  2486. */
  2487. xfs_trans_update_ail(mp, (xfs_log_item_t *)efip, lsn, s);
  2488. }
  2489. /*
  2490. * This routine is called when an efd format structure is found in
2491. * a committed transaction in the log. Its purpose is to cancel
  2492. * the corresponding efi if it was still in the log. To do this
  2493. * it searches the AIL for the efi with an id equal to that in the
  2494. * efd format structure. If we find it, we remove the efi from the
  2495. * AIL and free it.
  2496. */
  2497. STATIC void
  2498. xlog_recover_do_efd_trans(
  2499. xlog_t *log,
  2500. xlog_recover_item_t *item,
  2501. int pass)
  2502. {
  2503. xfs_mount_t *mp;
  2504. xfs_efd_log_format_t *efd_formatp;
  2505. xfs_efi_log_item_t *efip = NULL;
  2506. xfs_log_item_t *lip;
  2507. int gen;
  2508. __uint64_t efi_id;
  2509. SPLDECL(s);
  2510. if (pass == XLOG_RECOVER_PASS1) {
  2511. return;
  2512. }
  2513. efd_formatp = (xfs_efd_log_format_t *)item->ri_buf[0].i_addr;
  2514. ASSERT(item->ri_buf[0].i_len ==
  2515. (sizeof(xfs_efd_log_format_t) +
  2516. ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_t))));
  2517. efi_id = efd_formatp->efd_efi_id;
  2518. /*
  2519. * Search for the efi with the id in the efd format structure
  2520. * in the AIL.
  2521. */
  2522. mp = log->l_mp;
  2523. AIL_LOCK(mp,s);
  2524. lip = xfs_trans_first_ail(mp, &gen);
  2525. while (lip != NULL) {
  2526. if (lip->li_type == XFS_LI_EFI) {
  2527. efip = (xfs_efi_log_item_t *)lip;
  2528. if (efip->efi_format.efi_id == efi_id) {
  2529. /*
  2530. * xfs_trans_delete_ail() drops the
  2531. * AIL lock.
  2532. */
  2533. xfs_trans_delete_ail(mp, lip, s);
  2534. break;
  2535. }
  2536. }
  2537. lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
  2538. }
  2539. /*
  2540. * If we found it, then free it up. If it wasn't there, it
  2541. * must have been overwritten in the log. Oh well.
  2542. */
  2543. if (lip != NULL) {
  2544. xfs_efi_item_free(efip);
  2545. } else {
  2546. AIL_UNLOCK(mp, s);
  2547. }
  2548. }
  2549. /*
  2550. * Perform the transaction
  2551. *
  2552. * If the transaction modifies a buffer or inode, do it now. Otherwise,
  2553. * EFIs and EFDs get queued up by adding entries into the AIL for them.
  2554. */
  2555. STATIC int
  2556. xlog_recover_do_trans(
  2557. xlog_t *log,
  2558. xlog_recover_t *trans,
  2559. int pass)
  2560. {
  2561. int error = 0;
  2562. xlog_recover_item_t *item, *first_item;
  2563. if ((error = xlog_recover_reorder_trans(log, trans)))
  2564. return error;
  2565. first_item = item = trans->r_itemq;
  2566. do {
  2567. /*
  2568. * we don't need to worry about the block number being
  2569. * truncated in > 1 TB buffers because in user-land,
2570. * we're now n32 or 64-bit, so xfs_daddr_t is 64 bits and
2571. * the blknos will get through the user-mode buffer
  2572. * cache properly. The only bad case is o32 kernels
  2573. * where xfs_daddr_t is 32-bits but mount will warn us
  2574. * off a > 1 TB filesystem before we get here.
  2575. */
  2576. if ((ITEM_TYPE(item) == XFS_LI_BUF) ||
  2577. (ITEM_TYPE(item) == XFS_LI_6_1_BUF) ||
  2578. (ITEM_TYPE(item) == XFS_LI_5_3_BUF)) {
  2579. if ((error = xlog_recover_do_buffer_trans(log, item,
  2580. pass)))
  2581. break;
  2582. } else if ((ITEM_TYPE(item) == XFS_LI_INODE) ||
  2583. (ITEM_TYPE(item) == XFS_LI_6_1_INODE) ||
  2584. (ITEM_TYPE(item) == XFS_LI_5_3_INODE)) {
  2585. if ((error = xlog_recover_do_inode_trans(log, item,
  2586. pass)))
  2587. break;
  2588. } else if (ITEM_TYPE(item) == XFS_LI_EFI) {
  2589. xlog_recover_do_efi_trans(log, item, trans->r_lsn,
  2590. pass);
  2591. } else if (ITEM_TYPE(item) == XFS_LI_EFD) {
  2592. xlog_recover_do_efd_trans(log, item, pass);
  2593. } else if (ITEM_TYPE(item) == XFS_LI_DQUOT) {
  2594. if ((error = xlog_recover_do_dquot_trans(log, item,
  2595. pass)))
  2596. break;
  2597. } else if ((ITEM_TYPE(item) == XFS_LI_QUOTAOFF)) {
  2598. if ((error = xlog_recover_do_quotaoff_trans(log, item,
  2599. pass)))
  2600. break;
  2601. } else {
  2602. xlog_warn("XFS: xlog_recover_do_trans");
  2603. ASSERT(0);
  2604. error = XFS_ERROR(EIO);
  2605. break;
  2606. }
  2607. item = item->ri_next;
  2608. } while (first_item != item);
  2609. return error;
  2610. }
  2611. /*
  2612. * Free up any resources allocated by the transaction
  2613. *
  2614. * Remember that EFIs, EFDs, and IUNLINKs are handled later.
  2615. */
  2616. STATIC void
  2617. xlog_recover_free_trans(
  2618. xlog_recover_t *trans)
  2619. {
  2620. xlog_recover_item_t *first_item, *item, *free_item;
  2621. int i;
  2622. item = first_item = trans->r_itemq;
  2623. do {
  2624. free_item = item;
  2625. item = item->ri_next;
  2626. /* Free the regions in the item. */
  2627. for (i = 0; i < free_item->ri_cnt; i++) {
  2628. kmem_free(free_item->ri_buf[i].i_addr,
  2629. free_item->ri_buf[i].i_len);
  2630. }
  2631. /* Free the item itself */
  2632. kmem_free(free_item->ri_buf,
  2633. (free_item->ri_total * sizeof(xfs_log_iovec_t)));
  2634. kmem_free(free_item, sizeof(xlog_recover_item_t));
  2635. } while (first_item != item);
  2636. /* Free the transaction recover structure */
  2637. kmem_free(trans, sizeof(xlog_recover_t));
  2638. }
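/*
 * A commit record marks the end of a transaction. Unlink the
 * transaction from its hash chain, replay all of its items for this
 * pass and then free it.
 */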
  2639. STATIC int
  2640. xlog_recover_commit_trans(
  2641. xlog_t *log,
  2642. xlog_recover_t **q,
  2643. xlog_recover_t *trans,
  2644. int pass)
  2645. {
  2646. int error;
  2647. if ((error = xlog_recover_unlink_tid(q, trans)))
  2648. return error;
  2649. if ((error = xlog_recover_do_trans(log, trans, pass)))
  2650. return error;
  2651. xlog_recover_free_trans(trans); /* no error */
  2652. return 0;
  2653. }
  2654. STATIC int
  2655. xlog_recover_unmount_trans(
  2656. xlog_recover_t *trans)
  2657. {
  2658. /* Do nothing now */
  2659. xlog_warn("XFS: xlog_recover_unmount_trans: Unmount LR");
  2660. return 0;
  2661. }
  2662. /*
  2663. * There are two valid states of the r_state field. 0 indicates that the
  2664. * transaction structure is in a normal state. We have either seen the
  2665. * start of the transaction or the last operation we added was not a partial
  2666. * operation. If the last operation we added to the transaction was a
  2667. * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
  2668. *
  2669. * NOTE: skip LRs with 0 data length.
  2670. */
STATIC int
xlog_recover_process_data(
	xlog_t			*log,
	xlog_recover_t		*rhash[],
	xlog_rec_header_t	*rhead,
	xfs_caddr_t		dp,
	int			pass)
{
	xfs_caddr_t		lp;
	int			num_logops;
	xlog_op_header_t	*ohead;
	xlog_recover_t		*trans;
	xlog_tid_t		tid;
	int			error;
	unsigned long		hash;
	uint			flags;

	lp = dp + INT_GET(rhead->h_len, ARCH_CONVERT);
	num_logops = INT_GET(rhead->h_num_logops, ARCH_CONVERT);

	/* check the log format matches our own - else we can't recover */
	if (xlog_header_check_recover(log->l_mp, rhead))
		return (XFS_ERROR(EIO));

	while ((dp < lp) && num_logops) {
		ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
		ohead = (xlog_op_header_t *)dp;
		dp += sizeof(xlog_op_header_t);
		if (ohead->oh_clientid != XFS_TRANSACTION &&
		    ohead->oh_clientid != XFS_LOG) {
			xlog_warn(
			"XFS: xlog_recover_process_data: bad clientid");
			ASSERT(0);
			return (XFS_ERROR(EIO));
		}
		tid = INT_GET(ohead->oh_tid, ARCH_CONVERT);
		hash = XLOG_RHASH(tid);
		trans = xlog_recover_find_tid(rhash[hash], tid);
		if (trans == NULL) {		/* not found; add new tid */
			if (ohead->oh_flags & XLOG_START_TRANS)
				xlog_recover_new_tid(&rhash[hash], tid,
					INT_GET(rhead->h_lsn, ARCH_CONVERT));
		} else {
			ASSERT(dp+INT_GET(ohead->oh_len, ARCH_CONVERT) <= lp);
			flags = ohead->oh_flags & ~XLOG_END_TRANS;
			if (flags & XLOG_WAS_CONT_TRANS)
				flags &= ~XLOG_CONTINUE_TRANS;
			switch (flags) {
			case XLOG_COMMIT_TRANS:
				error = xlog_recover_commit_trans(log,
						&rhash[hash], trans, pass);
				break;
			case XLOG_UNMOUNT_TRANS:
				error = xlog_recover_unmount_trans(trans);
				break;
			case XLOG_WAS_CONT_TRANS:
				error = xlog_recover_add_to_cont_trans(trans,
						dp, INT_GET(ohead->oh_len,
							ARCH_CONVERT));
				break;
			case XLOG_START_TRANS:
				xlog_warn(
			"XFS: xlog_recover_process_data: bad transaction");
				ASSERT(0);
				error = XFS_ERROR(EIO);
				break;
			case 0:
			case XLOG_CONTINUE_TRANS:
				error = xlog_recover_add_to_trans(trans,
						dp, INT_GET(ohead->oh_len,
							ARCH_CONVERT));
				break;
			default:
				xlog_warn(
			"XFS: xlog_recover_process_data: bad flag");
				ASSERT(0);
				error = XFS_ERROR(EIO);
				break;
			}
			if (error)
				return error;
		}
		dp += INT_GET(ohead->oh_len, ARCH_CONVERT);
		num_logops--;
	}
	return 0;
}
/*
 * Process an extent free intent item that was recovered from
 * the log.  We need to free the extents that it describes.
 */
STATIC void
xlog_recover_process_efi(
	xfs_mount_t		*mp,
	xfs_efi_log_item_t	*efip)
{
	xfs_efd_log_item_t	*efdp;
	xfs_trans_t		*tp;
	int			i;
	xfs_extent_t		*extp;
	xfs_fsblock_t		startblock_fsb;

	ASSERT(!(efip->efi_flags & XFS_EFI_RECOVERED));

	/*
	 * First check the validity of the extents described by the
	 * EFI.  If any are bad, then assume that all are bad and
	 * just toss the EFI.
	 */
	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
		extp = &(efip->efi_format.efi_extents[i]);
		startblock_fsb = XFS_BB_TO_FSB(mp,
				   XFS_FSB_TO_DADDR(mp, extp->ext_start));
		if ((startblock_fsb == 0) ||
		    (extp->ext_len == 0) ||
		    (startblock_fsb >= mp->m_sb.sb_dblocks) ||
		    (extp->ext_len >= mp->m_sb.sb_agblocks)) {
			/*
			 * This will pull the EFI from the AIL and
			 * free the memory associated with it.
			 */
			xfs_efi_release(efip, efip->efi_format.efi_nextents);
			return;
		}
	}

	tp = xfs_trans_alloc(mp, 0);
	xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
	efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);

	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
		extp = &(efip->efi_format.efi_extents[i]);
		xfs_free_extent(tp, extp->ext_start, extp->ext_len);
		xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
					 extp->ext_len);
	}

	efip->efi_flags |= XFS_EFI_RECOVERED;
	xfs_trans_commit(tp, 0, NULL);
}
/*
 * Verify that once we've encountered something other than an EFI
 * in the AIL that there are no more EFIs in the AIL.
 */
#if defined(DEBUG)
STATIC void
xlog_recover_check_ail(
	xfs_mount_t		*mp,
	xfs_log_item_t		*lip,
	int			gen)
{
	int			orig_gen = gen;

	do {
		ASSERT(lip->li_type != XFS_LI_EFI);
		lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
		/*
		 * The check will be bogus if we restart from the
		 * beginning of the AIL, so ASSERT that we don't.
		 * We never should since we're holding the AIL lock
		 * the entire time.
		 */
		ASSERT(gen == orig_gen);
	} while (lip != NULL);
}
#endif	/* DEBUG */
/*
 * When this is called, all of the EFIs which did not have
 * corresponding EFDs should be in the AIL.  What we do now
 * is free the extents associated with each one.
 *
 * Since we process the EFIs in normal transactions, they
 * will be removed at some point after the commit.  This prevents
 * us from just walking down the list processing each one.
 * We'll use a flag in the EFI to skip those that we've already
 * processed and use the AIL iteration mechanism's generation
 * count to try to speed this up at least a bit.
 *
 * When we start, we know that the EFIs are the only things in
 * the AIL.  As we process them, however, other items are added
 * to the AIL.  Since everything added to the AIL must come after
 * everything already in the AIL, we stop processing as soon as
 * we see something other than an EFI in the AIL.
 */
STATIC void
xlog_recover_process_efis(
	xlog_t			*log)
{
	xfs_log_item_t		*lip;
	xfs_efi_log_item_t	*efip;
	int			gen;
	xfs_mount_t		*mp;
	SPLDECL(s);

	mp = log->l_mp;
	AIL_LOCK(mp,s);

	lip = xfs_trans_first_ail(mp, &gen);
	while (lip != NULL) {
		/*
		 * We're done when we see something other than an EFI.
		 */
		if (lip->li_type != XFS_LI_EFI) {
			xlog_recover_check_ail(mp, lip, gen);
			break;
		}

		/*
		 * Skip EFIs that we've already processed.
		 */
		efip = (xfs_efi_log_item_t *)lip;
		if (efip->efi_flags & XFS_EFI_RECOVERED) {
			lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
			continue;
		}

		AIL_UNLOCK(mp, s);
		xlog_recover_process_efi(mp, efip);
		AIL_LOCK(mp,s);
		lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
	}
	AIL_UNLOCK(mp, s);
}
/*
 * This routine performs a transaction to null out a bad inode pointer
 * in an agi unlinked inode hash bucket.
 */
STATIC void
xlog_recover_clear_agi_bucket(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agno,
	int		bucket)
{
	xfs_trans_t	*tp;
	xfs_agi_t	*agi;
	xfs_buf_t	*agibp;
	int		offset;
	int		error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
	xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp), 0, 0, 0);

	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
				   XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
				   XFS_FSS_TO_BB(mp, 1), 0, &agibp);
	if (error) {
		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
		return;
	}

	agi = XFS_BUF_TO_AGI(agibp);
	if (be32_to_cpu(agi->agi_magicnum) != XFS_AGI_MAGIC) {
		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
		return;
	}

	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		 (sizeof(xfs_agino_t) * bucket);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));

	(void) xfs_trans_commit(tp, 0, NULL);
}
/*
 * xlog_iunlink_recover
 *
 * This is called during recovery to process any inodes which
 * we unlinked but had not yet freed when the system crashed.  These
 * inodes will be on the lists in the AGI blocks.  What we do
 * here is scan all the AGIs and fully truncate and free any
 * inodes found on the lists.  Each inode is removed from the
 * lists when it has been fully truncated and is freed.  The
 * freeing of the inode and its removal from the list must be
 * atomic.
 */
void
xlog_recover_process_iunlinks(
	xlog_t		*log)
{
	xfs_mount_t	*mp;
	xfs_agnumber_t	agno;
	xfs_agi_t	*agi;
	xfs_buf_t	*agibp;
	xfs_buf_t	*ibp;
	xfs_dinode_t	*dip;
	xfs_inode_t	*ip;
	xfs_agino_t	agino;
	xfs_ino_t	ino;
	int		bucket;
	int		error;
	uint		mp_dmevmask;

	mp = log->l_mp;

	/*
	 * Prevent any DMAPI event from being sent while in this function.
	 */
	mp_dmevmask = mp->m_dmevmask;
	mp->m_dmevmask = 0;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		/*
		 * Find the agi for this ag.
		 */
		agibp = xfs_buf_read(mp->m_ddev_targp,
				XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
				XFS_FSS_TO_BB(mp, 1), 0);
		if (XFS_BUF_ISERROR(agibp)) {
			xfs_ioerror_alert("xlog_recover_process_iunlinks(#1)",
					  log->l_mp, agibp,
					  XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)));
		}
		agi = XFS_BUF_TO_AGI(agibp);
		ASSERT(XFS_AGI_MAGIC == be32_to_cpu(agi->agi_magicnum));

		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {

			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
			while (agino != NULLAGINO) {

				/*
				 * Release the agi buffer so that it can
				 * be acquired in the normal course of the
				 * transaction to truncate and free the inode.
				 */
				xfs_buf_relse(agibp);

				ino = XFS_AGINO_TO_INO(mp, agno, agino);
				error = xfs_iget(mp, NULL, ino, 0, 0, &ip, 0);
				ASSERT(error || (ip != NULL));

				if (!error) {
					/*
					 * Get the on disk inode to find the
					 * next inode in the bucket.
					 */
					error = xfs_itobp(mp, NULL, ip, &dip,
							  &ibp, 0);
					ASSERT(error || (dip != NULL));
				}

				if (!error) {
					ASSERT(ip->i_d.di_nlink == 0);

					/* setup for the next pass */
					agino = INT_GET(dip->di_next_unlinked,
							ARCH_CONVERT);
					xfs_buf_relse(ibp);

					/*
					 * Prevent any DMAPI event from
					 * being sent when the
					 * reference on the inode is
					 * dropped.
					 */
					ip->i_d.di_dmevmask = 0;

					/*
					 * If this is a new inode, handle
					 * it specially.  Otherwise,
					 * just drop our reference to the
					 * inode.  If there are no
					 * other references, this will
					 * send the inode to
					 * xfs_inactive() which will
					 * truncate the file and free
					 * the inode.
					 */
					if (ip->i_d.di_mode == 0)
						xfs_iput_new(ip, 0);
					else
						VN_RELE(XFS_ITOV(ip));
				} else {
					/*
					 * We can't read in the inode
					 * this bucket points to, or
					 * this inode is messed up.  Just
					 * ditch this bucket of inodes.  We
					 * will lose some inodes and space,
					 * but at least we won't hang.  Call
					 * xlog_recover_clear_agi_bucket()
					 * to perform a transaction to clear
					 * the inode pointer in the bucket.
					 */
					xlog_recover_clear_agi_bucket(mp, agno,
								      bucket);
					agino = NULLAGINO;
				}

				/*
				 * Reacquire the agibuffer and continue around
				 * the loop.
				 */
				agibp = xfs_buf_read(mp->m_ddev_targp,
						XFS_AG_DADDR(mp, agno,
							XFS_AGI_DADDR(mp)),
						XFS_FSS_TO_BB(mp, 1), 0);
				if (XFS_BUF_ISERROR(agibp)) {
					xfs_ioerror_alert(
				"xlog_recover_process_iunlinks(#2)",
						log->l_mp, agibp,
						XFS_AG_DADDR(mp, agno,
							XFS_AGI_DADDR(mp)));
				}
				agi = XFS_BUF_TO_AGI(agibp);
				ASSERT(XFS_AGI_MAGIC == be32_to_cpu(
					agi->agi_magicnum));
			}
		}

		/*
		 * Release the buffer for the current agi so we can
		 * go on to the next one.
		 */
		xfs_buf_relse(agibp);
	}

	mp->m_dmevmask = mp_dmevmask;
}


#ifdef DEBUG
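/*
 * Compute a simple XOR checksum over the iclog data area and stash
 * it in the record header's h_chksum field (DEBUG builds only).
 */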
STATIC void
xlog_pack_data_checksum(
	xlog_t		*log,
	xlog_in_core_t	*iclog,
	int		size)
{
	int		i;
	uint		*up;
	uint		chksum = 0;

	up = (uint *)iclog->ic_datap;
	/* divide length by 4 to get # words */
	for (i = 0; i < (size >> 2); i++) {
		chksum ^= INT_GET(*up, ARCH_CONVERT);
		up++;
	}
	INT_SET(iclog->ic_header.h_chksum, ARCH_CONVERT, chksum);
}
#else
#define xlog_pack_data_checksum(log, iclog, size)
#endif
/*
 * Stamp cycle number in every block
 */
void
xlog_pack_data(
	xlog_t			*log,
	xlog_in_core_t		*iclog,
	int			roundoff)
{
	int			i, j, k;
	int			size = iclog->ic_offset + roundoff;
	uint			cycle_lsn;
	xfs_caddr_t		dp;
	xlog_in_core_2_t	*xhdr;

	xlog_pack_data_checksum(log, iclog, size);

	cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);

	dp = iclog->ic_datap;
	for (i = 0; i < BTOBB(size) &&
		i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
		iclog->ic_header.h_cycle_data[i] = *(uint *)dp;
		*(uint *)dp = cycle_lsn;
		dp += BBSIZE;
	}

	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
		xhdr = (xlog_in_core_2_t *)&iclog->ic_header;
		for ( ; i < BTOBB(size); i++) {
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			xhdr[j].hic_xheader.xh_cycle_data[k] = *(uint *)dp;
			*(uint *)dp = cycle_lsn;
			dp += BBSIZE;
		}

		for (i = 1; i < log->l_iclog_heads; i++) {
			xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
		}
	}
}

#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
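/*
 * Recompute the XOR checksum of a log record body and compare it with
 * the value recorded in the header, warning on a mismatch.
 */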
STATIC void
xlog_unpack_data_checksum(
	xlog_rec_header_t	*rhead,
	xfs_caddr_t		dp,
	xlog_t			*log)
{
	uint			*up = (uint *)dp;
	uint			chksum = 0;
	int			i;

	/* divide length by 4 to get # words */
	for (i=0; i < INT_GET(rhead->h_len, ARCH_CONVERT) >> 2; i++) {
		chksum ^= INT_GET(*up, ARCH_CONVERT);
		up++;
	}
	if (chksum != INT_GET(rhead->h_chksum, ARCH_CONVERT)) {
		if (rhead->h_chksum ||
		    ((log->l_flags & XLOG_CHKSUM_MISMATCH) == 0)) {
			cmn_err(CE_DEBUG,
			"XFS: LogR chksum mismatch: was (0x%x) is (0x%x)",
				INT_GET(rhead->h_chksum, ARCH_CONVERT), chksum);
			cmn_err(CE_DEBUG,
"XFS: Disregard message if filesystem was created with non-DEBUG kernel");
			if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
				cmn_err(CE_DEBUG,
					"XFS: LogR this is a LogV2 filesystem");
			}
			log->l_flags |= XLOG_CHKSUM_MISMATCH;
		}
	}
}
#else
#define xlog_unpack_data_checksum(rhead, dp, log)
#endif
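/*
 * Put back the original first word of every block in the record data,
 * which xlog_pack_data() replaced with the cycle number on the write
 * side.  The saved words live in the record header (and in the
 * extended headers for v2 logs).
 */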
STATIC void
xlog_unpack_data(
	xlog_rec_header_t	*rhead,
	xfs_caddr_t		dp,
	xlog_t			*log)
{
	int			i, j, k;
	xlog_in_core_2_t	*xhdr;

	for (i = 0; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)) &&
		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
		*(uint *)dp = *(uint *)&rhead->h_cycle_data[i];
		dp += BBSIZE;
	}

	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
		xhdr = (xlog_in_core_2_t *)rhead;
		for ( ; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); i++) {
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			*(uint *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
			dp += BBSIZE;
		}
	}

	xlog_unpack_data_checksum(rhead, dp, log);
}
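/*
 * Sanity check a log record header before trusting its contents:
 * the magic number, version, body length and block number must all
 * be plausible.
 */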
STATIC int
xlog_valid_rec_header(
	xlog_t			*log,
	xlog_rec_header_t	*rhead,
	xfs_daddr_t		blkno)
{
	int			hlen;

	if (unlikely(
	    (INT_GET(rhead->h_magicno, ARCH_CONVERT) !=
			XLOG_HEADER_MAGIC_NUM))) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
				XFS_ERRLEVEL_LOW, log->l_mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	if (unlikely(
	    (!rhead->h_version ||
	    (INT_GET(rhead->h_version, ARCH_CONVERT) &
			(~XLOG_VERSION_OKBITS)) != 0))) {
		xlog_warn("XFS: %s: unrecognised log version (%d).",
			__FUNCTION__, INT_GET(rhead->h_version, ARCH_CONVERT));
		return XFS_ERROR(EIO);
	}

	/* LR body must have data or it wouldn't have been written */
	hlen = INT_GET(rhead->h_len, ARCH_CONVERT);
	if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
				XFS_ERRLEVEL_LOW, log->l_mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
				XFS_ERRLEVEL_LOW, log->l_mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}
/*
 * Read the log from tail to head and process the log records found.
 * Handle the two cases where the tail and head are in the same cycle
 * and where the active portion of the log wraps around the end of
 * the physical log separately.  The pass parameter is passed through
 * to the routines called to process the data and is not looked at
 * here.
 */
STATIC int
xlog_do_recovery_pass(
	xlog_t			*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			pass)
{
	xlog_rec_header_t	*rhead;
	xfs_daddr_t		blk_no;
	xfs_caddr_t		bufaddr, offset;
	xfs_buf_t		*hbp, *dbp;
	int			error = 0, h_size;
	int			bblks, split_bblks;
	int			hblks, split_hblks, wrapped_hblks;
	xlog_recover_t		*rhash[XLOG_RHASH_SIZE];

	ASSERT(head_blk != tail_blk);

	/*
	 * Read the header of the tail block and get the iclog buffer size from
	 * h_size.  Use this to tell how many sectors make up the log header.
	 */
	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
		/*
		 * When using variable length iclogs, read first sector of
		 * iclog header and extract the header size from it.  Get a
		 * new hbp that is the correct size.
		 */
		hbp = xlog_get_bp(log, 1);
		if (!hbp)
			return ENOMEM;
		if ((error = xlog_bread(log, tail_blk, 1, hbp)))
			goto bread_err1;
		offset = xlog_align(log, tail_blk, 1, hbp);
		rhead = (xlog_rec_header_t *)offset;
		error = xlog_valid_rec_header(log, rhead, tail_blk);
		if (error)
			goto bread_err1;
		h_size = INT_GET(rhead->h_size, ARCH_CONVERT);
		if ((INT_GET(rhead->h_version, ARCH_CONVERT)
				& XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
			xlog_put_bp(hbp);
			hbp = xlog_get_bp(log, hblks);
		} else {
			hblks = 1;
		}
	} else {
		ASSERT(log->l_sectbb_log == 0);
		hblks = 1;
		hbp = xlog_get_bp(log, 1);
		h_size = XLOG_BIG_RECORD_BSIZE;
	}

	if (!hbp)
		return ENOMEM;
	dbp = xlog_get_bp(log, BTOBB(h_size));
	if (!dbp) {
		xlog_put_bp(hbp);
		return ENOMEM;
	}

	memset(rhash, 0, sizeof(rhash));
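	/*
	 * Simple case first: the active region of the log does not wrap
	 * around the physical end, so walk straight from tail to head,
	 * reading each record header and its data in turn.
	 */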
	if (tail_blk <= head_blk) {
		for (blk_no = tail_blk; blk_no < head_blk; ) {
			if ((error = xlog_bread(log, blk_no, hblks, hbp)))
				goto bread_err2;
			offset = xlog_align(log, blk_no, hblks, hbp);
			rhead = (xlog_rec_header_t *)offset;
			error = xlog_valid_rec_header(log, rhead, blk_no);
			if (error)
				goto bread_err2;

			/* blocks in data section */
			bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT));
			error = xlog_bread(log, blk_no + hblks, bblks, dbp);
			if (error)
				goto bread_err2;
			offset = xlog_align(log, blk_no + hblks, bblks, dbp);
			xlog_unpack_data(rhead, offset, log);
			if ((error = xlog_recover_process_data(log,
						rhash, rhead, offset, pass)))
				goto bread_err2;
			blk_no += bblks + hblks;
		}
	} else {
		/*
		 * Perform recovery around the end of the physical log.
		 * When the head is not on the same cycle number as the tail,
		 * we can't do a sequential recovery as above.
		 */
		blk_no = tail_blk;
		while (blk_no < log->l_logBBsize) {
			/*
			 * Check for header wrapping around physical end-of-log
			 */
			offset = NULL;
			split_hblks = 0;
			wrapped_hblks = 0;
			if (blk_no + hblks <= log->l_logBBsize) {
				/* Read header in one read */
				error = xlog_bread(log, blk_no, hblks, hbp);
				if (error)
					goto bread_err2;
				offset = xlog_align(log, blk_no, hblks, hbp);
			} else {
				/* This LR is split across physical log end */
				if (blk_no != log->l_logBBsize) {
					/* some data before physical log end */
					ASSERT(blk_no <= INT_MAX);
					split_hblks = log->l_logBBsize - (int)blk_no;
					ASSERT(split_hblks > 0);
					if ((error = xlog_bread(log, blk_no,
							split_hblks, hbp)))
						goto bread_err2;
					offset = xlog_align(log, blk_no,
							split_hblks, hbp);
				}
				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header end)
				 *   - order is important.
				 */
				bufaddr = XFS_BUF_PTR(hbp);
				XFS_BUF_SET_PTR(hbp,
						bufaddr + BBTOB(split_hblks),
						BBTOB(hblks - split_hblks));
				wrapped_hblks = hblks - split_hblks;
				error = xlog_bread(log, 0, wrapped_hblks, hbp);
				if (error)
					goto bread_err2;
				XFS_BUF_SET_PTR(hbp, bufaddr, BBTOB(hblks));
				if (!offset)
					offset = xlog_align(log, 0,
							wrapped_hblks, hbp);
			}
			rhead = (xlog_rec_header_t *)offset;
			error = xlog_valid_rec_header(log, rhead,
						split_hblks ? blk_no : 0);
			if (error)
				goto bread_err2;

			bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT));
			blk_no += hblks;

			/* Read in data for log record */
			if (blk_no + bblks <= log->l_logBBsize) {
				error = xlog_bread(log, blk_no, bblks, dbp);
				if (error)
					goto bread_err2;
				offset = xlog_align(log, blk_no, bblks, dbp);
			} else {
				/* This log record is split across the
				 * physical end of log */
				offset = NULL;
				split_bblks = 0;
				if (blk_no != log->l_logBBsize) {
					/* some data is before the physical
					 * end of log */
					ASSERT(!wrapped_hblks);
					ASSERT(blk_no <= INT_MAX);
					split_bblks =
						log->l_logBBsize - (int)blk_no;
					ASSERT(split_bblks > 0);
					if ((error = xlog_bread(log, blk_no,
							split_bblks, dbp)))
						goto bread_err2;
					offset = xlog_align(log, blk_no,
							split_bblks, dbp);
				}
				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header end)
				 *   - order is important.
				 */
				bufaddr = XFS_BUF_PTR(dbp);
				XFS_BUF_SET_PTR(dbp,
						bufaddr + BBTOB(split_bblks),
						BBTOB(bblks - split_bblks));
				if ((error = xlog_bread(log, wrapped_hblks,
						bblks - split_bblks, dbp)))
					goto bread_err2;
				XFS_BUF_SET_PTR(dbp, bufaddr, h_size);
				if (!offset)
					offset = xlog_align(log, wrapped_hblks,
						bblks - split_bblks, dbp);
			}
			xlog_unpack_data(rhead, offset, log);
			if ((error = xlog_recover_process_data(log, rhash,
							rhead, offset, pass)))
				goto bread_err2;
			blk_no += bblks;
		}

		ASSERT(blk_no >= log->l_logBBsize);
		blk_no -= log->l_logBBsize;

		/* read first part of physical log */
		while (blk_no < head_blk) {
			if ((error = xlog_bread(log, blk_no, hblks, hbp)))
				goto bread_err2;
			offset = xlog_align(log, blk_no, hblks, hbp);
			rhead = (xlog_rec_header_t *)offset;
			error = xlog_valid_rec_header(log, rhead, blk_no);
			if (error)
				goto bread_err2;
			bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT));
			if ((error = xlog_bread(log, blk_no+hblks, bblks, dbp)))
				goto bread_err2;
			offset = xlog_align(log, blk_no+hblks, bblks, dbp);
			xlog_unpack_data(rhead, offset, log);
			if ((error = xlog_recover_process_data(log, rhash,
							rhead, offset, pass)))
				goto bread_err2;
			blk_no += bblks + hblks;
		}
	}

 bread_err2:
	xlog_put_bp(dbp);
 bread_err1:
	xlog_put_bp(hbp);
	return error;
}
/*
 * Do the recovery of the log.  We actually do this in two phases.
 * The two passes are necessary in order to implement the function
 * of cancelling a record written into the log.  The first pass
 * determines those things which have been cancelled, and the
 * second pass replays log items normally except for those which
 * have been cancelled.  The handling of the replay and cancellations
 * takes place in the log item type specific routines.
 *
 * The table of items which have cancel records in the log is allocated
 * and freed at this level, since only here do we know when all of
 * the log recovery has been completed.
 */
STATIC int
xlog_do_log_recovery(
	xlog_t		*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	int		error;

	ASSERT(head_blk != tail_blk);

	/*
	 * First do a pass to find all of the cancelled buf log items.
	 * Store them in the buf_cancel_table for use in the second pass.
	 */
	log->l_buf_cancel_table =
		(xfs_buf_cancel_t **)kmem_zalloc(XLOG_BC_TABLE_SIZE *
						 sizeof(xfs_buf_cancel_t*),
						 KM_SLEEP);
	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
				      XLOG_RECOVER_PASS1);
	if (error != 0) {
		kmem_free(log->l_buf_cancel_table,
			  XLOG_BC_TABLE_SIZE * sizeof(xfs_buf_cancel_t*));
		log->l_buf_cancel_table = NULL;
		return error;
	}
	/*
	 * Then do a second pass to actually recover the items in the log.
	 * When it is complete free the table of buf cancel items.
	 */
	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
				      XLOG_RECOVER_PASS2);
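	/*
	 * By the end of pass 2 every cancel record should have been
	 * consumed, so the table should be empty.
	 */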
#ifdef DEBUG
	{
		int	i;

		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
			ASSERT(log->l_buf_cancel_table[i] == NULL);
	}
#endif	/* DEBUG */

	kmem_free(log->l_buf_cancel_table,
		  XLOG_BC_TABLE_SIZE * sizeof(xfs_buf_cancel_t*));
	log->l_buf_cancel_table = NULL;

	return error;
}
/*
 * Do the actual recovery
 */
STATIC int
xlog_do_recover(
	xlog_t		*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	int		error;
	xfs_buf_t	*bp;
	xfs_sb_t	*sbp;

	/*
	 * First replay the images in the log.
	 */
	error = xlog_do_log_recovery(log, head_blk, tail_blk);
	if (error) {
		return error;
	}

	XFS_bflush(log->l_mp->m_ddev_targp);

	/*
	 * If IO errors happened during recovery, bail out.
	 */
	if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
		return (EIO);
	}
	/*
	 * We now update the tail_lsn since much of the recovery has completed
	 * and there may be space available to use.  If there were no extent
	 * frees or iunlinks, we can free up the entire log and set the
	 * tail_lsn to be the last_sync_lsn.  This was set in xlog_find_tail
	 * to be the lsn of the last known good LR on disk.  If there are
	 * extent frees or iunlinks they will have some entries in the AIL;
	 * so we look at the AIL to determine how to set the tail_lsn.
	 */
	xlog_assign_tail_lsn(log->l_mp);

	/*
	 * Now that we've finished replaying all buffer and inode
	 * updates, re-read in the superblock.
	 */
	bp = xfs_getsb(log->l_mp, 0);
	XFS_BUF_UNDONE(bp);
	XFS_BUF_READ(bp);
	xfsbdstrat(log->l_mp, bp);
	if ((error = xfs_iowait(bp))) {
		xfs_ioerror_alert("xlog_do_recover",
				  log->l_mp, bp, XFS_BUF_ADDR(bp));
		ASSERT(0);
		xfs_buf_relse(bp);
		return error;
	}

	/* Convert superblock from on-disk format */
	sbp = &log->l_mp->m_sb;
	xfs_xlatesb(XFS_BUF_TO_SBP(bp), sbp, 1, XFS_SB_ALL_BITS);
	ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
	ASSERT(XFS_SB_GOOD_VERSION(sbp));
	xfs_buf_relse(bp);

	xlog_recover_check_summary(log);

	/* Normal transactions can now occur */
	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
	return 0;
}
/*
 * Perform recovery and re-initialize some log variables in xlog_find_tail.
 *
 * Return error or zero.
 */
int
xlog_recover(
	xlog_t		*log)
{
	xfs_daddr_t	head_blk, tail_blk;
	int		error;

	/* find the tail of the log */
	if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
		return error;

	if (tail_blk != head_blk) {
		/* There used to be a comment here:
		 *
		 * disallow recovery on read-only mounts.  note -- mount
		 * checks for ENOSPC and turns it into an intelligent
		 * error message.
		 * ...but this is no longer true.  Now, unless you specify
		 * NORECOVERY (in which case this function would never be
		 * called), we just go ahead and recover.  We do this all
		 * under the vfs layer, so we can get away with it unless
		 * the device itself is read-only, in which case we fail.
		 */
		if ((error = xfs_dev_is_read_only(log->l_mp,
						  "recovery required"))) {
			return error;
		}

		cmn_err(CE_NOTE,
			"Starting XFS recovery on filesystem: %s (logdev: %s)",
			log->l_mp->m_fsname, log->l_mp->m_logname ?
			log->l_mp->m_logname : "internal");

		error = xlog_do_recover(log, head_blk, tail_blk);
		log->l_flags |= XLOG_RECOVERY_NEEDED;
	}
	return error;
}
/*
 * In the first part of recovery we replay inodes and buffers and build
 * up the list of extent free items which need to be processed.  Here
 * we process the extent free items and clean up the on disk unlinked
 * inode lists.  This is separated from the first part of recovery so
 * that the root and real-time bitmap inodes can be read in from disk in
 * between the two stages.  This is necessary so that we can free space
 * in the real-time portion of the file system.
 */
int
xlog_recover_finish(
	xlog_t		*log,
	int		mfsi_flags)
{
	/*
	 * Now we're ready to do the transactions needed for the
	 * rest of recovery.  Start with completing all the extent
	 * free intent records and then process the unlinked inode
	 * lists.  At this point, we essentially run in normal mode
	 * except that we're still performing recovery actions
	 * rather than accepting new requests.
	 */
	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
		xlog_recover_process_efis(log);
		/*
		 * Sync the log to get all the EFIs out of the AIL.
		 * This isn't absolutely necessary, but it helps in
		 * case the unlink transactions would have problems
		 * pushing the EFIs out of the way.
		 */
		xfs_log_force(log->l_mp, (xfs_lsn_t)0,
			      (XFS_LOG_FORCE | XFS_LOG_SYNC));

		if ( (mfsi_flags & XFS_MFSI_NOUNLINK) == 0 ) {
			xlog_recover_process_iunlinks(log);
		}

		xlog_recover_check_summary(log);

		cmn_err(CE_NOTE,
			"Ending XFS recovery on filesystem: %s (logdev: %s)",
			log->l_mp->m_fsname, log->l_mp->m_logname ?
			log->l_mp->m_logname : "internal");
		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
	} else {
		cmn_err(CE_DEBUG,
			"!Ending clean XFS mount for filesystem: %s",
			log->l_mp->m_fsname);
	}
	return 0;
}
#if defined(DEBUG)
/*
 * Read all of the agf and agi counters and check that they
 * are consistent with the superblock counters.
 */
void
xlog_recover_check_summary(
	xlog_t		*log)
{
	xfs_mount_t	*mp;
	xfs_agf_t	*agfp;
	xfs_agi_t	*agip;
	xfs_buf_t	*agfbp;
	xfs_buf_t	*agibp;
	xfs_daddr_t	agfdaddr;
	xfs_daddr_t	agidaddr;
	xfs_buf_t	*sbbp;
#ifdef XFS_LOUD_RECOVERY
	xfs_sb_t	*sbp;
#endif
	xfs_agnumber_t	agno;
	__uint64_t	freeblks;
	__uint64_t	itotal;
	__uint64_t	ifree;

	mp = log->l_mp;

	freeblks = 0LL;
	itotal = 0LL;
	ifree = 0LL;
	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		agfdaddr = XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp));
		agfbp = xfs_buf_read(mp->m_ddev_targp, agfdaddr,
				XFS_FSS_TO_BB(mp, 1), 0);
		if (XFS_BUF_ISERROR(agfbp)) {
			xfs_ioerror_alert("xlog_recover_check_summary(agf)",
					  mp, agfbp, agfdaddr);
		}
		agfp = XFS_BUF_TO_AGF(agfbp);
		ASSERT(XFS_AGF_MAGIC == be32_to_cpu(agfp->agf_magicnum));
		ASSERT(XFS_AGF_GOOD_VERSION(be32_to_cpu(agfp->agf_versionnum)));
		ASSERT(be32_to_cpu(agfp->agf_seqno) == agno);

		freeblks += be32_to_cpu(agfp->agf_freeblks) +
			    be32_to_cpu(agfp->agf_flcount);
		xfs_buf_relse(agfbp);

		agidaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
		agibp = xfs_buf_read(mp->m_ddev_targp, agidaddr,
				XFS_FSS_TO_BB(mp, 1), 0);
		if (XFS_BUF_ISERROR(agibp)) {
			xfs_ioerror_alert("xlog_recover_check_summary(agi)",
					  mp, agibp, agidaddr);
		}
		agip = XFS_BUF_TO_AGI(agibp);
		ASSERT(XFS_AGI_MAGIC == be32_to_cpu(agip->agi_magicnum));
		ASSERT(XFS_AGI_GOOD_VERSION(be32_to_cpu(agip->agi_versionnum)));
		ASSERT(be32_to_cpu(agip->agi_seqno) == agno);

		itotal += be32_to_cpu(agip->agi_count);
		ifree += be32_to_cpu(agip->agi_freecount);
		xfs_buf_relse(agibp);
	}
	sbbp = xfs_getsb(mp, 0);
#ifdef XFS_LOUD_RECOVERY
	sbp = &mp->m_sb;
	xfs_xlatesb(XFS_BUF_TO_SBP(sbbp), sbp, 1, XFS_SB_ALL_BITS);
	cmn_err(CE_NOTE,
		"xlog_recover_check_summary: sb_icount %Lu itotal %Lu",
		sbp->sb_icount, itotal);
	cmn_err(CE_NOTE,
		"xlog_recover_check_summary: sb_ifree %Lu ifree %Lu",
		sbp->sb_ifree, ifree);
	cmn_err(CE_NOTE,
		"xlog_recover_check_summary: sb_fdblocks %Lu freeblks %Lu",
		sbp->sb_fdblocks, freeblks);
#if 0
	/*
	 * This is turned off until I account for the allocation
	 * btree blocks which live in free space.
	 */
	ASSERT(sbp->sb_icount == itotal);
	ASSERT(sbp->sb_ifree == ifree);
	ASSERT(sbp->sb_fdblocks == freeblks);
#endif
#endif

	xfs_buf_relse(sbbp);
}
#endif /* DEBUG */