xfs_log_recover.c

/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
#include "xfs_log_recover.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_icreate_item.h"

/* Need all the magic numbers and buffer ops structures from these headers */
#include "xfs_symlink.h"
#include "xfs_da_btree.h"
#include "xfs_dir2_format.h"
#include "xfs_dir2.h"
#include "xfs_attr_leaf.h"
#include "xfs_attr_remote.h"

#define BLK_AVG(blk1, blk2)     ((blk1+blk2) >> 1)

STATIC int
xlog_find_zeroed(
        struct xlog     *,
        xfs_daddr_t     *);
STATIC int
xlog_clear_stale_blocks(
        struct xlog     *,
        xfs_lsn_t);
#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
        struct xlog *);
#else
#define xlog_recover_check_summary(log)
#endif

/*
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
 */
struct xfs_buf_cancel {
        xfs_daddr_t             bc_blkno;
        uint                    bc_len;
        int                     bc_refcount;
        struct list_head        bc_list;
};

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify the given count of basic blocks is a valid number of blocks
 * to specify for an operation involving the given XFS log buffer.
 * Returns nonzero if the count is valid, 0 otherwise.
 */
static inline int
xlog_buf_bbcount_valid(
        struct xlog     *log,
        int             bbcount)
{
        return bbcount > 0 && bbcount <= log->l_logBBsize;
}

/*
 * Allocate a buffer to hold log data.  The buffer needs to be able
 * to map to a range of nbblks basic blocks at any valid (basic
 * block) offset within the log.
 */
STATIC xfs_buf_t *
xlog_get_bp(
        struct xlog     *log,
        int             nbblks)
{
        struct xfs_buf  *bp;

        if (!xlog_buf_bbcount_valid(log, nbblks)) {
                xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
                        nbblks);
                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
                return NULL;
        }

        /*
         * We do log I/O in units of log sectors (a power-of-2
         * multiple of the basic block size), so we round up the
         * requested size to accommodate the basic blocks required
         * for complete log sectors.
         *
         * In addition, the buffer may be used for a non-sector-
         * aligned block offset, in which case an I/O of the
         * requested size could extend beyond the end of the
         * buffer.  If the requested size is only 1 basic block it
         * will never straddle a sector boundary, so this won't be
         * an issue.  Nor will this be a problem if the log I/O is
         * done in basic blocks (sector size 1).  But otherwise we
         * extend the buffer by one extra log sector to ensure
         * there's space to accommodate this possibility.
         */
        if (nbblks > 1 && log->l_sectBBsize > 1)
                nbblks += log->l_sectBBsize;
        nbblks = round_up(nbblks, log->l_sectBBsize);

        bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
        if (bp)
                xfs_buf_unlock(bp);
        return bp;
}
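
/*
 * Worked example (illustrative, not part of the original source): with a
 * log sector size of 8 basic blocks (l_sectBBsize == 8, i.e. 4k sectors
 * made of 512-byte basic blocks), a request for nbblks == 5 is first
 * padded to 13 to cover a possible non-sector-aligned start, then rounded
 * up to 16, so the allocation spans two complete log sectors.
 */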

STATIC void
xlog_put_bp(
        xfs_buf_t       *bp)
{
        xfs_buf_free(bp);
}

/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */
STATIC xfs_caddr_t
xlog_align(
        struct xlog     *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        struct xfs_buf  *bp)
{
        xfs_daddr_t     offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);

        ASSERT(offset + nbblks <= bp->b_length);
        return bp->b_addr + BBTOB(offset);
}
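
/*
 * Illustrative example (added for clarity, not in the original source):
 * with l_sectBBsize == 8, a read of blk_no == 13 lands in a buffer whose
 * first byte corresponds to the sector-aligned block 8, so
 * offset == 13 & 7 == 5 and the requested data begins BBTOB(5) ==
 * 5 * 512 bytes into bp->b_addr.
 */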

/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
STATIC int
xlog_bread_noalign(
        struct xlog     *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        struct xfs_buf  *bp)
{
        int             error;

        if (!xlog_buf_bbcount_valid(log, nbblks)) {
                xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
                        nbblks);
                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
                return EFSCORRUPTED;
        }

        blk_no = round_down(blk_no, log->l_sectBBsize);
        nbblks = round_up(nbblks, log->l_sectBBsize);

        ASSERT(nbblks > 0);
        ASSERT(nbblks <= bp->b_length);

        XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
        XFS_BUF_READ(bp);
        bp->b_io_length = nbblks;
        bp->b_error = 0;

        xfsbdstrat(log->l_mp, bp);
        error = xfs_buf_iowait(bp);
        if (error)
                xfs_buf_ioerror_alert(bp, __func__);
        return error;
}

STATIC int
xlog_bread(
        struct xlog     *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        struct xfs_buf  *bp,
        xfs_caddr_t     *offset)
{
        int             error;

        error = xlog_bread_noalign(log, blk_no, nbblks, bp);
        if (error)
                return error;

        *offset = xlog_align(log, blk_no, nbblks, bp);
        return 0;
}

/*
 * Read at an offset into the buffer.  Returns with the buffer in its original
 * state regardless of the result of the read.
 */
STATIC int
xlog_bread_offset(
        struct xlog     *log,
        xfs_daddr_t     blk_no,         /* block to read from */
        int             nbblks,         /* blocks to read */
        struct xfs_buf  *bp,
        xfs_caddr_t     offset)
{
        xfs_caddr_t     orig_offset = bp->b_addr;
        int             orig_len = BBTOB(bp->b_length);
        int             error, error2;

        error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
        if (error)
                return error;

        error = xlog_bread_noalign(log, blk_no, nbblks, bp);

        /* must reset buffer pointer even on error */
        error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
        if (error)
                return error;
        return error2;
}

/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
        struct xlog     *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        struct xfs_buf  *bp)
{
        int             error;

        if (!xlog_buf_bbcount_valid(log, nbblks)) {
                xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
                        nbblks);
                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
                return EFSCORRUPTED;
        }

        blk_no = round_down(blk_no, log->l_sectBBsize);
        nbblks = round_up(nbblks, log->l_sectBBsize);

        ASSERT(nbblks > 0);
        ASSERT(nbblks <= bp->b_length);

        XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
        XFS_BUF_ZEROFLAGS(bp);
        xfs_buf_hold(bp);
        xfs_buf_lock(bp);
        bp->b_io_length = nbblks;
        bp->b_error = 0;

        error = xfs_bwrite(bp);
        if (error)
                xfs_buf_ioerror_alert(bp, __func__);
        xfs_buf_relse(bp);
        return error;
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        xfs_debug(mp, "%s: SB : uuid = %pU, fmt = %d\n",
                __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
        xfs_debug(mp, " log : uuid = %pU, fmt = %d\n",
                &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

        /*
         * IRIX doesn't write the h_fmt field and leaves it zeroed
         * (XLOG_FMT_UNKNOWN).  This stops us from trying to recover
         * a dirty log created in IRIX.
         */
        if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
                xfs_warn(mp,
        "dirty log written in incompatible format - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_recover(1)",
                                XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
                xfs_warn(mp,
        "dirty log entry has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_recover(2)",
                                XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        }
        return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

        if (uuid_is_nil(&head->h_fs_uuid)) {
                /*
                 * IRIX doesn't write the h_fs_uuid or h_fmt fields.  If
                 * h_fs_uuid is nil, we assume this log was last mounted
                 * by IRIX and continue.
                 */
                xfs_warn(mp, "nil uuid in log - IRIX style log");
        } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
                xfs_warn(mp, "log has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_mount",
                                XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        }
        return 0;
}

STATIC void
xlog_recover_iodone(
        struct xfs_buf  *bp)
{
        if (bp->b_error) {
                /*
                 * We're not going to bother about retrying
                 * this during recovery.  One strike!
                 */
                xfs_buf_ioerror_alert(bp, __func__);
                xfs_force_shutdown(bp->b_target->bt_mount,
                                        SHUTDOWN_META_IO_ERROR);
        }
        bp->b_iodone = NULL;
        xfs_buf_ioend(bp, 0);
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm cannot be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
        struct xlog     *log,
        struct xfs_buf  *bp,
        xfs_daddr_t     first_blk,
        xfs_daddr_t     *last_blk,
        uint            cycle)
{
        xfs_caddr_t     offset;
        xfs_daddr_t     mid_blk;
        xfs_daddr_t     end_blk;
        uint            mid_cycle;
        int             error;

        end_blk = *last_blk;
        mid_blk = BLK_AVG(first_blk, end_blk);
        while (mid_blk != first_blk && mid_blk != end_blk) {
                error = xlog_bread(log, mid_blk, 1, bp, &offset);
                if (error)
                        return error;
                mid_cycle = xlog_get_cycle(offset);
                if (mid_cycle == cycle)
                        end_blk = mid_blk;      /* last_half_cycle == mid_cycle */
                else
                        first_blk = mid_blk;    /* first_half_cycle == mid_cycle */
                mid_blk = BLK_AVG(first_blk, end_blk);
        }
        ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
               (mid_blk == end_blk && mid_blk-1 == first_blk));

        *last_blk = end_blk;

        return 0;
}
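
/*
 * Worked example (illustrative, not part of the original source): suppose
 * the cycle numbers stamped on blocks 0..7 are 2 2 2 2 2 1 1 1 and we
 * search for cycle == 1 with first_blk == 0 and *last_blk == 7.  The
 * midpoints visited are 3 (cycle 2, raise first_blk), 5 (cycle 1, lower
 * end_blk), then 4 (cycle 2), leaving first_blk == 4 and end_blk == 5,
 * so *last_blk returns 5: the first block holding the requested cycle.
 */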

/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
        struct xlog     *log,
        xfs_daddr_t     start_blk,
        int             nbblks,
        uint            stop_on_cycle_no,
        xfs_daddr_t     *new_blk)
{
        xfs_daddr_t     i, j;
        uint            cycle;
        xfs_buf_t       *bp;
        xfs_daddr_t     bufblks;
        xfs_caddr_t     buf = NULL;
        int             error = 0;

        /*
         * Greedily allocate a buffer big enough to handle the full
         * range of basic blocks we'll be examining.  If that fails,
         * try a smaller size.  We need to be able to read at least
         * a log sector, or we're out of luck.
         */
        bufblks = 1 << ffs(nbblks);
        while (bufblks > log->l_logBBsize)
                bufblks >>= 1;
        while (!(bp = xlog_get_bp(log, bufblks))) {
                bufblks >>= 1;
                if (bufblks < log->l_sectBBsize)
                        return ENOMEM;
        }

        for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
                int     bcount;

                bcount = min(bufblks, (start_blk + nbblks - i));

                error = xlog_bread(log, i, bcount, bp, &buf);
                if (error)
                        goto out;

                for (j = 0; j < bcount; j++) {
                        cycle = xlog_get_cycle(buf);
                        if (cycle == stop_on_cycle_no) {
                                *new_blk = i+j;
                                goto out;
                        }

                        buf += BBSIZE;
                }
        }

        *new_blk = -1;

out:
        xlog_put_bp(bp);
        return error;
}
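
/*
 * Sizing note (added for clarity, not in the original source): in the
 * kernel, ffs(x) returns the 1-based index of the least significant set
 * bit, so "1 << ffs(nbblks)" is twice the lowest set bit of nbblks (e.g.
 * 4096 for nbblks == 2048).  The two while loops then shrink that greedy
 * guess until an allocation succeeds, and the chunked read loop copes
 * with any buffer size of at least one log sector.
 */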

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
        struct xlog             *log,
        xfs_daddr_t             start_blk,
        xfs_daddr_t             *last_blk,
        int                     extra_bblks)
{
        xfs_daddr_t             i;
        xfs_buf_t               *bp;
        xfs_caddr_t             offset = NULL;
        xlog_rec_header_t       *head = NULL;
        int                     error = 0;
        int                     smallmem = 0;
        int                     num_blks = *last_blk - start_blk;
        int                     xhdrs;

        ASSERT(start_blk != 0 || *last_blk != start_blk);

        if (!(bp = xlog_get_bp(log, num_blks))) {
                if (!(bp = xlog_get_bp(log, 1)))
                        return ENOMEM;
                smallmem = 1;
        } else {
                error = xlog_bread(log, start_blk, num_blks, bp, &offset);
                if (error)
                        goto out;
                offset += ((num_blks - 1) << BBSHIFT);
        }

        for (i = (*last_blk) - 1; i >= 0; i--) {
                if (i < start_blk) {
                        /* valid log record not found */
                        xfs_warn(log->l_mp,
                "Log inconsistent (didn't find previous header)");
                        ASSERT(0);
                        error = XFS_ERROR(EIO);
                        goto out;
                }

                if (smallmem) {
                        error = xlog_bread(log, i, 1, bp, &offset);
                        if (error)
                                goto out;
                }

                head = (xlog_rec_header_t *)offset;

                if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
                        break;

                if (!smallmem)
                        offset -= BBSIZE;
        }

        /*
         * We hit the beginning of the physical log & still no header.  Return
         * to caller.  If caller can handle a return of -1, then this routine
         * will be called again for the end of the physical log.
         */
        if (i == -1) {
                error = -1;
                goto out;
        }

        /*
         * We have the final block of the good log (the first block
         * of the log record _before_ the head).  So we check the uuid.
         */
        if ((error = xlog_header_check_mount(log->l_mp, head)))
                goto out;

        /*
         * We may have found a log record header before we expected one.
         * last_blk will be the 1st block # with a given cycle #.  We may end
         * up reading an entire log record.  In this case, we don't want to
         * reset last_blk.  Only when last_blk points in the middle of a log
         * record do we update last_blk.
         */
        if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
                uint    h_size = be32_to_cpu(head->h_size);

                xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
                if (h_size % XLOG_HEADER_CYCLE_SIZE)
                        xhdrs++;
        } else {
                xhdrs = 1;
        }

        if (*last_blk - i + extra_bblks !=
            BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
                *last_blk = i;

out:
        xlog_put_bp(bp);
        return error;
}
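
/*
 * Worked example (illustrative, not part of the original source): for a
 * v2 log with h_size == 64k and XLOG_HEADER_CYCLE_SIZE at its usual 32k,
 * the record needs xhdrs == 2 header blocks, so a record of h_len bytes
 * spans BTOBB(h_len) + 2 basic blocks.  Only when the distance from the
 * found header to *last_blk disagrees with that sum does the routine pull
 * *last_blk back to the header block i.
 */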

/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LRs have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
        struct xlog     *log,
        xfs_daddr_t     *return_head_blk)
{
        xfs_buf_t       *bp;
        xfs_caddr_t     offset;
        xfs_daddr_t     new_blk, first_blk, start_blk, last_blk, head_blk;
        int             num_scan_bblks;
        uint            first_half_cycle, last_half_cycle;
        uint            stop_on_cycle;
        int             error, log_bbnum = log->l_logBBsize;

        /* Is the end of the log device zeroed? */
        if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
                *return_head_blk = first_blk;

                /* Is the whole lot zeroed? */
                if (!first_blk) {
                        /* Linux XFS shouldn't generate totally zeroed logs -
                         * mkfs etc write a dummy unmount record to a fresh
                         * log so we can store the uuid in there
                         */
                        xfs_warn(log->l_mp, "totally zeroed log");
                }

                return 0;
        } else if (error) {
                xfs_warn(log->l_mp, "empty log check failed");
                return error;
        }

        first_blk = 0;                          /* get cycle # of 1st block */
        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;

        error = xlog_bread(log, 0, 1, bp, &offset);
        if (error)
                goto bp_err;

        first_half_cycle = xlog_get_cycle(offset);

        last_blk = head_blk = log_bbnum - 1;    /* get cycle # of last block */
        error = xlog_bread(log, last_blk, 1, bp, &offset);
        if (error)
                goto bp_err;

        last_half_cycle = xlog_get_cycle(offset);
        ASSERT(last_half_cycle != 0);

        /*
         * If the 1st half cycle number is equal to the last half cycle number,
         * then the entire log is stamped with the same cycle number.  In this
         * case, head_blk can't be set to zero (which makes sense).  The below
         * math doesn't work out properly with head_blk equal to zero.  Instead,
         * we set it to log_bbnum which is an invalid block number, but this
         * value makes the math correct.  If head_blk doesn't change through
         * all the tests below, *head_blk is set to zero at the very end rather
         * than log_bbnum.  In a sense, log_bbnum and zero are the same block
         * in a circular file.
         */
        if (first_half_cycle == last_half_cycle) {
                /*
                 * In this case we believe that the entire log should have
                 * cycle number last_half_cycle.  We need to scan backwards
                 * from the end verifying that there are no holes still
                 * containing last_half_cycle - 1.  If we find such a hole,
                 * then the start of that hole will be the new head.  The
                 * simple case looks like
                 *        x | x ... | x - 1 | x
                 * Another case that fits this picture would be
                 *        x | x + 1 | x ... | x
                 * In this case the head really is somewhere at the end of the
                 * log, as one of the latest writes at the beginning was
                 * incomplete.
                 * One more case is
                 *        x | x + 1 | x ... | x - 1 | x
                 * This is really the combination of the above two cases, and
                 * the head has to end up at the start of the x-1 hole at the
                 * end of the log.
                 *
                 * In the 256k log case, we will read from the beginning to the
                 * end of the log and search for cycle numbers equal to x-1.
                 * We don't worry about the x+1 blocks that we encounter,
                 * because we know that they cannot be the head since the log
                 * started with x.
                 */
                head_blk = log_bbnum;
                stop_on_cycle = last_half_cycle - 1;
        } else {
                /*
                 * In this case we want to find the first block with cycle
                 * number matching last_half_cycle.  We expect the log to be
                 * some variation on
                 *        x + 1 ... | x ... | x
                 * The first block with cycle number x (last_half_cycle) will
                 * be where the new head belongs.  First we do a binary search
                 * for the first occurrence of last_half_cycle.  The binary
                 * search may not be totally accurate, so then we scan back
                 * from there looking for occurrences of last_half_cycle before
                 * us.  If that backwards scan wraps around the beginning of
                 * the log, then we look for occurrences of last_half_cycle - 1
                 * at the end of the log.  The cases we're looking for look
                 * like
                 *                               v binary search stopped here
                 *        x + 1 ... | x | x + 1 | x ... | x
                 *                   ^ but we want to locate this spot
                 * or
                 *        <---------> less than scan distance
                 *        x + 1 ... | x ... | x - 1 | x
                 *                           ^ we want to locate this spot
                 */
                stop_on_cycle = last_half_cycle;
                if ((error = xlog_find_cycle_start(log, bp, first_blk,
                                                &head_blk, last_half_cycle)))
                        goto bp_err;
        }

        /*
         * Now validate the answer.  Scan back some number of maximum possible
         * blocks and make sure each one has the expected cycle number.  The
         * maximum is determined by the total possible amount of buffering
         * in the in-core log.  The following number can be made tighter if
         * we actually look at the block size of the filesystem.
         */
        num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
        if (head_blk >= num_scan_bblks) {
                /*
                 * We are guaranteed that the entire check can be performed
                 * in one buffer.
                 */
                start_blk = head_blk - num_scan_bblks;
                if ((error = xlog_find_verify_cycle(log,
                                                start_blk, num_scan_bblks,
                                                stop_on_cycle, &new_blk)))
                        goto bp_err;
                if (new_blk != -1)
                        head_blk = new_blk;
        } else {                /* need to read 2 parts of log */
                /*
                 * We are going to scan backwards in the log in two parts.
                 * First we scan the physical end of the log.  In this part
                 * of the log, we are looking for blocks with cycle number
                 * last_half_cycle - 1.
                 * If we find one, then we know that the log starts there, as
                 * we've found a hole that didn't get written in going around
                 * the end of the physical log.  The simple case for this is
                 *        x + 1 ... | x ... | x - 1 | x
                 *        <---------> less than scan distance
                 * If all of the blocks at the end of the log have cycle number
                 * last_half_cycle, then we check the blocks at the start of
                 * the log looking for occurrences of last_half_cycle.  If we
                 * find one, then our current estimate for the location of the
                 * first occurrence of last_half_cycle is wrong and we move
                 * back to the hole we've found.  This case looks like
                 *        x + 1 ... | x | x + 1 | x ...
                 *                       ^ binary search stopped here
                 * Another case we need to handle that only occurs in 256k
                 * logs is
                 *        x + 1 ... | x ... | x+1 | x ...
                 *                   ^ binary search stops here
                 * In a 256k log, the scan at the end of the log will see the
                 * x + 1 blocks.  We need to skip past those since that is
                 * certainly not the head of the log.  By searching for
                 * last_half_cycle-1 we accomplish that.
                 */
                ASSERT(head_blk <= INT_MAX &&
                        (xfs_daddr_t) num_scan_bblks >= head_blk);
                start_blk = log_bbnum - (num_scan_bblks - head_blk);
                if ((error = xlog_find_verify_cycle(log, start_blk,
                                        num_scan_bblks - (int)head_blk,
                                        (stop_on_cycle - 1), &new_blk)))
                        goto bp_err;
                if (new_blk != -1) {
                        head_blk = new_blk;
                        goto validate_head;
                }

                /*
                 * Scan beginning of log now.  The last part of the physical
                 * log is good.  This scan needs to verify that it doesn't find
                 * the last_half_cycle.
                 */
                start_blk = 0;
                ASSERT(head_blk <= INT_MAX);
                if ((error = xlog_find_verify_cycle(log,
                                        start_blk, (int)head_blk,
                                        stop_on_cycle, &new_blk)))
                        goto bp_err;
                if (new_blk != -1)
                        head_blk = new_blk;
        }

validate_head:
        /*
         * Now we need to make sure head_blk is not pointing to a block in
         * the middle of a log record.
         */
        num_scan_bblks = XLOG_REC_SHIFT(log);
        if (head_blk >= num_scan_bblks) {
                start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

                /* start ptr at last block ptr before head_blk */
                if ((error = xlog_find_verify_log_record(log, start_blk,
                                                        &head_blk, 0)) == -1) {
                        error = XFS_ERROR(EIO);
                        goto bp_err;
                } else if (error)
                        goto bp_err;
        } else {
                start_blk = 0;
                ASSERT(head_blk <= INT_MAX);
                if ((error = xlog_find_verify_log_record(log, start_blk,
                                                        &head_blk, 0)) == -1) {
                        /* We hit the beginning of the log during our search */
                        start_blk = log_bbnum - (num_scan_bblks - head_blk);
                        new_blk = log_bbnum;
                        ASSERT(start_blk <= INT_MAX &&
                                (xfs_daddr_t) log_bbnum-start_blk >= 0);
                        ASSERT(head_blk <= INT_MAX);
                        if ((error = xlog_find_verify_log_record(log,
                                                        start_blk, &new_blk,
                                                        (int)head_blk)) == -1) {
                                error = XFS_ERROR(EIO);
                                goto bp_err;
                        } else if (error)
                                goto bp_err;
                        if (new_blk != log_bbnum)
                                head_blk = new_blk;
                } else if (error)
                        goto bp_err;
        }

        xlog_put_bp(bp);
        if (head_blk == log_bbnum)
                *return_head_blk = 0;
        else
                *return_head_blk = head_blk;
        /*
         * When returning here, we have a good block number.  Bad block
         * means that during a previous crash, we didn't have a clean break
         * from cycle number N to cycle number N-1.  In this case, we need
         * to find the first block with cycle number N-1.
         */
        return 0;

bp_err:
        xlog_put_bp(bp);

        if (error)
                xfs_warn(log->l_mp, "failed to find log head");
        return error;
}
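
/*
 * Usage sketch (illustrative, not part of the original source): recovery
 * wants both ends of the dirty region, e.g.
 *
 *        xfs_daddr_t head_blk, tail_blk;
 *        int error = xlog_find_tail(log, &head_blk, &tail_blk);
 *
 * xlog_find_tail() below calls xlog_find_head() itself, so both values
 * come back from a single call; a nonzero return means the head or tail
 * could not be located.
 */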

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
STATIC int
xlog_find_tail(
        struct xlog             *log,
        xfs_daddr_t             *head_blk,
        xfs_daddr_t             *tail_blk)
{
        xlog_rec_header_t       *rhead;
        xlog_op_header_t        *op_head;
        xfs_caddr_t             offset = NULL;
        xfs_buf_t               *bp;
        int                     error, i, found;
        xfs_daddr_t             umount_data_blk;
        xfs_daddr_t             after_umount_blk;
        xfs_lsn_t               tail_lsn;
        int                     hblks;

        found = 0;

        /*
         * Find previous log record
         */
        if ((error = xlog_find_head(log, head_blk)))
                return error;

        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;
        if (*head_blk == 0) {                           /* special case */
                error = xlog_bread(log, 0, 1, bp, &offset);
                if (error)
                        goto done;

                if (xlog_get_cycle(offset) == 0) {
                        *tail_blk = 0;
                        /* leave all other log inited values alone */
                        goto done;
                }
        }

        /*
         * Search backwards looking for log record header block
         */
        ASSERT(*head_blk < INT_MAX);
        for (i = (int)(*head_blk) - 1; i >= 0; i--) {
                error = xlog_bread(log, i, 1, bp, &offset);
                if (error)
                        goto done;

                if (*(__be32 *)offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
                        found = 1;
                        break;
                }
        }
        /*
         * If we haven't found the log record header block, start looking
         * again from the end of the physical log.  XXXmiken: There should be
         * a check here to make sure we didn't search more than N blocks in
         * the previous code.
         */
        if (!found) {
                for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
                        error = xlog_bread(log, i, 1, bp, &offset);
                        if (error)
                                goto done;

                        if (*(__be32 *)offset ==
                            cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
                                found = 2;
                                break;
                        }
                }
        }
        if (!found) {
                xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
                xlog_put_bp(bp);
                ASSERT(0);
                return XFS_ERROR(EIO);
        }

        /* find blk_no of tail of log */
        rhead = (xlog_rec_header_t *)offset;
        *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

        /*
         * Reset log values according to the state of the log when we
         * crashed.  In the case where head_blk == 0, we bump curr_cycle
         * one because the next write starts a new cycle rather than
         * continuing the cycle of the last good log record.  At this
         * point we have guaranteed that all partial log records have been
         * accounted for.  Therefore, we know that the last good log record
         * written was complete and ended exactly on the end boundary
         * of the physical log.
         */
        log->l_prev_block = i;
        log->l_curr_block = (int)*head_blk;
        log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
        if (found == 2)
                log->l_curr_cycle++;
        atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
        atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
        xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
                                        BBTOB(log->l_curr_block));
        xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
                                        BBTOB(log->l_curr_block));

        /*
         * Look for unmount record.  If we find it, then we know there
         * was a clean unmount.  Since 'i' could be the last block in
         * the physical log, we convert to a log block before comparing
         * to the head_blk.
         *
         * Save the current tail lsn to use to pass to
         * xlog_clear_stale_blocks() below.  We won't want to clear the
         * unmount record if there is one, so we pass the lsn of the
         * unmount record rather than the block after it.
         */
        if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
                int     h_size = be32_to_cpu(rhead->h_size);
                int     h_version = be32_to_cpu(rhead->h_version);

                if ((h_version & XLOG_VERSION_2) &&
                    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
                        hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
                        if (h_size % XLOG_HEADER_CYCLE_SIZE)
                                hblks++;
                } else {
                        hblks = 1;
                }
        } else {
                hblks = 1;
        }
        after_umount_blk = (i + hblks + (int)
                BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
        tail_lsn = atomic64_read(&log->l_tail_lsn);
        if (*head_blk == after_umount_blk &&
            be32_to_cpu(rhead->h_num_logops) == 1) {
                umount_data_blk = (i + hblks) % log->l_logBBsize;
                error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
                if (error)
                        goto done;

                op_head = (xlog_op_header_t *)offset;
                if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
                        /*
                         * Set tail and last sync so that newly written
                         * log records will point recovery to after the
                         * current unmount record.
                         */
                        xlog_assign_atomic_lsn(&log->l_tail_lsn,
                                        log->l_curr_cycle, after_umount_blk);
                        xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
                                        log->l_curr_cycle, after_umount_blk);
                        *tail_blk = after_umount_blk;

                        /*
                         * Note that the unmount was clean. If the unmount
                         * was not clean, we need to know this to rebuild the
                         * superblock counters from the perag headers if we
                         * have a filesystem using non-persistent counters.
                         */
                        log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
                }
        }

        /*
         * Make sure that there are no blocks in front of the head
         * with the same cycle number as the head.  This can happen
         * because we allow multiple outstanding log writes concurrently,
         * and the later writes might make it out before earlier ones.
         *
         * We use the lsn from before modifying it so that we'll never
         * overwrite the unmount record after a clean unmount.
         *
         * Do this only if we are going to recover the filesystem
         *
         * NOTE: This used to say "if (!readonly)"
         * However on Linux, we can & do recover a read-only filesystem.
         * We only skip recovery if NORECOVERY is specified on mount,
         * in which case we would not be here.
         *
         * But... if the -device- itself is readonly, just skip this.
         * We can't recover this device anyway, so it won't matter.
         */
        if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
                error = xlog_clear_stale_blocks(log, tail_lsn);

done:
        xlog_put_bp(bp);

        if (error)
                xfs_warn(log->l_mp, "failed to locate log tail");
        return error;
}
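
/*
 * Worked example (illustrative, not part of the original source): if the
 * last record header sits at block i == 100 with hblks == 1 and
 * h_len == 512 bytes, then after_umount_blk == (100 + 1 + 1) %
 * l_logBBsize == 102.  Only when the head is exactly at 102 and the
 * record holds a single log operation do we read block 101 and test
 * oh_flags for XLOG_UNMOUNT_TRANS to detect a clean unmount.
 */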

/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *        0  => the log is completely written to
 *        -1 => use *blk_no as the first block of the log
 *        >0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
        struct xlog     *log,
        xfs_daddr_t     *blk_no)
{
        xfs_buf_t       *bp;
        xfs_caddr_t     offset;
        uint            first_cycle, last_cycle;
        xfs_daddr_t     new_blk, last_blk, start_blk;
        xfs_daddr_t     num_scan_bblks;
        int             error, log_bbnum = log->l_logBBsize;

        *blk_no = 0;

        /* check totally zeroed log */
        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;
        error = xlog_bread(log, 0, 1, bp, &offset);
        if (error)
                goto bp_err;

        first_cycle = xlog_get_cycle(offset);
        if (first_cycle == 0) {         /* completely zeroed log */
                *blk_no = 0;
                xlog_put_bp(bp);
                return -1;
        }

        /* check partially zeroed log */
        error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
        if (error)
                goto bp_err;

        last_cycle = xlog_get_cycle(offset);
        if (last_cycle != 0) {          /* log completely written to */
                xlog_put_bp(bp);
                return 0;
        } else if (first_cycle != 1) {
                /*
                 * If the cycle of the last block is zero, the cycle of
                 * the first block must be 1.  If it's not, maybe we're
                 * not looking at a log... Bail out.
                 */
                xfs_warn(log->l_mp,
                        "Log inconsistent or not a log (last==0, first!=1)");
                error = XFS_ERROR(EINVAL);
                goto bp_err;
        }

        /* we have a partially zeroed log */
        last_blk = log_bbnum-1;
        if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
                goto bp_err;

        /*
         * Validate the answer.  Because there is no way to guarantee that
         * the entire log is made up of log records which are the same size,
         * we scan over the defined maximum blocks.  At this point, the maximum
         * is not chosen to mean anything special.  XXXmiken
         */
        num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
        ASSERT(num_scan_bblks <= INT_MAX);

        if (last_blk < num_scan_bblks)
                num_scan_bblks = last_blk;
        start_blk = last_blk - num_scan_bblks;

        /*
         * We search for any instances of cycle number 0 that occur before
         * our current estimate of the head.  What we're trying to detect is
         *        1 ... | 0 | 1 | 0...
         *                       ^ binary search ends here
         */
        if ((error = xlog_find_verify_cycle(log, start_blk,
                                        (int)num_scan_bblks, 0, &new_blk)))
                goto bp_err;
        if (new_blk != -1)
                last_blk = new_blk;

        /*
         * Potentially backup over partial log record write.  We don't need
         * to search the end of the log because we know it is zero.
         */
        if ((error = xlog_find_verify_log_record(log, start_blk,
                                &last_blk, 0)) == -1) {
                error = XFS_ERROR(EIO);
                goto bp_err;
        } else if (error)
                goto bp_err;

        *blk_no = last_blk;
bp_err:
        xlog_put_bp(bp);
        if (error)
                return error;
        return -1;
}
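
/*
 * Caller sketch (illustrative, not part of the original source): the
 * three-way return convention is consumed by xlog_find_head() earlier in
 * this file:
 *
 *        if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
 *                *return_head_blk = first_blk;  (head is first zeroed block)
 *                ...
 *        } else if (error)
 *                return error;                  (real I/O or format error)
 *
 * and error == 0 means the log is fully written, so the caller falls
 * through to the cycle search.
 */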

/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	struct xlog		*log,
	xfs_caddr_t		buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}
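
/*
 * Illustrative sketch: an LSN is a 64-bit value packing the cycle number
 * into the high 32 bits and the block number into the low 32 bits, which
 * is what xlog_assign_lsn() produces above and what CYCLE_LSN()/BLOCK_LSN()
 * unpack in xlog_clear_stale_blocks() below.  example_pack_lsn() is a
 * hypothetical stand-in shown only to make the layout explicit.
 */
static inline xfs_lsn_t
example_pack_lsn(
	uint	cycle,
	uint	block)
{
	return ((xfs_lsn_t)cycle << 32) | block;
}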

STATIC int
xlog_write_log_records(
	struct xlog	*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	xfs_caddr_t	offset;
	xfs_buf_t	*bp;
	int		balign, ealign;
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written.  If that fails, try
	 * a smaller size.  We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return ENOMEM;
	}

	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, bp);
		if (error)
			goto out_put_bp;

		j = start_block - balign;
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			offset = bp->b_addr + BBTOB(ealign - start_block);
			error = xlog_bread_offset(log, ealign, sectbb,
							bp, offset);
			if (error)
				break;
		}

		offset = xlog_align(log, start_block, endcount, bp);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, bp);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

out_put_bp:
	xlog_put_bp(bp);
	return error;
}
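
/*
 * Illustrative sketch of the alignment arithmetic used above: log writes
 * must be sector aligned, so a write starting mid-sector first reads the
 * containing sector and then skips 'j' leading blocks that the read filled
 * in.  Assuming sectbb is a power of two (it is a sector size expressed in
 * basic blocks), round_down() reduces to masking.  example_lead_blocks()
 * is hypothetical and only restates the computation of 'j'.
 */
static inline int
example_lead_blocks(
	int	start_block,
	int	sectbb)
{
	int	balign = start_block & ~(sectbb - 1);	/* round_down() */

	return start_block - balign;	/* blocks preserved from the read */
}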

/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail.  We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		tail_distance = tail_block - head_block;
	}

	/*
	 * If the head is right up against the tail, we can't clear
	 * anything.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = MIN(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log.  Just do it
		 * in a single write.  Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *     n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks.  Do it in two separate
		 * I/Os.  The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);

		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *    n ... n ... | n - 1 ...
		 *    ^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}
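
/*
 * Illustrative sketch of the wrap-around split above, with made-up numbers:
 * with l_logBBsize = 1000, head_block = 990 and max_distance = 50, the
 * first write covers blocks 990..999 (10 blocks at cycle n - 1) and the
 * second covers blocks 0..39 (40 blocks at cycle n).  example_split() is
 * a hypothetical helper that just restates the two 'distance' computations.
 */
static inline void
example_split(
	int	log_bbsize,
	int	head_block,
	int	max_distance,
	int	*first_write,
	int	*second_write)
{
	*first_write = log_bbsize - head_block;		/* head to phys end */
	*second_write = max_distance - *first_write;	/* start of the log */
}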

/******************************************************************************
 *
 *		Log recover routines
 *
 ******************************************************************************
 */

STATIC xlog_recover_t *
xlog_recover_find_tid(
	struct hlist_head	*head,
	xlog_tid_t		tid)
{
	xlog_recover_t		*trans;

	hlist_for_each_entry(trans, head, r_list) {
		if (trans->r_log_tid == tid)
			return trans;
	}
	return NULL;
}

STATIC void
xlog_recover_new_tid(
	struct hlist_head	*head,
	xlog_tid_t		tid,
	xfs_lsn_t		lsn)
{
	xlog_recover_t		*trans;

	trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
	trans->r_log_tid = tid;
	trans->r_lsn = lsn;
	INIT_LIST_HEAD(&trans->r_itemq);

	INIT_HLIST_NODE(&trans->r_list);
	hlist_add_head(&trans->r_list, head);
}

STATIC void
xlog_recover_add_item(
	struct list_head	*head)
{
	xlog_recover_item_t	*item;

	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
	INIT_LIST_HEAD(&item->ri_list);
	list_add_tail(&item->ri_list, head);
}

STATIC int
xlog_recover_add_to_cont_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	xfs_caddr_t		dp,
	int			len)
{
	xlog_recover_item_t	*item;
	xfs_caddr_t		ptr, old_ptr;
	int			old_len;

	if (list_empty(&trans->r_itemq)) {
		/* finish copying rest of trans header */
		xlog_recover_add_item(&trans->r_itemq);
		ptr = (xfs_caddr_t) &trans->r_theader +
				sizeof(xfs_trans_header_t) - len;
		memcpy(ptr, dp, len);	/* d, s, l */
		return 0;
	}
	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);

	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
	old_len = item->ri_buf[item->ri_cnt-1].i_len;

	ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
	memcpy(&ptr[old_len], dp, len);	/* d, s, l */
	item->ri_buf[item->ri_cnt-1].i_len += len;
	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
	return 0;
}

/*
 * The next region to add is the start of a new region.  It could be
 * a whole region or it could be the first part of a new region.  Because
 * of this, the assumption here is that the type and size fields of all
 * format structures fit into the first 32 bits of the structure.
 *
 * This works because all regions must be 32 bit aligned.  Therefore, we
 * either have both fields or we have neither field.  In the case we have
 * neither field, the data part of the region is zero length.  We only have
 * a log_op_header and can throw away the header since a new one will appear
 * later.  If we have at least 4 bytes, then we can determine how many regions
 * will appear in the current log item.
 */
STATIC int
xlog_recover_add_to_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	xfs_caddr_t		dp,
	int			len)
{
	xfs_inode_log_format_t	*in_f;			/* any will do */
	xlog_recover_item_t	*item;
	xfs_caddr_t		ptr;

	if (!len)
		return 0;
	if (list_empty(&trans->r_itemq)) {
		/* we need to catch log corruptions here */
		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
			xfs_warn(log->l_mp, "%s: bad header magic number",
				__func__);
			ASSERT(0);
			return XFS_ERROR(EIO);
		}
		if (len == sizeof(xfs_trans_header_t))
			xlog_recover_add_item(&trans->r_itemq);
		memcpy(&trans->r_theader, dp, len);	/* d, s, l */
		return 0;
	}

	ptr = kmem_alloc(len, KM_SLEEP);
	memcpy(ptr, dp, len);
	in_f = (xfs_inode_log_format_t *)ptr;

	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
	if (item->ri_total != 0 &&
	    item->ri_total == item->ri_cnt) {
		/* tail item is in use, get a new one */
		xlog_recover_add_item(&trans->r_itemq);
		item = list_entry(trans->r_itemq.prev,
					xlog_recover_item_t, ri_list);
	}

	if (item->ri_total == 0) {		/* first region to be added */
		if (in_f->ilf_size == 0 ||
		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
			xfs_warn(log->l_mp,
		"bad number of regions (%d) in inode log format",
				  in_f->ilf_size);
			ASSERT(0);
			return XFS_ERROR(EIO);
		}

		item->ri_total = in_f->ilf_size;
		item->ri_buf =
			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
				    KM_SLEEP);
	}
	ASSERT(item->ri_total > item->ri_cnt);
	/* Description region is ri_buf[0] */
	item->ri_buf[item->ri_cnt].i_addr = ptr;
	item->ri_buf[item->ri_cnt].i_len = len;
	item->ri_cnt++;
	trace_xfs_log_recover_item_add(log, trans, item, 0);
	return 0;
}

/*
 * Sort the log items in the transaction.
 *
 * The ordering constraints are defined by the inode allocation and unlink
 * behaviour. The rules are:
 *
 *	1. Every item is only logged once in a given transaction. Hence it
 *	   represents the last logged state of the item. Hence ordering is
 *	   dependent on the order in which operations need to be performed so
 *	   required initial conditions are always met.
 *
 *	2. Cancelled buffers are recorded in pass 1 in a separate table and
 *	   there's nothing to replay from them so we can simply cull them
 *	   from the transaction. However, we can't do that until after we've
 *	   replayed all the other items because they may be dependent on the
 *	   cancelled buffer and replaying the cancelled buffer can remove it
 *	   from the cancelled buffer table. Hence they have to be done last.
 *
 *	3. Inode allocation buffers must be replayed before inode items that
 *	   read the buffer and replay changes into it. For filesystems using the
 *	   ICREATE transactions, this means XFS_LI_ICREATE objects need to get
 *	   treated the same as inode allocation buffers as they create and
 *	   initialise the buffers directly.
 *
 *	4. Inode unlink buffers must be replayed after inode items are replayed.
 *	   This ensures that inodes are completely flushed to the inode buffer
 *	   in a "free" state before we remove the unlinked inode list pointer.
 *
 * Hence the ordering needs to be inode allocation buffers first, inode items
 * second, inode unlink buffers third and cancelled buffers last.
 *
 * But there's a problem with that - we can't tell an inode allocation buffer
 * apart from a regular buffer, so we can't separate them. We can, however,
 * tell an inode unlink buffer from the others, and so we can separate them out
 * from all the other buffers and move them to last.
 *
 * Hence, 4 lists, in order from head to tail:
 *	- buffer_list for all buffers except cancelled/inode unlink buffers
 *	- item_list for all non-buffer items
 *	- inode_buffer_list for inode unlink buffers
 *	- cancel_list for the cancelled buffers
 *
 * Note that we add objects to the tail of the lists so that first-to-last
 * ordering is preserved within the lists. Adding objects to the head of the
 * list means when we traverse from the head we walk them in last-to-first
 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
 * but for all other items there may be specific ordering that we need to
 * preserve.
 */
STATIC int
xlog_recover_reorder_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass)
{
	xlog_recover_item_t	*item, *n;
	LIST_HEAD(sort_list);
	LIST_HEAD(cancel_list);
	LIST_HEAD(buffer_list);
	LIST_HEAD(inode_buffer_list);
	LIST_HEAD(inode_list);

	list_splice_init(&trans->r_itemq, &sort_list);
	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
		xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;

		switch (ITEM_TYPE(item)) {
		case XFS_LI_ICREATE:
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XFS_LI_BUF:
			if (buf_f->blf_flags & XFS_BLF_CANCEL) {
				trace_xfs_log_recover_item_reorder_head(log,
							trans, item, pass);
				list_move(&item->ri_list, &cancel_list);
				break;
			}
			if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
				list_move(&item->ri_list, &inode_buffer_list);
				break;
			}
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XFS_LI_INODE:
		case XFS_LI_DQUOT:
		case XFS_LI_QUOTAOFF:
		case XFS_LI_EFD:
		case XFS_LI_EFI:
			trace_xfs_log_recover_item_reorder_tail(log,
							trans, item, pass);
			list_move_tail(&item->ri_list, &inode_list);
			break;
		default:
			xfs_warn(log->l_mp,
				"%s: unrecognized type of log operation",
				__func__);
			ASSERT(0);
			return XFS_ERROR(EIO);
		}
	}
	ASSERT(list_empty(&sort_list));
	if (!list_empty(&buffer_list))
		list_splice(&buffer_list, &trans->r_itemq);
	if (!list_empty(&inode_list))
		list_splice_tail(&inode_list, &trans->r_itemq);
	if (!list_empty(&inode_buffer_list))
		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
	if (!list_empty(&cancel_list))
		list_splice_tail(&cancel_list, &trans->r_itemq);
	return 0;
}

/*
 * Build up the table of buf cancel records so that we don't replay
 * cancelled data in the second pass.  For buffer records that are
 * not cancel records, there is nothing to do here so we just return.
 *
 * If we get a cancel record which is already in the table, this indicates
 * that the buffer was cancelled multiple times.  In order to ensure
 * that during pass 2 we keep the record in the table until we reach its
 * last occurrence in the log, we keep a reference count in the cancel
 * record in the table to tell us how many times we expect to see this
 * record during the second pass.
 */
STATIC int
xlog_recover_buffer_pass1(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;

	/*
	 * If this isn't a cancel buffer item, then just return.
	 */
	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
		return 0;
	}

	/*
	 * Insert an xfs_buf_cancel record into the hash table of them.
	 * If there is already an identical record, bump its reference count.
	 */
	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == buf_f->blf_blkno &&
		    bcp->bc_len == buf_f->blf_len) {
			bcp->bc_refcount++;
			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
			return 0;
		}
	}

	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
	bcp->bc_blkno = buf_f->blf_blkno;
	bcp->bc_len = buf_f->blf_len;
	bcp->bc_refcount = 1;
	list_add_tail(&bcp->bc_list, bucket);

	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
	return 0;
}

/*
 * Check to see whether the buffer being recovered has a corresponding
 * entry in the buffer cancel record table.  If it does, return the cancel
 * buffer structure to the caller.
 */
STATIC struct xfs_buf_cancel *
xlog_peek_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len,
	ushort			flags)
{
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;

	if (!log->l_buf_cancel_table) {
		/* empty table means no cancelled buffers in the log */
		ASSERT(!(flags & XFS_BLF_CANCEL));
		return NULL;
	}

	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
			return bcp;
	}

	/*
	 * We didn't find a corresponding entry in the table, so return NULL
	 * so that the buffer is NOT cancelled.
	 */
	ASSERT(!(flags & XFS_BLF_CANCEL));
	return NULL;
}

/*
 * If the buffer is being cancelled then return 1 so that it will be cancelled,
 * otherwise return 0.  If the buffer is actually a buffer cancel item
 * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the
 * table and remove it from the table if this is the last reference.
 *
 * We remove the cancel record from the table when we encounter its last
 * occurrence in the log so that if the same buffer is re-used again after its
 * last cancellation we actually replay the changes made at that point.
 */
STATIC int
xlog_check_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len,
	ushort			flags)
{
	struct xfs_buf_cancel	*bcp;

	bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
	if (!bcp)
		return 0;

	/*
	 * We've got a match, so return 1 so that the recovery of this buffer
	 * is cancelled.  If this buffer is actually a buffer cancel log
	 * item, then decrement the refcount on the one in the table and
	 * remove it if this is the last reference.
	 */
	if (flags & XFS_BLF_CANCEL) {
		if (--bcp->bc_refcount == 0) {
			list_del(&bcp->bc_list);
			kmem_free(bcp);
		}
	}
	return 1;
}

/*
 * Perform recovery for a buffer full of inodes.  In these buffers, the only
 * data which should be recovered is that which corresponds to the
 * di_next_unlinked pointers in the on disk inode structures.  The rest of the
 * data for the inodes is always logged through the inodes themselves rather
 * than the inode buffer and is recovered in xlog_recover_inode_pass2().
 *
 * The only time when buffers full of inodes are fully recovered is when the
 * buffer is full of newly allocated inodes.  In this case the buffer will
 * not be marked as an inode buffer and so will be sent to
 * xlog_recover_do_reg_buffer() below during recovery.
 */
STATIC int
xlog_recover_do_inode_buffer(
	struct xfs_mount	*mp,
	xlog_recover_item_t	*item,
	struct xfs_buf		*bp,
	xfs_buf_log_format_t	*buf_f)
{
	int			i;
	int			item_index = 0;
	int			bit = 0;
	int			nbits = 0;
	int			reg_buf_offset = 0;
	int			reg_buf_bytes = 0;
	int			next_unlinked_offset;
	int			inodes_per_buf;
	xfs_agino_t		*logged_nextp;
	xfs_agino_t		*buffer_nextp;

	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);

	/*
	 * Post recovery validation only works properly on CRC enabled
	 * filesystems.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb))
		bp->b_ops = &xfs_inode_buf_ops;

	inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
	for (i = 0; i < inodes_per_buf; i++) {
		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
			offsetof(xfs_dinode_t, di_next_unlinked);

		while (next_unlinked_offset >=
		       (reg_buf_offset + reg_buf_bytes)) {
			/*
			 * The next di_next_unlinked field is beyond
			 * the current logged region.  Find the next
			 * logged region that contains or is beyond
			 * the current di_next_unlinked field.
			 */
			bit += nbits;
			bit = xfs_next_bit(buf_f->blf_data_map,
					   buf_f->blf_map_size, bit);

			/*
			 * If there are no more logged regions in the
			 * buffer, then we're done.
			 */
			if (bit == -1)
				return 0;

			nbits = xfs_contig_bits(buf_f->blf_data_map,
						buf_f->blf_map_size, bit);
			ASSERT(nbits > 0);
			reg_buf_offset = bit << XFS_BLF_SHIFT;
			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
			item_index++;
		}

		/*
		 * If the current logged region starts after the current
		 * di_next_unlinked field, then move on to the next
		 * di_next_unlinked field.
		 */
		if (next_unlinked_offset < reg_buf_offset)
			continue;

		ASSERT(item->ri_buf[item_index].i_addr != NULL);
		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
		ASSERT((reg_buf_offset + reg_buf_bytes) <=
							BBTOB(bp->b_io_length));

		/*
		 * The current logged region contains a copy of the
		 * current di_next_unlinked field.  Extract its value
		 * and copy it to the buffer copy.
		 */
		logged_nextp = item->ri_buf[item_index].i_addr +
				next_unlinked_offset - reg_buf_offset;
		if (unlikely(*logged_nextp == 0)) {
			xfs_alert(mp,
		"Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
		"Trying to replay bad (0) inode di_next_unlinked field.",
				item, bp);
			XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
					 XFS_ERRLEVEL_LOW, mp);
			return XFS_ERROR(EFSCORRUPTED);
		}

		buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
					      next_unlinked_offset);
		*buffer_nextp = *logged_nextp;

		/*
		 * If necessary, recalculate the CRC in the on-disk inode. We
		 * have to leave the inode in a consistent state for whoever
		 * reads it next....
		 */
		xfs_dinode_calc_crc(mp, (struct xfs_dinode *)
				xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
	}

	return 0;
}

/*
 * V5 filesystems know the age of the buffer on disk being recovered. We can
 * have newer objects on disk than we are replaying, and so for these cases we
 * don't want to replay the current change as that will make the buffer contents
 * temporarily invalid on disk.
 *
 * The magic number might not match the buffer type we are going to recover
 * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags.  Hence
 * extract the LSN of the existing object in the buffer based on its current
 * magic number.  If we don't recognise the magic number in the buffer, then
 * return a LSN of -1 so that the caller knows it was an unrecognised block and
 * so can recover the buffer.
 */
static xfs_lsn_t
xlog_recover_get_buf_lsn(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	__uint32_t		magic32;
	__uint16_t		magic16;
	__uint16_t		magicda;
	void			*blk = bp->b_addr;

	/* v4 filesystems always recover immediately */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		goto recover_immediately;

	magic32 = be32_to_cpu(*(__be32 *)blk);
	switch (magic32) {
	case XFS_ABTB_CRC_MAGIC:
	case XFS_ABTC_CRC_MAGIC:
	case XFS_ABTB_MAGIC:
	case XFS_ABTC_MAGIC:
	case XFS_IBT_CRC_MAGIC:
	case XFS_IBT_MAGIC:
		return be64_to_cpu(
				((struct xfs_btree_block *)blk)->bb_u.s.bb_lsn);
	case XFS_BMAP_CRC_MAGIC:
	case XFS_BMAP_MAGIC:
		return be64_to_cpu(
				((struct xfs_btree_block *)blk)->bb_u.l.bb_lsn);
	case XFS_AGF_MAGIC:
		return be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
	case XFS_AGFL_MAGIC:
		return be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
	case XFS_AGI_MAGIC:
		return be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
	case XFS_SYMLINK_MAGIC:
		return be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
	case XFS_DIR3_BLOCK_MAGIC:
	case XFS_DIR3_DATA_MAGIC:
	case XFS_DIR3_FREE_MAGIC:
		return be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
	case XFS_ATTR3_RMT_MAGIC:
		return be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
	case XFS_SB_MAGIC:
		return be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
	default:
		break;
	}

	magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
	switch (magicda) {
	case XFS_DIR3_LEAF1_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
	case XFS_DA3_NODE_MAGIC:
		return be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
	default:
		break;
	}

	/*
	 * We do individual object checks on dquot and inode buffers as they
	 * have their own individual LSN records. Also, we could have a stale
	 * buffer here, so we have to at least recognise these buffer types.
	 *
	 * A noted complexity here is inode unlinked list processing - it logs
	 * the inode directly in the buffer, but we don't know which inodes have
	 * been modified, and there is no global buffer LSN. Hence we need to
	 * recover all inode buffer types immediately. This problem will be
	 * fixed by logical logging of the unlinked list modifications.
	 */
	magic16 = be16_to_cpu(*(__be16 *)blk);
	switch (magic16) {
	case XFS_DQUOT_MAGIC:
	case XFS_DINODE_MAGIC:
		goto recover_immediately;
	default:
		break;
	}

	/* unknown buffer contents, recover immediately */

recover_immediately:
	return (xfs_lsn_t)-1;
}
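
/*
 * Illustrative sketch (hypothetical helper): how a caller turns the LSN
 * returned above into a replay decision.  An LSN of -1 means the contents
 * were unrecognised and must be recovered immediately; an LSN of 0 means
 * the object was never stamped.  Otherwise we only replay if the object on
 * disk is older than the transaction being replayed, which is exactly the
 * check performed in xlog_recover_buffer_pass2() below.
 */
static inline bool
example_should_replay(
	xfs_lsn_t	buf_lsn,
	xfs_lsn_t	current_lsn)
{
	if (buf_lsn == 0 || buf_lsn == (xfs_lsn_t)-1)
		return true;	/* unstamped or unrecognised; replay */
	return XFS_LSN_CMP(buf_lsn, current_lsn) < 0;
}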

/*
 * Validate the recovered buffer is of the correct type and attach the
 * appropriate buffer operations to them for writeback. Magic numbers are in a
 * few places:
 *	the first 16 bits of the buffer (inode buffer, dquot buffer),
 *	the first 32 bits of the buffer (most blocks),
 *	inside a struct xfs_da_blkinfo at the start of the buffer.
 */
static void
xlog_recover_validate_buf_type(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	xfs_buf_log_format_t	*buf_f)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;
	__uint32_t		magic32;
	__uint16_t		magic16;
	__uint16_t		magicda;

	magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
	magic16 = be16_to_cpu(*(__be16*)bp->b_addr);
	magicda = be16_to_cpu(info->magic);
	switch (xfs_blft_from_flags(buf_f)) {
	case XFS_BLFT_BTREE_BUF:
		switch (magic32) {
		case XFS_ABTB_CRC_MAGIC:
		case XFS_ABTC_CRC_MAGIC:
		case XFS_ABTB_MAGIC:
		case XFS_ABTC_MAGIC:
			bp->b_ops = &xfs_allocbt_buf_ops;
			break;
		case XFS_IBT_CRC_MAGIC:
		case XFS_IBT_MAGIC:
			bp->b_ops = &xfs_inobt_buf_ops;
			break;
		case XFS_BMAP_CRC_MAGIC:
		case XFS_BMAP_MAGIC:
			bp->b_ops = &xfs_bmbt_buf_ops;
			break;
		default:
			xfs_warn(mp, "Bad btree block magic!");
			ASSERT(0);
			break;
		}
		break;
	case XFS_BLFT_AGF_BUF:
		if (magic32 != XFS_AGF_MAGIC) {
			xfs_warn(mp, "Bad AGF block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_agf_buf_ops;
		break;
	case XFS_BLFT_AGFL_BUF:
		if (!xfs_sb_version_hascrc(&mp->m_sb))
			break;
		if (magic32 != XFS_AGFL_MAGIC) {
			xfs_warn(mp, "Bad AGFL block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_agfl_buf_ops;
		break;
	case XFS_BLFT_AGI_BUF:
		if (magic32 != XFS_AGI_MAGIC) {
			xfs_warn(mp, "Bad AGI block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_agi_buf_ops;
		break;
	case XFS_BLFT_UDQUOT_BUF:
	case XFS_BLFT_PDQUOT_BUF:
	case XFS_BLFT_GDQUOT_BUF:
#ifdef CONFIG_XFS_QUOTA
		if (magic16 != XFS_DQUOT_MAGIC) {
			xfs_warn(mp, "Bad DQUOT block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_dquot_buf_ops;
#else
		xfs_alert(mp,
	"Trying to recover dquots without QUOTA support built in!");
		ASSERT(0);
#endif
		break;
	case XFS_BLFT_DINO_BUF:
		/*
		 * we get here with inode allocation buffers, not buffers that
		 * track unlinked list changes.
		 */
		if (magic16 != XFS_DINODE_MAGIC) {
			xfs_warn(mp, "Bad INODE block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_inode_buf_ops;
		break;
	case XFS_BLFT_SYMLINK_BUF:
		if (magic32 != XFS_SYMLINK_MAGIC) {
			xfs_warn(mp, "Bad symlink block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_symlink_buf_ops;
		break;
	case XFS_BLFT_DIR_BLOCK_BUF:
		if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
		    magic32 != XFS_DIR3_BLOCK_MAGIC) {
			xfs_warn(mp, "Bad dir block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_dir3_block_buf_ops;
		break;
	case XFS_BLFT_DIR_DATA_BUF:
		if (magic32 != XFS_DIR2_DATA_MAGIC &&
		    magic32 != XFS_DIR3_DATA_MAGIC) {
			xfs_warn(mp, "Bad dir data magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_dir3_data_buf_ops;
		break;
	case XFS_BLFT_DIR_FREE_BUF:
		if (magic32 != XFS_DIR2_FREE_MAGIC &&
		    magic32 != XFS_DIR3_FREE_MAGIC) {
			xfs_warn(mp, "Bad dir3 free magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_dir3_free_buf_ops;
		break;
	case XFS_BLFT_DIR_LEAF1_BUF:
		if (magicda != XFS_DIR2_LEAF1_MAGIC &&
		    magicda != XFS_DIR3_LEAF1_MAGIC) {
			xfs_warn(mp, "Bad dir leaf1 magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_dir3_leaf1_buf_ops;
		break;
	case XFS_BLFT_DIR_LEAFN_BUF:
		if (magicda != XFS_DIR2_LEAFN_MAGIC &&
		    magicda != XFS_DIR3_LEAFN_MAGIC) {
			xfs_warn(mp, "Bad dir leafn magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		break;
	case XFS_BLFT_DA_NODE_BUF:
		if (magicda != XFS_DA_NODE_MAGIC &&
		    magicda != XFS_DA3_NODE_MAGIC) {
			xfs_warn(mp, "Bad da node magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_da3_node_buf_ops;
		break;
	case XFS_BLFT_ATTR_LEAF_BUF:
		if (magicda != XFS_ATTR_LEAF_MAGIC &&
		    magicda != XFS_ATTR3_LEAF_MAGIC) {
			xfs_warn(mp, "Bad attr leaf magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		break;
	case XFS_BLFT_ATTR_RMT_BUF:
		if (!xfs_sb_version_hascrc(&mp->m_sb))
			break;
		if (magic32 != XFS_ATTR3_RMT_MAGIC) {
			xfs_warn(mp, "Bad attr remote magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_attr3_rmt_buf_ops;
		break;
	case XFS_BLFT_SB_BUF:
		if (magic32 != XFS_SB_MAGIC) {
			xfs_warn(mp, "Bad SB block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_sb_buf_ops;
		break;
	default:
		xfs_warn(mp, "Unknown buffer type %d!",
			 xfs_blft_from_flags(buf_f));
		break;
	}
}

/*
 * Perform a 'normal' buffer recovery.  Each logged region of the
 * buffer should be copied over the corresponding region in the
 * given buffer.  The bitmap in the buf log format structure indicates
 * where to place the logged data.
 */
STATIC void
xlog_recover_do_reg_buffer(
	struct xfs_mount	*mp,
	xlog_recover_item_t	*item,
	struct xfs_buf		*bp,
	xfs_buf_log_format_t	*buf_f)
{
	int			i;
	int			bit;
	int			nbits;
	int			error;

	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);

	bit = 0;
	i = 1;  /* 0 is the buf format structure */
	while (1) {
		bit = xfs_next_bit(buf_f->blf_data_map,
				   buf_f->blf_map_size, bit);
		if (bit == -1)
			break;
		nbits = xfs_contig_bits(buf_f->blf_data_map,
					buf_f->blf_map_size, bit);
		ASSERT(nbits > 0);
		ASSERT(item->ri_buf[i].i_addr != NULL);
		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
		ASSERT(BBTOB(bp->b_io_length) >=
		       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));

		/*
		 * The dirty regions logged in the buffer, even though
		 * contiguous, may span multiple chunks. This is because the
		 * dirty region may span a physical page boundary in a buffer
		 * and hence be split into two separate vectors for writing into
		 * the log. Hence we need to trim nbits back to the length of
		 * the current region being copied out of the log.
		 */
		if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
			nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;

		/*
		 * Do a sanity check if this is a dquot buffer. Just checking
		 * the first dquot in the buffer should do. XXXThis is
		 * probably a good thing to do for other buf types also.
		 */
		error = 0;
		if (buf_f->blf_flags &
		   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
			if (item->ri_buf[i].i_addr == NULL) {
				xfs_alert(mp,
					"XFS: NULL dquot in %s.", __func__);
				goto next;
			}
			if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
				xfs_alert(mp,
					"XFS: dquot too small (%d) in %s.",
					item->ri_buf[i].i_len, __func__);
				goto next;
			}
			error = xfs_qm_dqcheck(mp, item->ri_buf[i].i_addr,
					       -1, 0, XFS_QMOPT_DOWARN,
					       "dquot_buf_recover");
			if (error)
				goto next;
		}

		memcpy(xfs_buf_offset(bp,
			(uint)bit << XFS_BLF_SHIFT),	/* dest */
			item->ri_buf[i].i_addr,		/* source */
			nbits<<XFS_BLF_SHIFT);		/* length */
next:
		i++;
		bit += nbits;
	}

	/* Shouldn't be any more regions */
	ASSERT(i == item->ri_total);

	/*
	 * We can only do post recovery validation on items on CRC enabled
	 * filesystems as we need to know when the buffer was written to be
	 * able to determine if we should have replayed the item. If we replay
	 * old metadata over a newer buffer, then it will enter a temporarily
	 * inconsistent state resulting in verification failures. Hence for now
	 * just avoid the verification stage for non-crc filesystems.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb))
		xlog_recover_validate_buf_type(mp, bp, buf_f);
}
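
/*
 * Illustrative sketch of the bitmap arithmetic used above: each bit in
 * blf_data_map covers one XFS_BLF_CHUNK-sized (128 byte) chunk of the
 * buffer, so a run of nbits starting at 'bit' maps to a byte range via a
 * shift by XFS_BLF_SHIFT.  example_run_to_bytes() is a hypothetical helper
 * shown only to make the mapping explicit.
 */
static inline void
example_run_to_bytes(
	int	bit,
	int	nbits,
	uint	*offset,
	uint	*length)
{
	*offset = (uint)bit << XFS_BLF_SHIFT;	/* bit * XFS_BLF_CHUNK */
	*length = (uint)nbits << XFS_BLF_SHIFT;	/* nbits * XFS_BLF_CHUNK */
}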

/*
 * Do some primitive error checking on ondisk dquot data structures.
 */
int
xfs_qm_dqcheck(
	struct xfs_mount *mp,
	xfs_disk_dquot_t *ddq,
	xfs_dqid_t	 id,
	uint		 type,	  /* used only when IO_dorepair is true */
	uint		 flags,
	char		 *str)
{
	xfs_dqblk_t	 *d = (xfs_dqblk_t *)ddq;
	int		errs = 0;

	/*
	 * We can encounter an uninitialized dquot buffer for 2 reasons:
	 * 1. If we crash while deleting the quotainode(s), and those blks got
	 *    used for user data. This is because we take the path of regular
	 *    file deletion; however, the size field of quotainodes is never
	 *    updated, so all the tricks that we play in itruncate_finish
	 *    don't quite matter.
	 *
	 * 2. We don't play the quota buffers when there's a quotaoff logitem.
	 *    But the allocation will be replayed so we'll end up with an
	 *    uninitialized quota block.
	 *
	 * This is all fine; things are still consistent, and we haven't lost
	 * any quota information. Just don't complain about bad dquot blks.
	 */
	if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
		if (flags & XFS_QMOPT_DOWARN)
			xfs_alert(mp,
			"%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
			str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
		errs++;
	}
	if (ddq->d_version != XFS_DQUOT_VERSION) {
		if (flags & XFS_QMOPT_DOWARN)
			xfs_alert(mp,
			"%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
			str, id, ddq->d_version, XFS_DQUOT_VERSION);
		errs++;
	}

	if (ddq->d_flags != XFS_DQ_USER &&
	    ddq->d_flags != XFS_DQ_PROJ &&
	    ddq->d_flags != XFS_DQ_GROUP) {
		if (flags & XFS_QMOPT_DOWARN)
			xfs_alert(mp,
			"%s : XFS dquot ID 0x%x, unknown flags 0x%x",
			str, id, ddq->d_flags);
		errs++;
	}

	if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
		if (flags & XFS_QMOPT_DOWARN)
			xfs_alert(mp,
			"%s : ondisk-dquot 0x%p, ID mismatch: "
			"0x%x expected, found id 0x%x",
			str, ddq, id, be32_to_cpu(ddq->d_id));
		errs++;
	}

	if (!errs && ddq->d_id) {
		if (ddq->d_blk_softlimit &&
		    be64_to_cpu(ddq->d_bcount) >
				be64_to_cpu(ddq->d_blk_softlimit)) {
			if (!ddq->d_btimer) {
				if (flags & XFS_QMOPT_DOWARN)
					xfs_alert(mp,
			"%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
					str, (int)be32_to_cpu(ddq->d_id), ddq);
				errs++;
			}
		}
		if (ddq->d_ino_softlimit &&
		    be64_to_cpu(ddq->d_icount) >
				be64_to_cpu(ddq->d_ino_softlimit)) {
			if (!ddq->d_itimer) {
				if (flags & XFS_QMOPT_DOWARN)
					xfs_alert(mp,
			"%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
					str, (int)be32_to_cpu(ddq->d_id), ddq);
				errs++;
			}
		}
		if (ddq->d_rtb_softlimit &&
		    be64_to_cpu(ddq->d_rtbcount) >
				be64_to_cpu(ddq->d_rtb_softlimit)) {
			if (!ddq->d_rtbtimer) {
				if (flags & XFS_QMOPT_DOWARN)
					xfs_alert(mp,
			"%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
					str, (int)be32_to_cpu(ddq->d_id), ddq);
				errs++;
			}
		}
	}

	if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
		return errs;

	if (flags & XFS_QMOPT_DOWARN)
		xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);

	/*
	 * Typically, a repair is only requested by quotacheck.
	 */
	ASSERT(id != -1);
	ASSERT(flags & XFS_QMOPT_DQREPAIR);
	memset(d, 0, sizeof(xfs_dqblk_t));

	d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
	d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
	d->dd_diskdq.d_flags = type;
	d->dd_diskdq.d_id = cpu_to_be32(id);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
		xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	return errs;
}
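
/*
 * Illustrative usage sketch (hypothetical caller): verify-only mode passes
 * id == -1 with XFS_QMOPT_DOWARN, as xlog_recover_do_reg_buffer() does
 * above; a repair caller would instead pass a concrete id/type and add
 * XFS_QMOPT_DQREPAIR so a corrupt dquot is re-initialised in place.
 */
static inline int
example_verify_dquot(
	struct xfs_mount	*mp,
	xfs_disk_dquot_t	*ddq)
{
	/* warn about, but do not repair, any corruption found */
	return xfs_qm_dqcheck(mp, ddq, -1, 0, XFS_QMOPT_DOWARN,
			      "example_verify_dquot");
}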

/*
 * Perform a dquot buffer recovery.
 * Simple algorithm: if we have found a QUOTAOFF log item of the same type
 * (ie. USR or GRP), then just toss this buffer away; don't recover it.
 * Else, treat it as a regular buffer and do recovery.
 */
STATIC void
xlog_recover_do_dquot_buffer(
	struct xfs_mount		*mp,
	struct xlog			*log,
	struct xlog_recover_item	*item,
	struct xfs_buf			*bp,
	struct xfs_buf_log_format	*buf_f)
{
	uint			type;

	trace_xfs_log_recover_buf_dquot_buf(log, buf_f);

	/*
	 * Filesystems are required to send in quota flags at mount time.
	 */
	if (mp->m_qflags == 0) {
		return;
	}

	type = 0;
	if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
		type |= XFS_DQ_USER;
	if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
		type |= XFS_DQ_PROJ;
	if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
		type |= XFS_DQ_GROUP;
	/*
	 * This type of quota was turned off, so ignore this buffer
	 */
	if (log->l_quotaoffs_flag & type)
		return;

	xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
}

/*
 * This routine replays a modification made to a buffer at runtime.
 * There are actually two types of buffer, regular and inode, which
 * are handled differently.  Inode buffers are handled differently
 * in that we only recover a specific set of data from them, namely
 * the inode di_next_unlinked fields.  This is because all other inode
 * data is actually logged via inode records and any data we replay
 * here which overlaps that may be stale.
 *
 * When meta-data buffers are freed at run time we log a buffer item
 * with the XFS_BLF_CANCEL bit set to indicate that previous copies
 * of the buffer in the log should not be replayed at recovery time.
 * This is so that if the blocks covered by the buffer are reused for
 * file data before we crash we don't end up replaying old, freed
 * meta-data into a user's file.
 *
 * To handle the cancellation of buffer log items, we make two passes
 * over the log during recovery.  During the first we build a table of
 * those buffers which have been cancelled, and during the second we
 * only replay those buffers which do not have corresponding cancel
 * records in the table.  See xlog_recover_buffer_pass[1,2] above
 * for more details on the implementation of the table of cancel records.
 */
STATIC int
xlog_recover_buffer_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			current_lsn)
{
	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
	xfs_mount_t		*mp = log->l_mp;
	xfs_buf_t		*bp;
	int			error;
	uint			buf_flags;
	xfs_lsn_t		lsn;

	/*
	 * In this pass we only want to recover all the buffers which have
	 * not been cancelled and are not cancellation buffers themselves.
	 */
	if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
			buf_f->blf_len, buf_f->blf_flags)) {
		trace_xfs_log_recover_buf_cancel(log, buf_f);
		return 0;
	}

	trace_xfs_log_recover_buf_recover(log, buf_f);

	buf_flags = 0;
	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
		buf_flags |= XBF_UNMAPPED;

	bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
			  buf_flags, NULL);
	if (!bp)
		return XFS_ERROR(ENOMEM);
	error = bp->b_error;
	if (error) {
		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
		goto out_release;
	}

	/*
	 * Recover the buffer only if we get an LSN from it and it's less than
	 * the lsn of the transaction we are replaying.
	 */
	lsn = xlog_recover_get_buf_lsn(mp, bp);
	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0)
		goto out_release;

	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
	} else if (buf_f->blf_flags &
		  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
		xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
	} else {
		xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
	}
	if (error)
		goto out_release;

	/*
	 * Perform delayed write on the buffer.  Asynchronous writes will be
	 * slower when taking into account all the buffers to be flushed.
	 *
	 * Also make sure that only inode buffers with good sizes stay in
	 * the buffer cache.  The kernel moves inodes in buffers of 1 block
	 * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger.  The inode
	 * buffers in the log can be a different size if the log was generated
	 * by an older kernel using unclustered inode buffers or a newer kernel
	 * running with a different inode cluster size.  Regardless, if the
	 * inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
	 * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
	 * the buffer out of the buffer cache so that the buffer won't
	 * overlap with future reads of those inodes.
	 */
	if (XFS_DINODE_MAGIC ==
	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
	    (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
			(__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
		xfs_buf_stale(bp);
		error = xfs_bwrite(bp);
	} else {
		ASSERT(bp->b_target->bt_mount == mp);
		bp->b_iodone = xlog_recover_iodone;
		xfs_buf_delwri_queue(bp, buffer_list);
	}

out_release:
	xfs_buf_relse(bp);
	return error;
}

/*
 * Inode fork owner changes
 *
 * If we have been told that we have to reparent the inode fork, it's because an
 * extent swap operation on a CRC enabled filesystem has been done and we are
 * replaying it. We need to walk the BMBT of the appropriate fork and change the
 * owners of it.
 *
 * The complexity here is that we don't have an inode context to work with, so
 * after we've replayed the inode we need to instantiate one. This is where the
 * fun begins.
 *
 * We are in the middle of log recovery, so we can't run transactions. That
 * means we cannot use cache coherent inode instantiation via xfs_iget(), as
 * that will result in the corresponding iput() running the inode through
 * xfs_inactive(). If we've just replayed an inode core that changes the link
 * count to zero (i.e. it's been unlinked), then xfs_inactive() will run
 * transactions (bad!).
 *
 * So, to avoid this, we instantiate an inode directly from the inode core we've
 * just recovered. We have the buffer still locked, and all we really need to
 * instantiate is the inode core and the forks being modified. We can do this
 * manually, then run the inode btree owner change, and then tear down the
 * xfs_inode without having to run any transactions at all.
 *
 * Also, because we don't have a transaction context available here but need
 * to gather all the buffers we modify for writeback, we pass the buffer_list
 * to the operation for it to use instead.
 */
STATIC int
xfs_recover_inode_owner_change(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip,
	struct xfs_inode_log_format *in_f,
	struct list_head	*buffer_list)
{
	struct xfs_inode	*ip;
	int			error;

	ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));

	ip = xfs_inode_alloc(mp, in_f->ilf_ino);
	if (!ip)
		return ENOMEM;

	/* instantiate the inode */
	xfs_dinode_from_disk(&ip->i_d, dip);
	ASSERT(ip->i_d.di_version >= 3);

	error = xfs_iformat_fork(ip, dip);
	if (error)
		goto out_free_ip;

	if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
		ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
		error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
					      ip->i_ino, buffer_list);
		if (error)
			goto out_free_ip;
	}

	if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
		ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
		error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
					      ip->i_ino, buffer_list);
		if (error)
			goto out_free_ip;
	}

out_free_ip:
	xfs_inode_free(ip);
	return error;
}
  2473. STATIC int
  2474. xlog_recover_inode_pass2(
  2475. struct xlog *log,
  2476. struct list_head *buffer_list,
  2477. struct xlog_recover_item *item,
  2478. xfs_lsn_t current_lsn)
  2479. {
  2480. xfs_inode_log_format_t *in_f;
  2481. xfs_mount_t *mp = log->l_mp;
  2482. xfs_buf_t *bp;
  2483. xfs_dinode_t *dip;
  2484. int len;
  2485. xfs_caddr_t src;
  2486. xfs_caddr_t dest;
  2487. int error;
  2488. int attr_index;
  2489. uint fields;
  2490. xfs_icdinode_t *dicp;
  2491. uint isize;
  2492. int need_free = 0;
  2493. if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
  2494. in_f = item->ri_buf[0].i_addr;
  2495. } else {
  2496. in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
  2497. need_free = 1;
  2498. error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
  2499. if (error)
  2500. goto error;
  2501. }
  2502. /*
  2503. * Inode buffers can be freed, look out for it,
  2504. * and do not replay the inode.
  2505. */
  2506. if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
  2507. in_f->ilf_len, 0)) {
  2508. error = 0;
  2509. trace_xfs_log_recover_inode_cancel(log, in_f);
  2510. goto error;
  2511. }
  2512. trace_xfs_log_recover_inode_recover(log, in_f);
  2513. bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
  2514. &xfs_inode_buf_ops);
  2515. if (!bp) {
  2516. error = ENOMEM;
  2517. goto error;
  2518. }
  2519. error = bp->b_error;
  2520. if (error) {
  2521. xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
  2522. goto out_release;
  2523. }
  2524. ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
  2525. dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);
  2526. /*
  2527. * Make sure the place we're flushing out to really looks
  2528. * like an inode!
  2529. */
  2530. if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
  2531. xfs_alert(mp,
  2532. "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
  2533. __func__, dip, bp, in_f->ilf_ino);
  2534. XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
  2535. XFS_ERRLEVEL_LOW, mp);
  2536. error = EFSCORRUPTED;
  2537. goto out_release;
  2538. }
  2539. dicp = item->ri_buf[1].i_addr;
  2540. if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
  2541. xfs_alert(mp,
  2542. "%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
  2543. __func__, item, in_f->ilf_ino);
  2544. XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
  2545. XFS_ERRLEVEL_LOW, mp);
  2546. error = EFSCORRUPTED;
  2547. goto out_release;
  2548. }
  2549. /*
  2550. * If the inode has an LSN in it, recover the inode only if it's less
  2551. * than the lsn of the transaction we are replaying. Note: we still
  2552. * need to replay an owner change even though the inode is more recent
  2553. * than the transaction as there is no guarantee that all the btree
  2554. * blocks are more recent than this transaction, too.
  2555. */
  2556. if (dip->di_version >= 3) {
  2557. xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn);
  2558. if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
  2559. trace_xfs_log_recover_inode_skip(log, in_f);
  2560. error = 0;
  2561. goto out_owner_change;
  2562. }
  2563. }
	/*
	 * di_flushiter is only valid for v1/2 inodes. All changes for v3
	 * inodes are transactional and if ordering is necessary we can
	 * determine that more accurately by the LSN field in the V3 inode
	 * core. Don't trust the inode versions, as we might be changing them
	 * here; use the superblock flag to determine whether we need to look
	 * at di_flushiter to skip replay when the on disk inode is newer than
	 * the log one.
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb) &&
	    dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
		/*
		 * Deal with the wrap case, DI_MAX_FLUSH is less
		 * than smaller numbers
		 */
		if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
		    dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
			/* do nothing */
		} else {
			trace_xfs_log_recover_inode_skip(log, in_f);
			error = 0;
			goto out_release;
		}
	}

	/* Take the opportunity to reset the flush iteration count */
	dicp->di_flushiter = 0;
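
	/*
	 * Sanity check the logged inode core before copying it over the
	 * on-disk inode: regular files and directories may only use the
	 * data fork formats that are legal for them, and the extent counts,
	 * block count and fork offset must be self-consistent.
	 */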
	if (unlikely(S_ISREG(dicp->di_mode))) {
		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
					     XFS_ERRLEVEL_LOW, mp, dicp);
			xfs_alert(mp,
		"%s: Bad regular inode log record, rec ptr 0x%p, "
		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
				__func__, item, dip, bp, in_f->ilf_ino);
			error = EFSCORRUPTED;
			goto out_release;
		}
	} else if (unlikely(S_ISDIR(dicp->di_mode))) {
		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
		    (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
					     XFS_ERRLEVEL_LOW, mp, dicp);
			xfs_alert(mp,
		"%s: Bad dir inode log record, rec ptr 0x%p, "
		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
				__func__, item, dip, bp, in_f->ilf_ino);
			error = EFSCORRUPTED;
			goto out_release;
		}
	}
	if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)) {
		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
				     XFS_ERRLEVEL_LOW, mp, dicp);
		xfs_alert(mp,
	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
	"dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
			__func__, item, dip, bp, in_f->ilf_ino,
			dicp->di_nextents + dicp->di_anextents,
			dicp->di_nblocks);
		error = EFSCORRUPTED;
		goto out_release;
	}
	if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
				     XFS_ERRLEVEL_LOW, mp, dicp);
		xfs_alert(mp,
	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
	"dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
			item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
		error = EFSCORRUPTED;
		goto out_release;
	}
	isize = xfs_icdinode_size(dicp->di_version);
	if (unlikely(item->ri_buf[1].i_len > isize)) {
		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
				     XFS_ERRLEVEL_LOW, mp, dicp);
		xfs_alert(mp,
			"%s: Bad inode log record length %d, rec ptr 0x%p",
			__func__, item->ri_buf[1].i_len, item);
		error = EFSCORRUPTED;
		goto out_release;
	}

	/* The core is in in-core format */
	xfs_dinode_to_disk(dip, dicp);

	/* the rest is in on-disk format */
	if (item->ri_buf[1].i_len > isize) {
		memcpy((char *)dip + isize,
			item->ri_buf[1].i_addr + isize,
			item->ri_buf[1].i_len - isize);
	}
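
	/*
	 * The device number and UUID are carried in the log format
	 * structure's union rather than in a separate log region, so they
	 * are copied straight out of in_f.
	 */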
	fields = in_f->ilf_fields;
	switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
	case XFS_ILOG_DEV:
		xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
		break;
	case XFS_ILOG_UUID:
		memcpy(XFS_DFORK_DPTR(dip),
		       &in_f->ilf_u.ilfu_uuid,
		       sizeof(uuid_t));
		break;
	}
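
	/*
	 * Two regions means only the log format structure and the inode
	 * core were logged; there is no fork data to replay.
	 */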
	if (in_f->ilf_size == 2)
		goto out_owner_change;
	len = item->ri_buf[2].i_len;
	src = item->ri_buf[2].i_addr;
	ASSERT(in_f->ilf_size <= 4);
	ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
	ASSERT(!(fields & XFS_ILOG_DFORK) ||
	       (len == in_f->ilf_dsize));

	switch (fields & XFS_ILOG_DFORK) {
	case XFS_ILOG_DDATA:
	case XFS_ILOG_DEXT:
		memcpy(XFS_DFORK_DPTR(dip), src, len);
		break;

	case XFS_ILOG_DBROOT:
		xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
				 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
				 XFS_DFORK_DSIZE(dip, mp));
		break;

	default:
		/*
		 * There are no data fork flags set.
		 */
		ASSERT((fields & XFS_ILOG_DFORK) == 0);
		break;
	}

	/*
	 * If we logged any attribute data, recover it. There may or
	 * may not have been any other non-core data logged in this
	 * transaction.
	 */
	if (in_f->ilf_fields & XFS_ILOG_AFORK) {
		if (in_f->ilf_fields & XFS_ILOG_DFORK) {
			attr_index = 3;
		} else {
			attr_index = 2;
		}
		len = item->ri_buf[attr_index].i_len;
		src = item->ri_buf[attr_index].i_addr;
		ASSERT(len == in_f->ilf_asize);

		switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
		case XFS_ILOG_ADATA:
		case XFS_ILOG_AEXT:
			dest = XFS_DFORK_APTR(dip);
			ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
			memcpy(dest, src, len);
			break;

		case XFS_ILOG_ABROOT:
			dest = XFS_DFORK_APTR(dip);
			xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
					 len, (xfs_bmdr_block_t *)dest,
					 XFS_DFORK_ASIZE(dip, mp));
			break;

		default:
			xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
			ASSERT(0);
			error = EIO;
			goto out_release;
		}
	}
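
	/*
	 * Replay the owner change even when the inode replay itself was
	 * skipped above; the LSN check comment earlier in this function
	 * explains why.
	 */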
out_owner_change:
	if (in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER))
		error = xfs_recover_inode_owner_change(mp, dip, in_f,
						       buffer_list);
	/* re-generate the checksum. */
	xfs_dinode_calc_crc(log->l_mp, dip);

	ASSERT(bp->b_target->bt_mount == mp);
	bp->b_iodone = xlog_recover_iodone;
	xfs_buf_delwri_queue(bp, buffer_list);

out_release:
	xfs_buf_relse(bp);
error:
	if (need_free)
		kmem_free(in_f);
	return XFS_ERROR(error);
}

/*
 * Recover QUOTAOFF records. We simply make a note of it in the xlog
 * structure, so that we know not to do any dquot item or dquot buffer
 * recovery of that type.
 */
STATIC int
xlog_recover_quotaoff_pass1(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	xfs_qoff_logformat_t	*qoff_f = item->ri_buf[0].i_addr;
	ASSERT(qoff_f);

	/*
	 * The logitem format's flag tells us if this was user quotaoff,
	 * group/project quotaoff or both.
	 */
	if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
		log->l_quotaoffs_flag |= XFS_DQ_USER;
	if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
		log->l_quotaoffs_flag |= XFS_DQ_PROJ;
	if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
		log->l_quotaoffs_flag |= XFS_DQ_GROUP;

	return (0);
}

/*
 * Recover a dquot record
 */
STATIC int
xlog_recover_dquot_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			current_lsn)
{
	xfs_mount_t		*mp = log->l_mp;
	xfs_buf_t		*bp;
	struct xfs_disk_dquot	*ddq, *recddq;
	int			error;
	xfs_dq_logformat_t	*dq_f;
	uint			type;

	/*
	 * Filesystems are required to send in quota flags at mount time.
	 */
	if (mp->m_qflags == 0)
		return (0);

	recddq = item->ri_buf[1].i_addr;
	if (recddq == NULL) {
		xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
		return XFS_ERROR(EIO);
	}
	if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
		xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
			item->ri_buf[1].i_len, __func__);
		return XFS_ERROR(EIO);
	}

	/*
	 * If this type of quotas was turned off, ignore this record.
	 */
	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
	ASSERT(type);
	if (log->l_quotaoffs_flag & type)
		return (0);

	/*
	 * At this point we know that quota was _not_ turned off.
	 * Since the mount flags are not indicating to us otherwise, this
	 * must mean that quota is on, and the dquot needs to be replayed.
	 * Remember that we may not have fully recovered the superblock yet,
	 * so we can't do the usual trick of looking at the SB quota bits.
	 *
	 * The other possibility, of course, is that the quota subsystem was
	 * removed since the last mount - ENOSYS.
	 */
	dq_f = item->ri_buf[0].i_addr;
	ASSERT(dq_f);
	error = xfs_qm_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
			   "xlog_recover_dquot_pass2 (log copy)");
	if (error)
		return XFS_ERROR(EIO);
	ASSERT(dq_f->qlf_len == 1);

	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
				   XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
				   NULL);
	if (error)
		return error;

	ASSERT(bp);
	ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);

	/*
	 * At least the magic num portion should be on disk because this
	 * was among a chunk of dquots created earlier, and we did some
	 * minimal initialization then.
	 */
	error = xfs_qm_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
			   "xlog_recover_dquot_pass2");
	if (error) {
		xfs_buf_relse(bp);
		return XFS_ERROR(EIO);
	}

	/*
	 * If the dquot has an LSN in it, recover the dquot only if it's less
	 * than the lsn of the transaction we are replaying.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
		xfs_lsn_t	lsn = be64_to_cpu(dqb->dd_lsn);

		if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
			goto out_release;
		}
	}
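
	/*
	 * Replay the logged dquot image over the on-disk dquot and, on CRC
	 * enabled filesystems, recompute the checksum over the whole dquot
	 * block.
	 */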
	memcpy(ddq, recddq, item->ri_buf[1].i_len);
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	ASSERT(dq_f->qlf_size == 2);
	ASSERT(bp->b_target->bt_mount == mp);
	bp->b_iodone = xlog_recover_iodone;
	xfs_buf_delwri_queue(bp, buffer_list);

out_release:
	xfs_buf_relse(bp);
	return 0;
}

/*
 * This routine is called to create an in-core extent free intent
 * item from the efi format structure which was logged on disk.
 * It allocates an in-core efi, copies the extents from the format
 * structure into it, and adds the efi to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_efi_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	int			error;
	xfs_mount_t		*mp = log->l_mp;
	xfs_efi_log_item_t	*efip;
	xfs_efi_log_format_t	*efi_formatp;

	efi_formatp = item->ri_buf[0].i_addr;

	efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
	if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
					 &(efip->efi_format)))) {
		xfs_efi_item_free(efip);
		return error;
	}
	atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);

	spin_lock(&log->l_ailp->xa_lock);
	/*
	 * xfs_trans_ail_update() drops the AIL lock.
	 */
	xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
	return 0;
}

/*
 * This routine is called when an efd format structure is found in
 * a committed transaction in the log. Its purpose is to cancel
 * the corresponding efi if it was still in the log. To do this
 * it searches the AIL for the efi with an id equal to that in the
 * efd format structure. If we find it, we remove the efi from the
 * AIL and free it.
 */
STATIC int
xlog_recover_efd_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	xfs_efd_log_format_t	*efd_formatp;
	xfs_efi_log_item_t	*efip = NULL;
	xfs_log_item_t		*lip;
	__uint64_t		efi_id;
	struct xfs_ail_cursor	cur;
	struct xfs_ail		*ailp = log->l_ailp;

	efd_formatp = item->ri_buf[0].i_addr;
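	/*
	 * The on-disk EFD may be in either the 32 bit or the 64 bit extent
	 * format, so accept either length here.
	 */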
	ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
	       (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
	efi_id = efd_formatp->efd_efi_id;

	/*
	 * Search for the efi with the id in the efd format structure
	 * in the AIL.
	 */
	spin_lock(&ailp->xa_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	while (lip != NULL) {
		if (lip->li_type == XFS_LI_EFI) {
			efip = (xfs_efi_log_item_t *)lip;
			if (efip->efi_format.efi_id == efi_id) {
				/*
				 * xfs_trans_ail_delete() drops the
				 * AIL lock.
				 */
				xfs_trans_ail_delete(ailp, lip,
						     SHUTDOWN_CORRUPT_INCORE);
				xfs_efi_item_free(efip);
				spin_lock(&ailp->xa_lock);
				break;
			}
		}
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}
	xfs_trans_ail_cursor_done(ailp, &cur);
	spin_unlock(&ailp->xa_lock);

	return 0;
}

/*
 * This routine is called when an inode create format structure is found in a
 * committed transaction in the log. Its purpose is to initialise the inodes
 * being allocated on disk. This requires us to get inode cluster buffers that
 * match the range to be initialised, stamped with inode templates and written
 * by delayed write so that subsequent modifications will hit the cached buffer
 * and only need writing out at the end of recovery.
 */
STATIC int
xlog_recover_do_icreate_pass2(
	struct xlog		*log,
	struct list_head	*buffer_list,
	xlog_recover_item_t	*item)
{
	struct xfs_mount	*mp = log->l_mp;
	struct xfs_icreate_log	*icl;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	unsigned int		count;
	unsigned int		isize;
	xfs_agblock_t		length;

	icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
	if (icl->icl_type != XFS_LI_ICREATE) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
		return EINVAL;
	}

	if (icl->icl_size != 1) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
		return EINVAL;
	}

	agno = be32_to_cpu(icl->icl_ag);
	if (agno >= mp->m_sb.sb_agcount) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
		return EINVAL;
	}
	agbno = be32_to_cpu(icl->icl_agbno);
	if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
		return EINVAL;
	}
	isize = be32_to_cpu(icl->icl_isize);
	if (isize != mp->m_sb.sb_inodesize) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
		return EINVAL;
	}
	count = be32_to_cpu(icl->icl_count);
	if (!count) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
		return EINVAL;
	}
	length = be32_to_cpu(icl->icl_length);
	if (!length || length >= mp->m_sb.sb_agblocks) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
		return EINVAL;
	}

	/* existing allocation is fixed value */
	ASSERT(count == XFS_IALLOC_INODES(mp));
	ASSERT(length == XFS_IALLOC_BLOCKS(mp));
	if (count != XFS_IALLOC_INODES(mp) ||
	    length != XFS_IALLOC_BLOCKS(mp)) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count 2");
		return EINVAL;
	}

	/*
	 * Inode buffers can be freed. Do not replay the inode initialisation
	 * as we could be overwriting something written after this inode
	 * buffer was cancelled.
	 *
	 * XXX: we need to iterate all buffers and only init those that are
	 * not cancelled. I think that a more fine grained factoring of
	 * xfs_ialloc_inode_init may be appropriate here to enable this to be
	 * done easily.
	 */
	if (xlog_check_buffer_cancelled(log,
			XFS_AGB_TO_DADDR(mp, agno, agbno), length, 0))
		return 0;

	xfs_ialloc_inode_init(mp, NULL, buffer_list, agno, agbno, length,
			      be32_to_cpu(icl->icl_gen));
	return 0;
}

/*
 * Free up any resources allocated by the transaction.
 *
 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
 */
STATIC void
xlog_recover_free_trans(
	struct xlog_recover	*trans)
{
	xlog_recover_item_t	*item, *n;
	int			i;

	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
		/* Free the regions in the item. */
		list_del(&item->ri_list);
		for (i = 0; i < item->ri_cnt; i++)
			kmem_free(item->ri_buf[i].i_addr);
		/* Free the item itself */
		kmem_free(item->ri_buf);
		kmem_free(item);
	}
	/* Free the transaction recover structure */
	kmem_free(trans);
}
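
/*
 * The readahead helpers below kick off asynchronous reads of the buffers
 * that pass 2 is about to replay, so that the later synchronous reads in
 * the replay functions find the data already cached. Items whose buffers
 * carry cancellation records are skipped, since they will not be replayed.
 */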
STATIC void
xlog_recover_buffer_ra_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;
	struct xfs_mount		*mp = log->l_mp;

	if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
			buf_f->blf_len, buf_f->blf_flags)) {
		return;
	}

	xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
			  buf_f->blf_len, NULL);
}

STATIC void
xlog_recover_inode_ra_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	struct xfs_inode_log_format	ilf_buf;
	struct xfs_inode_log_format	*ilfp;
	struct xfs_mount		*mp = log->l_mp;
	int				error;

	if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
		ilfp = item->ri_buf[0].i_addr;
	} else {
		ilfp = &ilf_buf;
		memset(ilfp, 0, sizeof(*ilfp));
		error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
		if (error)
			return;
	}

	if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
		return;

	xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
			  ilfp->ilf_len, &xfs_inode_buf_ra_ops);
}

STATIC void
xlog_recover_dquot_ra_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	struct xfs_mount	*mp = log->l_mp;
	struct xfs_disk_dquot	*recddq;
	struct xfs_dq_logformat	*dq_f;
	uint			type;

	if (mp->m_qflags == 0)
		return;

	recddq = item->ri_buf[1].i_addr;
	if (recddq == NULL)
		return;
	if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
		return;

	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
	ASSERT(type);
	if (log->l_quotaoffs_flag & type)
		return;

	dq_f = item->ri_buf[0].i_addr;
	ASSERT(dq_f);
	ASSERT(dq_f->qlf_len == 1);

	xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno,
			  XFS_FSB_TO_BB(mp, dq_f->qlf_len), NULL);
}

STATIC void
xlog_recover_ra_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	switch (ITEM_TYPE(item)) {
	case XFS_LI_BUF:
		xlog_recover_buffer_ra_pass2(log, item);
		break;
	case XFS_LI_INODE:
		xlog_recover_inode_ra_pass2(log, item);
		break;
	case XFS_LI_DQUOT:
		xlog_recover_dquot_ra_pass2(log, item);
		break;
	case XFS_LI_EFI:
	case XFS_LI_EFD:
	case XFS_LI_QUOTAOFF:
	default:
		break;
	}
}
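
/*
 * Pass 1 only needs to note state that affects pass 2: which buffers carry
 * cancellation records and which quota types were turned off. Everything
 * else is replayed in pass 2.
 */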
STATIC int
xlog_recover_commit_pass1(
	struct xlog			*log,
	struct xlog_recover		*trans,
	struct xlog_recover_item	*item)
{
	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);

	switch (ITEM_TYPE(item)) {
	case XFS_LI_BUF:
		return xlog_recover_buffer_pass1(log, item);
	case XFS_LI_QUOTAOFF:
		return xlog_recover_quotaoff_pass1(log, item);
	case XFS_LI_INODE:
	case XFS_LI_EFI:
	case XFS_LI_EFD:
	case XFS_LI_DQUOT:
	case XFS_LI_ICREATE:
		/* nothing to do in pass 1 */
		return 0;
	default:
		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
			__func__, ITEM_TYPE(item));
		ASSERT(0);
		return XFS_ERROR(EIO);
	}
}

STATIC int
xlog_recover_commit_pass2(
	struct xlog			*log,
	struct xlog_recover		*trans,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item)
{
	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);

	switch (ITEM_TYPE(item)) {
	case XFS_LI_BUF:
		return xlog_recover_buffer_pass2(log, buffer_list, item,
						 trans->r_lsn);
	case XFS_LI_INODE:
		return xlog_recover_inode_pass2(log, buffer_list, item,
						 trans->r_lsn);
	case XFS_LI_EFI:
		return xlog_recover_efi_pass2(log, item, trans->r_lsn);
	case XFS_LI_EFD:
		return xlog_recover_efd_pass2(log, item);
	case XFS_LI_DQUOT:
		return xlog_recover_dquot_pass2(log, buffer_list, item,
						trans->r_lsn);
	case XFS_LI_ICREATE:
		return xlog_recover_do_icreate_pass2(log, buffer_list, item);
	case XFS_LI_QUOTAOFF:
		/* nothing to do in pass 2 */
		return 0;
	default:
		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
			__func__, ITEM_TYPE(item));
		ASSERT(0);
		return XFS_ERROR(EIO);
	}
}

STATIC int
xlog_recover_items_pass2(
	struct xlog		*log,
	struct xlog_recover	*trans,
	struct list_head	*buffer_list,
	struct list_head	*item_list)
{
	struct xlog_recover_item	*item;
	int				error = 0;

	list_for_each_entry(item, item_list, ri_list) {
		error = xlog_recover_commit_pass2(log, trans,
						  buffer_list, item);
		if (error)
			return error;
	}

	return error;
}

/*
 * Perform the transaction.
 *
 * If the transaction modifies a buffer or inode, do it now. Otherwise,
 * EFIs and EFDs get queued up by adding entries into the AIL for them.
 */
STATIC int
xlog_recover_commit_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass)
{
	int				error = 0;
	int				error2;
	int				items_queued = 0;
	struct xlog_recover_item	*item;
	struct xlog_recover_item	*next;
	LIST_HEAD			(buffer_list);
	LIST_HEAD			(ra_list);
	LIST_HEAD			(done_list);
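
	/*
	 * In pass 2 items are queued onto ra_list so that readahead can be
	 * issued for a batch of items before any of them are replayed. Once
	 * XLOG_RECOVER_COMMIT_QUEUE_MAX items have been queued, the batch is
	 * replayed and moved to done_list.
	 */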
#define XLOG_RECOVER_COMMIT_QUEUE_MAX	100

	hlist_del(&trans->r_list);

	error = xlog_recover_reorder_trans(log, trans, pass);
	if (error)
		return error;

	list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
		switch (pass) {
		case XLOG_RECOVER_PASS1:
			error = xlog_recover_commit_pass1(log, trans, item);
			break;
		case XLOG_RECOVER_PASS2:
			xlog_recover_ra_pass2(log, item);
			list_move_tail(&item->ri_list, &ra_list);
			items_queued++;
			if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
				error = xlog_recover_items_pass2(log, trans,
						&buffer_list, &ra_list);
				list_splice_tail_init(&ra_list, &done_list);
				items_queued = 0;
			}
			break;
		default:
			ASSERT(0);
		}

		if (error)
			goto out;
	}

out:
	if (!list_empty(&ra_list)) {
		if (!error)
			error = xlog_recover_items_pass2(log, trans,
					&buffer_list, &ra_list);
		list_splice_tail_init(&ra_list, &done_list);
	}

	if (!list_empty(&done_list))
		list_splice_init(&done_list, &trans->r_itemq);

	xlog_recover_free_trans(trans);

	error2 = xfs_buf_delwri_submit(&buffer_list);
	return error ? error : error2;
}

STATIC int
xlog_recover_unmount_trans(
	struct xlog		*log,
	struct xlog_recover	*trans)
{
	/* Do nothing now */
	xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
	return 0;
}

/*
 * There are two valid states of the r_state field. 0 indicates that the
 * transaction structure is in a normal state. We have either seen the
 * start of the transaction or the last operation we added was not a partial
 * operation. If the last operation we added to the transaction was a
 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
 *
 * NOTE: skip LRs with 0 data length.
 */
STATIC int
xlog_recover_process_data(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	xfs_caddr_t		dp,
	int			pass)
{
	xfs_caddr_t		lp;
	int			num_logops;
	xlog_op_header_t	*ohead;
	xlog_recover_t		*trans;
	xlog_tid_t		tid;
	int			error;
	unsigned long		hash;
	uint			flags;

	lp = dp + be32_to_cpu(rhead->h_len);
	num_logops = be32_to_cpu(rhead->h_num_logops);

	/* check the log format matches our own - else we can't recover */
	if (xlog_header_check_recover(log->l_mp, rhead))
		return (XFS_ERROR(EIO));

	while ((dp < lp) && num_logops) {
		ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
		ohead = (xlog_op_header_t *)dp;
		dp += sizeof(xlog_op_header_t);
		if (ohead->oh_clientid != XFS_TRANSACTION &&
		    ohead->oh_clientid != XFS_LOG) {
			xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
					__func__, ohead->oh_clientid);
			ASSERT(0);
			return (XFS_ERROR(EIO));
		}
		tid = be32_to_cpu(ohead->oh_tid);
		hash = XLOG_RHASH(tid);
		trans = xlog_recover_find_tid(&rhash[hash], tid);
		if (trans == NULL) {		/* not found; add new tid */
			if (ohead->oh_flags & XLOG_START_TRANS)
				xlog_recover_new_tid(&rhash[hash], tid,
					be64_to_cpu(rhead->h_lsn));
		} else {
			if (dp + be32_to_cpu(ohead->oh_len) > lp) {
				xfs_warn(log->l_mp, "%s: bad length 0x%x",
					__func__, be32_to_cpu(ohead->oh_len));
				WARN_ON(1);
				return (XFS_ERROR(EIO));
			}
			flags = ohead->oh_flags & ~XLOG_END_TRANS;
			if (flags & XLOG_WAS_CONT_TRANS)
				flags &= ~XLOG_CONTINUE_TRANS;
			switch (flags) {
			case XLOG_COMMIT_TRANS:
				error = xlog_recover_commit_trans(log,
								trans, pass);
				break;
			case XLOG_UNMOUNT_TRANS:
				error = xlog_recover_unmount_trans(log, trans);
				break;
			case XLOG_WAS_CONT_TRANS:
				error = xlog_recover_add_to_cont_trans(log,
						trans, dp,
						be32_to_cpu(ohead->oh_len));
				break;
			case XLOG_START_TRANS:
				xfs_warn(log->l_mp, "%s: bad transaction",
					__func__);
				ASSERT(0);
				error = XFS_ERROR(EIO);
				break;
			case 0:
			case XLOG_CONTINUE_TRANS:
				error = xlog_recover_add_to_trans(log, trans,
						dp, be32_to_cpu(ohead->oh_len));
				break;
			default:
				xfs_warn(log->l_mp, "%s: bad flag 0x%x",
					__func__, flags);
				ASSERT(0);
				error = XFS_ERROR(EIO);
				break;
			}
			if (error)
				return error;
		}
		dp += be32_to_cpu(ohead->oh_len);
		num_logops--;
	}
	return 0;
}

/*
 * Process an extent free intent item that was recovered from
 * the log. We need to free the extents that it describes.
 */
STATIC int
xlog_recover_process_efi(
	xfs_mount_t		*mp,
	xfs_efi_log_item_t	*efip)
{
	xfs_efd_log_item_t	*efdp;
	xfs_trans_t		*tp;
	int			i;
	int			error = 0;
	xfs_extent_t		*extp;
	xfs_fsblock_t		startblock_fsb;

	ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));

	/*
	 * First check the validity of the extents described by the
	 * EFI. If any are bad, then assume that all are bad and
	 * just toss the EFI.
	 */
	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
		extp = &(efip->efi_format.efi_extents[i]);
		startblock_fsb = XFS_BB_TO_FSB(mp,
				   XFS_FSB_TO_DADDR(mp, extp->ext_start));
		if ((startblock_fsb == 0) ||
		    (extp->ext_len == 0) ||
		    (startblock_fsb >= mp->m_sb.sb_dblocks) ||
		    (extp->ext_len >= mp->m_sb.sb_agblocks)) {
			/*
			 * This will pull the EFI from the AIL and
			 * free the memory associated with it.
			 */
			set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
			xfs_efi_release(efip, efip->efi_format.efi_nextents);
			return XFS_ERROR(EIO);
		}
	}

	tp = xfs_trans_alloc(mp, 0);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
	if (error)
		goto abort_error;
	efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);

	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
		extp = &(efip->efi_format.efi_extents[i]);
		error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
		if (error)
			goto abort_error;
		xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
					 extp->ext_len);
	}

	set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
	error = xfs_trans_commit(tp, 0);
	return error;

abort_error:
	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
	return error;
}

/*
 * When this is called, all of the EFIs which did not have
 * corresponding EFDs should be in the AIL. What we do now
 * is free the extents associated with each one.
 *
 * Since we process the EFIs in normal transactions, they
 * will be removed at some point after the commit. This prevents
 * us from just walking down the list processing each one.
 * We'll use a flag in the EFI to skip those that we've already
 * processed and use the AIL iteration mechanism's generation
 * count to try to speed this up at least a bit.
 *
 * When we start, we know that the EFIs are the only things in
 * the AIL. As we process them, however, other items are added
 * to the AIL. Since everything added to the AIL must come after
 * everything already in the AIL, we stop processing as soon as
 * we see something other than an EFI in the AIL.
 */
STATIC int
xlog_recover_process_efis(
	struct xlog	*log)
{
	xfs_log_item_t		*lip;
	xfs_efi_log_item_t	*efip;
	int			error = 0;
	struct xfs_ail_cursor	cur;
	struct xfs_ail		*ailp;

	ailp = log->l_ailp;
	spin_lock(&ailp->xa_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	while (lip != NULL) {
		/*
		 * We're done when we see something other than an EFI.
		 * There should be no EFIs left in the AIL now.
		 */
		if (lip->li_type != XFS_LI_EFI) {
#ifdef DEBUG
			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
				ASSERT(lip->li_type != XFS_LI_EFI);
#endif
			break;
		}

		/*
		 * Skip EFIs that we've already processed.
		 */
		efip = (xfs_efi_log_item_t *)lip;
		if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
			lip = xfs_trans_ail_cursor_next(ailp, &cur);
			continue;
		}

		spin_unlock(&ailp->xa_lock);
		error = xlog_recover_process_efi(log->l_mp, efip);
		spin_lock(&ailp->xa_lock);
		if (error)
			goto out;
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}
out:
	xfs_trans_ail_cursor_done(ailp, &cur);
	spin_unlock(&ailp->xa_lock);
	return error;
}

/*
 * This routine performs a transaction to null out a bad inode pointer
 * in an agi unlinked inode hash bucket.
 */
STATIC void
xlog_recover_clear_agi_bucket(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agno,
	int		bucket)
{
	xfs_trans_t	*tp;
	xfs_agi_t	*agi;
	xfs_buf_t	*agibp;
	int		offset;
	int		error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_clearagi, 0, 0);
	if (error)
		goto out_abort;

	error = xfs_read_agi(mp, tp, agno, &agibp);
	if (error)
		goto out_abort;

	agi = XFS_BUF_TO_AGI(agibp);
	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		 (sizeof(xfs_agino_t) * bucket);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));

	error = xfs_trans_commit(tp, 0);
	if (error)
		goto out_error;
	return;

out_abort:
	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
out_error:
	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
	return;
}

STATIC xfs_agino_t
xlog_recover_process_one_iunlink(
	struct xfs_mount		*mp,
	xfs_agnumber_t			agno,
	xfs_agino_t			agino,
	int				bucket)
{
	struct xfs_buf			*ibp;
	struct xfs_dinode		*dip;
	struct xfs_inode		*ip;
	xfs_ino_t			ino;
	int				error;

	ino = XFS_AGINO_TO_INO(mp, agno, agino);
	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
	if (error)
		goto fail;

	/*
	 * Get the on disk inode to find the next inode in the bucket.
	 */
	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
	if (error)
		goto fail_iput;

	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_mode != 0);

	/* setup for the next pass */
	agino = be32_to_cpu(dip->di_next_unlinked);
	xfs_buf_relse(ibp);

	/*
	 * Prevent any DMAPI event from being sent when the reference on
	 * the inode is dropped.
	 */
	ip->i_d.di_dmevmask = 0;

	IRELE(ip);
	return agino;

fail_iput:
	IRELE(ip);
fail:
	/*
	 * We can't read in the inode this bucket points to, or this inode
	 * is messed up. Just ditch this bucket of inodes. We will lose
	 * some inodes and space, but at least we won't hang.
	 *
	 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
	 * clear the inode pointer in the bucket.
	 */
	xlog_recover_clear_agi_bucket(mp, agno, bucket);
	return NULLAGINO;
}

/*
 * xlog_iunlink_recover
 *
 * This is called during recovery to process any inodes which
 * we unlinked but not freed when the system crashed. These
 * inodes will be on the lists in the AGI blocks. What we do
 * here is scan all the AGIs and fully truncate and free any
 * inodes found on the lists. Each inode is removed from the
 * lists when it has been fully truncated and is freed. The
 * freeing of the inode and its removal from the list must be
 * atomic.
 */
STATIC void
xlog_recover_process_iunlinks(
	struct xlog	*log)
{
	xfs_mount_t	*mp;
	xfs_agnumber_t	agno;
	xfs_agi_t	*agi;
	xfs_buf_t	*agibp;
	xfs_agino_t	agino;
	int		bucket;
	int		error;
	uint		mp_dmevmask;

	mp = log->l_mp;

	/*
	 * Prevent any DMAPI event from being sent while in this function.
	 */
	mp_dmevmask = mp->m_dmevmask;
	mp->m_dmevmask = 0;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		/*
		 * Find the agi for this ag.
		 */
		error = xfs_read_agi(mp, NULL, agno, &agibp);
		if (error) {
			/*
			 * AGI is b0rked. Don't process it.
			 *
			 * We should probably mark the filesystem as corrupt
			 * after we've recovered all the ag's we can....
			 */
			continue;
		}
		/*
		 * Unlock the buffer so that it can be acquired in the normal
		 * course of the transaction to truncate and free each inode.
		 * Because we are not racing with anyone else here for the AGI
		 * buffer, we don't even need to hold it locked to read the
		 * initial unlinked bucket entries out of the buffer. We keep
		 * a buffer reference, though, so that it stays pinned in
		 * memory while we need the buffer.
		 */
		agi = XFS_BUF_TO_AGI(agibp);
		xfs_buf_unlock(agibp);

		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
			while (agino != NULLAGINO) {
				agino = xlog_recover_process_one_iunlink(mp,
							agno, agino, bucket);
			}
		}
		xfs_buf_rele(agibp);
	}

	mp->m_dmevmask = mp_dmevmask;
}

/*
 * Unpack the log buffer data and crc check it. If the check fails, issue a
 * warning if and only if the CRC in the header is non-zero. This makes the
 * check an advisory warning, and the zero CRC check will prevent failure
 * warnings from being emitted when upgrading the kernel from one that does not
 * add CRCs by default.
 *
 * When filesystems are CRC enabled, this CRC mismatch becomes a fatal log
 * corruption failure.
 */
STATIC int
xlog_unpack_data_crc(
	struct xlog_rec_header	*rhead,
	xfs_caddr_t		dp,
	struct xlog		*log)
{
	__le32			crc;

	crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
	if (crc != rhead->h_crc) {
		if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
			xfs_alert(log->l_mp,
		"log record CRC mismatch: found 0x%x, expected 0x%x.\n",
					le32_to_cpu(rhead->h_crc),
					le32_to_cpu(crc));
			xfs_hex_dump(dp, 32);
		}

		/*
		 * If we've detected a log record corruption, then we can't
		 * recover past this point. Abort recovery if we are enforcing
		 * CRC protection by punting an error back up the stack.
		 */
		if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
			return EFSCORRUPTED;
	}

	return 0;
}

STATIC int
xlog_unpack_data(
	struct xlog_rec_header	*rhead,
	xfs_caddr_t		dp,
	struct xlog		*log)
{
	int			i, j, k;
	int			error;

	error = xlog_unpack_data_crc(rhead, dp, log);
	if (error)
		return error;
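
	/*
	 * The log write code stamps each basic block of a record with the
	 * record's cycle number and stashes the block's original first four
	 * bytes in the header's h_cycle_data array (and, for v2 logs with
	 * records larger than one header covers, in the extended headers);
	 * put those bytes back now.
	 */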
	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
		dp += BBSIZE;
	}

	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
			dp += BBSIZE;
		}
	}

	return 0;
}

STATIC int
xlog_valid_rec_header(
	struct xlog		*log,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		blkno)
{
	int			hlen;

	if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
				XFS_ERRLEVEL_LOW, log->l_mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	if (unlikely(
	    (!rhead->h_version ||
	    (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
			__func__, be32_to_cpu(rhead->h_version));
		return XFS_ERROR(EIO);
	}

	/* LR body must have data or it wouldn't have been written */
	hlen = be32_to_cpu(rhead->h_len);
	if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
				XFS_ERRLEVEL_LOW, log->l_mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
				XFS_ERRLEVEL_LOW, log->l_mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}

/*
 * Read the log from tail to head and process the log records found.
 * Handle the two cases where the tail and head are in the same cycle
 * and where the active portion of the log wraps around the end of
 * the physical log separately. The pass parameter is passed through
 * to the routines called to process the data and is not looked at
 * here.
 */
STATIC int
xlog_do_recovery_pass(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			pass)
{
	xlog_rec_header_t	*rhead;
	xfs_daddr_t		blk_no;
	xfs_caddr_t		offset;
	xfs_buf_t		*hbp, *dbp;
	int			error = 0, h_size;
	int			bblks, split_bblks;
	int			hblks, split_hblks, wrapped_hblks;
	struct hlist_head	rhash[XLOG_RHASH_SIZE];

	ASSERT(head_blk != tail_blk);

	/*
	 * Read the header of the tail block and get the iclog buffer size from
	 * h_size. Use this to tell how many sectors make up the log header.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		/*
		 * When using variable length iclogs, read first sector of
		 * iclog header and extract the header size from it. Get a
		 * new hbp that is the correct size.
		 */
		hbp = xlog_get_bp(log, 1);
		if (!hbp)
			return ENOMEM;

		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
		if (error)
			goto bread_err1;

		rhead = (xlog_rec_header_t *)offset;
		error = xlog_valid_rec_header(log, rhead, tail_blk);
		if (error)
			goto bread_err1;
		h_size = be32_to_cpu(rhead->h_size);
		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
			xlog_put_bp(hbp);
			hbp = xlog_get_bp(log, hblks);
		} else {
			hblks = 1;
		}
	} else {
		ASSERT(log->l_sectBBsize == 1);
		hblks = 1;
		hbp = xlog_get_bp(log, 1);
		h_size = XLOG_BIG_RECORD_BSIZE;
	}

	if (!hbp)
		return ENOMEM;
	dbp = xlog_get_bp(log, BTOBB(h_size));
	if (!dbp) {
		xlog_put_bp(hbp);
		return ENOMEM;
	}

	memset(rhash, 0, sizeof(rhash));
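	/*
	 * The simple case: the active log does not wrap, so read the
	 * records sequentially from the tail to the head.
	 */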
	if (tail_blk <= head_blk) {
		for (blk_no = tail_blk; blk_no < head_blk; ) {
			error = xlog_bread(log, blk_no, hblks, hbp, &offset);
			if (error)
				goto bread_err2;

			rhead = (xlog_rec_header_t *)offset;
			error = xlog_valid_rec_header(log, rhead, blk_no);
			if (error)
				goto bread_err2;

			/* blocks in data section */
			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			error = xlog_bread(log, blk_no + hblks, bblks, dbp,
					   &offset);
			if (error)
				goto bread_err2;

			error = xlog_unpack_data(rhead, offset, log);
			if (error)
				goto bread_err2;

			error = xlog_recover_process_data(log,
						rhash, rhead, offset, pass);
			if (error)
				goto bread_err2;
			blk_no += bblks + hblks;
		}
	} else {
		/*
		 * Perform recovery around the end of the physical log.
		 * When the head is not on the same cycle number as the tail,
		 * we can't do a sequential recovery as above.
		 */
		blk_no = tail_blk;
		while (blk_no < log->l_logBBsize) {
			/*
			 * Check for header wrapping around physical end-of-log
			 */
			offset = hbp->b_addr;
			split_hblks = 0;
			wrapped_hblks = 0;
			if (blk_no + hblks <= log->l_logBBsize) {
				/* Read header in one read */
				error = xlog_bread(log, blk_no, hblks, hbp,
						   &offset);
				if (error)
					goto bread_err2;
			} else {
				/* This LR is split across physical log end */
				if (blk_no != log->l_logBBsize) {
					/* some data before physical log end */
					ASSERT(blk_no <= INT_MAX);
					split_hblks = log->l_logBBsize - (int)blk_no;
					ASSERT(split_hblks > 0);
					error = xlog_bread(log, blk_no,
							   split_hblks, hbp,
							   &offset);
					if (error)
						goto bread_err2;
				}

				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header end)
				 *   - order is important.
				 */
				wrapped_hblks = hblks - split_hblks;
				error = xlog_bread_offset(log, 0,
						wrapped_hblks, hbp,
						offset + BBTOB(split_hblks));
				if (error)
					goto bread_err2;
			}
			rhead = (xlog_rec_header_t *)offset;
			error = xlog_valid_rec_header(log, rhead,
						split_hblks ? blk_no : 0);
			if (error)
				goto bread_err2;

			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			blk_no += hblks;

			/* Read in data for log record */
			if (blk_no + bblks <= log->l_logBBsize) {
				error = xlog_bread(log, blk_no, bblks, dbp,
						   &offset);
				if (error)
					goto bread_err2;
			} else {
				/* This log record is split across the
				 * physical end of log */
				offset = dbp->b_addr;
				split_bblks = 0;
				if (blk_no != log->l_logBBsize) {
					/* some data is before the physical
					 * end of log */
					ASSERT(!wrapped_hblks);
					ASSERT(blk_no <= INT_MAX);
					split_bblks =
						log->l_logBBsize - (int)blk_no;
					ASSERT(split_bblks > 0);
					error = xlog_bread(log, blk_no,
							split_bblks, dbp,
							&offset);
					if (error)
						goto bread_err2;
				}

				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header end)
				 *   - order is important.
				 */
				error = xlog_bread_offset(log, 0,
						bblks - split_bblks, dbp,
						offset + BBTOB(split_bblks));
				if (error)
					goto bread_err2;
			}

			error = xlog_unpack_data(rhead, offset, log);
			if (error)
				goto bread_err2;

			error = xlog_recover_process_data(log, rhash,
							rhead, offset, pass);
			if (error)
				goto bread_err2;
			blk_no += bblks;
		}

		ASSERT(blk_no >= log->l_logBBsize);
		blk_no -= log->l_logBBsize;

		/* read first part of physical log */
		while (blk_no < head_blk) {
			error = xlog_bread(log, blk_no, hblks, hbp, &offset);
			if (error)
				goto bread_err2;

			rhead = (xlog_rec_header_t *)offset;
			error = xlog_valid_rec_header(log, rhead, blk_no);
			if (error)
				goto bread_err2;

			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			error = xlog_bread(log, blk_no+hblks, bblks, dbp,
					   &offset);
			if (error)
				goto bread_err2;

			error = xlog_unpack_data(rhead, offset, log);
			if (error)
				goto bread_err2;

			error = xlog_recover_process_data(log, rhash,
							rhead, offset, pass);
			if (error)
				goto bread_err2;
			blk_no += bblks + hblks;
		}
	}

bread_err2:
	xlog_put_bp(dbp);
bread_err1:
	xlog_put_bp(hbp);
	return error;
}

/*
 * Do the recovery of the log. We actually do this in two phases.
 * The two passes are necessary in order to implement the function
 * of cancelling a record written into the log. The first pass
 * determines those things which have been cancelled, and the
 * second pass replays log items normally except for those which
 * have been cancelled. The handling of the replay and cancellations
 * takes place in the log item type specific routines.
 *
 * The table of items which have cancel records in the log is allocated
 * and freed at this level, since only here do we know when all of
 * the log recovery has been completed.
 */
STATIC int
xlog_do_log_recovery(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	int		error, i;

	ASSERT(head_blk != tail_blk);

	/*
	 * First do a pass to find all of the cancelled buf log items.
	 * Store them in the buf_cancel_table for use in the second pass.
	 */
	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
						 sizeof(struct list_head),
						 KM_SLEEP);
	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);

	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
				      XLOG_RECOVER_PASS1);
	if (error != 0) {
		kmem_free(log->l_buf_cancel_table);
		log->l_buf_cancel_table = NULL;
		return error;
	}
	/*
	 * Then do a second pass to actually recover the items in the log.
	 * When it is complete free the table of buf cancel items.
	 */
	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
				      XLOG_RECOVER_PASS2);
#ifdef DEBUG
	if (!error) {
		int	i;

		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
	}
#endif	/* DEBUG */

	kmem_free(log->l_buf_cancel_table);
	log->l_buf_cancel_table = NULL;

	return error;
}

/*
 * Do the actual recovery
 */
STATIC int
xlog_do_recover(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	int		error;
	xfs_buf_t	*bp;
	xfs_sb_t	*sbp;

	/*
	 * First replay the images in the log.
	 */
	error = xlog_do_log_recovery(log, head_blk, tail_blk);
	if (error)
		return error;

	/*
	 * If IO errors happened during recovery, bail out.
	 */
	if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
		return (EIO);
	}

	/*
	 * We now update the tail_lsn since much of the recovery has completed
	 * and there may be space available to use. If there were no extent
	 * or iunlinks, we can free up the entire log and set the tail_lsn to
	 * be the last_sync_lsn. This was set in xlog_find_tail to be the
	 * lsn of the last known good LR on disk. If there are extent frees
	 * or iunlinks they will have some entries in the AIL; so we look at
	 * the AIL to determine how to set the tail_lsn.
	 */
	xlog_assign_tail_lsn(log->l_mp);

	/*
	 * Now that we've finished replaying all buffer and inode
	 * updates, re-read in the superblock and reverify it.
	 */
	bp = xfs_getsb(log->l_mp, 0);
	XFS_BUF_UNDONE(bp);
	ASSERT(!(XFS_BUF_ISWRITE(bp)));
	XFS_BUF_READ(bp);
	XFS_BUF_UNASYNC(bp);
	bp->b_ops = &xfs_sb_buf_ops;
	xfsbdstrat(log->l_mp, bp);
	error = xfs_buf_iowait(bp);
	if (error) {
		xfs_buf_ioerror_alert(bp, __func__);
		ASSERT(0);
		xfs_buf_relse(bp);
		return error;
	}

	/* Convert superblock from on-disk format */
	sbp = &log->l_mp->m_sb;
	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
	ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
	ASSERT(xfs_sb_good_version(sbp));
	xfs_buf_relse(bp);

	/* We've re-read the superblock so re-initialize per-cpu counters */
	xfs_icsb_reinit_counters(log->l_mp);

	xlog_recover_check_summary(log);

	/* Normal transactions can now occur */
	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
	return 0;
}

/*
 * Perform recovery and re-initialize some log variables in xlog_find_tail.
 *
 * Return error or zero.
 */
int
xlog_recover(
	struct xlog	*log)
{
	xfs_daddr_t	head_blk, tail_blk;
	int		error;

	/* find the tail of the log */
	if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
		return error;

	if (tail_blk != head_blk) {
		/* There used to be a comment here:
		 *
		 * disallow recovery on read-only mounts. note -- mount
		 * checks for ENOSPC and turns it into an intelligent
		 * error message.
		 * ...but this is no longer true. Now, unless you specify
		 * NORECOVERY (in which case this function would never be
		 * called), we just go ahead and recover. We do this all
		 * under the vfs layer, so we can get away with it unless
		 * the device itself is read-only, in which case we fail.
		 */
		if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
			return error;
		}

		/*
		 * Version 5 superblock log feature mask validation. We know
		 * the log is dirty so check if there are any unknown log
		 * features in what we need to recover. If there are unknown
		 * features (e.g. unsupported transactions), then simply
		 * reject the attempt at recovery before touching anything.
		 */
		if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
			xfs_warn(log->l_mp,
"Superblock has unknown incompatible log features (0x%x) enabled.\n"
"The log can not be fully and/or safely recovered by this kernel.\n"
"Please recover the log on a kernel that supports the unknown features.",
				(log->l_mp->m_sb.sb_features_log_incompat &
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
			return EINVAL;
		}

		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
				log->l_mp->m_logname ? log->l_mp->m_logname
						     : "internal");

		error = xlog_do_recover(log, head_blk, tail_blk);
		log->l_flags |= XLOG_RECOVERY_NEEDED;
	}
	return error;
}

/*
 * In the first part of recovery we replay inodes and buffers and build
 * up the list of extent free items which need to be processed. Here
 * we process the extent free items and clean up the on disk unlinked
 * inode lists. This is separated from the first part of recovery so
 * that the root and real-time bitmap inodes can be read in from disk in
 * between the two stages. This is necessary so that we can free space
 * in the real-time portion of the file system.
 */
int
xlog_recover_finish(
	struct xlog	*log)
{
	/*
	 * Now we're ready to do the transactions needed for the
	 * rest of recovery. Start with completing all the extent
	 * free intent records and then process the unlinked inode
	 * lists. At this point, we essentially run in normal mode
	 * except that we're still performing recovery actions
	 * rather than accepting new requests.
	 */
	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
		int	error;

		error = xlog_recover_process_efis(log);
		if (error) {
			xfs_alert(log->l_mp, "Failed to recover EFIs");
			return error;
		}
		/*
		 * Sync the log to get all the EFIs out of the AIL.
		 * This isn't absolutely necessary, but it helps in
		 * case the unlink transactions would have problems
		 * pushing the EFIs out of the way.
		 */
		xfs_log_force(log->l_mp, XFS_LOG_SYNC);

		xlog_recover_process_iunlinks(log);

		xlog_recover_check_summary(log);

		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
				log->l_mp->m_logname ? log->l_mp->m_logname
						     : "internal");
		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
	} else {
		xfs_info(log->l_mp, "Ending clean mount");
	}
	return 0;
}

#if defined(DEBUG)
/*
 * Read all of the agf and agi counters and check that they
 * are consistent with the superblock counters.
 */
void
xlog_recover_check_summary(
	struct xlog	*log)
{
	xfs_mount_t	*mp;
	xfs_agf_t	*agfp;
	xfs_buf_t	*agfbp;
	xfs_buf_t	*agibp;
	xfs_agnumber_t	agno;
	__uint64_t	freeblks;
	__uint64_t	itotal;
	__uint64_t	ifree;
	int		error;

	mp = log->l_mp;

	freeblks = 0LL;
	itotal = 0LL;
	ifree = 0LL;
	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
		if (error) {
			xfs_alert(mp, "%s agf read failed agno %d error %d",
						__func__, agno, error);
		} else {
			agfp = XFS_BUF_TO_AGF(agfbp);
			freeblks += be32_to_cpu(agfp->agf_freeblks) +
				    be32_to_cpu(agfp->agf_flcount);
			xfs_buf_relse(agfbp);
		}

		error = xfs_read_agi(mp, NULL, agno, &agibp);
		if (error) {
			xfs_alert(mp, "%s agi read failed agno %d error %d",
						__func__, agno, error);
		} else {
			struct xfs_agi	*agi = XFS_BUF_TO_AGI(agibp);

			itotal += be32_to_cpu(agi->agi_count);
			ifree += be32_to_cpu(agi->agi_freecount);
			xfs_buf_relse(agibp);
		}
	}
}
#endif /* DEBUG */