disk-io.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/crc32c.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <asm/unaligned.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "async-thread.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
				    int read_only);
static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
					     struct btrfs_root *root);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_root *root);
static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages,
					int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
				       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_root *root);
static void btrfs_error_commit_super(struct btrfs_root *root);

/*
 * end_io_wq structs are used to do processing in task context when an IO is
 * complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	int error;
	int metadata;
	struct list_head list;
	struct btrfs_work work;
};

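/*
 * Illustrative sketch (not part of the original source): the life of an
 * end_io_wq.  btrfs_bio_wq_end_io() below saves the bio's original
 * completion state in this struct, end_workqueue_bio() queues the work
 * from IRQ context, and end_workqueue_fn() (declared above, defined later
 * in this file) restores and completes the bio roughly like:
 *
 *	bio->bi_private = end_io_wq->private;
 *	bio->bi_end_io = end_io_wq->end_io;
 *	error = end_io_wq->error;
 *	kfree(end_io_wq);
 *	bio_endio(bio, error);
 */
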
/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
	struct inode *inode;
	struct bio *bio;
	struct list_head list;
	extent_submit_bio_hook_t *submit_bio_start;
	extent_submit_bio_hook_t *submit_bio_done;
	int rw;
	int mirror_num;
	unsigned long bio_flags;
	/*
	 * bio_offset is optional, can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work work;
	int error;
};

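/*
 * Illustrative sketch (not in the original source): btrfs_wq_submit_bio()
 * below wires the three work hooks that run in order on a worker thread
 * for each async_submit_bio:
 *
 *	async->work.func = run_one_async_start;		checksum the bio
 *	async->work.ordered_func = run_one_async_done;	map and submit it
 *	async->work.ordered_free = run_one_async_free;	kfree(async)
 */
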
/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->objectid.  This ensures that all special purpose roots
 * have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

static struct btrfs_lockdep_keyset {
	u64			id;		/* root objectid */
	const char		*name_stem;	/* lock name stem */
	char			names[BTRFS_MAX_LEVEL + 1][20];
	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	},
	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	},
	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	},
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	},
	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	.name_stem = "quota"	},
	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	},
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	},
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	},
	{ .id = BTRFS_UUID_TREE_OBJECTID,	.name_stem = "uuid"	},
	{ .id = 0,				.name_stem = "tree"	},
};

void __init btrfs_init_lockdep(void)
{
	int i, j;

	/* initialize lockdep class names */
	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
			snprintf(ks->names[j], sizeof(ks->names[j]),
				 "btrfs-%s-%02d", ks->name_stem, j);
	}
}

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
				    int level)
{
	struct btrfs_lockdep_keyset *ks;

	BUG_ON(level >= ARRAY_SIZE(ks->keys));

	/* find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock,
				   &ks->keys[level], ks->names[level]);
}

#endif

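/*
 * Usage sketch (illustrative, assuming an eb that belongs to the extent
 * tree at level 1): this picks the "extent" keyset above and names the
 * lock class "btrfs-extent-01" via the snprintf format in
 * btrfs_init_lockdep():
 *
 *	btrfs_set_buffer_lockdep_class(BTRFS_EXTENT_TREE_OBJECTID, eb, 1);
 */
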
/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct inode *inode,
		struct page *page, size_t pg_offset, u64 start, u64 len,
		int create)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev =
			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
		read_unlock(&em_tree->lock);
		goto out;
	}
	read_unlock(&em_tree->lock);

	em = alloc_extent_map();
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_len = (u64)-1;
	em->block_start = 0;
	em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret == -EEXIST) {
		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em)
			em = ERR_PTR(-EIO);
	} else if (ret) {
		free_extent_map(em);
		em = ERR_PTR(ret);
	}
	write_unlock(&em_tree->lock);

out:
	return em;
}

u32 btrfs_csum_data(char *data, u32 seed, size_t len)
{
	return crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
	put_unaligned_le32(~crc, result);
}

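/*
 * Usage sketch (illustrative): checksumming a buffer the way the rest of
 * this file does it, seeding crc32c with all ones and storing the
 * inverted result little-endian.  "data" and "len" are placeholders:
 *
 *	u32 crc = ~(u32)0;
 *	char result[BTRFS_CSUM_SIZE];
 *
 *	crc = btrfs_csum_data(data, crc, len);
 *	btrfs_csum_final(crc, result);
 */
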
/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
			   int verify)
{
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	char *result = NULL;
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;
	unsigned long inline_result;

	len = buf->len - offset;
	while (len > 0) {
		err = map_private_extent_buffer(buf, offset, 32,
					&kaddr, &map_start, &map_len);
		if (err)
			return 1;
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
	}
	if (csum_size > sizeof(inline_result)) {
		result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
		if (!result)
			return 1;
	} else {
		result = (char *)&inline_result;
	}

	btrfs_csum_final(crc, result);

	if (verify) {
		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
			u32 val;
			u32 found = 0;
			memcpy(&found, result, csum_size);

			read_extent_buffer(buf, &val, 0, csum_size);
			printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
				       "failed on %llu wanted %X found %X "
				       "level %d\n",
				       root->fs_info->sb->s_id, buf->start,
				       val, found, btrfs_header_level(buf));
			if (result != (char *)&inline_result)
				kfree(result);
			return 1;
		}
	} else {
		write_extent_buffer(buf, result, 0, csum_size);
	}
	if (result != (char *)&inline_result)
		kfree(result);
	return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid,
				 int atomic)
{
	struct extent_state *cached_state = NULL;
	int ret;

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	if (atomic)
		return -EAGAIN;

	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
			 0, &cached_state);
	if (extent_buffer_uptodate(eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	printk_ratelimited("parent transid verify failed on %llu wanted %llu "
		       "found %llu\n",
		       eb->start, parent_transid, btrfs_header_generation(eb));
	ret = 1;
	clear_extent_buffer_uptodate(eb);
out:
	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
			     &cached_state, GFP_NOFS);
	return ret;
}

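/*
 * Caller sketch (illustrative): contexts that cannot block pass
 * atomic == 1 and must treat -EAGAIN as "try again from a sleepable
 * context"; the blocking retry loop in btree_read_extent_buffer_pages()
 * below passes atomic == 0:
 *
 *	if (!verify_parent_transid(io_tree, eb, parent_transid, 0))
 *		break;	// eb generation matches, safe to use
 */
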
/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(char *raw_disk_sb)
{
	struct btrfs_super_block *disk_sb =
		(struct btrfs_super_block *)raw_disk_sb;
	u16 csum_type = btrfs_super_csum_type(disk_sb);
	int ret = 0;

	if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
		u32 crc = ~(u32)0;
		const int csum_size = sizeof(crc);
		char result[csum_size];

		/*
		 * The super_block structure does not span the whole
		 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
		 * is filled with zeros and is included in the checksum.
		 */
		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, result);

		if (memcmp(raw_disk_sb, result, csum_size))
			ret = 1;

		if (ret && btrfs_super_generation(disk_sb) < 10) {
			printk(KERN_WARNING "btrfs: super block crcs don't match, older mkfs detected\n");
			ret = 0;
		}
	}

	if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
		printk(KERN_ERR "btrfs: unsupported checksum algorithm %u\n",
				csum_type);
		ret = 1;
	}

	return ret;
}

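/*
 * Caller sketch (illustrative, based on how the mount path uses this
 * helper): the raw BTRFS_SUPER_INFO_SIZE buffer read from disk is checked
 * before anything in it is trusted, roughly:
 *
 *	if (btrfs_check_super_csum(bh->b_data)) {
 *		// superblock checksum mismatch, refuse the mount
 *		err = -EINVAL;
 *	}
 */
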
/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
					  struct extent_buffer *eb,
					  u64 start, u64 parent_transid)
{
	struct extent_io_tree *io_tree;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
	while (1) {
		ret = read_extent_buffer_pages(io_tree, eb, start,
					       WAIT_COMPLETE,
					       btree_get_extent, mirror_num);
		if (!ret) {
			if (!verify_parent_transid(io_tree, eb,
						   parent_transid, 0))
				break;
			else
				ret = -EIO;
		}

		/*
		 * This buffer's crc is fine, but its contents are corrupted, so
		 * there is no reason to read the other copies, they won't be
		 * any less wrong.
		 */
		if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
			break;

		num_copies = btrfs_num_copies(root->fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret && failed_mirror)
		repair_eb_io_failure(root, eb, failed_mirror);

	return ret;
}

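/*
 * Mirror numbering sketch (illustrative): mirror_num 0 means "let the
 * chunk layer pick a copy"; real copies are numbered 1..num_copies.
 * With two copies where the first attempt used (and failed on) copy 1,
 * the loop above walks:
 *
 *	mirror_num = 0	read fails, failed_mirror = eb->read_mirror = 1
 *	mirror_num = 1	equals failed_mirror, bump again
 *	mirror_num = 2	last copy; if it also fails, give up
 */
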
/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */
static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
	struct extent_io_tree *tree;
	u64 start = page_offset(page);
	u64 found_start;
	struct extent_buffer *eb;

	tree = &BTRFS_I(page->mapping->host)->io_tree;

	eb = (struct extent_buffer *)page->private;
	if (page != eb->pages[0])
		return 0;
	found_start = btrfs_header_bytenr(eb);
	if (found_start != start) {
		WARN_ON(1);
		return 0;
	}
	if (!PageUptodate(page)) {
		WARN_ON(1);
		return 0;
	}
	csum_tree_block(root, eb, 0);
	return 0;
}

static int check_tree_block_fsid(struct btrfs_root *root,
				 struct extent_buffer *eb)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	u8 fsid[BTRFS_UUID_SIZE];
	int ret = 1;

	read_extent_buffer(eb, fsid, btrfs_header_fsid(eb), BTRFS_FSID_SIZE);
	while (fs_devices) {
		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
			ret = 0;
			break;
		}
		fs_devices = fs_devices->seed;
	}
	return ret;
}

#define CORRUPT(reason, eb, root, slot)				\
	printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu, "	\
	       "root=%llu, slot=%d\n", reason,			\
	       btrfs_header_bytenr(eb), root->objectid, slot)

static noinline int check_leaf(struct btrfs_root *root,
			       struct extent_buffer *leaf)
{
	struct btrfs_key key;
	struct btrfs_key leaf_key;
	u32 nritems = btrfs_header_nritems(leaf);
	int slot;

	if (nritems == 0)
		return 0;

	/* Check the 0 item */
	if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
	    BTRFS_LEAF_DATA_SIZE(root)) {
		CORRUPT("invalid item offset size pair", leaf, root, 0);
		return -EIO;
	}

	/*
	 * Check to make sure each item's keys are in the correct order and
	 * their offsets make sense.  We only have to loop through nritems-1
	 * because we check the current slot against the next slot, which
	 * verifies the next slot's offset+size makes sense and that the
	 * current slot's offset is correct.
	 */
	for (slot = 0; slot < nritems - 1; slot++) {
		btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
		btrfs_item_key_to_cpu(leaf, &key, slot + 1);

		/* Make sure the keys are in the right order */
		if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
			CORRUPT("bad key order", leaf, root, slot);
			return -EIO;
		}

		/*
		 * Make sure the offset and ends are right, remember that the
		 * item data starts at the end of the leaf and grows towards the
		 * front.
		 */
		if (btrfs_item_offset_nr(leaf, slot) !=
			btrfs_item_end_nr(leaf, slot + 1)) {
			CORRUPT("slot offset bad", leaf, root, slot);
			return -EIO;
		}

		/*
		 * Check to make sure that we don't point outside of the leaf,
		 * just in case all the items are consistent with each other,
		 * but all point outside of the leaf.
		 */
		if (btrfs_item_end_nr(leaf, slot) >
		    BTRFS_LEAF_DATA_SIZE(root)) {
			CORRUPT("slot end outside of leaf", leaf, root, slot);
			return -EIO;
		}
	}

	return 0;
}

static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
				      u64 phy_offset, struct page *page,
				      u64 start, u64 end, int mirror)
{
	struct extent_io_tree *tree;
	u64 found_start;
	int found_level;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	int ret = 0;
	int reads_done;

	if (!page->private)
		goto out;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	eb = (struct extent_buffer *)page->private;

	/* the pending IO might have been the only thing that kept this buffer
	 * in memory.  Make sure we have a ref for all these other checks
	 */
	extent_buffer_get(eb);

	reads_done = atomic_dec_and_test(&eb->io_pages);
	if (!reads_done)
		goto err;

	eb->read_mirror = mirror;
	if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
		ret = -EIO;
		goto err;
	}

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		printk_ratelimited(KERN_INFO "btrfs bad tree block start "
			       "%llu %llu\n",
			       found_start, eb->start);
		ret = -EIO;
		goto err;
	}
	if (check_tree_block_fsid(root, eb)) {
		printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
			       eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_info(root->fs_info, "bad tree block level %d\n",
			   (int)btrfs_header_level(eb));
		ret = -EIO;
		goto err;
	}

	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
				       eb, found_level);

	ret = csum_tree_block(root, eb, 1);
	if (ret) {
		ret = -EIO;
		goto err;
	}

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
	 */
	if (found_level == 0 && check_leaf(root, eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (!ret)
		set_extent_buffer_uptodate(eb);
err:
	if (reads_done &&
	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(root, eb, eb->start, ret);

	if (ret) {
		/*
		 * our io error hook is going to dec the io pages
		 * again, we have to make sure it has something
		 * to decrement
		 */
		atomic_inc(&eb->io_pages);
		clear_extent_buffer_uptodate(eb);
	}
	free_extent_buffer(eb);
out:
	return ret;
}

static int btree_io_failed_hook(struct page *page, int failed_mirror)
{
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;

	eb = (struct extent_buffer *)page->private;
	set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
	eb->read_mirror = failed_mirror;
	atomic_dec(&eb->io_pages);
	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(root, eb, eb->start, -EIO);
	return -EIO;	/* we fixed nothing */
}

static void end_workqueue_bio(struct bio *bio, int err)
{
	struct end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;

	fs_info = end_io_wq->info;
	end_io_wq->error = err;
	end_io_wq->work.func = end_workqueue_fn;
	end_io_wq->work.flags = 0;

	if (bio->bi_rw & REQ_WRITE) {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
			btrfs_queue_worker(&fs_info->endio_meta_write_workers,
					   &end_io_wq->work);
		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
			btrfs_queue_worker(&fs_info->endio_freespace_worker,
					   &end_io_wq->work);
		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
			btrfs_queue_worker(&fs_info->endio_raid56_workers,
					   &end_io_wq->work);
		else
			btrfs_queue_worker(&fs_info->endio_write_workers,
					   &end_io_wq->work);
	} else {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
			btrfs_queue_worker(&fs_info->endio_raid56_workers,
					   &end_io_wq->work);
		else if (end_io_wq->metadata)
			btrfs_queue_worker(&fs_info->endio_meta_workers,
					   &end_io_wq->work);
		else
			btrfs_queue_worker(&fs_info->endio_workers,
					   &end_io_wq->work);
	}
}

/*
 * For the metadata arg you want
 *
 * 0 - if data
 * 1 - if normal metadata
 * 2 - if writing to the free space cache area
 * 3 - raid parity work
 */
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			int metadata)
{
	struct end_io_wq *end_io_wq;
	end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
	if (!end_io_wq)
		return -ENOMEM;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->error = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}

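/*
 * Usage sketch (illustrative): the read side of btree_submit_bio_hook()
 * below hooks metadata bios this way, so that csum verification runs in
 * task context after the IO completes rather than in the IRQ handler:
 *
 *	ret = btrfs_bio_wq_end_io(fs_info, bio, 1);	// 1 == metadata
 *	if (ret)
 *		return ret;
 *	ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
 */
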
unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
	unsigned long limit = min_t(unsigned long,
				    info->workers.max_workers,
				    info->fs_devices->open_devices);
	return 256 * limit;
}

static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	int ret;

	async = container_of(work, struct async_submit_bio, work);
	ret = async->submit_bio_start(async->inode, async->rw, async->bio,
				      async->mirror_num, async->bio_flags,
				      async->bio_offset);
	if (ret)
		async->error = ret;
}

static void run_one_async_done(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_submit_bio *async;
	int limit;

	async = container_of(work, struct async_submit_bio, work);
	fs_info = BTRFS_I(async->inode)->root->fs_info;

	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
	    waitqueue_active(&fs_info->async_submit_wait))
		wake_up(&fs_info->async_submit_wait);

	/* If an error occurred we just want to clean up the bio and move on */
	if (async->error) {
		bio_endio(async->bio, async->error);
		return;
	}

	async->submit_bio_done(async->inode, async->rw, async->bio,
			       async->mirror_num, async->bio_flags,
			       async->bio_offset);
}

static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);
	kfree(async);
}

int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
			int rw, struct bio *bio, int mirror_num,
			unsigned long bio_flags,
			u64 bio_offset,
			extent_submit_bio_hook_t *submit_bio_start,
			extent_submit_bio_hook_t *submit_bio_done)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return -ENOMEM;

	async->inode = inode;
	async->rw = rw;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;
	async->submit_bio_done = submit_bio_done;

	async->work.func = run_one_async_start;
	async->work.ordered_func = run_one_async_done;
	async->work.ordered_free = run_one_async_free;

	async->work.flags = 0;
	async->bio_flags = bio_flags;
	async->bio_offset = bio_offset;

	async->error = 0;

	atomic_inc(&fs_info->nr_async_submits);

	if (rw & REQ_SYNC)
		btrfs_set_work_high_prio(&async->work);

	btrfs_queue_worker(&fs_info->workers, &async->work);

	while (atomic_read(&fs_info->async_submit_draining) &&
	       atomic_read(&fs_info->nr_async_submits)) {
		wait_event(fs_info->async_submit_wait,
			   (atomic_read(&fs_info->nr_async_submits) == 0));
	}

	return 0;
}

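/*
 * Caller sketch (illustrative; the real call site is
 * btree_submit_bio_hook() further down): metadata writes are queued here
 * so checksumming spreads across CPUs, with the start/done hooks defined
 * just below:
 *
 *	ret = btrfs_wq_submit_bio(fs_info, inode, rw, bio, mirror_num,
 *				  0, bio_offset,
 *				  __btree_submit_bio_start,
 *				  __btree_submit_bio_done);
 */
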
static int btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int bio_index = 0;
	struct btrfs_root *root;
	int ret = 0;

	WARN_ON(bio->bi_vcnt <= 0);
	while (bio_index < bio->bi_vcnt) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		ret = csum_dirty_buffer(root, bvec->bv_page);
		if (ret)
			break;
		bio_index++;
		bvec++;
	}
	return ret;
}

static int __btree_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags,
				    u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just checksum here; the bio is mapped
	 * and submitted by the _done hook below.
	 */
	return btree_csum_one_bio(bio);
}

static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
				   int mirror_num, unsigned long bio_flags,
				   u64 bio_offset)
{
	int ret;

	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
	if (ret)
		bio_endio(bio, ret);
	return ret;
}

static int check_async_write(struct inode *inode, unsigned long bio_flags)
{
	if (bio_flags & EXTENT_BIO_TREE_LOG)
		return 0;
#ifdef CONFIG_X86
	if (cpu_has_xmm4_2)
		return 0;
#endif
	return 1;
}

static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset)
{
	int async = check_async_write(inode, bio_flags);
	int ret;

	if (!(rw & REQ_WRITE)) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
					  bio, 1);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
				    mirror_num, 0);
	} else if (!async) {
		ret = btree_csum_one_bio(bio);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
				    mirror_num, 0);
	} else {
		/*
		 * kthread helpers are used to submit writes so that
		 * checksumming can happen in parallel across all CPUs
		 */
		ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
					  inode, rw, bio, mirror_num, 0,
					  bio_offset,
					  __btree_submit_bio_start,
					  __btree_submit_bio_done);
	}

	if (ret) {
out_w_error:
		bio_endio(bio, ret);
	}
	return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * we haven't done the locking hook
	 */
	if (PageDirty(page))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
	return migrate_page(mapping, newpage, page, mode);
}
#endif

static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct extent_io_tree *tree;
	struct btrfs_fs_info *fs_info;
	int ret;

	tree = &BTRFS_I(mapping->host)->io_tree;
	if (wbc->sync_mode == WB_SYNC_NONE) {
		if (wbc->for_kupdate)
			return 0;

		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
		ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					     BTRFS_DIRTY_METADATA_THRESH);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;

	return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		printk(KERN_WARNING "btrfs warning page private not zero "
		       "on page %llu\n", (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
	struct extent_buffer *eb;

	BUG_ON(!PagePrivate(page));
	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);
	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
	BUG_ON(!atomic_read(&eb->refs));
	btrfs_assert_tree_locked(eb);
#endif
	return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage	= btree_invalidatepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
	.set_page_dirty = btree_set_page_dirty,
};

int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
			 u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	int ret = 0;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return 0;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, 0, WAIT_NONE, btree_get_extent, 0);
	free_extent_buffer(buf);
	return ret;
}

int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
			 int mirror_num, struct extent_buffer **eb)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
	int ret;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return 0;

	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

	ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
				       btree_get_extent, mirror_num);
	if (ret) {
		free_extent_buffer(buf);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
		free_extent_buffer(buf);
		return -EIO;
	} else if (extent_buffer_uptodate(buf)) {
		*eb = buf;
	} else {
		free_extent_buffer(buf);
	}
	return 0;
}

struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
					    u64 bytenr, u32 blocksize)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;
	eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
				bytenr, blocksize);
	return eb;
}

struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
						   u64 bytenr, u32 blocksize)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;

	eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
				 bytenr, blocksize);
	return eb;
}

int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	return filemap_fdatawait_range(buf->pages[0]->mapping,
				       buf->start, buf->start + buf->len - 1);
}

struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
				      u32 blocksize, u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return NULL;

	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
	if (ret) {
		free_extent_buffer(buf);
		return NULL;
	}
	return buf;
}

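/*
 * Usage sketch (illustrative, assuming a root whose root_item has been
 * read): loading that root's tree node the way open_ctree-style callers
 * do; a NULL return covers both allocation and verification failures:
 *
 *	blocksize = btrfs_level_size(root,
 *				     btrfs_root_level(&root->root_item));
 *	root->node = read_tree_block(root,
 *				     btrfs_root_bytenr(&root->root_item),
 *				     blocksize,
 *				     btrfs_root_generation(&root->root_item));
 */
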
void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      struct extent_buffer *buf)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (btrfs_header_generation(buf) ==
	    fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			__percpu_counter_add(&fs_info->dirty_metadata_bytes,
					     -buf->len,
					     fs_info->dirty_metadata_batch);
			/* ugh, clear_extent_buffer_dirty needs to lock the page */
			btrfs_set_lock_blocking(buf);
			clear_extent_buffer_dirty(buf);
		}
	}
}

static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
			 u32 stripesize, struct btrfs_root *root,
			 struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	root->node = NULL;
	root->commit_root = NULL;
	root->sectorsize = sectorsize;
	root->nodesize = nodesize;
	root->leafsize = leafsize;
	root->stripesize = stripesize;
	root->ref_cows = 0;
	root->track_dirty = 0;
	root->in_radix = 0;
	root->orphan_item_inserted = 0;
	root->orphan_cleanup_state = 0;

	root->objectid = objectid;
	root->last_trans = 0;
	root->highest_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
	root->name = NULL;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
	root->block_rsv = NULL;
	root->orphan_block_rsv = NULL;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->delalloc_inodes);
	INIT_LIST_HEAD(&root->delalloc_root);
	INIT_LIST_HEAD(&root->ordered_extents);
	INIT_LIST_HEAD(&root->ordered_root);
	INIT_LIST_HEAD(&root->logged_list[0]);
	INIT_LIST_HEAD(&root->logged_list[1]);
	spin_lock_init(&root->orphan_lock);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->delalloc_lock);
	spin_lock_init(&root->ordered_extent_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->log_extents_lock[0]);
	spin_lock_init(&root->log_extents_lock[1]);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	atomic_set(&root->orphan_inodes, 0);
	atomic_set(&root->refs, 1);
	root->log_transid = 0;
	root->last_log_commit = 0;
	extent_io_tree_init(&root->dirty_log_pages,
			    fs_info->btree_inode->i_mapping);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	memset(&root->root_kobj, 0, sizeof(root->root_kobj));
	root->defrag_trans_start = fs_info->generation;
	init_completion(&root->kobj_unregister);
	root->defrag_running = 0;
	root->root_key.objectid = objectid;
	root->anon_dev = 0;

	spin_lock_init(&root->root_item_lock);
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);
	if (root)
		root->fs_info = fs_info;
	return root;
}

struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 objectid)
{
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret = 0;
	u64 bytenr;
	uuid_le uuid;

	root = btrfs_alloc_root(fs_info);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, objectid);
	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
				      0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail;
	}

	bytenr = leaf->start;
	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, objectid);
	root->node = leaf;

	write_extent_buffer(leaf, fs_info->fsid, btrfs_header_fsid(leaf),
			    BTRFS_FSID_SIZE);
	write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
			    btrfs_header_chunk_tree_uuid(leaf),
			    BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	root->commit_root = btrfs_root_node(root);
	root->track_dirty = 1;

	root->root_item.flags = 0;
	root->root_item.byte_limit = 0;
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	uuid_le_gen(&uuid);
	memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
	root->root_item.drop_level = 0;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	btrfs_tree_unlock(leaf);

	return root;

fail:
	if (leaf) {
		btrfs_tree_unlock(leaf);
		free_extent_buffer(leaf);
	}
	kfree(root);

	return ERR_PTR(ret);
}


static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct extent_buffer *leaf;

	root = btrfs_alloc_root(fs_info);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, BTRFS_TREE_LOG_OBJECTID);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
	/*
	 * log trees do not get reference counted because they go away
	 * before a real commit is actually done. They do store pointers
	 * to file data extents, and those reference counts still get
	 * updated (along with back refs to the log tree).
	 */
	root->ref_cows = 0;

	leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
				      BTRFS_TREE_LOG_OBJECTID, NULL,
				      0, 0, 0);
	if (IS_ERR(leaf)) {
		kfree(root);
		return ERR_CAST(leaf);
	}

	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
	root->node = leaf;

	write_extent_buffer(root->node, root->fs_info->fsid,
			    btrfs_header_fsid(root->node), BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);
	return root;
}
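
/* allocate the global log root tree and store it in fs_info->log_root_tree */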
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);
	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}
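
/*
 * allocate a log tree for a single subvolume, stamp its root item
 * and attach it to @root for the tree-log code to fill in
 */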
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;

	log_root = alloc_log_tree(trans, root->fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item, root->leafsize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->last_log_commit = 0;
	return 0;
}
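
/*
 * find a root item in the tree of tree roots and read the root's
 * top level node from disk
 */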
static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
					       struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_path *path;
	u64 generation;
	u32 blocksize;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	root = btrfs_alloc_root(fs_info);
	if (!root) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, key->objectid);

	ret = btrfs_find_root(tree_root, key, path,
			      &root->root_item, &root->root_key);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto find_fail;
	}

	generation = btrfs_root_generation(&root->root_item);
	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     blocksize, generation);
	if (!root->node) {
		ret = -ENOMEM;
		goto find_fail;
	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
		ret = -EIO;
		goto read_fail;
	}
	root->commit_root = btrfs_root_node(root);
out:
	btrfs_free_path(path);
	return root;

read_fail:
	free_extent_buffer(root->node);
find_fail:
	kfree(root);
alloc_fail:
	root = ERR_PTR(ret);
	goto out;
}

struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
				      struct btrfs_key *location)
{
	struct btrfs_root *root;

	root = btrfs_read_tree_root(tree_root, location);
	if (IS_ERR(root))
		return root;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		root->ref_cows = 1;
		btrfs_check_and_init_root_item(&root->root_item);
	}

	return root;
}
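
/*
 * finish the in-memory setup of a subvolume root: the free inode
 * caches, their locks and an anonymous bdev for the root's st_dev
 */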
int btrfs_init_fs_root(struct btrfs_root *root)
{
	int ret;

	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
					GFP_NOFS);
	if (!root->free_ino_pinned || !root->free_ino_ctl) {
		ret = -ENOMEM;
		goto fail;
	}

	btrfs_init_free_ino_ctl(root);
	mutex_init(&root->fs_commit_mutex);
	spin_lock_init(&root->cache_lock);
	init_waitqueue_head(&root->cache_wait);

	ret = get_anon_bdev(&root->anon_dev);
	if (ret)
		goto fail;
	return 0;
fail:
	kfree(root->free_ino_ctl);
	kfree(root->free_ino_pinned);
	return ret;
}

static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					       u64 root_id)
{
	struct btrfs_root *root;

	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_id);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return root;
}
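
/* publish a root in the radix tree of live roots, keyed by objectid */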
int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
			 struct btrfs_root *root)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0)
		root->in_radix = 1;
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	return ret;
}
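
/*
 * return the root for @location: either one of the cached special
 * roots, a hit in the radix tree, or a fresh read from disk that is
 * then initialized and inserted into the radix tree
 */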
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     struct btrfs_key *location,
				     bool check_ref)
{
	struct btrfs_root *root;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;
	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
		return fs_info->csum_root;
	if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return fs_info->quota_root ? fs_info->quota_root :
					     ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
		return fs_info->uuid_root ? fs_info->uuid_root :
					    ERR_PTR(-ENOENT);
again:
	root = btrfs_lookup_fs_root(fs_info, location->objectid);
	if (root) {
		if (check_ref && btrfs_root_refs(&root->root_item) == 0)
			return ERR_PTR(-ENOENT);
		return root;
	}

	root = btrfs_read_fs_root(fs_info->tree_root, location);
	if (IS_ERR(root))
		return root;

	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_init_fs_root(root);
	if (ret)
		goto fail;

	ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		root->orphan_item_inserted = 1;

	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret) {
		if (ret == -EEXIST) {
			free_fs_root(root);
			goto again;
		}
		goto fail;
	}
	return root;
fail:
	free_fs_root(root);
	return ERR_PTR(ret);
}

static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;
		bdi = blk_get_backing_dev_info(device->bdev);
		if (bdi && bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/*
 * If this fails, caller must call bdi_destroy() to get rid of the
 * bdi again.
 */
static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
	int err;

	bdi->capabilities = BDI_CAP_MAP_COPY;
	err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
	if (err)
		return err;

	bdi->ra_pages = default_backing_dev_info.ra_pages;
	bdi->congested_fn = btrfs_congested_fn;
	bdi->congested_data = info;
	return 0;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions. This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct end_io_wq *end_io_wq;
	struct btrfs_fs_info *fs_info;
	int error;

	end_io_wq = container_of(work, struct end_io_wq, work);
	bio = end_io_wq->bio;
	fs_info = end_io_wq->info;

	error = end_io_wq->error;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kfree(end_io_wq);
	bio_endio(bio, error);
}
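
/*
 * kthread that runs delayed iputs, cleans up one deleted snapshot at
 * a time and kicks inode defrag, then sleeps until woken again
 */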
static int cleaner_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	int again;

	do {
		again = 0;

		/* Make the cleaner go to sleep early. */
		if (btrfs_need_cleaner_sleep(root))
			goto sleep;

		if (!mutex_trylock(&root->fs_info->cleaner_mutex))
			goto sleep;

		/*
		 * Avoid the problem that we change the status of the fs
		 * during the above check and trylock.
		 */
		if (btrfs_need_cleaner_sleep(root)) {
			mutex_unlock(&root->fs_info->cleaner_mutex);
			goto sleep;
		}

		btrfs_run_delayed_iputs(root);
		again = btrfs_clean_one_deleted_snapshot(root);
		mutex_unlock(&root->fs_info->cleaner_mutex);

		/*
		 * The defragger has dealt with the R/O remount and umount,
		 * needn't do anything special here.
		 */
		btrfs_run_defrag_inodes(root->fs_info);
sleep:
		if (!try_to_freeze() && !again) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}
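
/*
 * kthread that commits the running transaction once it is older than
 * the commit interval, waking the cleaner after each pass
 */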
static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	u64 transid;
	unsigned long now;
	unsigned long delay;
	bool cannot_commit;

	do {
		cannot_commit = false;
		delay = HZ * root->fs_info->commit_interval;
		mutex_lock(&root->fs_info->transaction_kthread_mutex);

		spin_lock(&root->fs_info->trans_lock);
		cur = root->fs_info->running_transaction;
		if (!cur) {
			spin_unlock(&root->fs_info->trans_lock);
			goto sleep;
		}

		now = get_seconds();
		if (cur->state < TRANS_STATE_BLOCKED &&
		    (now < cur->start_time ||
		     now - cur->start_time < root->fs_info->commit_interval)) {
			spin_unlock(&root->fs_info->trans_lock);
			delay = HZ * 5;
			goto sleep;
		}
		transid = cur->transid;
		spin_unlock(&root->fs_info->trans_lock);

		/* If the file system is aborted, this will always fail. */
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT)
				cannot_commit = true;
			goto sleep;
		}
		if (transid == trans->transid) {
			btrfs_commit_transaction(trans, root);
		} else {
			btrfs_end_transaction(trans, root);
		}
sleep:
		wake_up_process(root->fs_info->cleaner_kthread);
		mutex_unlock(&root->fs_info->transaction_kthread_mutex);

		if (!try_to_freeze()) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop() &&
			    (!btrfs_transaction_blocked(root->fs_info) ||
			     cannot_commit))
				schedule_timeout(delay);
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

/*
 * this will find the highest generation in the array of
 * root backups. The index of the highest entry is returned,
 * or -1 if we can't find anything.
 *
 * We check to make sure the array is valid by comparing the
 * generation of the latest root in the array with the generation
 * in the super block. If they don't match we pitch it.
 */
static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
{
	u64 cur;
	int newest_index = -1;
	struct btrfs_root_backup *root_backup;
	int i;

	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
		root_backup = info->super_copy->super_roots + i;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = i;
	}

	/* check to see if we actually wrapped around */
	if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
		root_backup = info->super_copy->super_roots;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = 0;
	}
	return newest_index;
}

/*
 * find the oldest backup so we know where to store new entries
 * in the backup array. This will set the backup_root_index
 * field in the fs_info struct
 */
static void find_oldest_super_backup(struct btrfs_fs_info *info,
				     u64 newest_gen)
{
	int newest_index = -1;

	newest_index = find_newest_super_backup(info, newest_gen);

	/* if there was garbage in there, just move along */
	if (newest_index == -1) {
		info->backup_root_index = 0;
	} else {
		info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
	}
}

/*
 * copy all the root pointers into the super backup array.
 * this will bump the backup pointer by one when it is
 * done
 */
static void backup_super_roots(struct btrfs_fs_info *info)
{
	int next_backup;
	struct btrfs_root_backup *root_backup;
	int last_backup;

	next_backup = info->backup_root_index;
	last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
		BTRFS_NUM_BACKUP_ROOTS;

	/*
	 * just overwrite the last backup if we're at the same generation;
	 * this happens only at umount
	 */
	root_backup = info->super_for_commit->super_roots + last_backup;
	if (btrfs_backup_tree_root_gen(root_backup) ==
	    btrfs_header_generation(info->tree_root->node))
		next_backup = last_backup;

	root_backup = info->super_for_commit->super_roots + next_backup;

	/*
	 * make sure all of our padding and empty slots get zero filled
	 * regardless of which ones we use today
	 */
	memset(root_backup, 0, sizeof(*root_backup));

	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;

	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
	btrfs_set_backup_tree_root_gen(root_backup,
			       btrfs_header_generation(info->tree_root->node));
	btrfs_set_backup_tree_root_level(root_backup,
			       btrfs_header_level(info->tree_root->node));

	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
	btrfs_set_backup_chunk_root_gen(root_backup,
			       btrfs_header_generation(info->chunk_root->node));
	btrfs_set_backup_chunk_root_level(root_backup,
			       btrfs_header_level(info->chunk_root->node));

	btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
	btrfs_set_backup_extent_root_gen(root_backup,
			       btrfs_header_generation(info->extent_root->node));
	btrfs_set_backup_extent_root_level(root_backup,
			       btrfs_header_level(info->extent_root->node));

	/*
	 * we might commit during log recovery, which happens before we set
	 * the fs_root. Make sure it is valid before we fill it in.
	 */
	if (info->fs_root && info->fs_root->node) {
		btrfs_set_backup_fs_root(root_backup,
					 info->fs_root->node->start);
		btrfs_set_backup_fs_root_gen(root_backup,
			       btrfs_header_generation(info->fs_root->node));
		btrfs_set_backup_fs_root_level(root_backup,
			       btrfs_header_level(info->fs_root->node));
	}

	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
	btrfs_set_backup_dev_root_gen(root_backup,
			       btrfs_header_generation(info->dev_root->node));
	btrfs_set_backup_dev_root_level(root_backup,
			       btrfs_header_level(info->dev_root->node));

	btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
	btrfs_set_backup_csum_root_gen(root_backup,
			       btrfs_header_generation(info->csum_root->node));
	btrfs_set_backup_csum_root_level(root_backup,
			       btrfs_header_level(info->csum_root->node));

	btrfs_set_backup_total_bytes(root_backup,
			     btrfs_super_total_bytes(info->super_copy));
	btrfs_set_backup_bytes_used(root_backup,
			     btrfs_super_bytes_used(info->super_copy));
	btrfs_set_backup_num_devices(root_backup,
			     btrfs_super_num_devices(info->super_copy));

	/*
	 * if we don't copy this out to the super_copy, it won't get remembered
	 * for the next commit
	 */
	memcpy(&info->super_copy->super_roots,
	       &info->super_for_commit->super_roots,
	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
}

/*
 * this copies info out of the root backup array and back into
 * the in-memory super block. It is meant to help iterate through
 * the array, so you send it the number of backups you've already
 * tried and the last backup index you used.
 *
 * this returns -1 when it has tried all the backups
 */
static noinline int next_root_backup(struct btrfs_fs_info *info,
				     struct btrfs_super_block *super,
				     int *num_backups_tried, int *backup_index)
{
	struct btrfs_root_backup *root_backup;
	int newest = *backup_index;

	if (*num_backups_tried == 0) {
		u64 gen = btrfs_super_generation(super);

		newest = find_newest_super_backup(info, gen);
		if (newest == -1)
			return -1;

		*backup_index = newest;
		*num_backups_tried = 1;
	} else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
		/* we've tried all the backups, all done */
		return -1;
	} else {
		/* jump to the next oldest backup */
		newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
			BTRFS_NUM_BACKUP_ROOTS;
		*backup_index = newest;
		*num_backups_tried += 1;
	}
	root_backup = super->super_roots + newest;

	btrfs_set_super_generation(super,
				   btrfs_backup_tree_root_gen(root_backup));
	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
	btrfs_set_super_root_level(super,
				   btrfs_backup_tree_root_level(root_backup));
	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));

	/*
	 * fixme: the total bytes and num_devices need to match or we need
	 * a fsck
	 */
	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));

	return 0;
}

/* helper to cleanup workers */
static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
{
	btrfs_stop_workers(&fs_info->generic_worker);
	btrfs_stop_workers(&fs_info->fixup_workers);
	btrfs_stop_workers(&fs_info->delalloc_workers);
	btrfs_stop_workers(&fs_info->workers);
	btrfs_stop_workers(&fs_info->endio_workers);
	btrfs_stop_workers(&fs_info->endio_meta_workers);
	btrfs_stop_workers(&fs_info->endio_raid56_workers);
	btrfs_stop_workers(&fs_info->rmw_workers);
	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
	btrfs_stop_workers(&fs_info->endio_write_workers);
	btrfs_stop_workers(&fs_info->endio_freespace_worker);
	btrfs_stop_workers(&fs_info->submit_workers);
	btrfs_stop_workers(&fs_info->delayed_workers);
	btrfs_stop_workers(&fs_info->caching_workers);
	btrfs_stop_workers(&fs_info->readahead_workers);
	btrfs_stop_workers(&fs_info->flush_workers);
	btrfs_stop_workers(&fs_info->qgroup_rescan_workers);
}

/* helper to cleanup tree roots */
static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
{
	free_extent_buffer(info->tree_root->node);
	free_extent_buffer(info->tree_root->commit_root);
	info->tree_root->node = NULL;
	info->tree_root->commit_root = NULL;

	if (info->dev_root) {
		free_extent_buffer(info->dev_root->node);
		free_extent_buffer(info->dev_root->commit_root);
		info->dev_root->node = NULL;
		info->dev_root->commit_root = NULL;
	}
	if (info->extent_root) {
		free_extent_buffer(info->extent_root->node);
		free_extent_buffer(info->extent_root->commit_root);
		info->extent_root->node = NULL;
		info->extent_root->commit_root = NULL;
	}
	if (info->csum_root) {
		free_extent_buffer(info->csum_root->node);
		free_extent_buffer(info->csum_root->commit_root);
		info->csum_root->node = NULL;
		info->csum_root->commit_root = NULL;
	}
	if (info->quota_root) {
		free_extent_buffer(info->quota_root->node);
		free_extent_buffer(info->quota_root->commit_root);
		info->quota_root->node = NULL;
		info->quota_root->commit_root = NULL;
	}
	if (info->uuid_root) {
		free_extent_buffer(info->uuid_root->node);
		free_extent_buffer(info->uuid_root->commit_root);
		info->uuid_root->node = NULL;
		info->uuid_root->commit_root = NULL;
	}
	if (chunk_root) {
		free_extent_buffer(info->chunk_root->node);
		free_extent_buffer(info->chunk_root->commit_root);
		info->chunk_root->node = NULL;
		info->chunk_root->commit_root = NULL;
	}
}
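
/* drop every root still tracked in dead_roots or in the radix tree */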
static void del_fs_roots(struct btrfs_fs_info *fs_info)
{
	int ret;
	struct btrfs_root *gang[8];
	int i;

	while (!list_empty(&fs_info->dead_roots)) {
		gang[0] = list_entry(fs_info->dead_roots.next,
				     struct btrfs_root, root_list);
		list_del(&gang[0]->root_list);

		if (gang[0]->in_radix) {
			btrfs_drop_and_free_fs_root(fs_info, gang[0]);
		} else {
			free_extent_buffer(gang[0]->node);
			free_extent_buffer(gang[0]->commit_root);
			btrfs_put_fs_root(gang[0]);
		}
	}

	while (1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, 0,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++)
			btrfs_drop_and_free_fs_root(fs_info, gang[i]);
	}
}
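
/*
 * mount entry point: read and verify the super block, start the
 * helper kthreads and worker pools, read all the tree roots and
 * replay the tree log if one is present
 */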
int open_ctree(struct super_block *sb,
	       struct btrfs_fs_devices *fs_devices,
	       char *options)
{
	u32 sectorsize;
	u32 nodesize;
	u32 leafsize;
	u32 blocksize;
	u32 stripesize;
	u64 generation;
	u64 features;
	struct btrfs_key location;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
	struct btrfs_root *tree_root;
	struct btrfs_root *extent_root;
	struct btrfs_root *csum_root;
	struct btrfs_root *chunk_root;
	struct btrfs_root *dev_root;
	struct btrfs_root *quota_root;
	struct btrfs_root *uuid_root;
	struct btrfs_root *log_tree_root;
	int ret;
	int err = -EINVAL;
	int num_backups_tried = 0;
	int backup_index = 0;
	bool create_uuid_tree;
	bool check_uuid_tree;

	tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
	chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
	if (!tree_root || !chunk_root) {
		err = -ENOMEM;
		goto fail;
	}

	ret = init_srcu_struct(&fs_info->subvol_srcu);
	if (ret) {
		err = ret;
		goto fail;
	}

	ret = setup_bdi(fs_info, &fs_info->bdi);
	if (ret) {
		err = ret;
		goto fail_srcu;
	}

	ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0);
	if (ret) {
		err = ret;
		goto fail_bdi;
	}
	fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
					(1 + ilog2(nr_cpu_ids));

	ret = percpu_counter_init(&fs_info->delalloc_bytes, 0);
	if (ret) {
		err = ret;
		goto fail_dirty_metadata_bytes;
	}

	fs_info->btree_inode = new_inode(sb);
	if (!fs_info->btree_inode) {
		err = -ENOMEM;
		goto fail_delalloc_bytes;
	}

	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);

	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
	INIT_LIST_HEAD(&fs_info->trans_list);
	INIT_LIST_HEAD(&fs_info->dead_roots);
	INIT_LIST_HEAD(&fs_info->delayed_iputs);
	INIT_LIST_HEAD(&fs_info->delalloc_roots);
	INIT_LIST_HEAD(&fs_info->caching_block_groups);
	spin_lock_init(&fs_info->delalloc_root_lock);
	spin_lock_init(&fs_info->trans_lock);
	spin_lock_init(&fs_info->fs_roots_radix_lock);
	spin_lock_init(&fs_info->delayed_iput_lock);
	spin_lock_init(&fs_info->defrag_inodes_lock);
	spin_lock_init(&fs_info->free_chunk_lock);
	spin_lock_init(&fs_info->tree_mod_seq_lock);
	spin_lock_init(&fs_info->super_lock);
	rwlock_init(&fs_info->tree_mod_log_lock);
	mutex_init(&fs_info->reloc_mutex);
	seqlock_init(&fs_info->profiles_lock);

	init_completion(&fs_info->kobj_unregister);
	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
	INIT_LIST_HEAD(&fs_info->space_info);
	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
	btrfs_mapping_init(&fs_info->mapping_tree);
	btrfs_init_block_rsv(&fs_info->global_block_rsv,
			     BTRFS_BLOCK_RSV_GLOBAL);
	btrfs_init_block_rsv(&fs_info->delalloc_block_rsv,
			     BTRFS_BLOCK_RSV_DELALLOC);
	btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
	btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
	btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
	btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
			     BTRFS_BLOCK_RSV_DELOPS);
	atomic_set(&fs_info->nr_async_submits, 0);
	atomic_set(&fs_info->async_delalloc_pages, 0);
	atomic_set(&fs_info->async_submit_draining, 0);
	atomic_set(&fs_info->nr_async_bios, 0);
	atomic_set(&fs_info->defrag_running, 0);
	atomic64_set(&fs_info->tree_mod_seq, 0);
	fs_info->sb = sb;
	fs_info->max_inline = 8192 * 1024;
	fs_info->metadata_ratio = 0;
	fs_info->defrag_inodes = RB_ROOT;
	fs_info->free_chunk_space = 0;
	fs_info->tree_mod_log = RB_ROOT;
	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;

	/* readahead state */
	INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
	spin_lock_init(&fs_info->reada_lock);

	fs_info->thread_pool_size = min_t(unsigned long,
					  num_online_cpus() + 2, 8);

	INIT_LIST_HEAD(&fs_info->ordered_roots);
	spin_lock_init(&fs_info->ordered_root_lock);
	fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
					GFP_NOFS);
	if (!fs_info->delayed_root) {
		err = -ENOMEM;
		goto fail_iput;
	}
	btrfs_init_delayed_root(fs_info->delayed_root);

	mutex_init(&fs_info->scrub_lock);
	atomic_set(&fs_info->scrubs_running, 0);
	atomic_set(&fs_info->scrub_pause_req, 0);
	atomic_set(&fs_info->scrubs_paused, 0);
	atomic_set(&fs_info->scrub_cancel_req, 0);
	init_waitqueue_head(&fs_info->scrub_pause_wait);
	init_rwsem(&fs_info->scrub_super_lock);
	fs_info->scrub_workers_refcnt = 0;
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	fs_info->check_integrity_print_mask = 0;
#endif

	spin_lock_init(&fs_info->balance_lock);
	mutex_init(&fs_info->balance_mutex);
	atomic_set(&fs_info->balance_running, 0);
	atomic_set(&fs_info->balance_pause_req, 0);
	atomic_set(&fs_info->balance_cancel_req, 0);
	fs_info->balance_ctl = NULL;
	init_waitqueue_head(&fs_info->balance_wait_q);

	sb->s_blocksize = 4096;
	sb->s_blocksize_bits = blksize_bits(4096);
	sb->s_bdi = &fs_info->bdi;

	fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
	set_nlink(fs_info->btree_inode, 1);
	/*
	 * we set the i_size on the btree inode to the max possible int.
	 * the real end of the address space is determined by all of
	 * the devices in the system
	 */
	fs_info->btree_inode->i_size = OFFSET_MAX;
	fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
	fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;

	RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
	extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
			    fs_info->btree_inode->i_mapping);
	BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
	extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);

	BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;

	BTRFS_I(fs_info->btree_inode)->root = tree_root;
	memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
	       sizeof(struct btrfs_key));
	set_bit(BTRFS_INODE_DUMMY,
		&BTRFS_I(fs_info->btree_inode)->runtime_flags);
	insert_inode_hash(fs_info->btree_inode);

	spin_lock_init(&fs_info->block_group_cache_lock);
	fs_info->block_group_cache_tree = RB_ROOT;
	fs_info->first_logical_byte = (u64)-1;

	extent_io_tree_init(&fs_info->freed_extents[0],
			    fs_info->btree_inode->i_mapping);
	extent_io_tree_init(&fs_info->freed_extents[1],
			    fs_info->btree_inode->i_mapping);
	fs_info->pinned_extents = &fs_info->freed_extents[0];
	fs_info->do_barriers = 1;

	mutex_init(&fs_info->ordered_operations_mutex);
	mutex_init(&fs_info->ordered_extent_flush_mutex);
	mutex_init(&fs_info->tree_log_mutex);
	mutex_init(&fs_info->chunk_mutex);
	mutex_init(&fs_info->transaction_kthread_mutex);
	mutex_init(&fs_info->cleaner_mutex);
	mutex_init(&fs_info->volume_mutex);
	init_rwsem(&fs_info->extent_commit_sem);
	init_rwsem(&fs_info->cleanup_work_sem);
	init_rwsem(&fs_info->subvol_sem);
	sema_init(&fs_info->uuid_tree_rescan_sem, 1);
	fs_info->dev_replace.lock_owner = 0;
	atomic_set(&fs_info->dev_replace.nesting_level, 0);
	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
	mutex_init(&fs_info->dev_replace.lock_management_lock);
	mutex_init(&fs_info->dev_replace.lock);

	spin_lock_init(&fs_info->qgroup_lock);
	mutex_init(&fs_info->qgroup_ioctl_lock);
	fs_info->qgroup_tree = RB_ROOT;
	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
	fs_info->qgroup_seq = 1;
	fs_info->quota_enabled = 0;
	fs_info->pending_quota_state = 0;
	fs_info->qgroup_ulist = NULL;
	mutex_init(&fs_info->qgroup_rescan_lock);

	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);

	init_waitqueue_head(&fs_info->transaction_throttle);
	init_waitqueue_head(&fs_info->transaction_wait);
	init_waitqueue_head(&fs_info->transaction_blocked_wait);
	init_waitqueue_head(&fs_info->async_submit_wait);

	ret = btrfs_alloc_stripe_hash_table(fs_info);
	if (ret) {
		err = ret;
		goto fail_alloc;
	}

	__setup_root(4096, 4096, 4096, 4096, tree_root,
		     fs_info, BTRFS_ROOT_TREE_OBJECTID);

	invalidate_bdev(fs_devices->latest_bdev);

	/*
	 * Read super block and check the signature bytes only
	 */
	bh = btrfs_read_dev_super(fs_devices->latest_bdev);
	if (!bh) {
		err = -EINVAL;
		goto fail_alloc;
	}

	/*
	 * We want to check superblock checksum, the type is stored inside.
	 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
	 */
	if (btrfs_check_super_csum(bh->b_data)) {
		printk(KERN_ERR "btrfs: superblock checksum mismatch\n");
		err = -EINVAL;
		goto fail_alloc;
	}

	/*
	 * super_copy is zeroed at allocation time and we never touch the
	 * following bytes up to INFO_SIZE, the checksum is calculated from
	 * the whole block of INFO_SIZE
	 */
	memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
	memcpy(fs_info->super_for_commit, fs_info->super_copy,
	       sizeof(*fs_info->super_for_commit));
	brelse(bh);

	memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);

	ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
	if (ret) {
		printk(KERN_ERR "btrfs: superblock contains fatal errors\n");
		err = -EINVAL;
		goto fail_alloc;
	}

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		goto fail_alloc;

	/* check FS state, whether FS is broken. */
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
		set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);

	/*
	 * run through our array of backup supers and setup
	 * our ring pointer to the oldest one
	 */
	generation = btrfs_super_generation(disk_super);
	find_oldest_super_backup(fs_info, generation);

	/*
	 * In the long term, we'll store the compression type in the super
	 * block, and it'll be used for per file compression control.
	 */
	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;

	ret = btrfs_parse_options(tree_root, options);
	if (ret) {
		err = ret;
		goto fail_alloc;
	}

	features = btrfs_super_incompat_flags(disk_super) &
		~BTRFS_FEATURE_INCOMPAT_SUPP;
	if (features) {
		printk(KERN_ERR "BTRFS: couldn't mount because of "
		       "unsupported optional features (%Lx).\n",
		       features);
		err = -EINVAL;
		goto fail_alloc;
	}

	if (btrfs_super_leafsize(disk_super) !=
	    btrfs_super_nodesize(disk_super)) {
		printk(KERN_ERR "BTRFS: couldn't mount because metadata "
		       "blocksizes don't match. node %d leaf %d\n",
		       btrfs_super_nodesize(disk_super),
		       btrfs_super_leafsize(disk_super));
		err = -EINVAL;
		goto fail_alloc;
	}
	if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
		printk(KERN_ERR "BTRFS: couldn't mount because metadata "
		       "blocksize (%d) was too large\n",
		       btrfs_super_leafsize(disk_super));
		err = -EINVAL;
		goto fail_alloc;
	}

	features = btrfs_super_incompat_flags(disk_super);
	features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
	if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;

	if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
		printk(KERN_ERR "btrfs: has skinny extents\n");

	/*
	 * flag our filesystem as having big metadata blocks if
	 * they are bigger than the page size
	 */
	if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
		if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
			printk(KERN_INFO "btrfs flagging fs with big metadata feature\n");
		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
	}

	nodesize = btrfs_super_nodesize(disk_super);
	leafsize = btrfs_super_leafsize(disk_super);
	sectorsize = btrfs_super_sectorsize(disk_super);
	stripesize = btrfs_super_stripesize(disk_super);
	fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids));
	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));

	/*
	 * mixed block groups end up with duplicate but slightly offset
	 * extent buffers for the same range. It leads to corruptions
	 */
	if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
	    (sectorsize != leafsize)) {
		printk(KERN_WARNING "btrfs: unequal leaf/node/sector sizes "
				"are not allowed for mixed block groups on %s\n",
				sb->s_id);
		goto fail_alloc;
	}

	/*
	 * Needn't use the lock because there is no other task which will
	 * update the flag.
	 */
	btrfs_set_super_incompat_flags(disk_super, features);

	features = btrfs_super_compat_ro_flags(disk_super) &
		~BTRFS_FEATURE_COMPAT_RO_SUPP;
	if (!(sb->s_flags & MS_RDONLY) && features) {
		printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
		       "unsupported option features (%Lx).\n",
		       features);
		err = -EINVAL;
		goto fail_alloc;
	}

	btrfs_init_workers(&fs_info->generic_worker,
			   "genwork", 1, NULL);

	btrfs_init_workers(&fs_info->workers, "worker",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);

	btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
			   fs_info->thread_pool_size, NULL);

	btrfs_init_workers(&fs_info->flush_workers, "flush_delalloc",
			   fs_info->thread_pool_size, NULL);

	btrfs_init_workers(&fs_info->submit_workers, "submit",
			   min_t(u64, fs_devices->num_devices,
			   fs_info->thread_pool_size), NULL);

	btrfs_init_workers(&fs_info->caching_workers, "cache",
			   fs_info->thread_pool_size, NULL);

	/* a higher idle thresh on the submit workers makes it much more
	 * likely that bios will be sent down in a sane order to the
	 * devices
	 */
	fs_info->submit_workers.idle_thresh = 64;

	fs_info->workers.idle_thresh = 16;
	fs_info->workers.ordered = 1;

	fs_info->delalloc_workers.idle_thresh = 2;
	fs_info->delalloc_workers.ordered = 1;

	btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_workers, "endio",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_meta_write_workers,
			   "endio-meta-write", fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_raid56_workers,
			   "endio-raid56", fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->rmw_workers,
			   "rmw", fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
			   1, &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->readahead_workers, "readahead",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->qgroup_rescan_workers, "qgroup-rescan", 1,
			   &fs_info->generic_worker);

	/*
	 * endios are largely parallel and should have a very
	 * low idle thresh
	 */
	fs_info->endio_workers.idle_thresh = 4;
	fs_info->endio_meta_workers.idle_thresh = 4;
	fs_info->endio_raid56_workers.idle_thresh = 4;
	fs_info->rmw_workers.idle_thresh = 2;

	fs_info->endio_write_workers.idle_thresh = 2;
	fs_info->endio_meta_write_workers.idle_thresh = 2;
	fs_info->readahead_workers.idle_thresh = 2;

	/*
	 * btrfs_start_workers can really only fail because of ENOMEM so just
	 * return -ENOMEM if any of these fail.
	 */
	ret = btrfs_start_workers(&fs_info->workers);
	ret |= btrfs_start_workers(&fs_info->generic_worker);
	ret |= btrfs_start_workers(&fs_info->submit_workers);
	ret |= btrfs_start_workers(&fs_info->delalloc_workers);
	ret |= btrfs_start_workers(&fs_info->fixup_workers);
	ret |= btrfs_start_workers(&fs_info->endio_workers);
	ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
	ret |= btrfs_start_workers(&fs_info->rmw_workers);
	ret |= btrfs_start_workers(&fs_info->endio_raid56_workers);
	ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
	ret |= btrfs_start_workers(&fs_info->endio_write_workers);
	ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
	ret |= btrfs_start_workers(&fs_info->delayed_workers);
	ret |= btrfs_start_workers(&fs_info->caching_workers);
	ret |= btrfs_start_workers(&fs_info->readahead_workers);
	ret |= btrfs_start_workers(&fs_info->flush_workers);
	ret |= btrfs_start_workers(&fs_info->qgroup_rescan_workers);
	if (ret) {
		err = -ENOMEM;
		goto fail_sb_buffer;
	}

	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
				    4 * 1024 * 1024 / PAGE_CACHE_SIZE);

	tree_root->nodesize = nodesize;
	tree_root->leafsize = leafsize;
	tree_root->sectorsize = sectorsize;
	tree_root->stripesize = stripesize;

	sb->s_blocksize = sectorsize;
	sb->s_blocksize_bits = blksize_bits(sectorsize);

	if (btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
		printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
		goto fail_sb_buffer;
	}

	if (sectorsize != PAGE_SIZE) {
		printk(KERN_WARNING "btrfs: Incompatible sector size(%lu) "
		       "found on %s\n", (unsigned long)sectorsize, sb->s_id);
		goto fail_sb_buffer;
	}

	mutex_lock(&fs_info->chunk_mutex);
	ret = btrfs_read_sys_array(tree_root);
	mutex_unlock(&fs_info->chunk_mutex);
	if (ret) {
		printk(KERN_WARNING "btrfs: failed to read the system "
		       "array on %s\n", sb->s_id);
		goto fail_sb_buffer;
	}

	blocksize = btrfs_level_size(tree_root,
				     btrfs_super_chunk_root_level(disk_super));
	generation = btrfs_super_chunk_root_generation(disk_super);

	__setup_root(nodesize, leafsize, sectorsize, stripesize,
		     chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);

	chunk_root->node = read_tree_block(chunk_root,
					   btrfs_super_chunk_root(disk_super),
					   blocksize, generation);
	if (!chunk_root->node ||
	    !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
		printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
		       sb->s_id);
		goto fail_tree_roots;
	}
	btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
	chunk_root->commit_root = btrfs_root_node(chunk_root);

	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
	   btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE);

	ret = btrfs_read_chunk_tree(chunk_root);
	if (ret) {
		printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
		       sb->s_id);
		goto fail_tree_roots;
	}

	/*
	 * keep the device that is marked to be the target device for the
	 * dev_replace procedure
	 */
	btrfs_close_extra_devices(fs_info, fs_devices, 0);

	if (!fs_devices->latest_bdev) {
		printk(KERN_CRIT "btrfs: failed to read devices on %s\n",
		       sb->s_id);
		goto fail_tree_roots;
	}

retry_root_backup:
	blocksize = btrfs_level_size(tree_root,
				     btrfs_super_root_level(disk_super));
	generation = btrfs_super_generation(disk_super);

	tree_root->node = read_tree_block(tree_root,
					  btrfs_super_root(disk_super),
					  blocksize, generation);
	if (!tree_root->node ||
	    !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
		printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
		       sb->s_id);
		goto recovery_tree_root;
	}

	btrfs_set_root_node(&tree_root->root_item, tree_root->node);
	tree_root->commit_root = btrfs_root_node(tree_root);

	location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
	location.type = BTRFS_ROOT_ITEM_KEY;
	location.offset = 0;

	extent_root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(extent_root)) {
		ret = PTR_ERR(extent_root);
		goto recovery_tree_root;
	}
	extent_root->track_dirty = 1;
	fs_info->extent_root = extent_root;

	location.objectid = BTRFS_DEV_TREE_OBJECTID;
	dev_root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(dev_root)) {
		ret = PTR_ERR(dev_root);
		goto recovery_tree_root;
	}
	dev_root->track_dirty = 1;
	fs_info->dev_root = dev_root;
	btrfs_init_devices_late(fs_info);

	location.objectid = BTRFS_CSUM_TREE_OBJECTID;
	csum_root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(csum_root)) {
		ret = PTR_ERR(csum_root);
		goto recovery_tree_root;
	}
	csum_root->track_dirty = 1;
	fs_info->csum_root = csum_root;

	location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
	quota_root = btrfs_read_tree_root(tree_root, &location);
	if (!IS_ERR(quota_root)) {
		quota_root->track_dirty = 1;
		fs_info->quota_enabled = 1;
		fs_info->pending_quota_state = 1;
		fs_info->quota_root = quota_root;
	}

	location.objectid = BTRFS_UUID_TREE_OBJECTID;
	uuid_root = btrfs_read_tree_root(tree_root, &location);
	if (IS_ERR(uuid_root)) {
		ret = PTR_ERR(uuid_root);
		if (ret != -ENOENT)
			goto recovery_tree_root;
		create_uuid_tree = true;
		check_uuid_tree = false;
	} else {
		uuid_root->track_dirty = 1;
		fs_info->uuid_root = uuid_root;
		create_uuid_tree = false;
		check_uuid_tree =
		    generation != btrfs_super_uuid_tree_generation(disk_super);
	}

	fs_info->generation = generation;
	fs_info->last_trans_committed = generation;

	ret = btrfs_recover_balance(fs_info);
	if (ret) {
		printk(KERN_WARNING "btrfs: failed to recover balance\n");
		goto fail_block_groups;
	}

	ret = btrfs_init_dev_stats(fs_info);
	if (ret) {
		printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
		       ret);
		goto fail_block_groups;
	}

	ret = btrfs_init_dev_replace(fs_info);
	if (ret) {
		pr_err("btrfs: failed to init dev_replace: %d\n", ret);
		goto fail_block_groups;
	}

	btrfs_close_extra_devices(fs_info, fs_devices, 1);

	ret = btrfs_init_space_info(fs_info);
	if (ret) {
		printk(KERN_ERR "Failed to initialize space info: %d\n", ret);
  2402. goto fail_block_groups;
  2403. }
  2404. ret = btrfs_read_block_groups(extent_root);
  2405. if (ret) {
  2406. printk(KERN_ERR "Failed to read block groups: %d\n", ret);
  2407. goto fail_block_groups;
  2408. }
  2409. fs_info->num_tolerated_disk_barrier_failures =
  2410. btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
  2411. if (fs_info->fs_devices->missing_devices >
  2412. fs_info->num_tolerated_disk_barrier_failures &&
  2413. !(sb->s_flags & MS_RDONLY)) {
  2414. printk(KERN_WARNING
  2415. "Btrfs: too many missing devices, writeable mount is not allowed\n");
  2416. goto fail_block_groups;
  2417. }
  2418. fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
  2419. "btrfs-cleaner");
  2420. if (IS_ERR(fs_info->cleaner_kthread))
  2421. goto fail_block_groups;
  2422. fs_info->transaction_kthread = kthread_run(transaction_kthread,
  2423. tree_root,
  2424. "btrfs-transaction");
  2425. if (IS_ERR(fs_info->transaction_kthread))
  2426. goto fail_cleaner;
  2427. if (!btrfs_test_opt(tree_root, SSD) &&
  2428. !btrfs_test_opt(tree_root, NOSSD) &&
  2429. !fs_info->fs_devices->rotating) {
  2430. printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
  2431. "mode\n");
  2432. btrfs_set_opt(fs_info->mount_opt, SSD);
  2433. }
  2434. #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
  2435. if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
  2436. ret = btrfsic_mount(tree_root, fs_devices,
  2437. btrfs_test_opt(tree_root,
  2438. CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
  2439. 1 : 0,
  2440. fs_info->check_integrity_print_mask);
  2441. if (ret)
  2442. printk(KERN_WARNING "btrfs: failed to initialize"
  2443. " integrity check module %s\n", sb->s_id);
  2444. }
  2445. #endif
  2446. ret = btrfs_read_qgroup_config(fs_info);
  2447. if (ret)
  2448. goto fail_trans_kthread;
  2449. /* do not make disk changes in broken FS */
  2450. if (btrfs_super_log_root(disk_super) != 0) {
  2451. u64 bytenr = btrfs_super_log_root(disk_super);
  2452. if (fs_devices->rw_devices == 0) {
  2453. printk(KERN_WARNING "Btrfs log replay required "
  2454. "on RO media\n");
  2455. err = -EIO;
  2456. goto fail_qgroup;
  2457. }
  2458. blocksize =
  2459. btrfs_level_size(tree_root,
  2460. btrfs_super_log_root_level(disk_super));
  2461. log_tree_root = btrfs_alloc_root(fs_info);
  2462. if (!log_tree_root) {
			err = -ENOMEM;
			goto fail_qgroup;
		}

		__setup_root(nodesize, leafsize, sectorsize, stripesize,
			     log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);

		log_tree_root->node = read_tree_block(tree_root, bytenr,
						      blocksize,
						      generation + 1);
		if (!log_tree_root->node ||
		    !extent_buffer_uptodate(log_tree_root->node)) {
			printk(KERN_ERR "btrfs: failed to read log tree\n");
			free_extent_buffer(log_tree_root->node);
			kfree(log_tree_root);
			goto fail_trans_kthread;
		}
		/* returns with log_tree_root freed on success */
		ret = btrfs_recover_log_trees(log_tree_root);
		if (ret) {
			btrfs_error(tree_root->fs_info, ret,
				    "Failed to recover log tree");
			free_extent_buffer(log_tree_root->node);
			kfree(log_tree_root);
			goto fail_trans_kthread;
		}

		if (sb->s_flags & MS_RDONLY) {
			ret = btrfs_commit_super(tree_root);
			if (ret)
				goto fail_trans_kthread;
		}
	}

	ret = btrfs_find_orphan_roots(tree_root);
	if (ret)
		goto fail_trans_kthread;

	if (!(sb->s_flags & MS_RDONLY)) {
		ret = btrfs_cleanup_fs_roots(fs_info);
		if (ret)
			goto fail_trans_kthread;

		ret = btrfs_recover_relocation(tree_root);
		if (ret < 0) {
			printk(KERN_WARNING
			       "btrfs: failed to recover relocation\n");
			err = -EINVAL;
			goto fail_qgroup;
		}
	}

	location.objectid = BTRFS_FS_TREE_OBJECTID;
	location.type = BTRFS_ROOT_ITEM_KEY;
	location.offset = 0;

	fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
	if (IS_ERR(fs_info->fs_root)) {
		err = PTR_ERR(fs_info->fs_root);
		goto fail_qgroup;
	}

	if (sb->s_flags & MS_RDONLY)
		return 0;

	down_read(&fs_info->cleanup_work_sem);
	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
		up_read(&fs_info->cleanup_work_sem);
		close_ctree(tree_root);
		return ret;
	}
	up_read(&fs_info->cleanup_work_sem);

	ret = btrfs_resume_balance_async(fs_info);
	if (ret) {
		printk(KERN_WARNING "btrfs: failed to resume balance\n");
		close_ctree(tree_root);
		return ret;
	}

	ret = btrfs_resume_dev_replace_async(fs_info);
	if (ret) {
		pr_warn("btrfs: failed to resume dev_replace\n");
		close_ctree(tree_root);
		return ret;
	}

	btrfs_qgroup_rescan_resume(fs_info);

	if (create_uuid_tree) {
		pr_info("btrfs: creating UUID tree\n");
		ret = btrfs_create_uuid_tree(fs_info);
		if (ret) {
			pr_warn("btrfs: failed to create the UUID tree %d\n",
				ret);
			close_ctree(tree_root);
			return ret;
		}
	} else if (check_uuid_tree ||
		   btrfs_test_opt(tree_root, RESCAN_UUID_TREE)) {
		pr_info("btrfs: checking UUID tree\n");
		ret = btrfs_check_uuid_tree(fs_info);
		if (ret) {
			pr_warn("btrfs: failed to check the UUID tree %d\n",
				ret);
			close_ctree(tree_root);
			return ret;
		}
	} else {
		fs_info->update_uuid_tree_gen = 1;
	}

	return 0;

fail_qgroup:
	btrfs_free_qgroup_config(fs_info);
fail_trans_kthread:
	kthread_stop(fs_info->transaction_kthread);
	btrfs_cleanup_transaction(fs_info->tree_root);
	del_fs_roots(fs_info);
fail_cleaner:
	kthread_stop(fs_info->cleaner_kthread);

	/*
	 * make sure we're done with the btree inode before we stop our
	 * kthreads
	 */
	filemap_write_and_wait(fs_info->btree_inode->i_mapping);

fail_block_groups:
	btrfs_put_block_group_cache(fs_info);
	btrfs_free_block_groups(fs_info);

fail_tree_roots:
	free_root_pointers(fs_info, 1);
	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);

fail_sb_buffer:
	btrfs_stop_all_workers(fs_info);
fail_alloc:
fail_iput:
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

	iput(fs_info->btree_inode);
fail_delalloc_bytes:
	percpu_counter_destroy(&fs_info->delalloc_bytes);
fail_dirty_metadata_bytes:
	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
fail_bdi:
	bdi_destroy(&fs_info->bdi);
fail_srcu:
	cleanup_srcu_struct(&fs_info->subvol_srcu);
fail:
	btrfs_free_stripe_hash_table(fs_info);
	btrfs_close_devices(fs_info->fs_devices);
	return err;

recovery_tree_root:
	if (!btrfs_test_opt(tree_root, RECOVERY))
		goto fail_tree_roots;

	free_root_pointers(fs_info, 0);

	/* don't use the log in recovery mode, it won't be valid */
	btrfs_set_super_log_root(disk_super, 0);
	/* we can't trust the free space cache either */
	btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);

	ret = next_root_backup(fs_info, fs_info->super_copy,
			       &num_backups_tried, &backup_index);
	if (ret == -1)
		goto fail_block_groups;
	goto retry_root_backup;
}
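
/*
 * Completion callback for the superblock buffer_head writes submitted
 * by write_dev_supers(). On I/O error the buffer is left !uptodate and
 * the per-device write-error stat is bumped; the failure is picked up
 * later by the wait == 1 pass in write_dev_supers().
 */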
static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		struct btrfs_device *device = (struct btrfs_device *)
			bh->b_private;

		printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to "
					  "I/O error on %s\n",
					  rcu_str_deref(device->name));
		/* note, we don't set_buffer_write_io_error because we have
		 * our own ways of dealing with the IO errors
		 */
		clear_buffer_uptodate(bh);
		btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
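
/*
 * Read the newest usable superblock from @bdev: scan the candidate
 * copies, skip any whose bytenr or magic doesn't match, and return the
 * buffer_head with the highest generation (or NULL). The caller owns
 * the returned reference and must brelse() it.
 *
 * Note: only mirror 0 is scanned here (the loop runs once); with the
 * usual BTRFS_SUPER_MIRROR_SHIFT layout the other copies would sit at
 * 64MiB and 256GiB, but that is an assumption about btrfs_sb_offset(),
 * whose definition is not shown here.
 */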
struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
{
	struct buffer_head *bh;
	struct buffer_head *latest = NULL;
	struct btrfs_super_block *super;
	int i;
	u64 transid = 0;
	u64 bytenr;

	/* we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	for (i = 0; i < 1; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >=
		    i_size_read(bdev->bd_inode))
			break;
		bh = __bread(bdev, bytenr / 4096,
			     BTRFS_SUPER_INFO_SIZE);
		if (!bh)
			continue;

		super = (struct btrfs_super_block *)bh->b_data;
		if (btrfs_super_bytenr(super) != bytenr ||
		    btrfs_super_magic(super) != BTRFS_MAGIC) {
			brelse(bh);
			continue;
		}

		if (!latest || btrfs_super_generation(super) > transid) {
			brelse(latest);
			latest = bh;
			transid = btrfs_super_generation(super);
		} else {
			brelse(bh);
		}
	}
	return latest;
}
/*
 * this should be called twice, once with wait == 0 and
 * once with wait == 1. When wait == 0 is done, all the buffer heads
 * we write are pinned.
 *
 * They are released when wait == 1 is done.
 * max_mirrors must be the same for both runs, and it indicates how
 * many supers on this one device should be written.
 *
 * max_mirrors == 0 means to write them all.
 */
static int write_dev_supers(struct btrfs_device *device,
			    struct btrfs_super_block *sb,
			    int do_barriers, int wait, int max_mirrors)
{
	struct buffer_head *bh;
	int i;
	int ret;
	int errors = 0;
	u32 crc;
	u64 bytenr;

	if (max_mirrors == 0)
		max_mirrors = BTRFS_SUPER_MIRROR_MAX;

	for (i = 0; i < max_mirrors; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
			break;

		if (wait) {
			bh = __find_get_block(device->bdev, bytenr / 4096,
					      BTRFS_SUPER_INFO_SIZE);
			if (!bh) {
				errors++;
				continue;
			}
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				errors++;

			/* drop our reference */
			brelse(bh);

			/* drop the reference from the wait == 0 run */
			brelse(bh);
			continue;
		} else {
			btrfs_set_super_bytenr(sb, bytenr);

			crc = ~(u32)0;
			crc = btrfs_csum_data((char *)sb +
					      BTRFS_CSUM_SIZE, crc,
					      BTRFS_SUPER_INFO_SIZE -
					      BTRFS_CSUM_SIZE);
			btrfs_csum_final(crc, sb->csum);

			/*
			 * one reference for us, and we leave it for the
			 * caller
			 */
			bh = __getblk(device->bdev, bytenr / 4096,
				      BTRFS_SUPER_INFO_SIZE);
			if (!bh) {
				printk(KERN_ERR "btrfs: couldn't get super "
				       "buffer head for bytenr %Lu\n", bytenr);
				errors++;
				continue;
			}

			memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);

			/* one reference for submit_bh */
			get_bh(bh);

			set_buffer_uptodate(bh);
			lock_buffer(bh);
			bh->b_end_io = btrfs_end_buffer_write_sync;
			bh->b_private = device;
		}

		/*
		 * we fua the first super. The others we allow
		 * to go down lazy.
		 */
		ret = btrfsic_submit_bh(WRITE_FUA, bh);
		if (ret)
			errors++;
	}
	return errors < i ? 0 : -1;
}
/*
 * endio for the write_dev_flush, this will wake anyone waiting
 * for the barrier when it is done
 */
static void btrfs_end_empty_barrier(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}
/*
 * trigger flushes for one of the devices. If you pass wait == 0, the flushes
 * are sent down. With wait == 1, it waits for the previous flush.
 *
 * any device where the flush fails with eopnotsupp is flagged as not-barrier
 * capable
 */
static int write_dev_flush(struct btrfs_device *device, int wait)
{
	struct bio *bio;
	int ret = 0;

	if (device->nobarriers)
		return 0;

	if (wait) {
		bio = device->flush_bio;
		if (!bio)
			return 0;

		wait_for_completion(&device->flush_wait);

		if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
			printk_in_rcu("btrfs: disabling barriers on dev %s\n",
				      rcu_str_deref(device->name));
			device->nobarriers = 1;
		} else if (!bio_flagged(bio, BIO_UPTODATE)) {
			ret = -EIO;
			btrfs_dev_stat_inc_and_print(device,
				BTRFS_DEV_STAT_FLUSH_ERRS);
		}

		/* drop the reference from the wait == 0 run */
		bio_put(bio);
		device->flush_bio = NULL;

		return ret;
	}

	/*
	 * one reference for us, and we leave it for the
	 * caller
	 */
	device->flush_bio = NULL;
	bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
	if (!bio)
		return -ENOMEM;

	bio->bi_end_io = btrfs_end_empty_barrier;
	bio->bi_bdev = device->bdev;
	init_completion(&device->flush_wait);
	bio->bi_private = &device->flush_wait;
	device->flush_bio = bio;

	bio_get(bio);
	btrfsic_submit_bio(WRITE_FLUSH, bio);

	return 0;
}
/*
 * send an empty flush down to each device in parallel,
 * then wait for them
 */
static int barrier_all_devices(struct btrfs_fs_info *info)
{
	struct list_head *head;
	struct btrfs_device *dev;
	int errors_send = 0;
	int errors_wait = 0;
	int ret;

	/* send down all the barriers */
	head = &info->fs_devices->devices;
	list_for_each_entry_rcu(dev, head, dev_list) {
		if (!dev->bdev) {
			errors_send++;
			continue;
		}
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		ret = write_dev_flush(dev, 0);
		if (ret)
			errors_send++;
	}

	/* wait for all the barriers */
	list_for_each_entry_rcu(dev, head, dev_list) {
		if (!dev->bdev) {
			errors_wait++;
			continue;
		}
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		ret = write_dev_flush(dev, 1);
		if (ret)
			errors_wait++;
	}
	if (errors_send > info->num_tolerated_disk_barrier_failures ||
	    errors_wait > info->num_tolerated_disk_barrier_failures)
		return -EIO;
	return 0;
}
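
/*
 * Work out how many devices may fail their flush/barrier writes before
 * a commit can no longer be trusted: walk the data, system and metadata
 * space_infos and clamp the answer to the weakest RAID profile actually
 * in use (see the table in the loop below).
 */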
int btrfs_calc_num_tolerated_disk_barrier_failures(
	struct btrfs_fs_info *fs_info)
{
	struct btrfs_ioctl_space_info space;
	struct btrfs_space_info *sinfo;
	u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
		       BTRFS_BLOCK_GROUP_SYSTEM,
		       BTRFS_BLOCK_GROUP_METADATA,
		       BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
	int num_types = 4;
	int i;
	int c;
	int num_tolerated_disk_barrier_failures =
		(int)fs_info->fs_devices->num_devices;

	for (i = 0; i < num_types; i++) {
		struct btrfs_space_info *tmp;

		sinfo = NULL;
		rcu_read_lock();
		list_for_each_entry_rcu(tmp, &fs_info->space_info, list) {
			if (tmp->flags == types[i]) {
				sinfo = tmp;
				break;
			}
		}
		rcu_read_unlock();

		if (!sinfo)
			continue;

		down_read(&sinfo->groups_sem);
		for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
			if (!list_empty(&sinfo->block_groups[c])) {
				u64 flags;

				btrfs_get_block_group_info(
					&sinfo->block_groups[c], &space);
				if (space.total_bytes == 0 ||
				    space.used_bytes == 0)
					continue;
				flags = space.flags;
				/*
				 * return
				 * 0: if dup, single or RAID0 is configured for
				 *    any of metadata, system or data, else
				 * 1: if RAID5 is configured, or if RAID1 or
				 *    RAID10 is configured and only two mirrors
				 *    are used, else
				 * 2: if RAID6 is configured, else
				 * num_mirrors - 1: if RAID1 or RAID10 is
				 *                  configured and more than
				 *                  2 mirrors are used.
				 */
				if (num_tolerated_disk_barrier_failures > 0 &&
				    ((flags & (BTRFS_BLOCK_GROUP_DUP |
					       BTRFS_BLOCK_GROUP_RAID0)) ||
				     ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
				      == 0)))
					num_tolerated_disk_barrier_failures = 0;
				else if (num_tolerated_disk_barrier_failures > 1) {
					if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
					    BTRFS_BLOCK_GROUP_RAID5 |
					    BTRFS_BLOCK_GROUP_RAID10)) {
						num_tolerated_disk_barrier_failures = 1;
					} else if (flags &
						   BTRFS_BLOCK_GROUP_RAID6) {
						num_tolerated_disk_barrier_failures = 2;
					}
				}
			}
		}
		up_read(&sinfo->groups_sem);
	}

	return num_tolerated_disk_barrier_failures;
}
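
/*
 * Write the super block to every writeable device in two passes: first
 * submit the supers (and, unless NOBARRIER is set, flush all devices
 * via barrier_all_devices()), then wait for the writes and count
 * failures. More than max_errors (num_devices - 1) failing devices
 * aborts the commit with -EIO.
 */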
static int write_all_supers(struct btrfs_root *root, int max_mirrors)
{
	struct list_head *head;
	struct btrfs_device *dev;
	struct btrfs_super_block *sb;
	struct btrfs_dev_item *dev_item;
	int ret;
	int do_barriers;
	int max_errors;
	int total_errors = 0;
	u64 flags;

	do_barriers = !btrfs_test_opt(root, NOBARRIER);
	backup_super_roots(root->fs_info);

	sb = root->fs_info->super_for_commit;
	dev_item = &sb->dev_item;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	head = &root->fs_info->fs_devices->devices;
	max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;

	if (do_barriers) {
		ret = barrier_all_devices(root->fs_info);
		if (ret) {
			mutex_unlock(
				&root->fs_info->fs_devices->device_list_mutex);
			btrfs_error(root->fs_info, ret,
				    "errors while submitting device barriers.");
			return ret;
		}
	}

	list_for_each_entry_rcu(dev, head, dev_list) {
		if (!dev->bdev) {
			total_errors++;
			continue;
		}
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		btrfs_set_stack_device_generation(dev_item, 0);
		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);

		flags = btrfs_super_flags(sb);
		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);

		ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
		if (ret)
			total_errors++;
	}
	if (total_errors > max_errors) {
		printk(KERN_ERR "btrfs: %d errors while writing supers\n",
		       total_errors);
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

		/* FUA is masked off if unsupported and can't be the reason */
		btrfs_error(root->fs_info, -EIO,
			    "%d errors while writing supers", total_errors);
		return -EIO;
	}

	total_errors = 0;
	list_for_each_entry_rcu(dev, head, dev_list) {
		if (!dev->bdev)
			continue;
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
		if (ret)
			total_errors++;
	}
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
	if (total_errors > max_errors) {
		btrfs_error(root->fs_info, -EIO,
			    "%d errors while writing supers", total_errors);
		return -EIO;
	}
	return 0;
}
int write_ctree_super(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, int max_mirrors)
{
	int ret;

	ret = write_all_supers(root, max_mirrors);
	return ret;
}
/* Drop a fs root from the radix tree and free it. */
void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
				 struct btrfs_root *root)
{
	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_delete(&fs_info->fs_roots_radix,
			  (unsigned long)root->root_key.objectid);
	spin_unlock(&fs_info->fs_roots_radix_lock);

	if (btrfs_root_refs(&root->root_item) == 0)
		synchronize_srcu(&fs_info->subvol_srcu);

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		btrfs_free_log(NULL, root);
		btrfs_free_log_root_tree(NULL, fs_info);
	}

	__btrfs_remove_free_space_cache(root->free_ino_pinned);
	__btrfs_remove_free_space_cache(root->free_ino_ctl);
	free_fs_root(root);
}

static void free_fs_root(struct btrfs_root *root)
{
	iput(root->cache_inode);
	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
	btrfs_free_block_rsv(root, root->orphan_block_rsv);
	root->orphan_block_rsv = NULL;
	if (root->anon_dev)
		free_anon_bdev(root->anon_dev);
	free_extent_buffer(root->node);
	free_extent_buffer(root->commit_root);
	kfree(root->free_ino_ctl);
	kfree(root->free_ino_pinned);
	kfree(root->name);
	btrfs_put_fs_root(root);
}

void btrfs_free_fs_root(struct btrfs_root *root)
{
	free_fs_root(root);
}
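
/*
 * Run orphan cleanup on every fs root currently in the radix tree,
 * walking it in gangs of up to ARRAY_SIZE(gang) roots at a time.
 * Returns the first error hit, or 0.
 */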
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
{
	u64 root_objectid = 0;
	struct btrfs_root *gang[8];
	int i;
	int ret;

	while (1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, root_objectid,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;

		root_objectid = gang[ret - 1]->root_key.objectid + 1;
		for (i = 0; i < ret; i++) {
			int err;

			root_objectid = gang[i]->root_key.objectid;
			err = btrfs_orphan_cleanup(gang[i]);
			if (err)
				return err;
		}
		root_objectid++;
	}
	return 0;
}
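
/*
 * Flush everything pending and commit the running transaction so the
 * filesystem is consistent on disk. The transaction is committed twice;
 * as the inline comment notes, the second commit drops the original
 * snapshot. Finishes by writing the super blocks out directly.
 */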
int btrfs_commit_super(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	int ret;

	mutex_lock(&root->fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(root);
	mutex_unlock(&root->fs_info->cleaner_mutex);
	wake_up_process(root->fs_info->cleaner_kthread);

	/* wait until ongoing cleanup work done */
	down_write(&root->fs_info->cleanup_work_sem);
	up_write(&root->fs_info->cleanup_work_sem);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	ret = btrfs_commit_transaction(trans, root);
	if (ret)
		return ret;
	/* run commit again to drop the original snapshot */
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	ret = btrfs_commit_transaction(trans, root);
	if (ret)
		return ret;
	ret = btrfs_write_and_wait_transaction(NULL, root);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Failed to sync btree inode to disk.");
		return ret;
	}

	ret = write_ctree_super(NULL, root, 0);
	return ret;
}
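
/*
 * Unmount-time teardown: stop background work (uuid scan, balance,
 * dev-replace, scrub, defrag), commit the super if we are read-write,
 * stop the transaction and cleaner kthreads, then free every cached
 * structure that mount set up.
 */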
int close_ctree(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	fs_info->closing = 1;
	smp_mb();

	/* wait for the uuid_scan task to finish */
	down(&fs_info->uuid_tree_rescan_sem);
	/* avoid complaints from lockdep et al., set sem back to initial state */
	up(&fs_info->uuid_tree_rescan_sem);

	/* pause restriper - we want to resume on mount */
	btrfs_pause_balance(fs_info);

	btrfs_dev_replace_suspend_for_unmount(fs_info);

	btrfs_scrub_cancel(fs_info);

	/* wait for any defraggers to finish */
	wait_event(fs_info->transaction_wait,
		   (atomic_read(&fs_info->defrag_running) == 0));

	/* clear out the rbtree of defraggable inodes */
	btrfs_cleanup_defrag_inodes(fs_info);

	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
		ret = btrfs_commit_super(root);
		if (ret)
			printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
	}

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		btrfs_error_commit_super(root);

	btrfs_put_block_group_cache(fs_info);

	kthread_stop(fs_info->transaction_kthread);
	kthread_stop(fs_info->cleaner_kthread);

	fs_info->closing = 2;
	smp_mb();

	btrfs_free_qgroup_config(root->fs_info);

	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
		printk(KERN_INFO "btrfs: at unmount delalloc count %lld\n",
		       percpu_counter_sum(&fs_info->delalloc_bytes));
	}

	btrfs_free_block_groups(fs_info);

	btrfs_stop_all_workers(fs_info);

	del_fs_roots(fs_info);

	free_root_pointers(fs_info, 1);

	iput(fs_info->btree_inode);

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	if (btrfs_test_opt(root, CHECK_INTEGRITY))
		btrfsic_unmount(root, fs_info->fs_devices);
#endif

	btrfs_close_devices(fs_info->fs_devices);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
	percpu_counter_destroy(&fs_info->delalloc_bytes);
	bdi_destroy(&fs_info->bdi);
	cleanup_srcu_struct(&fs_info->subvol_srcu);

	btrfs_free_stripe_hash_table(fs_info);

	btrfs_free_block_rsv(root, root->orphan_block_rsv);
	root->orphan_block_rsv = NULL;

	return 0;
}
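
/*
 * Check whether an extent buffer is fully up to date and its header
 * generation matches @parent_transid. Returns 1 if it does, 0 if not,
 * and -EAGAIN when @atomic is set and the check would have to block.
 */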
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
			  int atomic)
{
	int ret;
	struct inode *btree_inode = buf->pages[0]->mapping->host;

	ret = extent_buffer_uptodate(buf);
	if (!ret)
		return ret;

	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
				    parent_transid, atomic);
	if (ret == -EAGAIN)
		return ret;
	return !ret;
}

int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
{
	return set_extent_buffer_uptodate(buf);
}
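
/*
 * Mark a locked extent buffer dirty and, if it wasn't dirty already,
 * account its length in dirty_metadata_bytes. Warns if the buffer's
 * generation doesn't match the currently running transaction.
 */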
void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
	u64 transid = btrfs_header_generation(buf);
	int was_dirty;

	btrfs_assert_tree_locked(buf);
	if (transid != root->fs_info->generation)
		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
		     "found %llu running %llu\n",
		     buf->start, transid, root->fs_info->generation);
	was_dirty = set_extent_buffer_dirty(buf);
	if (!was_dirty)
		__percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
				     buf->len,
				     root->fs_info->dirty_metadata_batch);
}

static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
					int flush_delayed)
{
	/*
	 * looks as though older kernels can get into trouble with
	 * this code, they end up stuck in balance_dirty_pages forever
	 */
	int ret;

	if (current->flags & PF_MEMALLOC)
		return;

	if (flush_delayed)
		btrfs_balance_delayed_items(root);

	ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
				     BTRFS_DIRTY_METADATA_THRESH);
	if (ret > 0) {
		balance_dirty_pages_ratelimited(
				   root->fs_info->btree_inode->i_mapping);
	}
	return;
}

void btrfs_btree_balance_dirty(struct btrfs_root *root)
{
	__btrfs_btree_balance_dirty(root, 1);
}

void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
{
	__btrfs_btree_balance_dirty(root, 0);
}
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
	return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
}

static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
				   int read_only)
{
	/*
	 * Placeholder for checks
	 */
	return 0;
}
static void btrfs_error_commit_super(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(root);
	mutex_unlock(&root->fs_info->cleaner_mutex);

	down_write(&root->fs_info->cleanup_work_sem);
	up_write(&root->fs_info->cleanup_work_sem);

	/* cleanup FS via transaction */
	btrfs_cleanup_transaction(root);
}
static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
					     struct btrfs_root *root)
{
	struct btrfs_inode *btrfs_inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->fs_info->ordered_root_lock);
	list_splice_init(&t->ordered_operations, &splice);
	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);
		list_del_init(&btrfs_inode->ordered_operations);
		spin_unlock(&root->fs_info->ordered_root_lock);

		btrfs_invalidate_inodes(btrfs_inode->root);

		spin_lock(&root->fs_info->ordered_root_lock);
	}
	spin_unlock(&root->fs_info->ordered_root_lock);
	mutex_unlock(&root->fs_info->ordered_operations_mutex);
}
static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
{
	struct btrfs_ordered_extent *ordered;

	spin_lock(&root->ordered_extent_lock);
	/*
	 * This will just short circuit the ordered completion stuff which will
	 * make sure the ordered extent gets properly cleaned up.
	 */
	list_for_each_entry(ordered, &root->ordered_extents,
			    root_extent_list)
		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
	spin_unlock(&root->ordered_extent_lock);
}

static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		list_del_init(&root->ordered_root);

		btrfs_destroy_ordered_extents(root);

		cond_resched_lock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&fs_info->ordered_root_lock);
}
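
/*
 * Tear down every delayed ref still queued on an aborted transaction:
 * take each node out of the rbtree, pin the extent if the head still
 * had reserved space to insert, and drop the remaining references.
 */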
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_root *root)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	int ret = 0;

	delayed_refs = &trans->delayed_refs;

	spin_lock(&delayed_refs->lock);
	if (delayed_refs->num_entries == 0) {
		spin_unlock(&delayed_refs->lock);
		printk(KERN_INFO "delayed_refs has NO entry\n");
		return ret;
	}

	while ((node = rb_first(&delayed_refs->root)) != NULL) {
		struct btrfs_delayed_ref_head *head = NULL;
		bool pin_bytes = false;

		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		atomic_set(&ref->refs, 1);
		if (btrfs_delayed_ref_is_head(ref)) {

			head = btrfs_delayed_node_to_head(ref);
			if (!mutex_trylock(&head->mutex)) {
				atomic_inc(&ref->refs);
				spin_unlock(&delayed_refs->lock);

				/* Need to wait for the delayed ref to run */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref(ref);

				spin_lock(&delayed_refs->lock);
				continue;
			}

			if (head->must_insert_reserved)
				pin_bytes = true;
			btrfs_free_delayed_extent_op(head->extent_op);
			delayed_refs->num_heads--;
			if (list_empty(&head->cluster))
				delayed_refs->num_heads_ready--;
			list_del_init(&head->cluster);
		}

		ref->in_tree = 0;
		rb_erase(&ref->rb_node, &delayed_refs->root);
		delayed_refs->num_entries--;
		spin_unlock(&delayed_refs->lock);
		if (head) {
			if (pin_bytes)
				btrfs_pin_extent(root, ref->bytenr,
						 ref->num_bytes, 1);
			mutex_unlock(&head->mutex);
		}
		btrfs_put_delayed_ref(ref);

		cond_resched();
		spin_lock(&delayed_refs->lock);
	}

	spin_unlock(&delayed_refs->lock);

	return ret;
}
static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t)
{
	struct btrfs_pending_snapshot *snapshot;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	list_splice_init(&t->pending_snapshots, &splice);

	while (!list_empty(&splice)) {
		snapshot = list_entry(splice.next,
				      struct btrfs_pending_snapshot,
				      list);
		snapshot->error = -ECANCELED;
		list_del_init(&snapshot->list);
	}
}
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
{
	struct btrfs_inode *btrfs_inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->delalloc_lock);
	list_splice_init(&root->delalloc_inodes, &splice);

	while (!list_empty(&splice)) {
		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
					       delalloc_inodes);

		list_del_init(&btrfs_inode->delalloc_inodes);
		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			  &btrfs_inode->runtime_flags);
		spin_unlock(&root->delalloc_lock);

		btrfs_invalidate_inodes(btrfs_inode->root);

		spin_lock(&root->delalloc_lock);
	}

	spin_unlock(&root->delalloc_lock);
}

static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&fs_info->delalloc_root_lock);
	list_splice_init(&fs_info->delalloc_roots, &splice);
	while (!list_empty(&splice)) {
		root = list_first_entry(&splice, struct btrfs_root,
					delalloc_root);
		list_del_init(&root->delalloc_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		spin_unlock(&fs_info->delalloc_root_lock);

		btrfs_destroy_delalloc_inodes(root);
		btrfs_put_fs_root(root);

		spin_lock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&fs_info->delalloc_root_lock);
}
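
/*
 * Clear @mark from @dirty_pages and drop the dirty state of every
 * extent buffer found in the cleared ranges, so an aborted
 * transaction's metadata never reaches disk.
 */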
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages,
					int mark)
{
	int ret;
	struct extent_buffer *eb;
	u64 start = 0;
	u64 end;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark, NULL);
		if (ret)
			break;

		clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
		while (start <= end) {
			eb = btrfs_find_tree_block(root, start,
						   root->leafsize);
			start += root->leafsize;
			if (!eb)
				continue;
			wait_on_extent_buffer_writeback(eb);

			if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
					       &eb->bflags))
				clear_extent_buffer_dirty(eb);
			free_extent_buffer_stale(eb);
		}
	}

	return ret;
}
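
/*
 * Unpin everything left in the pinned-extent trees. The loop runs
 * twice: once for the tree passed in and once for the other
 * freed_extents[] tree, discarding each range first when the DISCARD
 * mount option is set.
 */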
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
				       struct extent_io_tree *pinned_extents)
{
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;
	bool loop = true;

	unpin = pinned_extents;
again:
	while (1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			break;

		/* opt_discard */
		if (btrfs_test_opt(root, DISCARD))
			ret = btrfs_error_discard_extent(root, start,
							 end + 1 - start,
							 NULL);

		clear_extent_dirty(unpin, start, end, GFP_NOFS);
		btrfs_error_unpin_extent_range(root, start, end);
		cond_resched();
	}

	if (loop) {
		if (unpin == &root->fs_info->freed_extents[0])
			unpin = &root->fs_info->freed_extents[1];
		else
			unpin = &root->fs_info->freed_extents[0];
		loop = false;
		goto again;
	}

	return 0;
}
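
/*
 * Abort path for a single transaction: destroy its delayed refs,
 * release its block reservation, cancel pending snapshots, and clear
 * dirty and pinned extents, waking waiters as the transaction state
 * advances to TRANS_STATE_COMPLETED.
 */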
void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
				   struct btrfs_root *root)
{
	btrfs_destroy_delayed_refs(cur_trans, root);
	btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
				cur_trans->dirty_pages.dirty_bytes);

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&root->fs_info->transaction_blocked_wait);

	btrfs_evict_pending_snapshots(cur_trans);

	cur_trans->state = TRANS_STATE_UNBLOCKED;
	wake_up(&root->fs_info->transaction_wait);

	btrfs_destroy_delayed_inodes(root);
	btrfs_assert_delayed_root_empty(root);

	btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
				     EXTENT_DIRTY);
	btrfs_destroy_pinned_extent(root,
				    root->fs_info->pinned_extents);

	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);

	/*
	memset(cur_trans, 0, sizeof(*cur_trans));
	kmem_cache_free(btrfs_transaction_cachep, cur_trans);
	*/
}
static int btrfs_cleanup_transaction(struct btrfs_root *root)
{
	struct btrfs_transaction *t;
	LIST_HEAD(list);

	mutex_lock(&root->fs_info->transaction_kthread_mutex);

	spin_lock(&root->fs_info->trans_lock);
	list_splice_init(&root->fs_info->trans_list, &list);
	root->fs_info->running_transaction = NULL;
	spin_unlock(&root->fs_info->trans_lock);

	while (!list_empty(&list)) {
		t = list_entry(list.next, struct btrfs_transaction, list);

		btrfs_destroy_ordered_operations(t, root);

		btrfs_destroy_all_ordered_extents(root->fs_info);

		btrfs_destroy_delayed_refs(t, root);

		/*
		 * FIXME: cleanup wait for commit
		 * We needn't acquire the lock here, because we are during
		 * the umount, there is no other task which will change it.
		 */
		t->state = TRANS_STATE_COMMIT_START;
		smp_mb();
		if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
			wake_up(&root->fs_info->transaction_blocked_wait);

		btrfs_evict_pending_snapshots(t);

		t->state = TRANS_STATE_UNBLOCKED;
		smp_mb();
		if (waitqueue_active(&root->fs_info->transaction_wait))
			wake_up(&root->fs_info->transaction_wait);

		btrfs_destroy_delayed_inodes(root);
		btrfs_assert_delayed_root_empty(root);

		btrfs_destroy_all_delalloc_inodes(root->fs_info);

		btrfs_destroy_marked_extents(root, &t->dirty_pages,
					     EXTENT_DIRTY);

		btrfs_destroy_pinned_extent(root,
					    root->fs_info->pinned_extents);

		t->state = TRANS_STATE_COMPLETED;
		smp_mb();
		if (waitqueue_active(&t->commit_wait))
			wake_up(&t->commit_wait);

		atomic_set(&t->use_count, 0);
		list_del_init(&t->list);
		memset(t, 0, sizeof(*t));
		kmem_cache_free(btrfs_transaction_cachep, t);
	}

	mutex_unlock(&root->fs_info->transaction_kthread_mutex);

	return 0;
}
static struct extent_io_ops btree_extent_io_ops = {
	.readpage_end_io_hook = btree_readpage_end_io_hook,
	.readpage_io_failed_hook = btree_io_failed_hook,
	.submit_bio_hook = btree_submit_bio_hook,
	/* note we're sharing with inode.c for the merge bio hook */
	.merge_bio_hook = btrfs_merge_bio_hook,
};