disk-io.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/crc32c.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "async-thread.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"

static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
				   int read_only);
static void btrfs_destroy_ordered_operations(struct btrfs_root *root);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_root *root);
static void btrfs_destroy_pending_snapshots(struct btrfs_transaction *t);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages,
					int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
				       struct extent_io_tree *pinned_extents);

/*
 * end_io_wq structs are used to do processing in task context when an IO is
 * complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	int error;
	int metadata;
	struct list_head list;
	struct btrfs_work work;
};

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
	struct inode *inode;
	struct bio *bio;
	struct list_head list;
	extent_submit_bio_hook_t *submit_bio_start;
	extent_submit_bio_hook_t *submit_bio_done;
	int rw;
	int mirror_num;
	unsigned long bio_flags;
	/*
	 * bio_offset is optional, can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work work;
	int error;
};

/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->objectid.  This ensures that all special purpose roots
 * have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

static struct btrfs_lockdep_keyset {
	u64 id;				/* root objectid */
	const char *name_stem;		/* lock name stem */
	char names[BTRFS_MAX_LEVEL + 1][20];
	struct lock_class_key keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root" },
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent" },
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk" },
	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev" },
	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs" },
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum" },
	{ .id = BTRFS_ORPHAN_OBJECTID,		.name_stem = "orphan" },
	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log" },
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc" },
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc" },
	{ .id = 0,				.name_stem = "tree" },
};

void __init btrfs_init_lockdep(void)
{
	int i, j;

	/* initialize lockdep class names */
	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
			snprintf(ks->names[j], sizeof(ks->names[j]),
				 "btrfs-%s-%02d", ks->name_stem, j);
	}
}

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
				    int level)
{
	struct btrfs_lockdep_keyset *ks;

	BUG_ON(level >= ARRAY_SIZE(ks->keys));

	/* find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock,
				   &ks->keys[level], ks->names[level]);
}
#endif
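
/*
 * Illustrative sketch, not part of the original file: after
 * btrfs_init_lockdep() runs, the generated class names follow the
 * "btrfs-%s-%02d" pattern above ("btrfs-root-00" ... "btrfs-root-08",
 * "btrfs-extent-00", and so on).  A buffer belonging to the extent tree
 * at level 1 would be classed with:
 *
 *	btrfs_set_buffer_lockdep_class(BTRFS_EXTENT_TREE_OBJECTID, eb, 1);
 *
 * which resolves to the "btrfs-extent-01" lock class.
 */
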
/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct inode *inode,
		struct page *page, size_t pg_offset, u64 start, u64 len,
		int create)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev =
			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
		read_unlock(&em_tree->lock);
		goto out;
	}
	read_unlock(&em_tree->lock);

	em = alloc_extent_map();
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_len = (u64)-1;
	em->block_start = 0;
	em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	if (ret == -EEXIST) {
		u64 failed_start = em->start;
		u64 failed_len = em->len;

		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (em) {
			ret = 0;
		} else {
			em = lookup_extent_mapping(em_tree, failed_start,
						   failed_len);
			ret = -EIO;
		}
	} else if (ret) {
		free_extent_map(em);
		em = NULL;
	}
	write_unlock(&em_tree->lock);

	if (ret)
		em = ERR_PTR(ret);
out:
	return em;
}

u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
{
	return crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
	put_unaligned_le32(~crc, result);
}
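
/*
 * Illustrative sketch, not part of the original file: the two helpers
 * above split a crc32c checksum into an accumulate step and a finalize
 * step.  A caller seeds with ~0, folds in data (possibly in several
 * chunks), then inverts and stores the result little-endian:
 *
 *	u32 crc = ~(u32)0;
 *	crc = btrfs_csum_data(root, data, crc, len);
 *	btrfs_csum_final(crc, result);
 *
 * csum_tree_block() below does exactly this across the mapped pages of
 * an extent buffer, skipping the first BTRFS_CSUM_SIZE bytes where the
 * checksum itself lives.
 */
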
/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
			   int verify)
{
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	char *result = NULL;
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;
	unsigned long inline_result;

	len = buf->len - offset;
	while (len > 0) {
		err = map_private_extent_buffer(buf, offset, 32,
					&kaddr, &map_start, &map_len);
		if (err)
			return 1;
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(root, kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
	}
	if (csum_size > sizeof(inline_result)) {
		result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
		if (!result)
			return 1;
	} else {
		result = (char *)&inline_result;
	}

	btrfs_csum_final(crc, result);

	if (verify) {
		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
			u32 val;
			u32 found = 0;

			memcpy(&found, result, csum_size);
			read_extent_buffer(buf, &val, 0, csum_size);
			printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
				       "failed on %llu wanted %X found %X "
				       "level %d\n",
				       root->fs_info->sb->s_id,
				       (unsigned long long)buf->start, val, found,
				       btrfs_header_level(buf));
			if (result != (char *)&inline_result)
				kfree(result);
			return 1;
		}
	} else {
		write_extent_buffer(buf, result, 0, csum_size);
	}
	if (result != (char *)&inline_result)
		kfree(result);
	return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid,
				 int atomic)
{
	struct extent_state *cached_state = NULL;
	int ret;

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	if (atomic)
		return -EAGAIN;

	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
			 0, &cached_state);
	if (extent_buffer_uptodate(eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	printk_ratelimited(KERN_INFO "parent transid verify failed on %llu "
		       "wanted %llu found %llu\n",
		       (unsigned long long)eb->start,
		       (unsigned long long)parent_transid,
		       (unsigned long long)btrfs_header_generation(eb));
	ret = 1;
	clear_extent_buffer_uptodate(eb);
out:
	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
			     &cached_state, GFP_NOFS);
	return ret;
}
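
/*
 * Illustrative note, not part of the original file: a caller that must
 * not block passes atomic == 1 and treats -EAGAIN as "verify again from
 * a context that may sleep"; btree_read_extent_buffer_pages() below
 * always passes 0 and may therefore wait on the extent lock.
 */
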
/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
					  struct extent_buffer *eb,
					  u64 start, u64 parent_transid)
{
	struct extent_io_tree *io_tree;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
	while (1) {
		ret = read_extent_buffer_pages(io_tree, eb, start,
					       WAIT_COMPLETE,
					       btree_get_extent, mirror_num);
		if (!ret && !verify_parent_transid(io_tree, eb,
						   parent_transid, 0))
			break;

		/*
		 * This buffer's crc is fine, but its contents are corrupted, so
		 * there is no reason to read the other copies, they won't be
		 * any less wrong.
		 */
		if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
			break;

		num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret)
		repair_eb_io_failure(root, eb, failed_mirror);

	return ret;
}
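
/*
 * Worked example, not part of the original file, assuming two metadata
 * copies (num_copies == 2, e.g. RAID1): the first read goes out with
 * mirror_num == 0; if it fails verification, failed_mirror records the
 * mirror that serviced it (say 1), mirror_num is bumped to 1, matches
 * failed_mirror and is bumped again to 2, and the second copy is read.
 * If that one passes, repair_eb_io_failure() rewrites the bad mirror
 * from the good copy.
 */
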
/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */
static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
	struct extent_io_tree *tree;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 found_start;
	struct extent_buffer *eb;

	tree = &BTRFS_I(page->mapping->host)->io_tree;

	eb = (struct extent_buffer *)page->private;
	if (page != eb->pages[0])
		return 0;
	found_start = btrfs_header_bytenr(eb);
	if (found_start != start) {
		WARN_ON(1);
		return 0;
	}
	if (eb->pages[0] != page) {
		WARN_ON(1);
		return 0;
	}
	if (!PageUptodate(page)) {
		WARN_ON(1);
		return 0;
	}
	csum_tree_block(root, eb, 0);
	return 0;
}

static int check_tree_block_fsid(struct btrfs_root *root,
				 struct extent_buffer *eb)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	u8 fsid[BTRFS_UUID_SIZE];
	int ret = 1;

	read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
			   BTRFS_FSID_SIZE);
	while (fs_devices) {
		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
			ret = 0;
			break;
		}
		fs_devices = fs_devices->seed;
	}
	return ret;
}

#define CORRUPT(reason, eb, root, slot) \
	printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu," \
	       "root=%llu, slot=%d\n", reason, \
	       (unsigned long long)btrfs_header_bytenr(eb), \
	       (unsigned long long)root->objectid, slot)

static noinline int check_leaf(struct btrfs_root *root,
			       struct extent_buffer *leaf)
{
	struct btrfs_key key;
	struct btrfs_key leaf_key;
	u32 nritems = btrfs_header_nritems(leaf);
	int slot;

	if (nritems == 0)
		return 0;

	/* Check the 0 item */
	if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
	    BTRFS_LEAF_DATA_SIZE(root)) {
		CORRUPT("invalid item offset size pair", leaf, root, 0);
		return -EIO;
	}

	/*
	 * Check to make sure each item's keys are in the correct order and
	 * their offsets make sense.  We only have to loop through nritems-1
	 * because we check the current slot against the next slot, which
	 * verifies the next slot's offset+size makes sense and that the
	 * current slot's offset is correct.
	 */
	for (slot = 0; slot < nritems - 1; slot++) {
		btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
		btrfs_item_key_to_cpu(leaf, &key, slot + 1);

		/* Make sure the keys are in the right order */
		if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
			CORRUPT("bad key order", leaf, root, slot);
			return -EIO;
		}

		/*
		 * Make sure the offset and ends are right, remember that the
		 * item data starts at the end of the leaf and grows towards
		 * the front.
		 */
		if (btrfs_item_offset_nr(leaf, slot) !=
			btrfs_item_end_nr(leaf, slot + 1)) {
			CORRUPT("slot offset bad", leaf, root, slot);
			return -EIO;
		}

		/*
		 * Check to make sure that we don't point outside of the leaf,
		 * just in case all the items are consistent with each other,
		 * but all point outside of the leaf.
		 */
		if (btrfs_item_end_nr(leaf, slot) >
		    BTRFS_LEAF_DATA_SIZE(root)) {
			CORRUPT("slot end outside of leaf", leaf, root, slot);
			return -EIO;
		}
	}

	return 0;
}
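
/*
 * Worked example of the invariants above, not part of the original file,
 * assuming a hypothetical leaf data size of 3995 bytes: item 0 with size
 * 100 must sit at offset 3895 (offset + size == BTRFS_LEAF_DATA_SIZE),
 * and if item 1 has size 50 it must start at offset 3845, so that
 * btrfs_item_offset_nr(leaf, 0) == btrfs_item_end_nr(leaf, 1).  Item
 * data is packed from the end of the leaf towards the item headers at
 * the front.
 */
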
struct extent_buffer *find_eb_for_page(struct extent_io_tree *tree,
				       struct page *page, int max_walk)
{
	struct extent_buffer *eb;
	u64 start = page_offset(page);
	u64 target = start;
	u64 min_start;

	if (start < max_walk)
		min_start = 0;
	else
		min_start = start - max_walk;

	while (start >= min_start) {
		eb = find_extent_buffer(tree, start, 0);
		if (eb) {
			/*
			 * we found an extent buffer and it contains our page
			 * hooray!
			 */
			if (eb->start <= target &&
			    eb->start + eb->len > target)
				return eb;

			/* we found an extent buffer that wasn't for us */
			free_extent_buffer(eb);
			return NULL;
		}
		if (start == 0)
			break;
		start -= PAGE_CACHE_SIZE;
	}
	return NULL;
}

static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
				      struct extent_state *state, int mirror)
{
	struct extent_io_tree *tree;
	u64 found_start;
	int found_level;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	int ret = 0;
	int reads_done;

	if (!page->private)
		goto out;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	eb = (struct extent_buffer *)page->private;

	/* the pending IO might have been the only thing that kept this buffer
	 * in memory.  Make sure we have a ref for all these other checks
	 */
	extent_buffer_get(eb);

	reads_done = atomic_dec_and_test(&eb->io_pages);
	if (!reads_done)
		goto err;

	eb->read_mirror = mirror;
	if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
		ret = -EIO;
		goto err;
	}

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		printk_ratelimited(KERN_INFO "btrfs bad tree block start "
			       "%llu %llu\n",
			       (unsigned long long)found_start,
			       (unsigned long long)eb->start);
		ret = -EIO;
		goto err;
	}
	if (check_tree_block_fsid(root, eb)) {
		printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
			       (unsigned long long)eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);

	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
				       eb, found_level);

	ret = csum_tree_block(root, eb, 1);
	if (ret) {
		ret = -EIO;
		goto err;
	}

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
	 */
	if (found_level == 0 && check_leaf(root, eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (!ret)
		set_extent_buffer_uptodate(eb);
err:
	if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
		clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
		btree_readahead_hook(root, eb, eb->start, ret);
	}

	if (ret)
		clear_extent_buffer_uptodate(eb);
	free_extent_buffer(eb);
out:
	return ret;
}

static int btree_io_failed_hook(struct page *page, int failed_mirror)
{
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;

	eb = (struct extent_buffer *)page->private;
	set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
	eb->read_mirror = failed_mirror;
	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(root, eb, eb->start, -EIO);
	return -EIO;	/* we fixed nothing */
}

static void end_workqueue_bio(struct bio *bio, int err)
{
	struct end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;

	fs_info = end_io_wq->info;
	end_io_wq->error = err;
	end_io_wq->work.func = end_workqueue_fn;
	end_io_wq->work.flags = 0;

	if (bio->bi_rw & REQ_WRITE) {
		if (end_io_wq->metadata == 1)
			btrfs_queue_worker(&fs_info->endio_meta_write_workers,
					   &end_io_wq->work);
		else if (end_io_wq->metadata == 2)
			btrfs_queue_worker(&fs_info->endio_freespace_worker,
					   &end_io_wq->work);
		else
			btrfs_queue_worker(&fs_info->endio_write_workers,
					   &end_io_wq->work);
	} else {
		if (end_io_wq->metadata)
			btrfs_queue_worker(&fs_info->endio_meta_workers,
					   &end_io_wq->work);
		else
			btrfs_queue_worker(&fs_info->endio_workers,
					   &end_io_wq->work);
	}
}

/*
 * For the metadata arg you want
 *
 * 0 - if data
 * 1 - if normal metadata
 * 2 - if writing to the free space cache area
 */
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			int metadata)
{
	struct end_io_wq *end_io_wq;

	end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
	if (!end_io_wq)
		return -ENOMEM;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->error = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}
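
/*
 * Illustrative usage, not part of the original file: a metadata read
 * would be hooked up as
 *
 *	ret = btrfs_bio_wq_end_io(fs_info, bio, 1);
 *
 * so csum verification runs in endio_meta_workers, while a free space
 * cache write would pass metadata == 2 to land in endio_freespace_worker;
 * see the routing in end_workqueue_bio() above.
 */
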
unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
	unsigned long limit = min_t(unsigned long,
				    info->workers.max_workers,
				    info->fs_devices->open_devices);
	return 256 * limit;
}

static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	int ret;

	async = container_of(work, struct async_submit_bio, work);
	ret = async->submit_bio_start(async->inode, async->rw, async->bio,
				      async->mirror_num, async->bio_flags,
				      async->bio_offset);
	if (ret)
		async->error = ret;
}

static void run_one_async_done(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_submit_bio *async;
	int limit;

	async = container_of(work, struct async_submit_bio, work);
	fs_info = BTRFS_I(async->inode)->root->fs_info;

	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	atomic_dec(&fs_info->nr_async_submits);

	if (atomic_read(&fs_info->nr_async_submits) < limit &&
	    waitqueue_active(&fs_info->async_submit_wait))
		wake_up(&fs_info->async_submit_wait);

	/* If an error occurred we just want to clean up the bio and move on */
	if (async->error) {
		bio_endio(async->bio, async->error);
		return;
	}

	async->submit_bio_done(async->inode, async->rw, async->bio,
			       async->mirror_num, async->bio_flags,
			       async->bio_offset);
}

static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);
	kfree(async);
}

int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
			int rw, struct bio *bio, int mirror_num,
			unsigned long bio_flags,
			u64 bio_offset,
			extent_submit_bio_hook_t *submit_bio_start,
			extent_submit_bio_hook_t *submit_bio_done)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return -ENOMEM;

	async->inode = inode;
	async->rw = rw;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;
	async->submit_bio_done = submit_bio_done;

	async->work.func = run_one_async_start;
	async->work.ordered_func = run_one_async_done;
	async->work.ordered_free = run_one_async_free;

	async->work.flags = 0;
	async->bio_flags = bio_flags;
	async->bio_offset = bio_offset;

	async->error = 0;

	atomic_inc(&fs_info->nr_async_submits);

	if (rw & REQ_SYNC)
		btrfs_set_work_high_prio(&async->work);

	btrfs_queue_worker(&fs_info->workers, &async->work);

	while (atomic_read(&fs_info->async_submit_draining) &&
	       atomic_read(&fs_info->nr_async_submits)) {
		wait_event(fs_info->async_submit_wait,
			   (atomic_read(&fs_info->nr_async_submits) == 0));
	}

	return 0;
}
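
/*
 * Illustrative summary of the async submit life cycle, not part of the
 * original file, based on the ordered btrfs_work fields set above:
 * work.func (run_one_async_start) checksums via submit_bio_start on a
 * worker thread, ordered_func (run_one_async_done) runs in queue order
 * and sends the bio via submit_bio_done, and ordered_free releases the
 * async_submit_bio.  btree_submit_bio_hook() below plugs
 * __btree_submit_bio_start/__btree_submit_bio_done into this machinery
 * for metadata writes.
 */
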
static int btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int bio_index = 0;
	struct btrfs_root *root;
	int ret = 0;

	WARN_ON(bio->bi_vcnt <= 0);
	while (bio_index < bio->bi_vcnt) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		ret = csum_dirty_buffer(root, bvec->bv_page);
		if (ret)
			break;
		bio_index++;
		bvec++;
	}
	return ret;
}

static int __btree_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags,
				    u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Checksum the blocks in this bio before
	 * it goes down the IO stack.
	 */
	return btree_csum_one_bio(bio);
}

static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
				   int mirror_num, unsigned long bio_flags,
				   u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
}

static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset)
{
	int ret;

	if (!(rw & REQ_WRITE)) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
					  bio, 1);
		if (ret)
			return ret;
		return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
				     mirror_num, 0);
	}

	/*
	 * kthread helpers are used to submit writes so that checksumming
	 * can happen in parallel across all CPUs
	 */
	return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num, 0,
				   bio_offset,
				   __btree_submit_bio_start,
				   __btree_submit_bio_done);
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * we haven't done the locking hook
	 */
	if (PageDirty(page))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
	return migrate_page(mapping, newpage, page, mode);
}
#endif

static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(mapping->host)->io_tree;
	if (wbc->sync_mode == WB_SYNC_NONE) {
		struct btrfs_root *root = BTRFS_I(mapping->host)->root;
		u64 num_dirty;
		unsigned long thresh = 32 * 1024 * 1024;

		if (wbc->for_kupdate)
			return 0;

		/* this is a bit racy, but that's ok */
		num_dirty = root->fs_info->dirty_metadata_bytes;
		if (num_dirty < thresh)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;
	/*
	 * We need to mask out eg. __GFP_HIGHMEM and __GFP_DMA32 as we're doing
	 * slab allocation from alloc_extent_state down the callchain where
	 * it'd hit a BUG_ON as those flags are not allowed.
	 */
	gfp_flags &= ~GFP_SLAB_BUG_MASK;

	return try_release_extent_buffer(page, gfp_flags);
}

static void btree_invalidatepage(struct page *page, unsigned long offset)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		printk(KERN_WARNING "btrfs warning page private not zero "
		       "on page %llu\n", (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
}

static int btree_set_page_dirty(struct page *page)
{
	struct extent_buffer *eb;

	BUG_ON(!PagePrivate(page));
	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);
	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
	BUG_ON(!atomic_read(&eb->refs));
	btrfs_assert_tree_locked(eb);
	return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage	= btree_invalidatepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
	.set_page_dirty	= btree_set_page_dirty,
};

int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
			 u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	int ret = 0;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return 0;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, 0, WAIT_NONE, btree_get_extent, 0);
	free_extent_buffer(buf);
	return ret;
}

int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
			 int mirror_num, struct extent_buffer **eb)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
	int ret;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return 0;

	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

	ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
				       btree_get_extent, mirror_num);
	if (ret) {
		free_extent_buffer(buf);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
		free_extent_buffer(buf);
		return -EIO;
	} else if (extent_buffer_uptodate(buf)) {
		*eb = buf;
	} else {
		free_extent_buffer(buf);
	}
	return 0;
}

struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
					    u64 bytenr, u32 blocksize)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;

	eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
				bytenr, blocksize);
	return eb;
}

struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
						   u64 bytenr, u32 blocksize)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;

	eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
				 bytenr, blocksize);
	return eb;
}

int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	return filemap_fdatawait_range(buf->pages[0]->mapping,
				       buf->start, buf->start + buf->len - 1);
}

struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
				      u32 blocksize, u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return NULL;

	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
	return buf;
}

void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      struct extent_buffer *buf)
{
	if (btrfs_header_generation(buf) ==
	    root->fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			spin_lock(&root->fs_info->delalloc_lock);
			if (root->fs_info->dirty_metadata_bytes >= buf->len)
				root->fs_info->dirty_metadata_bytes -= buf->len;
			else {
				spin_unlock(&root->fs_info->delalloc_lock);
				btrfs_panic(root->fs_info, -EOVERFLOW,
					    "Can't clear %lu bytes from "
					    "dirty_metadata_bytes (%lu)",
					    buf->len,
					    root->fs_info->dirty_metadata_bytes);
			}
			spin_unlock(&root->fs_info->delalloc_lock);
		}

		/* ugh, clear_extent_buffer_dirty needs to lock the page */
		btrfs_set_lock_blocking(buf);
		clear_extent_buffer_dirty(buf);
	}
}

static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
			 u32 stripesize, struct btrfs_root *root,
			 struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	root->node = NULL;
	root->commit_root = NULL;
	root->sectorsize = sectorsize;
	root->nodesize = nodesize;
	root->leafsize = leafsize;
	root->stripesize = stripesize;
	root->ref_cows = 0;
	root->track_dirty = 0;
	root->in_radix = 0;
	root->orphan_item_inserted = 0;
	root->orphan_cleanup_state = 0;

	root->objectid = objectid;
	root->last_trans = 0;
	root->highest_objectid = 0;
	root->name = NULL;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
	root->block_rsv = NULL;
	root->orphan_block_rsv = NULL;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	spin_lock_init(&root->orphan_lock);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->accounting_lock);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->orphan_inodes, 0);
	root->log_batch = 0;
	root->log_transid = 0;
	root->last_log_commit = 0;
	extent_io_tree_init(&root->dirty_log_pages,
			    fs_info->btree_inode->i_mapping);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	memset(&root->root_kobj, 0, sizeof(root->root_kobj));
	root->defrag_trans_start = fs_info->generation;
	init_completion(&root->kobj_unregister);
	root->defrag_running = 0;
	root->root_key.objectid = objectid;
	root->anon_dev = 0;
}

static int __must_check find_and_setup_root(struct btrfs_root *tree_root,
					    struct btrfs_fs_info *fs_info,
					    u64 objectid,
					    struct btrfs_root *root)
{
	int ret;
	u32 blocksize;
	u64 generation;

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, objectid);
	ret = btrfs_find_last_root(tree_root, objectid,
				   &root->root_item, &root->root_key);
	if (ret > 0)
		return -ENOENT;
	else if (ret < 0)
		return ret;

	generation = btrfs_root_generation(&root->root_item);
	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->commit_root = NULL;
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     blocksize, generation);
	if (!root->node || !btrfs_buffer_uptodate(root->node, generation, 0)) {
		free_extent_buffer(root->node);
		root->node = NULL;
		return -EIO;
	}
	root->commit_root = btrfs_root_node(root);
	return 0;
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);
	if (root)
		root->fs_info = fs_info;
	return root;
}

struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 objectid)
{
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret = 0;
	u64 bytenr;

	root = btrfs_alloc_root(fs_info);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, objectid);
	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
				      0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		goto fail;
	}

	bytenr = leaf->start;
	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, objectid);
	root->node = leaf;

	write_extent_buffer(leaf, fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(leaf),
			    BTRFS_FSID_SIZE);
	write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(leaf),
			    BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	root->commit_root = btrfs_root_node(root);
	root->track_dirty = 1;

	root->root_item.flags = 0;
	root->root_item.byte_limit = 0;
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	root->root_item.drop_level = 0;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	btrfs_tree_unlock(leaf);

fail:
	if (ret)
		return ERR_PTR(ret);

	return root;
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
                                         struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct extent_buffer *leaf;

        root = btrfs_alloc_root(fs_info);
        if (!root)
                return ERR_PTR(-ENOMEM);

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, BTRFS_TREE_LOG_OBJECTID);

        root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
        root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
        /*
         * log trees do not get reference counted because they go away
         * before a real commit is actually done. They do store pointers
         * to file data extents, and those reference counts still get
         * updated (along with back refs to the log tree).
         */
        root->ref_cows = 0;

        leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
                                      BTRFS_TREE_LOG_OBJECTID, NULL,
                                      0, 0, 0);
        if (IS_ERR(leaf)) {
                kfree(root);
                return ERR_CAST(leaf);
        }

        memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
        btrfs_set_header_bytenr(leaf, leaf->start);
        btrfs_set_header_generation(leaf, trans->transid);
        btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
        btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
        root->node = leaf;

        write_extent_buffer(root->node, root->fs_info->fsid,
                            (unsigned long)btrfs_header_fsid(root->node),
                            BTRFS_FSID_SIZE);
        btrfs_mark_buffer_dirty(root->node);
        btrfs_tree_unlock(root->node);
        return root;
}
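
/*
 * create the top-level tree that tracks the log trees of all the
 * subvolumes being logged; the result is stored in
 * fs_info->log_root_tree.
 */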
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *log_root;

        log_root = alloc_log_tree(trans, fs_info);
        if (IS_ERR(log_root))
                return PTR_ERR(log_root);
        WARN_ON(fs_info->log_root_tree);
        fs_info->log_root_tree = log_root;
        return 0;
}
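
/*
 * allocate a log tree for one subvolume root, fill in a minimal
 * directory inode item for it and hang it off root->log_root.
 */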
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root)
{
        struct btrfs_root *log_root;
        struct btrfs_inode_item *inode_item;

        log_root = alloc_log_tree(trans, root->fs_info);
        if (IS_ERR(log_root))
                return PTR_ERR(log_root);

        log_root->last_trans = trans->transid;
        log_root->root_key.offset = root->root_key.objectid;

        inode_item = &log_root->root_item.inode;
        inode_item->generation = cpu_to_le64(1);
        inode_item->size = cpu_to_le64(3);
        inode_item->nlink = cpu_to_le32(1);
        inode_item->nbytes = cpu_to_le64(root->leafsize);
        inode_item->mode = cpu_to_le32(S_IFDIR | 0755);

        btrfs_set_root_node(&log_root->root_item, log_root->node);

        WARN_ON(root->log_root);
        root->log_root = log_root;
        root->log_transid = 0;
        root->last_log_commit = 0;
        return 0;
}
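
/*
 * read a root from the root tree on disk. This helper does not consult
 * or populate the fs_roots radix cache; callers that want caching go
 * through btrfs_read_fs_root_no_name() below.
 */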
struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
                                               struct btrfs_key *location)
{
        struct btrfs_root *root;
        struct btrfs_fs_info *fs_info = tree_root->fs_info;
        struct btrfs_path *path;
        struct extent_buffer *l;
        u64 generation;
        u32 blocksize;
        int ret = 0;

        root = btrfs_alloc_root(fs_info);
        if (!root)
                return ERR_PTR(-ENOMEM);
        if (location->offset == (u64)-1) {
                ret = find_and_setup_root(tree_root, fs_info,
                                          location->objectid, root);
                if (ret) {
                        kfree(root);
                        return ERR_PTR(ret);
                }
                goto out;
        }

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, location->objectid);

        path = btrfs_alloc_path();
        if (!path) {
                kfree(root);
                return ERR_PTR(-ENOMEM);
        }
        ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
        if (ret == 0) {
                l = path->nodes[0];
                read_extent_buffer(l, &root->root_item,
                                   btrfs_item_ptr_offset(l, path->slots[0]),
                                   sizeof(root->root_item));
                memcpy(&root->root_key, location, sizeof(*location));
        }
        btrfs_free_path(path);
        if (ret) {
                kfree(root);
                if (ret > 0)
                        ret = -ENOENT;
                return ERR_PTR(ret);
        }

        generation = btrfs_root_generation(&root->root_item);
        blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
                                     blocksize, generation);
        root->commit_root = btrfs_root_node(root);
        BUG_ON(!root->node); /* -ENOMEM */
out:
        if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
                root->ref_cows = 1;
                btrfs_check_and_init_root_item(&root->root_item);
        }

        return root;
}
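
/*
 * cached lookup of a subvolume root. The well known trees are handed
 * back directly from fs_info; everything else is looked up in the
 * fs_roots radix tree and read from disk on a miss. If two tasks race
 * to insert the same root, the loser frees its copy and retries.
 */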
struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
                                              struct btrfs_key *location)
{
        struct btrfs_root *root;
        int ret;

        if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
                return fs_info->tree_root;
        if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
                return fs_info->extent_root;
        if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
                return fs_info->chunk_root;
        if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
                return fs_info->dev_root;
        if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
                return fs_info->csum_root;
again:
        spin_lock(&fs_info->fs_roots_radix_lock);
        root = radix_tree_lookup(&fs_info->fs_roots_radix,
                                 (unsigned long)location->objectid);
        spin_unlock(&fs_info->fs_roots_radix_lock);
        if (root)
                return root;

        root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
        if (IS_ERR(root))
                return root;

        root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
        root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
                                        GFP_NOFS);
        if (!root->free_ino_pinned || !root->free_ino_ctl) {
                ret = -ENOMEM;
                goto fail;
        }

        btrfs_init_free_ino_ctl(root);
        mutex_init(&root->fs_commit_mutex);
        spin_lock_init(&root->cache_lock);
        init_waitqueue_head(&root->cache_wait);

        ret = get_anon_bdev(&root->anon_dev);
        if (ret)
                goto fail;

        if (btrfs_root_refs(&root->root_item) == 0) {
                ret = -ENOENT;
                goto fail;
        }

        ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
        if (ret < 0)
                goto fail;
        if (ret == 0)
                root->orphan_item_inserted = 1;

        ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
        if (ret)
                goto fail;

        spin_lock(&fs_info->fs_roots_radix_lock);
        ret = radix_tree_insert(&fs_info->fs_roots_radix,
                                (unsigned long)root->root_key.objectid,
                                root);
        if (ret == 0)
                root->in_radix = 1;
        spin_unlock(&fs_info->fs_roots_radix_lock);
        radix_tree_preload_end();
        if (ret) {
                if (ret == -EEXIST) {
                        free_fs_root(root);
                        goto again;
                }
                goto fail;
        }

        ret = btrfs_find_dead_roots(fs_info->tree_root,
                                    root->root_key.objectid);
        WARN_ON(ret);
        return root;
fail:
        free_fs_root(root);
        return ERR_PTR(ret);
}
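
/*
 * backing_dev_info congestion callback. The filesystem counts as
 * congested if any one of its underlying block devices is congested.
 */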
static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
        struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
        int ret = 0;
        struct btrfs_device *device;
        struct backing_dev_info *bdi;

        rcu_read_lock();
        list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
                if (!device->bdev)
                        continue;
                bdi = blk_get_backing_dev_info(device->bdev);
                if (bdi && bdi_congested(bdi, bdi_bits)) {
                        ret = 1;
                        break;
                }
        }
        rcu_read_unlock();
        return ret;
}

/*
 * If this fails, caller must call bdi_destroy() to get rid of the
 * bdi again.
 */
static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
        int err;

        bdi->capabilities = BDI_CAP_MAP_COPY;
        err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
        if (err)
                return err;

        bdi->ra_pages = default_backing_dev_info.ra_pages;
        bdi->congested_fn = btrfs_congested_fn;
        bdi->congested_data = info;
        return 0;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions. This is where read checksum verification actually happens.
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
        struct bio *bio;
        struct end_io_wq *end_io_wq;
        struct btrfs_fs_info *fs_info;
        int error;

        end_io_wq = container_of(work, struct end_io_wq, work);
        bio = end_io_wq->bio;
        fs_info = end_io_wq->info;

        error = end_io_wq->error;
        bio->bi_private = end_io_wq->private;
        bio->bi_end_io = end_io_wq->end_io;
        kfree(end_io_wq);
        bio_endio(bio, error);
}
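
/*
 * background thread that, while the fs is writable, runs delayed inode
 * iputs, reclaims deleted snapshots and kicks off inode defrag. It only
 * does work when it can take the cleaner_mutex without blocking.
 */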
static int cleaner_kthread(void *arg)
{
        struct btrfs_root *root = arg;

        do {
                vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);

                if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
                    mutex_trylock(&root->fs_info->cleaner_mutex)) {
                        btrfs_run_delayed_iputs(root);
                        btrfs_clean_old_snapshots(root);
                        mutex_unlock(&root->fs_info->cleaner_mutex);
                        btrfs_run_defrag_inodes(root->fs_info);
                }

                if (!try_to_freeze()) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (!kthread_should_stop())
                                schedule();
                        __set_current_state(TASK_RUNNING);
                }
        } while (!kthread_should_stop());
        return 0;
}
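
/*
 * background thread that commits the running transaction once it is
 * either blocked or more than 30 seconds old. Younger, unblocked
 * transactions are left alone and rechecked after a short sleep.
 */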
static int transaction_kthread(void *arg)
{
        struct btrfs_root *root = arg;
        struct btrfs_trans_handle *trans;
        struct btrfs_transaction *cur;
        u64 transid;
        unsigned long now;
        unsigned long delay;
        bool cannot_commit;

        do {
                cannot_commit = false;
                delay = HZ * 30;
                vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
                mutex_lock(&root->fs_info->transaction_kthread_mutex);

                spin_lock(&root->fs_info->trans_lock);
                cur = root->fs_info->running_transaction;
                if (!cur) {
                        spin_unlock(&root->fs_info->trans_lock);
                        goto sleep;
                }

                now = get_seconds();
                if (!cur->blocked &&
                    (now < cur->start_time || now - cur->start_time < 30)) {
                        spin_unlock(&root->fs_info->trans_lock);
                        delay = HZ * 5;
                        goto sleep;
                }
                transid = cur->transid;
                spin_unlock(&root->fs_info->trans_lock);

                /* If the file system is aborted, this will always fail. */
                trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        cannot_commit = true;
                        goto sleep;
                }
                if (transid == trans->transid) {
                        btrfs_commit_transaction(trans, root);
                } else {
                        btrfs_end_transaction(trans, root);
                }
sleep:
                wake_up_process(root->fs_info->cleaner_kthread);
                mutex_unlock(&root->fs_info->transaction_kthread_mutex);

                if (!try_to_freeze()) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (!kthread_should_stop() &&
                            (!btrfs_transaction_blocked(root->fs_info) ||
                             cannot_commit))
                                schedule_timeout(delay);
                        __set_current_state(TASK_RUNNING);
                }
        } while (!kthread_should_stop());
        return 0;
}

/*
 * this will find the highest generation in the array of root backups.
 * The index of the backup slot holding that generation is returned,
 * or -1 if we can't find anything.
 *
 * We check to make sure the array is valid by comparing the
 * generation of the latest root in the array with the generation
 * in the super block. If they don't match we pitch it.
 */
static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
{
        u64 cur;
        int newest_index = -1;
        struct btrfs_root_backup *root_backup;
        int i;

        for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
                root_backup = info->super_copy->super_roots + i;
                cur = btrfs_backup_tree_root_gen(root_backup);
                if (cur == newest_gen)
                        newest_index = i;
        }

        /* check to see if we actually wrapped around */
        if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
                root_backup = info->super_copy->super_roots;
                cur = btrfs_backup_tree_root_gen(root_backup);
                if (cur == newest_gen)
                        newest_index = 0;
        }
        return newest_index;
}

/*
 * find the oldest backup so we know where to store new entries
 * in the backup array. This will set the backup_root_index
 * field in the fs_info struct.
 */
static void find_oldest_super_backup(struct btrfs_fs_info *info,
                                     u64 newest_gen)
{
        int newest_index = -1;

        newest_index = find_newest_super_backup(info, newest_gen);
        /* if there was garbage in there, just move along */
        if (newest_index == -1) {
                info->backup_root_index = 0;
        } else {
                info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
        }
}

/*
 * copy all the root pointers into the super backup array.
 * this will bump the backup pointer by one when it is
 * done.
 */
static void backup_super_roots(struct btrfs_fs_info *info)
{
        int next_backup;
        struct btrfs_root_backup *root_backup;
        int last_backup;

        next_backup = info->backup_root_index;
        last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
                BTRFS_NUM_BACKUP_ROOTS;

        /*
         * just overwrite the last backup if we're at the same generation;
         * this happens only at umount.
         */
        root_backup = info->super_for_commit->super_roots + last_backup;
        if (btrfs_backup_tree_root_gen(root_backup) ==
            btrfs_header_generation(info->tree_root->node))
                next_backup = last_backup;

        root_backup = info->super_for_commit->super_roots + next_backup;

        /*
         * make sure all of our padding and empty slots get zero filled
         * regardless of which ones we use today
         */
        memset(root_backup, 0, sizeof(*root_backup));

        info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;

        btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
        btrfs_set_backup_tree_root_gen(root_backup,
                        btrfs_header_generation(info->tree_root->node));
        btrfs_set_backup_tree_root_level(root_backup,
                        btrfs_header_level(info->tree_root->node));

        btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
        btrfs_set_backup_chunk_root_gen(root_backup,
                        btrfs_header_generation(info->chunk_root->node));
        btrfs_set_backup_chunk_root_level(root_backup,
                        btrfs_header_level(info->chunk_root->node));

        btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
        btrfs_set_backup_extent_root_gen(root_backup,
                        btrfs_header_generation(info->extent_root->node));
        btrfs_set_backup_extent_root_level(root_backup,
                        btrfs_header_level(info->extent_root->node));

        /*
         * we might commit during log recovery, which happens before we set
         * the fs_root. Make sure it is valid before we fill it in.
         */
        if (info->fs_root && info->fs_root->node) {
                btrfs_set_backup_fs_root(root_backup,
                                         info->fs_root->node->start);
                btrfs_set_backup_fs_root_gen(root_backup,
                        btrfs_header_generation(info->fs_root->node));
                btrfs_set_backup_fs_root_level(root_backup,
                        btrfs_header_level(info->fs_root->node));
        }

        btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
        btrfs_set_backup_dev_root_gen(root_backup,
                        btrfs_header_generation(info->dev_root->node));
        btrfs_set_backup_dev_root_level(root_backup,
                        btrfs_header_level(info->dev_root->node));

        btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
        btrfs_set_backup_csum_root_gen(root_backup,
                        btrfs_header_generation(info->csum_root->node));
        btrfs_set_backup_csum_root_level(root_backup,
                        btrfs_header_level(info->csum_root->node));

        btrfs_set_backup_total_bytes(root_backup,
                        btrfs_super_total_bytes(info->super_copy));
        btrfs_set_backup_bytes_used(root_backup,
                        btrfs_super_bytes_used(info->super_copy));
        btrfs_set_backup_num_devices(root_backup,
                        btrfs_super_num_devices(info->super_copy));

        /*
         * if we don't copy this out to the super_copy, it won't get remembered
         * for the next commit
         */
        memcpy(&info->super_copy->super_roots,
               &info->super_for_commit->super_roots,
               sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
}

/*
 * this copies info out of the root backup array and back into
 * the in-memory super block. It is meant to help iterate through
 * the array, so you send it the number of backups you've already
 * tried and the last backup index you used.
 *
 * this returns -1 when it has tried all the backups
 */
static noinline int next_root_backup(struct btrfs_fs_info *info,
                                     struct btrfs_super_block *super,
                                     int *num_backups_tried, int *backup_index)
{
        struct btrfs_root_backup *root_backup;
        int newest = *backup_index;

        if (*num_backups_tried == 0) {
                u64 gen = btrfs_super_generation(super);

                newest = find_newest_super_backup(info, gen);
                if (newest == -1)
                        return -1;

                *backup_index = newest;
                *num_backups_tried = 1;
        } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
                /* we've tried all the backups, all done */
                return -1;
        } else {
                /* jump to the next oldest backup */
                newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
                        BTRFS_NUM_BACKUP_ROOTS;
                *backup_index = newest;
                *num_backups_tried += 1;
        }
        root_backup = super->super_roots + newest;

        btrfs_set_super_generation(super,
                                   btrfs_backup_tree_root_gen(root_backup));
        btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
        btrfs_set_super_root_level(super,
                                   btrfs_backup_tree_root_level(root_backup));
        btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));

        /*
         * fixme: the total bytes and num_devices need to match or we should
         * require a fsck
         */
        btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
        btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
        return 0;
}

/* helper to cleanup tree roots */
static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
{
        free_extent_buffer(info->tree_root->node);
        free_extent_buffer(info->tree_root->commit_root);
        free_extent_buffer(info->dev_root->node);
        free_extent_buffer(info->dev_root->commit_root);
        free_extent_buffer(info->extent_root->node);
        free_extent_buffer(info->extent_root->commit_root);
        free_extent_buffer(info->csum_root->node);
        free_extent_buffer(info->csum_root->commit_root);

        info->tree_root->node = NULL;
        info->tree_root->commit_root = NULL;
        info->dev_root->node = NULL;
        info->dev_root->commit_root = NULL;
        info->extent_root->node = NULL;
        info->extent_root->commit_root = NULL;
        info->csum_root->node = NULL;
        info->csum_root->commit_root = NULL;

        if (chunk_root) {
                free_extent_buffer(info->chunk_root->node);
                free_extent_buffer(info->chunk_root->commit_root);
                info->chunk_root->node = NULL;
                info->chunk_root->commit_root = NULL;
        }
}
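
/*
 * main entry point for bringing a filesystem online: read the super
 * block, set up the chunk, root, extent, dev and csum trees, start the
 * worker threads and the cleaner/transaction kthreads, replay the tree
 * log if one exists and finally read the default fs root. On failure
 * everything is unwound through the fail_* labels at the bottom.
 */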
int open_ctree(struct super_block *sb,
               struct btrfs_fs_devices *fs_devices,
               char *options)
{
        u32 sectorsize;
        u32 nodesize;
        u32 leafsize;
        u32 blocksize;
        u32 stripesize;
        u64 generation;
        u64 features;
        struct btrfs_key location;
        struct buffer_head *bh;
        struct btrfs_super_block *disk_super;
        struct btrfs_fs_info *fs_info = btrfs_sb(sb);
        struct btrfs_root *tree_root;
        struct btrfs_root *extent_root;
        struct btrfs_root *csum_root;
        struct btrfs_root *chunk_root;
        struct btrfs_root *dev_root;
        struct btrfs_root *log_tree_root;
        int ret;
        int err = -EINVAL;
        int num_backups_tried = 0;
        int backup_index = 0;

        tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
        extent_root = fs_info->extent_root = btrfs_alloc_root(fs_info);
        csum_root = fs_info->csum_root = btrfs_alloc_root(fs_info);
        chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
        dev_root = fs_info->dev_root = btrfs_alloc_root(fs_info);

        if (!tree_root || !extent_root || !csum_root ||
            !chunk_root || !dev_root) {
                err = -ENOMEM;
                goto fail;
        }

        ret = init_srcu_struct(&fs_info->subvol_srcu);
        if (ret) {
                err = ret;
                goto fail;
        }

        ret = setup_bdi(fs_info, &fs_info->bdi);
        if (ret) {
                err = ret;
                goto fail_srcu;
        }

        fs_info->btree_inode = new_inode(sb);
        if (!fs_info->btree_inode) {
                err = -ENOMEM;
                goto fail_bdi;
        }

        mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);

        INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
        INIT_LIST_HEAD(&fs_info->trans_list);
        INIT_LIST_HEAD(&fs_info->dead_roots);
        INIT_LIST_HEAD(&fs_info->delayed_iputs);
        INIT_LIST_HEAD(&fs_info->hashers);
        INIT_LIST_HEAD(&fs_info->delalloc_inodes);
        INIT_LIST_HEAD(&fs_info->ordered_operations);
        INIT_LIST_HEAD(&fs_info->caching_block_groups);
        spin_lock_init(&fs_info->delalloc_lock);
        spin_lock_init(&fs_info->trans_lock);
        spin_lock_init(&fs_info->ref_cache_lock);
        spin_lock_init(&fs_info->fs_roots_radix_lock);
        spin_lock_init(&fs_info->delayed_iput_lock);
        spin_lock_init(&fs_info->defrag_inodes_lock);
        spin_lock_init(&fs_info->free_chunk_lock);
        spin_lock_init(&fs_info->tree_mod_seq_lock);
        rwlock_init(&fs_info->tree_mod_log_lock);
        mutex_init(&fs_info->reloc_mutex);

        init_completion(&fs_info->kobj_unregister);
        INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
        INIT_LIST_HEAD(&fs_info->space_info);
        INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
        btrfs_mapping_init(&fs_info->mapping_tree);
        btrfs_init_block_rsv(&fs_info->global_block_rsv);
        btrfs_init_block_rsv(&fs_info->delalloc_block_rsv);
        btrfs_init_block_rsv(&fs_info->trans_block_rsv);
        btrfs_init_block_rsv(&fs_info->chunk_block_rsv);
        btrfs_init_block_rsv(&fs_info->empty_block_rsv);
        btrfs_init_block_rsv(&fs_info->delayed_block_rsv);
        atomic_set(&fs_info->nr_async_submits, 0);
        atomic_set(&fs_info->async_delalloc_pages, 0);
        atomic_set(&fs_info->async_submit_draining, 0);
        atomic_set(&fs_info->nr_async_bios, 0);
        atomic_set(&fs_info->defrag_running, 0);
        atomic_set(&fs_info->tree_mod_seq, 0);
        fs_info->sb = sb;
        fs_info->max_inline = 8192 * 1024;
        fs_info->metadata_ratio = 0;
        fs_info->defrag_inodes = RB_ROOT;
        fs_info->trans_no_join = 0;
        fs_info->free_chunk_space = 0;
        fs_info->tree_mod_log = RB_ROOT;
        init_waitqueue_head(&fs_info->tree_mod_seq_wait);

        /* readahead state */
        INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
        spin_lock_init(&fs_info->reada_lock);

        fs_info->thread_pool_size = min_t(unsigned long,
                                          num_online_cpus() + 2, 8);

        INIT_LIST_HEAD(&fs_info->ordered_extents);
        spin_lock_init(&fs_info->ordered_extent_lock);
        fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
                                        GFP_NOFS);
        if (!fs_info->delayed_root) {
                err = -ENOMEM;
                goto fail_iput;
        }
        btrfs_init_delayed_root(fs_info->delayed_root);

        mutex_init(&fs_info->scrub_lock);
        atomic_set(&fs_info->scrubs_running, 0);
        atomic_set(&fs_info->scrub_pause_req, 0);
        atomic_set(&fs_info->scrubs_paused, 0);
        atomic_set(&fs_info->scrub_cancel_req, 0);
        init_waitqueue_head(&fs_info->scrub_pause_wait);
        init_rwsem(&fs_info->scrub_super_lock);
        fs_info->scrub_workers_refcnt = 0;
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
        fs_info->check_integrity_print_mask = 0;
#endif

        spin_lock_init(&fs_info->balance_lock);
        mutex_init(&fs_info->balance_mutex);
        atomic_set(&fs_info->balance_running, 0);
        atomic_set(&fs_info->balance_pause_req, 0);
        atomic_set(&fs_info->balance_cancel_req, 0);
        fs_info->balance_ctl = NULL;
        init_waitqueue_head(&fs_info->balance_wait_q);

        sb->s_blocksize = 4096;
        sb->s_blocksize_bits = blksize_bits(4096);
        sb->s_bdi = &fs_info->bdi;

        fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
        set_nlink(fs_info->btree_inode, 1);
        /*
         * we set the i_size on the btree inode to the max possible int.
         * the real end of the address space is determined by all of
         * the devices in the system
         */
        fs_info->btree_inode->i_size = OFFSET_MAX;
        fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
        fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;

        RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
        extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
                            fs_info->btree_inode->i_mapping);
        BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
        extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);

        BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;

        BTRFS_I(fs_info->btree_inode)->root = tree_root;
        memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
               sizeof(struct btrfs_key));
        set_bit(BTRFS_INODE_DUMMY,
                &BTRFS_I(fs_info->btree_inode)->runtime_flags);
        insert_inode_hash(fs_info->btree_inode);

        spin_lock_init(&fs_info->block_group_cache_lock);
        fs_info->block_group_cache_tree = RB_ROOT;

        extent_io_tree_init(&fs_info->freed_extents[0],
                            fs_info->btree_inode->i_mapping);
        extent_io_tree_init(&fs_info->freed_extents[1],
                            fs_info->btree_inode->i_mapping);
        fs_info->pinned_extents = &fs_info->freed_extents[0];
        fs_info->do_barriers = 1;

        mutex_init(&fs_info->ordered_operations_mutex);
        mutex_init(&fs_info->tree_log_mutex);
        mutex_init(&fs_info->chunk_mutex);
        mutex_init(&fs_info->transaction_kthread_mutex);
        mutex_init(&fs_info->cleaner_mutex);
        mutex_init(&fs_info->volume_mutex);
        init_rwsem(&fs_info->extent_commit_sem);
        init_rwsem(&fs_info->cleanup_work_sem);
        init_rwsem(&fs_info->subvol_sem);

        spin_lock_init(&fs_info->qgroup_lock);
        fs_info->qgroup_tree = RB_ROOT;
        INIT_LIST_HEAD(&fs_info->dirty_qgroups);
        fs_info->qgroup_seq = 1;
        fs_info->quota_enabled = 0;
        fs_info->pending_quota_state = 0;

        btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
        btrfs_init_free_cluster(&fs_info->data_alloc_cluster);

        init_waitqueue_head(&fs_info->transaction_throttle);
        init_waitqueue_head(&fs_info->transaction_wait);
        init_waitqueue_head(&fs_info->transaction_blocked_wait);
        init_waitqueue_head(&fs_info->async_submit_wait);

        __setup_root(4096, 4096, 4096, 4096, tree_root,
                     fs_info, BTRFS_ROOT_TREE_OBJECTID);

        invalidate_bdev(fs_devices->latest_bdev);
        bh = btrfs_read_dev_super(fs_devices->latest_bdev);
        if (!bh) {
                err = -EINVAL;
                goto fail_alloc;
        }

        memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
        memcpy(fs_info->super_for_commit, fs_info->super_copy,
               sizeof(*fs_info->super_for_commit));
        brelse(bh);

        memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);

        disk_super = fs_info->super_copy;
        if (!btrfs_super_root(disk_super))
                goto fail_alloc;

        /* check FS state, whether FS is broken. */
        fs_info->fs_state |= btrfs_super_flags(disk_super);

        ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
        if (ret) {
                printk(KERN_ERR "btrfs: superblock contains fatal errors\n");
                err = ret;
                goto fail_alloc;
        }

        /*
         * run through our array of backup supers and setup
         * our ring pointer to the oldest one
         */
        generation = btrfs_super_generation(disk_super);
        find_oldest_super_backup(fs_info, generation);

        /*
         * In the long term, we'll store the compression type in the super
         * block, and it'll be used for per file compression control.
         */
        fs_info->compress_type = BTRFS_COMPRESS_ZLIB;

        ret = btrfs_parse_options(tree_root, options);
        if (ret) {
                err = ret;
                goto fail_alloc;
        }

        features = btrfs_super_incompat_flags(disk_super) &
                ~BTRFS_FEATURE_INCOMPAT_SUPP;
        if (features) {
                printk(KERN_ERR "BTRFS: couldn't mount because of "
                       "unsupported optional features (%Lx).\n",
                       (unsigned long long)features);
                err = -EINVAL;
                goto fail_alloc;
        }

        if (btrfs_super_leafsize(disk_super) !=
            btrfs_super_nodesize(disk_super)) {
                printk(KERN_ERR "BTRFS: couldn't mount because metadata "
                       "blocksizes don't match. node %d leaf %d\n",
                       btrfs_super_nodesize(disk_super),
                       btrfs_super_leafsize(disk_super));
                err = -EINVAL;
                goto fail_alloc;
        }
        if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
                printk(KERN_ERR "BTRFS: couldn't mount because metadata "
                       "blocksize (%d) was too large\n",
                       btrfs_super_leafsize(disk_super));
                err = -EINVAL;
                goto fail_alloc;
        }

        features = btrfs_super_incompat_flags(disk_super);
        features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
        if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
                features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;

        /*
         * flag our filesystem as having big metadata blocks if
         * they are bigger than the page size
         */
        if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
                if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
                        printk(KERN_INFO "btrfs flagging fs with big metadata feature\n");
                features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
        }

        nodesize = btrfs_super_nodesize(disk_super);
        leafsize = btrfs_super_leafsize(disk_super);
        sectorsize = btrfs_super_sectorsize(disk_super);
        stripesize = btrfs_super_stripesize(disk_super);

        /*
         * mixed block groups end up with duplicate but slightly offset
         * extent buffers for the same range. It leads to corruptions
         */
        if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
            (sectorsize != leafsize)) {
                printk(KERN_WARNING "btrfs: unequal leaf/node/sector sizes "
                       "are not allowed for mixed block groups on %s\n",
                       sb->s_id);
                goto fail_alloc;
        }

        btrfs_set_super_incompat_flags(disk_super, features);

        features = btrfs_super_compat_ro_flags(disk_super) &
                ~BTRFS_FEATURE_COMPAT_RO_SUPP;
        if (!(sb->s_flags & MS_RDONLY) && features) {
                printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
                       "unsupported option features (%Lx).\n",
                       (unsigned long long)features);
                err = -EINVAL;
                goto fail_alloc;
        }

        btrfs_init_workers(&fs_info->generic_worker,
                           "genwork", 1, NULL);

        btrfs_init_workers(&fs_info->workers, "worker",
                           fs_info->thread_pool_size,
                           &fs_info->generic_worker);

        btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
                           fs_info->thread_pool_size,
                           &fs_info->generic_worker);

        btrfs_init_workers(&fs_info->submit_workers, "submit",
                           min_t(u64, fs_devices->num_devices,
                                 fs_info->thread_pool_size),
                           &fs_info->generic_worker);

        btrfs_init_workers(&fs_info->caching_workers, "cache",
                           2, &fs_info->generic_worker);

        /* a higher idle thresh on the submit workers makes it much more
         * likely that bios will be sent down in a sane order to the
         * devices
         */
        fs_info->submit_workers.idle_thresh = 64;

        fs_info->workers.idle_thresh = 16;
        fs_info->workers.ordered = 1;

        fs_info->delalloc_workers.idle_thresh = 2;
        fs_info->delalloc_workers.ordered = 1;

        btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
                           &fs_info->generic_worker);
        btrfs_init_workers(&fs_info->endio_workers, "endio",
                           fs_info->thread_pool_size,
                           &fs_info->generic_worker);
        btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
                           fs_info->thread_pool_size,
                           &fs_info->generic_worker);
        btrfs_init_workers(&fs_info->endio_meta_write_workers,
                           "endio-meta-write", fs_info->thread_pool_size,
                           &fs_info->generic_worker);
        btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
                           fs_info->thread_pool_size,
                           &fs_info->generic_worker);
        btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
                           1, &fs_info->generic_worker);
        btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
                           fs_info->thread_pool_size,
                           &fs_info->generic_worker);
        btrfs_init_workers(&fs_info->readahead_workers, "readahead",
                           fs_info->thread_pool_size,
                           &fs_info->generic_worker);

        /*
         * endios are largely parallel and should have a very
         * low idle thresh
         */
        fs_info->endio_workers.idle_thresh = 4;
        fs_info->endio_meta_workers.idle_thresh = 4;

        fs_info->endio_write_workers.idle_thresh = 2;
        fs_info->endio_meta_write_workers.idle_thresh = 2;
        fs_info->readahead_workers.idle_thresh = 2;

        /*
         * btrfs_start_workers can really only fail because of ENOMEM so just
         * return -ENOMEM if any of these fail.
         */
        ret = btrfs_start_workers(&fs_info->workers);
        ret |= btrfs_start_workers(&fs_info->generic_worker);
        ret |= btrfs_start_workers(&fs_info->submit_workers);
        ret |= btrfs_start_workers(&fs_info->delalloc_workers);
        ret |= btrfs_start_workers(&fs_info->fixup_workers);
        ret |= btrfs_start_workers(&fs_info->endio_workers);
        ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
        ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
        ret |= btrfs_start_workers(&fs_info->endio_write_workers);
        ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
        ret |= btrfs_start_workers(&fs_info->delayed_workers);
        ret |= btrfs_start_workers(&fs_info->caching_workers);
        ret |= btrfs_start_workers(&fs_info->readahead_workers);
        if (ret) {
                err = -ENOMEM;
                goto fail_sb_buffer;
        }

        fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
        fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
                                    4 * 1024 * 1024 / PAGE_CACHE_SIZE);

        tree_root->nodesize = nodesize;
        tree_root->leafsize = leafsize;
        tree_root->sectorsize = sectorsize;
        tree_root->stripesize = stripesize;

        sb->s_blocksize = sectorsize;
        sb->s_blocksize_bits = blksize_bits(sectorsize);

        if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
                    sizeof(disk_super->magic))) {
                printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
                goto fail_sb_buffer;
        }

        if (sectorsize != PAGE_SIZE) {
                printk(KERN_WARNING "btrfs: Incompatible sector size(%lu) "
                       "found on %s\n", (unsigned long)sectorsize, sb->s_id);
                goto fail_sb_buffer;
        }

        mutex_lock(&fs_info->chunk_mutex);
        ret = btrfs_read_sys_array(tree_root);
        mutex_unlock(&fs_info->chunk_mutex);
        if (ret) {
                printk(KERN_WARNING "btrfs: failed to read the system "
                       "array on %s\n", sb->s_id);
                goto fail_sb_buffer;
        }

        blocksize = btrfs_level_size(tree_root,
                                     btrfs_super_chunk_root_level(disk_super));
        generation = btrfs_super_chunk_root_generation(disk_super);

        __setup_root(nodesize, leafsize, sectorsize, stripesize,
                     chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);

        chunk_root->node = read_tree_block(chunk_root,
                                           btrfs_super_chunk_root(disk_super),
                                           blocksize, generation);
        BUG_ON(!chunk_root->node); /* -ENOMEM */
        if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
                printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
                       sb->s_id);
                goto fail_tree_roots;
        }
        btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
        chunk_root->commit_root = btrfs_root_node(chunk_root);

        read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
           (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
           BTRFS_UUID_SIZE);

        ret = btrfs_read_chunk_tree(chunk_root);
        if (ret) {
                printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
                       sb->s_id);
                goto fail_tree_roots;
        }

        btrfs_close_extra_devices(fs_devices);

        if (!fs_devices->latest_bdev) {
                printk(KERN_CRIT "btrfs: failed to read devices on %s\n",
                       sb->s_id);
                goto fail_tree_roots;
        }

retry_root_backup:
        blocksize = btrfs_level_size(tree_root,
                                     btrfs_super_root_level(disk_super));
        generation = btrfs_super_generation(disk_super);

        tree_root->node = read_tree_block(tree_root,
                                          btrfs_super_root(disk_super),
                                          blocksize, generation);
        if (!tree_root->node ||
            !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
                printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
                       sb->s_id);
                goto recovery_tree_root;
        }

        btrfs_set_root_node(&tree_root->root_item, tree_root->node);
        tree_root->commit_root = btrfs_root_node(tree_root);

        ret = find_and_setup_root(tree_root, fs_info,
                                  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
        if (ret)
                goto recovery_tree_root;
        extent_root->track_dirty = 1;

        ret = find_and_setup_root(tree_root, fs_info,
                                  BTRFS_DEV_TREE_OBJECTID, dev_root);
        if (ret)
                goto recovery_tree_root;
        dev_root->track_dirty = 1;

        ret = find_and_setup_root(tree_root, fs_info,
                                  BTRFS_CSUM_TREE_OBJECTID, csum_root);
        if (ret)
                goto recovery_tree_root;
        csum_root->track_dirty = 1;

        fs_info->generation = generation;
        fs_info->last_trans_committed = generation;

        ret = btrfs_recover_balance(fs_info);
        if (ret) {
                printk(KERN_WARNING "btrfs: failed to recover balance\n");
                goto fail_block_groups;
        }

        ret = btrfs_init_dev_stats(fs_info);
        if (ret) {
                printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
                       ret);
                goto fail_block_groups;
        }

        ret = btrfs_init_space_info(fs_info);
        if (ret) {
                printk(KERN_ERR "Failed to initialize space info: %d\n", ret);
                goto fail_block_groups;
        }

        ret = btrfs_read_block_groups(extent_root);
        if (ret) {
                printk(KERN_ERR "Failed to read block groups: %d\n", ret);
                goto fail_block_groups;
        }

        fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
                                               "btrfs-cleaner");
        if (IS_ERR(fs_info->cleaner_kthread))
                goto fail_block_groups;

        fs_info->transaction_kthread = kthread_run(transaction_kthread,
                                                   tree_root,
                                                   "btrfs-transaction");
        if (IS_ERR(fs_info->transaction_kthread))
                goto fail_cleaner;

        if (!btrfs_test_opt(tree_root, SSD) &&
            !btrfs_test_opt(tree_root, NOSSD) &&
            !fs_info->fs_devices->rotating) {
                printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
                       "mode\n");
                btrfs_set_opt(fs_info->mount_opt, SSD);
        }

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
        if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
                ret = btrfsic_mount(tree_root, fs_devices,
                                    btrfs_test_opt(tree_root,
                                        CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
                                    1 : 0,
                                    fs_info->check_integrity_print_mask);
                if (ret)
                        printk(KERN_WARNING "btrfs: failed to initialize"
                               " integrity check module %s\n", sb->s_id);
        }
#endif

        /* do not make disk changes in broken FS */
        if (btrfs_super_log_root(disk_super) != 0 &&
            !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) {
                u64 bytenr = btrfs_super_log_root(disk_super);

                if (fs_devices->rw_devices == 0) {
                        printk(KERN_WARNING "Btrfs log replay required "
                               "on RO media\n");
                        err = -EIO;
                        goto fail_trans_kthread;
                }
                blocksize =
                     btrfs_level_size(tree_root,
                                      btrfs_super_log_root_level(disk_super));

                log_tree_root = btrfs_alloc_root(fs_info);
                if (!log_tree_root) {
                        err = -ENOMEM;
                        goto fail_trans_kthread;
                }

                __setup_root(nodesize, leafsize, sectorsize, stripesize,
                             log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);

                log_tree_root->node = read_tree_block(tree_root, bytenr,
                                                      blocksize,
                                                      generation + 1);
                /* returns with log_tree_root freed on success */
                ret = btrfs_recover_log_trees(log_tree_root);
                if (ret) {
                        btrfs_error(tree_root->fs_info, ret,
                                    "Failed to recover log tree");
                        free_extent_buffer(log_tree_root->node);
                        kfree(log_tree_root);
                        goto fail_trans_kthread;
                }

                if (sb->s_flags & MS_RDONLY) {
                        ret = btrfs_commit_super(tree_root);
                        if (ret)
                                goto fail_trans_kthread;
                }
        }

        ret = btrfs_find_orphan_roots(tree_root);
        if (ret)
                goto fail_trans_kthread;

        if (!(sb->s_flags & MS_RDONLY)) {
                ret = btrfs_cleanup_fs_roots(fs_info);
                if (ret) {
                        /* XXX: errors from orphan cleanup are ignored here */
                }

                ret = btrfs_recover_relocation(tree_root);
                if (ret < 0) {
                        printk(KERN_WARNING
                               "btrfs: failed to recover relocation\n");
                        err = -EINVAL;
                        goto fail_trans_kthread;
                }
        }

        location.objectid = BTRFS_FS_TREE_OBJECTID;
        location.type = BTRFS_ROOT_ITEM_KEY;
        location.offset = (u64)-1;

        fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
        if (!fs_info->fs_root)
                goto fail_trans_kthread;
        if (IS_ERR(fs_info->fs_root)) {
                err = PTR_ERR(fs_info->fs_root);
                goto fail_trans_kthread;
        }

        if (sb->s_flags & MS_RDONLY)
                return 0;

        down_read(&fs_info->cleanup_work_sem);
        if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
            (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
                up_read(&fs_info->cleanup_work_sem);
                close_ctree(tree_root);
                return ret;
        }
        up_read(&fs_info->cleanup_work_sem);

        ret = btrfs_resume_balance_async(fs_info);
        if (ret) {
                printk(KERN_WARNING "btrfs: failed to resume balance\n");
                close_ctree(tree_root);
                return ret;
        }

        return 0;

fail_trans_kthread:
        kthread_stop(fs_info->transaction_kthread);
fail_cleaner:
        kthread_stop(fs_info->cleaner_kthread);

        /*
         * make sure we're done with the btree inode before we stop our
         * kthreads
         */
        filemap_write_and_wait(fs_info->btree_inode->i_mapping);
        invalidate_inode_pages2(fs_info->btree_inode->i_mapping);

fail_block_groups:
        btrfs_free_block_groups(fs_info);

fail_tree_roots:
        free_root_pointers(fs_info, 1);

fail_sb_buffer:
        btrfs_stop_workers(&fs_info->generic_worker);
        btrfs_stop_workers(&fs_info->readahead_workers);
        btrfs_stop_workers(&fs_info->fixup_workers);
        btrfs_stop_workers(&fs_info->delalloc_workers);
        btrfs_stop_workers(&fs_info->workers);
        btrfs_stop_workers(&fs_info->endio_workers);
        btrfs_stop_workers(&fs_info->endio_meta_workers);
        btrfs_stop_workers(&fs_info->endio_meta_write_workers);
        btrfs_stop_workers(&fs_info->endio_write_workers);
        btrfs_stop_workers(&fs_info->endio_freespace_worker);
        btrfs_stop_workers(&fs_info->submit_workers);
        btrfs_stop_workers(&fs_info->delayed_workers);
        btrfs_stop_workers(&fs_info->caching_workers);
fail_alloc:
fail_iput:
        btrfs_mapping_tree_free(&fs_info->mapping_tree);

        invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
        iput(fs_info->btree_inode);
fail_bdi:
        bdi_destroy(&fs_info->bdi);
fail_srcu:
        cleanup_srcu_struct(&fs_info->subvol_srcu);
fail:
        btrfs_close_devices(fs_info->fs_devices);
        return err;

recovery_tree_root:
        if (!btrfs_test_opt(tree_root, RECOVERY))
                goto fail_tree_roots;

        free_root_pointers(fs_info, 0);

        /* don't use the log in recovery mode, it won't be valid */
        btrfs_set_super_log_root(disk_super, 0);

        /* we can't trust the free space cache either */
        btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);

        ret = next_root_backup(fs_info, fs_info->super_copy,
                               &num_backups_tried, &backup_index);
        if (ret == -1)
                goto fail_block_groups;
        goto retry_root_backup;
}
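
/*
 * end_io handler for the super block buffer heads submitted by
 * write_dev_supers() below. Write errors are recorded in the
 * per-device stats rather than flagged on the buffer itself.
 */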
static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                struct btrfs_device *device = (struct btrfs_device *)
                        bh->b_private;

                printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to "
                                          "I/O error on %s\n",
                                          rcu_str_deref(device->name));
                /* note, we don't set_buffer_write_io_error because we have
                 * our own ways of dealing with the IO errors
                 */
                clear_buffer_uptodate(bh);
                btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
        }
        unlock_buffer(bh);
        put_bh(bh);
}

struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
{
        struct buffer_head *bh;
        struct buffer_head *latest = NULL;
        struct btrfs_super_block *super;
        int i;
        u64 transid = 0;
        u64 bytenr;

        /* we would like to check all the supers, but that would make
         * a btrfs mount succeed after a mkfs from a different FS.
         * So, we need to add a special mount option to scan for
         * later supers, using BTRFS_SUPER_MIRROR_MAX instead
         */
        for (i = 0; i < 1; i++) {
                bytenr = btrfs_sb_offset(i);
                if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
                        break;
                bh = __bread(bdev, bytenr / 4096, 4096);
                if (!bh)
                        continue;

                super = (struct btrfs_super_block *)bh->b_data;
                if (btrfs_super_bytenr(super) != bytenr ||
                    strncmp((char *)(&super->magic), BTRFS_MAGIC,
                            sizeof(super->magic))) {
                        brelse(bh);
                        continue;
                }

                if (!latest || btrfs_super_generation(super) > transid) {
                        brelse(latest);
                        latest = bh;
                        transid = btrfs_super_generation(super);
                } else {
                        brelse(bh);
                }
        }
        return latest;
}

/*
 * this should be called twice, once with wait == 0 and
 * once with wait == 1. When wait == 0 is done, all the buffer heads
 * we write are pinned.
 *
 * They are released when wait == 1 is done.
 * max_mirrors must be the same for both runs, and it indicates how
 * many supers on this one device should be written.
 *
 * max_mirrors == 0 means to write them all.
 */
static int write_dev_supers(struct btrfs_device *device,
                            struct btrfs_super_block *sb,
                            int do_barriers, int wait, int max_mirrors)
{
        struct buffer_head *bh;
        int i;
        int ret;
        int errors = 0;
        u32 crc;
        u64 bytenr;

        if (max_mirrors == 0)
                max_mirrors = BTRFS_SUPER_MIRROR_MAX;

        for (i = 0; i < max_mirrors; i++) {
                bytenr = btrfs_sb_offset(i);
                if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
                        break;

                if (wait) {
                        bh = __find_get_block(device->bdev, bytenr / 4096,
                                              BTRFS_SUPER_INFO_SIZE);
                        BUG_ON(!bh);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                errors++;

                        /* drop our reference */
                        brelse(bh);

                        /* drop the reference from the wait == 0 run */
                        brelse(bh);
                        continue;
                } else {
                        btrfs_set_super_bytenr(sb, bytenr);

                        crc = ~(u32)0;
                        crc = btrfs_csum_data(NULL, (char *)sb +
                                              BTRFS_CSUM_SIZE, crc,
                                              BTRFS_SUPER_INFO_SIZE -
                                              BTRFS_CSUM_SIZE);
                        btrfs_csum_final(crc, sb->csum);

                        /*
                         * one reference for us, and we leave it for the
                         * caller
                         */
                        bh = __getblk(device->bdev, bytenr / 4096,
                                      BTRFS_SUPER_INFO_SIZE);
                        memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);

                        /* one reference for submit_bh */
                        get_bh(bh);

                        set_buffer_uptodate(bh);
                        lock_buffer(bh);
                        bh->b_end_io = btrfs_end_buffer_write_sync;
                        bh->b_private = device;
                }

                /*
                 * we fua the first super. The others we allow
                 * to go down lazy.
                 */
                ret = btrfsic_submit_bh(WRITE_FUA, bh);
                if (ret)
                        errors++;
        }
        return errors < i ? 0 : -1;
}

/*
 * endio for the write_dev_flush, this will wake anyone waiting
 * for the barrier when it is done
 */
static void btrfs_end_empty_barrier(struct bio *bio, int err)
{
        if (err) {
                if (err == -EOPNOTSUPP)
                        set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        }
        if (bio->bi_private)
                complete(bio->bi_private);
        bio_put(bio);
}

/*
 * trigger flushes for one of the devices. If you pass wait == 0, the
 * flushes are sent down. With wait == 1, it waits for the previous flush.
 *
 * any device where the flush fails with eopnotsupp is flagged as
 * not-barrier capable
 */
static int write_dev_flush(struct btrfs_device *device, int wait)
{
        struct bio *bio;
        int ret = 0;

        if (device->nobarriers)
                return 0;

        if (wait) {
                bio = device->flush_bio;
                if (!bio)
                        return 0;

                wait_for_completion(&device->flush_wait);

                if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
                        printk_in_rcu("btrfs: disabling barriers on dev %s\n",
                                      rcu_str_deref(device->name));
                        device->nobarriers = 1;
                }
                if (!bio_flagged(bio, BIO_UPTODATE)) {
                        ret = -EIO;
                        if (!bio_flagged(bio, BIO_EOPNOTSUPP))
                                btrfs_dev_stat_inc_and_print(device,
                                        BTRFS_DEV_STAT_FLUSH_ERRS);
                }

                /* drop the reference from the wait == 0 run */
                bio_put(bio);
                device->flush_bio = NULL;

                return ret;
        }

        /*
         * one reference for us, and we leave it for the
         * caller
         */
        device->flush_bio = NULL;
        bio = bio_alloc(GFP_NOFS, 0);
        if (!bio)
                return -ENOMEM;

        bio->bi_end_io = btrfs_end_empty_barrier;
        bio->bi_bdev = device->bdev;
        init_completion(&device->flush_wait);
        bio->bi_private = &device->flush_wait;
        device->flush_bio = bio;

        bio_get(bio);
        btrfsic_submit_bio(WRITE_FLUSH, bio);

        return 0;
}

/*
 * send an empty flush down to each device in parallel,
 * then wait for them
 */
static int barrier_all_devices(struct btrfs_fs_info *info)
{
        struct list_head *head;
        struct btrfs_device *dev;
        int errors = 0;
        int ret;

        /* send down all the barriers */
        head = &info->fs_devices->devices;
        list_for_each_entry_rcu(dev, head, dev_list) {
                if (!dev->bdev) {
                        errors++;
                        continue;
                }
                if (!dev->in_fs_metadata || !dev->writeable)
                        continue;

                ret = write_dev_flush(dev, 0);
                if (ret)
                        errors++;
        }

        /* wait for all the barriers */
        list_for_each_entry_rcu(dev, head, dev_list) {
                if (!dev->bdev) {
                        errors++;
                        continue;
                }
                if (!dev->in_fs_metadata || !dev->writeable)
                        continue;

                ret = write_dev_flush(dev, 1);
                if (ret)
                        errors++;
        }
        if (errors)
                return -EIO;
        return 0;
}
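
/*
 * write the super block to every writeable device, in two passes: the
 * first submits the buffers, the second waits for them to complete.
 * Up to max_errors per-device failures are tolerated before the write
 * is treated as failed.
 */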
int write_all_supers(struct btrfs_root *root, int max_mirrors)
{
        struct list_head *head;
        struct btrfs_device *dev;
        struct btrfs_super_block *sb;
        struct btrfs_dev_item *dev_item;
        int ret;
        int do_barriers;
        int max_errors;
        int total_errors = 0;
        u64 flags;

        max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
        do_barriers = !btrfs_test_opt(root, NOBARRIER);
        backup_super_roots(root->fs_info);

        sb = root->fs_info->super_for_commit;
        dev_item = &sb->dev_item;

        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
        head = &root->fs_info->fs_devices->devices;

        if (do_barriers)
                barrier_all_devices(root->fs_info);

        list_for_each_entry_rcu(dev, head, dev_list) {
                if (!dev->bdev) {
                        total_errors++;
                        continue;
                }
                if (!dev->in_fs_metadata || !dev->writeable)
                        continue;

                btrfs_set_stack_device_generation(dev_item, 0);
                btrfs_set_stack_device_type(dev_item, dev->type);
                btrfs_set_stack_device_id(dev_item, dev->devid);
                btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
                btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
                btrfs_set_stack_device_io_align(dev_item, dev->io_align);
                btrfs_set_stack_device_io_width(dev_item, dev->io_width);
                btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
                memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
                memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);

                flags = btrfs_super_flags(sb);
                btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);

                ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
                if (ret)
                        total_errors++;
        }
        if (total_errors > max_errors) {
                printk(KERN_ERR "btrfs: %d errors while writing supers\n",
                       total_errors);

                /* This shouldn't happen. FUA is masked off if unsupported */
                BUG();
        }

        total_errors = 0;
        list_for_each_entry_rcu(dev, head, dev_list) {
                if (!dev->bdev)
                        continue;
                if (!dev->in_fs_metadata || !dev->writeable)
                        continue;

                ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
                if (ret)
                        total_errors++;
        }
        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
        if (total_errors > max_errors) {
                btrfs_error(root->fs_info, -EIO,
                            "%d errors while writing supers", total_errors);
                return -EIO;
        }
        return 0;
}

int write_ctree_super(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root, int max_mirrors)
{
        int ret;

        ret = write_all_supers(root, max_mirrors);
        return ret;
}
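
/*
 * drop a subvolume root from the fs_roots radix tree and free it.
 * If the root item holds no more references, wait out any outstanding
 * SRCU readers before the structure goes away.
 */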
void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
        spin_lock(&fs_info->fs_roots_radix_lock);
        radix_tree_delete(&fs_info->fs_roots_radix,
                          (unsigned long)root->root_key.objectid);
        spin_unlock(&fs_info->fs_roots_radix_lock);

        if (btrfs_root_refs(&root->root_item) == 0)
                synchronize_srcu(&fs_info->subvol_srcu);

        __btrfs_remove_free_space_cache(root->free_ino_pinned);
        __btrfs_remove_free_space_cache(root->free_ino_ctl);
        free_fs_root(root);
}
static void free_fs_root(struct btrfs_root *root)
{
	iput(root->cache_inode);
	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
	if (root->anon_dev)
		free_anon_bdev(root->anon_dev);
	free_extent_buffer(root->node);
	free_extent_buffer(root->commit_root);
	kfree(root->free_ino_ctl);
	kfree(root->free_ino_pinned);
	kfree(root->name);
	kfree(root);
}

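/*
 * Free every remaining fs root at unmount: first the dead roots queued
 * for deletion, then whatever is still registered in the radix tree.
 */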
static void del_fs_roots(struct btrfs_fs_info *fs_info)
{
	int ret;
	struct btrfs_root *gang[8];
	int i;

	while (!list_empty(&fs_info->dead_roots)) {
		gang[0] = list_entry(fs_info->dead_roots.next,
				     struct btrfs_root, root_list);
		list_del(&gang[0]->root_list);

		if (gang[0]->in_radix) {
			btrfs_free_fs_root(fs_info, gang[0]);
		} else {
			free_extent_buffer(gang[0]->node);
			free_extent_buffer(gang[0]->commit_root);
			kfree(gang[0]);
		}
	}

	while (1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, 0,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++)
			btrfs_free_fs_root(fs_info, gang[i]);
	}
}

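/*
 * Run orphan cleanup on every fs root.  The radix tree is walked in
 * gangs of eight, and root_objectid is advanced past the last root
 * processed so that each lookup makes forward progress.
 */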
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
{
	u64 root_objectid = 0;
	struct btrfs_root *gang[8];
	int i;
	int ret;

	while (1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, root_objectid,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;

		root_objectid = gang[ret - 1]->root_key.objectid + 1;
		for (i = 0; i < ret; i++) {
			int err;

			root_objectid = gang[i]->root_key.objectid;
			err = btrfs_orphan_cleanup(gang[i]);
			if (err)
				return err;
		}
		root_objectid++;
	}
	return 0;
}

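/*
 * Flush everything out before unmount: commit the running transaction
 * twice (the second commit drops the snapshot of the old tree roots
 * that the first one left behind), sync the btree inode, and write the
 * super blocks.
 */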
int btrfs_commit_super(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	int ret;

	mutex_lock(&root->fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(root);
	btrfs_clean_old_snapshots(root);
	mutex_unlock(&root->fs_info->cleaner_mutex);

	/* wait until ongoing cleanup work done */
	down_write(&root->fs_info->cleanup_work_sem);
	up_write(&root->fs_info->cleanup_work_sem);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	ret = btrfs_commit_transaction(trans, root);
	if (ret)
		return ret;

	/* run commit again to drop the original snapshot */
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	ret = btrfs_commit_transaction(trans, root);
	if (ret)
		return ret;

	ret = btrfs_write_and_wait_transaction(NULL, root);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Failed to sync btree inode to disk.");
		return ret;
	}

	ret = write_ctree_super(NULL, root, 0);
	return ret;
}

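/*
 * Tear down a mounted filesystem: write the final transaction and
 * super blocks, stop the background kthreads and worker threads, and
 * release all cached tree roots, block groups and devices.
 */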
int close_ctree(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	fs_info->closing = 1;
	smp_mb();

	/* pause restriper - we want to resume on mount */
	btrfs_pause_balance(root->fs_info);

	btrfs_scrub_cancel(root);

	/* wait for any defraggers to finish */
	wait_event(fs_info->transaction_wait,
		   (atomic_read(&fs_info->defrag_running) == 0));

	/* clear out the rbtree of defraggable inodes */
	btrfs_run_defrag_inodes(fs_info);

	/*
	 * There are two ways the filesystem can have flipped read-only
	 * after an error:
	 *
	 * 1. If the flip happened somewhere before btrfs_commit_super,
	 *    sb->s_flags already carries MS_RDONLY, so the super block
	 *    write is skipped here to keep the ERROR state on disk.
	 *
	 * 2. If the flip happened inside btrfs_commit_super itself, the
	 *    super block could not be written there.  Since fs_state has
	 *    BTRFS_SUPER_FLAG_ERROR set, all FS resources are cleaned up
	 *    first and the super block is written afterwards.
	 */
	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
		ret = btrfs_commit_super(root);
		if (ret)
			printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
	}

	if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		ret = btrfs_error_commit_super(root);
		if (ret)
			printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
	}

	btrfs_put_block_group_cache(fs_info);

	kthread_stop(fs_info->transaction_kthread);
	kthread_stop(fs_info->cleaner_kthread);

	fs_info->closing = 2;
	smp_mb();

	if (fs_info->delalloc_bytes) {
		printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
		       (unsigned long long)fs_info->delalloc_bytes);
	}
	if (fs_info->total_ref_cache_size) {
		printk(KERN_INFO "btrfs: at unmount reference cache size %llu\n",
		       (unsigned long long)fs_info->total_ref_cache_size);
	}

	free_extent_buffer(fs_info->extent_root->node);
	free_extent_buffer(fs_info->extent_root->commit_root);
	free_extent_buffer(fs_info->tree_root->node);
	free_extent_buffer(fs_info->tree_root->commit_root);
	free_extent_buffer(fs_info->chunk_root->node);
	free_extent_buffer(fs_info->chunk_root->commit_root);
	free_extent_buffer(fs_info->dev_root->node);
	free_extent_buffer(fs_info->dev_root->commit_root);
	free_extent_buffer(fs_info->csum_root->node);
	free_extent_buffer(fs_info->csum_root->commit_root);

	btrfs_free_block_groups(fs_info);

	del_fs_roots(fs_info);

	iput(fs_info->btree_inode);

	btrfs_stop_workers(&fs_info->generic_worker);
	btrfs_stop_workers(&fs_info->fixup_workers);
	btrfs_stop_workers(&fs_info->delalloc_workers);
	btrfs_stop_workers(&fs_info->workers);
	btrfs_stop_workers(&fs_info->endio_workers);
	btrfs_stop_workers(&fs_info->endio_meta_workers);
	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
	btrfs_stop_workers(&fs_info->endio_write_workers);
	btrfs_stop_workers(&fs_info->endio_freespace_worker);
	btrfs_stop_workers(&fs_info->submit_workers);
	btrfs_stop_workers(&fs_info->delayed_workers);
	btrfs_stop_workers(&fs_info->caching_workers);
	btrfs_stop_workers(&fs_info->readahead_workers);

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	if (btrfs_test_opt(root, CHECK_INTEGRITY))
		btrfsic_unmount(root, fs_info->fs_devices);
#endif

	btrfs_close_devices(fs_info->fs_devices);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

	bdi_destroy(&fs_info->bdi);
	cleanup_srcu_struct(&fs_info->subvol_srcu);

	return 0;
}

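/*
 * Returns 1 if the buffer is uptodate and its generation matches
 * parent_transid, 0 otherwise.  -EAGAIN from verify_parent_transid is
 * passed through; it signals that an atomic caller must retry the
 * check from a context that is allowed to block.
 */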
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
			  int atomic)
{
	int ret;
	struct inode *btree_inode = buf->pages[0]->mapping->host;

	ret = extent_buffer_uptodate(buf);
	if (!ret)
		return ret;

	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
				    parent_transid, atomic);
	if (ret == -EAGAIN)
		return ret;
	return !ret;
}

int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
{
	return set_extent_buffer_uptodate(buf);
}

void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
	u64 transid = btrfs_header_generation(buf);
	int was_dirty;

	btrfs_assert_tree_locked(buf);
	if (transid != root->fs_info->generation) {
		printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
		       "found %llu running %llu\n",
		       (unsigned long long)buf->start,
		       (unsigned long long)transid,
		       (unsigned long long)root->fs_info->generation);
		WARN_ON(1);
	}
	was_dirty = set_extent_buffer_dirty(buf);
	if (!was_dirty) {
		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->dirty_metadata_bytes += buf->len;
		spin_unlock(&root->fs_info->delalloc_lock);
	}
}

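/*
 * Throttle metadata writers once more than 32MB of dirty btree data
 * has accumulated.  The __ variant below is identical except that it
 * skips balancing the delayed items first.
 */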
void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
{
	/*
	 * looks as though older kernels can get into trouble with
	 * this code, they end up stuck in balance_dirty_pages forever
	 */
	u64 num_dirty;
	unsigned long thresh = 32 * 1024 * 1024;

	if (current->flags & PF_MEMALLOC)
		return;

	btrfs_balance_delayed_items(root);

	num_dirty = root->fs_info->dirty_metadata_bytes;

	if (num_dirty > thresh) {
		balance_dirty_pages_ratelimited_nr(
				   root->fs_info->btree_inode->i_mapping, 1);
	}
}

void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
{
	/*
	 * looks as though older kernels can get into trouble with
	 * this code, they end up stuck in balance_dirty_pages forever
	 */
	u64 num_dirty;
	unsigned long thresh = 32 * 1024 * 1024;

	if (current->flags & PF_MEMALLOC)
		return;

	num_dirty = root->fs_info->dirty_metadata_bytes;

	if (num_dirty > thresh) {
		balance_dirty_pages_ratelimited_nr(
				   root->fs_info->btree_inode->i_mapping, 1);
	}
}

int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
	return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
}

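/*
 * Writeback lock hook for btree pages.  Takes the tree write lock on
 * the extent buffer backing the page (flushing pending work first if
 * the lock is contended), marks the header WRITTEN, and removes the
 * buffer from the dirty metadata accounting before the page itself is
 * locked.
 */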
int btree_lock_page_hook(struct page *page, void *data,
			 void (*flush_fn)(void *))
{
	struct inode *inode = page->mapping->host;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *eb;

	/*
	 * We culled this eb but the page is still hanging out on the mapping,
	 * carry on.
	 */
	if (!PagePrivate(page))
		goto out;

	eb = (struct extent_buffer *)page->private;
	if (!eb) {
		WARN_ON(1);
		goto out;
	}
	if (page != eb->pages[0])
		goto out;

	if (!btrfs_try_tree_write_lock(eb)) {
		flush_fn(data);
		btrfs_tree_lock(eb);
	}
	btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);

	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
		spin_lock(&root->fs_info->delalloc_lock);
		if (root->fs_info->dirty_metadata_bytes >= eb->len)
			root->fs_info->dirty_metadata_bytes -= eb->len;
		else
			WARN_ON(1);
		spin_unlock(&root->fs_info->delalloc_lock);
	}

	btrfs_tree_unlock(eb);
out:
	if (!trylock_page(page)) {
		flush_fn(data);
		lock_page(page);
	}
	return 0;
}

static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
				   int read_only)
{
	if (btrfs_super_csum_type(fs_info->super_copy) >=
	    ARRAY_SIZE(btrfs_csum_sizes)) {
		printk(KERN_ERR "btrfs: unsupported checksum algorithm\n");
		return -EINVAL;
	}

	if (read_only)
		return 0;

	if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		printk(KERN_WARNING "warning: mount fs with errors, "
		       "running btrfsck is recommended\n");
	}

	return 0;
}

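/*
 * Error-path counterpart of btrfs_commit_super: rather than committing,
 * any running transactions are torn down via btrfs_cleanup_transaction,
 * and only the super block write is attempted.
 */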
int btrfs_error_commit_super(struct btrfs_root *root)
{
	int ret;

	mutex_lock(&root->fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(root);
	mutex_unlock(&root->fs_info->cleaner_mutex);

	down_write(&root->fs_info->cleanup_work_sem);
	up_write(&root->fs_info->cleanup_work_sem);

	/* cleanup FS via transaction */
	btrfs_cleanup_transaction(root);

	ret = write_ctree_super(NULL, root, 0);

	return ret;
}

static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
{
	struct btrfs_inode *btrfs_inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->fs_info->ordered_extent_lock);

	list_splice_init(&root->fs_info->ordered_operations, &splice);
	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);

		list_del_init(&btrfs_inode->ordered_operations);

		btrfs_invalidate_inodes(btrfs_inode->root);
	}

	spin_unlock(&root->fs_info->ordered_extent_lock);
	mutex_unlock(&root->fs_info->ordered_operations_mutex);
}

static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
{
	struct list_head splice;
	struct btrfs_ordered_extent *ordered;
	struct inode *inode;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->fs_info->ordered_extent_lock);

	list_splice_init(&root->fs_info->ordered_extents, &splice);
	while (!list_empty(&splice)) {
		ordered = list_entry(splice.next, struct btrfs_ordered_extent,
				     root_extent_list);

		list_del_init(&ordered->root_extent_list);
		atomic_inc(&ordered->refs);

		/* the inode may be getting freed (in sys_unlink path). */
		inode = igrab(ordered->inode);

		spin_unlock(&root->fs_info->ordered_extent_lock);
		if (inode)
			iput(inode);

		atomic_set(&ordered->refs, 1);
		btrfs_put_ordered_extent(ordered);

		spin_lock(&root->fs_info->ordered_extent_lock);
	}

	spin_unlock(&root->fs_info->ordered_extent_lock);
}

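/*
 * Throw away every delayed ref still queued on an aborted transaction.
 * If a ref head's mutex is held, another task may be running that ref,
 * so drop the lock and wait on the mutex before retrying.
 */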
int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
			       struct btrfs_root *root)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	int ret = 0;

	delayed_refs = &trans->delayed_refs;

	spin_lock(&delayed_refs->lock);
	if (delayed_refs->num_entries == 0) {
		spin_unlock(&delayed_refs->lock);
		printk(KERN_INFO "delayed_refs has NO entry\n");
		return ret;
	}

	while ((node = rb_first(&delayed_refs->root)) != NULL) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

		atomic_set(&ref->refs, 1);
		if (btrfs_delayed_ref_is_head(ref)) {
			struct btrfs_delayed_ref_head *head;

			head = btrfs_delayed_node_to_head(ref);
			if (!mutex_trylock(&head->mutex)) {
				atomic_inc(&ref->refs);
				spin_unlock(&delayed_refs->lock);

				/* Need to wait for the delayed ref to run */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref(ref);

				spin_lock(&delayed_refs->lock);
				continue;
			}

			kfree(head->extent_op);
			delayed_refs->num_heads--;
			if (list_empty(&head->cluster))
				delayed_refs->num_heads_ready--;
			list_del_init(&head->cluster);
		}
		ref->in_tree = 0;
		rb_erase(&ref->rb_node, &delayed_refs->root);
		delayed_refs->num_entries--;

		spin_unlock(&delayed_refs->lock);
		btrfs_put_delayed_ref(ref);

		cond_resched();
		spin_lock(&delayed_refs->lock);
	}

	spin_unlock(&delayed_refs->lock);

	return ret;
}

static void btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
{
	struct btrfs_pending_snapshot *snapshot;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	list_splice_init(&t->pending_snapshots, &splice);

	while (!list_empty(&splice)) {
		snapshot = list_entry(splice.next,
				      struct btrfs_pending_snapshot,
				      list);

		list_del_init(&snapshot->list);

		kfree(snapshot);
	}
}

static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
{
	struct btrfs_inode *btrfs_inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->fs_info->delalloc_lock);
	list_splice_init(&root->fs_info->delalloc_inodes, &splice);

	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 delalloc_inodes);

		list_del_init(&btrfs_inode->delalloc_inodes);

		btrfs_invalidate_inodes(btrfs_inode->root);
	}

	spin_unlock(&root->fs_info->delalloc_lock);
}

static int btrfs_destroy_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages,
					int mark)
{
	int ret;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;
	u64 start = 0;
	u64 end;
	u64 offset;
	unsigned long index;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark);
		if (ret)
			break;

		clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
		while (start <= end) {
			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;
			offset = page_offset(page);

			spin_lock(&dirty_pages->buffer_lock);
			eb = radix_tree_lookup(
			     &BTRFS_I(page->mapping->host)->io_tree.buffer,
			     offset >> PAGE_CACHE_SHIFT);
			spin_unlock(&dirty_pages->buffer_lock);
			if (eb)
				ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY,
							 &eb->bflags);
			if (PageWriteback(page))
				end_page_writeback(page);

			lock_page(page);
			if (PageDirty(page)) {
				clear_page_dirty_for_io(page);
				spin_lock_irq(&page->mapping->tree_lock);
				radix_tree_tag_clear(&page->mapping->page_tree,
						     page_index(page),
						     PAGECACHE_TAG_DIRTY);
				spin_unlock_irq(&page->mapping->tree_lock);
			}
			unlock_page(page);
			page_cache_release(page);
		}
	}

	return ret;
}

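/*
 * Unpin everything left in the freed_extents trees.  The caller passes
 * fs_info->pinned_extents, which points at one of the two trees; after
 * the first tree is drained the loop flips over and drains the other.
 */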
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
				       struct extent_io_tree *pinned_extents)
{
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;
	bool loop = true;

	unpin = pinned_extents;
again:
	while (1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
			break;

		/* opt_discard */
		if (btrfs_test_opt(root, DISCARD))
			ret = btrfs_error_discard_extent(root, start,
							 end + 1 - start,
							 NULL);

		clear_extent_dirty(unpin, start, end, GFP_NOFS);
		btrfs_error_unpin_extent_range(root, start, end);
		cond_resched();
	}

	if (loop) {
		if (unpin == &root->fs_info->freed_extents[0])
			unpin = &root->fs_info->freed_extents[1];
		else
			unpin = &root->fs_info->freed_extents[0];
		loop = false;
		goto again;
	}

	return 0;
}

void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
				   struct btrfs_root *root)
{
	btrfs_destroy_delayed_refs(cur_trans, root);
	btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
				cur_trans->dirty_pages.dirty_bytes);

	/* FIXME: cleanup wait for commit */
	cur_trans->in_commit = 1;
	cur_trans->blocked = 1;
	wake_up(&root->fs_info->transaction_blocked_wait);

	cur_trans->blocked = 0;
	wake_up(&root->fs_info->transaction_wait);

	cur_trans->commit_done = 1;
	wake_up(&cur_trans->commit_wait);

	btrfs_destroy_delayed_inodes(root);
	btrfs_assert_delayed_root_empty(root);

	btrfs_destroy_pending_snapshots(cur_trans);

	btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
				     EXTENT_DIRTY);
	btrfs_destroy_pinned_extent(root,
				    root->fs_info->pinned_extents);

	/*
	memset(cur_trans, 0, sizeof(*cur_trans));
	kmem_cache_free(btrfs_transaction_cachep, cur_trans);
	*/
}

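/*
 * Abort and free every transaction still on fs_info->trans_list.  Each
 * transaction is marked as committed and its waiters are woken so that
 * nothing blocks forever on a commit that will never happen; new joins
 * are prevented via trans_no_join while the list is drained.
 */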
int btrfs_cleanup_transaction(struct btrfs_root *root)
{
	struct btrfs_transaction *t;
	LIST_HEAD(list);

	mutex_lock(&root->fs_info->transaction_kthread_mutex);

	spin_lock(&root->fs_info->trans_lock);
	list_splice_init(&root->fs_info->trans_list, &list);
	root->fs_info->trans_no_join = 1;
	spin_unlock(&root->fs_info->trans_lock);

	while (!list_empty(&list)) {
		t = list_entry(list.next, struct btrfs_transaction, list);
		if (!t)
			break;

		btrfs_destroy_ordered_operations(root);

		btrfs_destroy_ordered_extents(root);

		btrfs_destroy_delayed_refs(t, root);

		btrfs_block_rsv_release(root,
					&root->fs_info->trans_block_rsv,
					t->dirty_pages.dirty_bytes);

		/* FIXME: cleanup wait for commit */
		t->in_commit = 1;
		t->blocked = 1;
		if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
			wake_up(&root->fs_info->transaction_blocked_wait);

		t->blocked = 0;
		if (waitqueue_active(&root->fs_info->transaction_wait))
			wake_up(&root->fs_info->transaction_wait);

		t->commit_done = 1;
		if (waitqueue_active(&t->commit_wait))
			wake_up(&t->commit_wait);

		btrfs_destroy_delayed_inodes(root);
		btrfs_assert_delayed_root_empty(root);

		btrfs_destroy_pending_snapshots(t);

		btrfs_destroy_delalloc_inodes(root);

		spin_lock(&root->fs_info->trans_lock);
		root->fs_info->running_transaction = NULL;
		spin_unlock(&root->fs_info->trans_lock);

		btrfs_destroy_marked_extents(root, &t->dirty_pages,
					     EXTENT_DIRTY);

		btrfs_destroy_pinned_extent(root,
					    root->fs_info->pinned_extents);

		atomic_set(&t->use_count, 0);
		list_del_init(&t->list);
		memset(t, 0, sizeof(*t));
		kmem_cache_free(btrfs_transaction_cachep, t);
	}

	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->trans_no_join = 0;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->transaction_kthread_mutex);

	return 0;
}

static struct extent_io_ops btree_extent_io_ops = {
	.write_cache_pages_lock_hook = btree_lock_page_hook,
	.readpage_end_io_hook = btree_readpage_end_io_hook,
	.readpage_io_failed_hook = btree_io_failed_hook,
	.submit_bio_hook = btree_submit_bio_hook,
	/* note we're sharing with inode.c for the merge bio hook */
	.merge_bio_hook = btrfs_merge_bio_hook,
};