disk-io.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/crc32c.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <asm/unaligned.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "async-thread.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
				   int read_only);
static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
					     struct btrfs_root *root);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
				      struct btrfs_root *root);
static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages,
					int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
				       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_root *root);
static void btrfs_error_commit_super(struct btrfs_root *root);

/*
 * end_io_wq structs are used to do processing in task context when an IO is
 * complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct end_io_wq {
	struct bio *bio;
	bio_end_io_t *end_io;
	void *private;
	struct btrfs_fs_info *info;
	int error;
	int metadata;
	struct list_head list;
	struct btrfs_work work;
};
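/*
 * Illustrative sketch (not upstream code): the typical life cycle of an
 * end_io_wq.  A caller wraps a bio before submission, the block layer
 * completes it in interrupt context, and end_workqueue_bio() re-queues
 * the final processing to a worker thread:
 *
 *	btrfs_bio_wq_end_io(fs_info, bio, 1);	// hook bi_end_io
 *	btrfs_map_bio(root, READ, bio, 0, 1);	// submit the IO
 *	// interrupt: end_workqueue_bio() -> btrfs_queue_worker()
 *	// task context: end_workqueue_fn() runs checksum verification,
 *	// then calls the original bi_end_io saved in end_io_wq->end_io
 */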
/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
	struct inode *inode;
	struct bio *bio;
	struct list_head list;
	extent_submit_bio_hook_t *submit_bio_start;
	extent_submit_bio_hook_t *submit_bio_done;
	int rw;
	int mirror_num;
	unsigned long bio_flags;
	/*
	 * bio_offset is optional, can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work work;
	int error;
};
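/*
 * Illustrative sketch (not upstream code): an async_submit_bio is driven
 * through three ordered work callbacks (see run_one_async_* below):
 *
 *	work.func         = run_one_async_start;  // checksum the bio
 *	work.ordered_func = run_one_async_done;   // submit it, in order
 *	work.ordered_free = run_one_async_free;   // kfree the wrapper
 *
 * The ordered callbacks keep bios reaching the device in the order they
 * were queued, even though the checksumming itself runs in parallel.
 */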
/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->objectid.  This ensures that all special purpose roots
 * have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

static struct btrfs_lockdep_keyset {
	u64			id;		/* root objectid */
	const char		*name_stem;	/* lock name stem */
	char			names[BTRFS_MAX_LEVEL + 1][20];
	struct lock_class_key	keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	.name_stem = "root"	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	.name_stem = "extent"	},
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	.name_stem = "chunk"	},
	{ .id = BTRFS_DEV_TREE_OBJECTID,	.name_stem = "dev"	},
	{ .id = BTRFS_FS_TREE_OBJECTID,		.name_stem = "fs"	},
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	.name_stem = "csum"	},
	{ .id = BTRFS_ORPHAN_OBJECTID,		.name_stem = "orphan"	},
	{ .id = BTRFS_TREE_LOG_OBJECTID,	.name_stem = "log"	},
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	.name_stem = "treloc"	},
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	.name_stem = "dreloc"	},
	{ .id = 0,				.name_stem = "tree"	},
};

void __init btrfs_init_lockdep(void)
{
	int i, j;

	/* initialize lockdep class names */
	for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
		struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

		for (j = 0; j < ARRAY_SIZE(ks->names); j++)
			snprintf(ks->names[j], sizeof(ks->names[j]),
				 "btrfs-%s-%02d", ks->name_stem, j);
	}
}

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
				    int level)
{
	struct btrfs_lockdep_keyset *ks;

	BUG_ON(level >= ARRAY_SIZE(ks->keys));

	/* find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock,
				   &ks->keys[level], ks->names[level]);
}
#endif
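/*
 * Illustrative example (not upstream code): after btrfs_init_lockdep()
 * runs, the generated class names follow the "btrfs-<stem>-<level>"
 * pattern, e.g. a level-0 leaf of the extent tree gets "btrfs-extent-00"
 * and a level-2 node of an ordinary fs tree gets "btrfs-fs-02".  Roots
 * without a dedicated entry fall through to the .id = 0 "tree" stem.
 */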
/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct inode *inode,
		struct page *page, size_t pg_offset, u64 start, u64 len,
		int create)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em) {
		em->bdev =
			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
		read_unlock(&em_tree->lock);
		goto out;
	}
	read_unlock(&em_tree->lock);

	em = alloc_extent_map();
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = (u64)-1;
	em->block_len = (u64)-1;
	em->block_start = 0;
	em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret == -EEXIST) {
		free_extent_map(em);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em)
			em = ERR_PTR(-EIO);
	} else if (ret) {
		free_extent_map(em);
		em = ERR_PTR(ret);
	}
	write_unlock(&em_tree->lock);

out:
	return em;
}

u32 btrfs_csum_data(char *data, u32 seed, size_t len)
{
	return crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
	put_unaligned_le32(~crc, result);
}
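/*
 * Illustrative sketch (not upstream code): a checksum over a buffer is
 * built by seeding with ~0, folding data in with btrfs_csum_data(), and
 * inverting/storing the result little-endian with btrfs_csum_final():
 *
 *	u32 crc = ~(u32)0;
 *	char result[BTRFS_CSUM_SIZE];
 *
 *	crc = btrfs_csum_data(buf, crc, buflen);
 *	btrfs_csum_final(crc, result);	// stores ~crc as a __le32
 *
 * This matches the usual CRC32C convention of inverting the CRC on both
 * input and output.
 */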
/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
			   int verify)
{
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	char *result = NULL;
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;
	unsigned long inline_result;

	len = buf->len - offset;
	while (len > 0) {
		err = map_private_extent_buffer(buf, offset, 32,
					&kaddr, &map_start, &map_len);
		if (err)
			return 1;
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
	}
	if (csum_size > sizeof(inline_result)) {
		result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
		if (!result)
			return 1;
	} else {
		result = (char *)&inline_result;
	}

	btrfs_csum_final(crc, result);

	if (verify) {
		if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
			u32 val;
			u32 found = 0;

			memcpy(&found, result, csum_size);
			read_extent_buffer(buf, &val, 0, csum_size);
			printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
				       "failed on %llu wanted %X found %X "
				       "level %d\n",
				       root->fs_info->sb->s_id,
				       (unsigned long long)buf->start, val, found,
				       btrfs_header_level(buf));
			if (result != (char *)&inline_result)
				kfree(result);
			return 1;
		}
	} else {
		write_extent_buffer(buf, result, 0, csum_size);
	}
	if (result != (char *)&inline_result)
		kfree(result);
	return 0;
}
/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid,
				 int atomic)
{
	struct extent_state *cached_state = NULL;
	int ret;

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;

	if (atomic)
		return -EAGAIN;

	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
			 0, &cached_state);
	if (extent_buffer_uptodate(eb) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
	}
	printk_ratelimited("parent transid verify failed on %llu wanted %llu "
		       "found %llu\n",
		       (unsigned long long)eb->start,
		       (unsigned long long)parent_transid,
		       (unsigned long long)btrfs_header_generation(eb));
	ret = 1;
	clear_extent_buffer_uptodate(eb);
out:
	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
			     &cached_state, GFP_NOFS);
	return ret;
}
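/*
 * Illustrative example (not upstream code): if a parent node records
 * generation 100 in its pointer to a child block, but the child block on
 * disk carries generation 97, the write of generation 100 never reached
 * the media (or landed elsewhere).  verify_parent_transid() returns 1 in
 * that case and the caller falls back to another mirror if one exists.
 */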
/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm.  Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(char *raw_disk_sb)
{
	struct btrfs_super_block *disk_sb =
		(struct btrfs_super_block *)raw_disk_sb;
	u16 csum_type = btrfs_super_csum_type(disk_sb);
	int ret = 0;

	if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
		u32 crc = ~(u32)0;
		const int csum_size = sizeof(crc);
		char result[csum_size];

		/*
		 * The super_block structure does not span the whole
		 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
		 * is filled with zeros and is included in the checksum.
		 */
		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, result);

		if (memcmp(raw_disk_sb, result, csum_size))
			ret = 1;
	}

	if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
		printk(KERN_ERR "btrfs: unsupported checksum algorithm %u\n",
				csum_type);
		ret = 1;
	}
	return ret;
}
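/*
 * Illustrative layout note (not upstream code): the stored checksum lives
 * in the first BTRFS_CSUM_SIZE bytes of the superblock and covers
 * everything after it up to BTRFS_SUPER_INFO_SIZE:
 *
 *	[0 .. CSUM_SIZE)               stored csum
 *	[CSUM_SIZE .. SUPER_INFO_SIZE) data fed to btrfs_csum_data()
 */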
/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
					  struct extent_buffer *eb,
					  u64 start, u64 parent_transid)
{
	struct extent_io_tree *io_tree;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
	while (1) {
		ret = read_extent_buffer_pages(io_tree, eb, start,
					       WAIT_COMPLETE,
					       btree_get_extent, mirror_num);
		if (!ret) {
			if (!verify_parent_transid(io_tree, eb,
						   parent_transid, 0))
				break;
			else
				ret = -EIO;
		}

		/*
		 * This buffer's crc is fine, but its contents are corrupted, so
		 * there is no reason to read the other copies, they won't be
		 * any less wrong.
		 */
		if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
			break;

		num_copies = btrfs_num_copies(root->fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

	if (failed && !ret && failed_mirror)
		repair_eb_io_failure(root, eb, failed_mirror);

	return ret;
}
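/*
 * Illustrative walk-through (not upstream code): with num_copies == 2 and
 * an initial failure on mirror 1, the loop above retries mirror 2; if
 * that copy passes the csum and transid checks, the loop breaks with
 * ret == 0 and repair_eb_io_failure() rewrites the bad copy on mirror 1.
 * The failed_mirror bookkeeping skips re-reading the mirror that already
 * failed.
 */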
/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */
static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
	struct extent_io_tree *tree;
	u64 start = page_offset(page);
	u64 found_start;
	struct extent_buffer *eb;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	eb = (struct extent_buffer *)page->private;
	if (page != eb->pages[0])
		return 0;
	found_start = btrfs_header_bytenr(eb);
	if (found_start != start) {
		WARN_ON(1);
		return 0;
	}
	if (!PageUptodate(page)) {
		WARN_ON(1);
		return 0;
	}
	csum_tree_block(root, eb, 0);
	return 0;
}

static int check_tree_block_fsid(struct btrfs_root *root,
				 struct extent_buffer *eb)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	u8 fsid[BTRFS_UUID_SIZE];
	int ret = 1;

	read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
			   BTRFS_FSID_SIZE);
	while (fs_devices) {
		if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
			ret = 0;
			break;
		}
		fs_devices = fs_devices->seed;
	}
	return ret;
}
#define CORRUPT(reason, eb, root, slot)				\
	printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu,"	\
	       "root=%llu, slot=%d\n", reason,			\
	       (unsigned long long)btrfs_header_bytenr(eb),	\
	       (unsigned long long)root->objectid, slot)

static noinline int check_leaf(struct btrfs_root *root,
			       struct extent_buffer *leaf)
{
	struct btrfs_key key;
	struct btrfs_key leaf_key;
	u32 nritems = btrfs_header_nritems(leaf);
	int slot;

	if (nritems == 0)
		return 0;

	/* Check the 0 item */
	if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
	    BTRFS_LEAF_DATA_SIZE(root)) {
		CORRUPT("invalid item offset size pair", leaf, root, 0);
		return -EIO;
	}

	/*
	 * Check to make sure each item's keys are in the correct order and
	 * their offsets make sense.  We only have to loop through nritems-1
	 * because we check the current slot against the next slot, which
	 * verifies the next slot's offset+size makes sense and that the
	 * current slot's offset is correct.
	 */
	for (slot = 0; slot < nritems - 1; slot++) {
		btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
		btrfs_item_key_to_cpu(leaf, &key, slot + 1);

		/* Make sure the keys are in the right order */
		if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
			CORRUPT("bad key order", leaf, root, slot);
			return -EIO;
		}

		/*
		 * Make sure the offset and ends are right, remember that the
		 * item data starts at the end of the leaf and grows towards the
		 * front.
		 */
		if (btrfs_item_offset_nr(leaf, slot) !=
			btrfs_item_end_nr(leaf, slot + 1)) {
			CORRUPT("slot offset bad", leaf, root, slot);
			return -EIO;
		}

		/*
		 * Check to make sure that we don't point outside of the leaf,
		 * just in case all the items are consistent with each other,
		 * but all point outside of the leaf.
		 */
		if (btrfs_item_end_nr(leaf, slot) >
		    BTRFS_LEAF_DATA_SIZE(root)) {
			CORRUPT("slot end outside of leaf", leaf, root, slot);
			return -EIO;
		}
	}

	return 0;
}
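/*
 * Illustrative layout note (not upstream code): items in a leaf are packed
 * from both ends, so in a consistent leaf item N's data must end exactly
 * where item N-1's data starts, and item 0's data must end at
 * BTRFS_LEAF_DATA_SIZE(root):
 *
 *	[item hdr 0][item hdr 1] ... free space ... [data 1][data 0]
 *
 * which is exactly the offset_nr(slot) == end_nr(slot + 1) invariant that
 * check_leaf() enforces.
 */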
static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
				      struct extent_state *state, int mirror)
{
	struct extent_io_tree *tree;
	u64 found_start;
	int found_level;
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	int ret = 0;
	int reads_done;

	if (!page->private)
		goto out;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	eb = (struct extent_buffer *)page->private;

	/* the pending IO might have been the only thing that kept this buffer
	 * in memory.  Make sure we have a ref for all these other checks
	 */
	extent_buffer_get(eb);

	reads_done = atomic_dec_and_test(&eb->io_pages);
	if (!reads_done)
		goto err;

	eb->read_mirror = mirror;
	if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
		ret = -EIO;
		goto err;
	}

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		printk_ratelimited(KERN_INFO "btrfs bad tree block start "
			       "%llu %llu\n",
			       (unsigned long long)found_start,
			       (unsigned long long)eb->start);
		ret = -EIO;
		goto err;
	}
	if (check_tree_block_fsid(root, eb)) {
		printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
			       (unsigned long long)eb->start);
		ret = -EIO;
		goto err;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_info(root->fs_info, "bad tree block level %d\n",
			   (int)btrfs_header_level(eb));
		ret = -EIO;
		goto err;
	}

	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
				       eb, found_level);

	ret = csum_tree_block(root, eb, 1);
	if (ret) {
		ret = -EIO;
		goto err;
	}

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
	 */
	if (found_level == 0 && check_leaf(root, eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (!ret)
		set_extent_buffer_uptodate(eb);
err:
	if (reads_done &&
	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(root, eb, eb->start, ret);

	if (ret) {
		/*
		 * our io error hook is going to dec the io pages
		 * again, we have to make sure it has something
		 * to decrement
		 */
		atomic_inc(&eb->io_pages);
		clear_extent_buffer_uptodate(eb);
	}
	free_extent_buffer(eb);
out:
	return ret;
}

static int btree_io_failed_hook(struct page *page, int failed_mirror)
{
	struct extent_buffer *eb;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;

	eb = (struct extent_buffer *)page->private;
	set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
	eb->read_mirror = failed_mirror;
	atomic_dec(&eb->io_pages);
	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
		btree_readahead_hook(root, eb, eb->start, -EIO);
	return -EIO;	/* we fixed nothing */
}
static void end_workqueue_bio(struct bio *bio, int err)
{
	struct end_io_wq *end_io_wq = bio->bi_private;
	struct btrfs_fs_info *fs_info;

	fs_info = end_io_wq->info;
	end_io_wq->error = err;
	end_io_wq->work.func = end_workqueue_fn;
	end_io_wq->work.flags = 0;

	if (bio->bi_rw & REQ_WRITE) {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
			btrfs_queue_worker(&fs_info->endio_meta_write_workers,
					   &end_io_wq->work);
		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
			btrfs_queue_worker(&fs_info->endio_freespace_worker,
					   &end_io_wq->work);
		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
			btrfs_queue_worker(&fs_info->endio_raid56_workers,
					   &end_io_wq->work);
		else
			btrfs_queue_worker(&fs_info->endio_write_workers,
					   &end_io_wq->work);
	} else {
		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
			btrfs_queue_worker(&fs_info->endio_raid56_workers,
					   &end_io_wq->work);
		else if (end_io_wq->metadata)
			btrfs_queue_worker(&fs_info->endio_meta_workers,
					   &end_io_wq->work);
		else
			btrfs_queue_worker(&fs_info->endio_workers,
					   &end_io_wq->work);
	}
}

/*
 * For the metadata arg you want
 *
 * 0 - if data
 * 1 - if normal metadata
 * 2 - if writing to the free space cache area
 * 3 - raid parity work
 */
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			int metadata)
{
	struct end_io_wq *end_io_wq;

	end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
	if (!end_io_wq)
		return -ENOMEM;

	end_io_wq->private = bio->bi_private;
	end_io_wq->end_io = bio->bi_end_io;
	end_io_wq->info = info;
	end_io_wq->error = 0;
	end_io_wq->bio = bio;
	end_io_wq->metadata = metadata;

	bio->bi_private = end_io_wq;
	bio->bi_end_io = end_workqueue_bio;
	return 0;
}
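/*
 * Illustrative sketch (not upstream code): the metadata values listed
 * above correspond to the BTRFS_WQ_ENDIO_* constants tested in
 * end_workqueue_bio(), so a metadata read would be set up as:
 *
 *	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_METADATA);
 *	if (ret)
 *		return ret;	// could not allocate the wrapper
 *
 * after which the bio's completion is bounced to endio_meta_workers.
 */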
unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
	unsigned long limit = min_t(unsigned long,
				    info->workers.max_workers,
				    info->fs_devices->open_devices);
	return 256 * limit;
}

static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	int ret;

	async = container_of(work, struct async_submit_bio, work);
	ret = async->submit_bio_start(async->inode, async->rw, async->bio,
				      async->mirror_num, async->bio_flags,
				      async->bio_offset);
	if (ret)
		async->error = ret;
}

static void run_one_async_done(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_submit_bio *async;
	int limit;

	async = container_of(work, struct async_submit_bio, work);
	fs_info = BTRFS_I(async->inode)->root->fs_info;

	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

	if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
	    waitqueue_active(&fs_info->async_submit_wait))
		wake_up(&fs_info->async_submit_wait);

	/* If an error occurred we just want to clean up the bio and move on */
	if (async->error) {
		bio_endio(async->bio, async->error);
		return;
	}

	async->submit_bio_done(async->inode, async->rw, async->bio,
			       async->mirror_num, async->bio_flags,
			       async->bio_offset);
}

static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

	async = container_of(work, struct async_submit_bio, work);
	kfree(async);
}
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
			int rw, struct bio *bio, int mirror_num,
			unsigned long bio_flags,
			u64 bio_offset,
			extent_submit_bio_hook_t *submit_bio_start,
			extent_submit_bio_hook_t *submit_bio_done)
{
	struct async_submit_bio *async;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return -ENOMEM;

	async->inode = inode;
	async->rw = rw;
	async->bio = bio;
	async->mirror_num = mirror_num;
	async->submit_bio_start = submit_bio_start;
	async->submit_bio_done = submit_bio_done;

	async->work.func = run_one_async_start;
	async->work.ordered_func = run_one_async_done;
	async->work.ordered_free = run_one_async_free;

	async->work.flags = 0;
	async->bio_flags = bio_flags;
	async->bio_offset = bio_offset;

	async->error = 0;

	atomic_inc(&fs_info->nr_async_submits);

	if (rw & REQ_SYNC)
		btrfs_set_work_high_prio(&async->work);

	btrfs_queue_worker(&fs_info->workers, &async->work);

	while (atomic_read(&fs_info->async_submit_draining) &&
	       atomic_read(&fs_info->nr_async_submits)) {
		wait_event(fs_info->async_submit_wait,
			   (atomic_read(&fs_info->nr_async_submits) == 0));
	}

	return 0;
}
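/*
 * Illustrative note (not upstream code): throttling here is two-sided.
 * run_one_async_done() wakes waiters once nr_async_submits drops below
 * 2/3 of btrfs_async_submit_limit(), while the draining loop above lets
 * a flusher force the queue all the way to empty, roughly:
 *
 *	atomic_inc(&fs_info->async_submit_draining);
 *	// new submitters now block until nr_async_submits reaches 0
 *	atomic_dec(&fs_info->async_submit_draining);
 */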
static int btree_csum_one_bio(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int bio_index = 0;
	struct btrfs_root *root;
	int ret = 0;

	WARN_ON(bio->bi_vcnt <= 0);
	while (bio_index < bio->bi_vcnt) {
		root = BTRFS_I(bvec->bv_page->mapping->host)->root;
		ret = csum_dirty_buffer(root, bvec->bv_page);
		if (ret)
			break;
		bio_index++;
		bvec++;
	}
	return ret;
}

static int __btree_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags,
				    u64 bio_offset)
{
	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	return btree_csum_one_bio(bio);
}

static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
				   int mirror_num, unsigned long bio_flags,
				   u64 bio_offset)
{
	int ret;

	/*
	 * when we're called for a write, we're already in the async
	 * submission context.  Just jump into btrfs_map_bio
	 */
	ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
	if (ret)
		bio_endio(bio, ret);
	return ret;
}

static int check_async_write(struct inode *inode, unsigned long bio_flags)
{
	if (bio_flags & EXTENT_BIO_TREE_LOG)
		return 0;
#ifdef CONFIG_X86
	if (cpu_has_xmm4_2)
		return 0;
#endif
	return 1;
}
static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset)
{
	int async = check_async_write(inode, bio_flags);
	int ret;

	if (!(rw & REQ_WRITE)) {
		/*
		 * called for a read, do the setup so that checksum validation
		 * can happen in the async kernel threads
		 */
		ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
					  bio, 1);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
				    mirror_num, 0);
	} else if (!async) {
		ret = btree_csum_one_bio(bio);
		if (ret)
			goto out_w_error;
		ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
				    mirror_num, 0);
	} else {
		/*
		 * kthread helpers are used to submit writes so that
		 * checksumming can happen in parallel across all CPUs
		 */
		ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
					  inode, rw, bio, mirror_num, 0,
					  bio_offset,
					  __btree_submit_bio_start,
					  __btree_submit_bio_done);
	}

	if (ret) {
out_w_error:
		bio_endio(bio, ret);
	}
	return ret;
}
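/*
 * Illustrative decision table (not upstream code) for btree_submit_bio_hook:
 *
 *	read          -> wq end_io hook, then btrfs_map_bio()
 *	write, !async -> csum inline, then btrfs_map_bio()
 *	                 (tree-log bios, or hardware CRC32C available)
 *	write, async  -> btrfs_wq_submit_bio() so csums run on workers
 */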
#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * we haven't done the locking hook
	 */
	if (PageDirty(page))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;
	return migrate_page(mapping, newpage, page, mode);
}
#endif

static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct extent_io_tree *tree;
	struct btrfs_fs_info *fs_info;
	int ret;

	tree = &BTRFS_I(mapping->host)->io_tree;
	if (wbc->sync_mode == WB_SYNC_NONE) {

		if (wbc->for_kupdate)
			return 0;

		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
		ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					     BTRFS_DIRTY_METADATA_THRESH);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;

	return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned long offset)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
	if (PagePrivate(page)) {
		printk(KERN_WARNING "btrfs warning page private not zero "
		       "on page %llu\n", (unsigned long long)page_offset(page));
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
	struct extent_buffer *eb;

	BUG_ON(!PagePrivate(page));
	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);
	BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
	BUG_ON(!atomic_read(&eb->refs));
	btrfs_assert_tree_locked(eb);
#endif
	return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
	.readpage	= btree_readpage,
	.writepages	= btree_writepages,
	.releasepage	= btree_releasepage,
	.invalidatepage	= btree_invalidatepage,
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
	.set_page_dirty	= btree_set_page_dirty,
};
int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
			 u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	int ret = 0;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return 0;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, 0, WAIT_NONE, btree_get_extent, 0);
	free_extent_buffer(buf);
	return ret;
}

int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
			 int mirror_num, struct extent_buffer **eb)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
	int ret;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return 0;

	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

	ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
				       btree_get_extent, mirror_num);
	if (ret) {
		free_extent_buffer(buf);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
		free_extent_buffer(buf);
		return -EIO;
	} else if (extent_buffer_uptodate(buf)) {
		*eb = buf;
	} else {
		free_extent_buffer(buf);
	}
	return 0;
}

struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
					    u64 bytenr, u32 blocksize)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;

	eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
				bytenr, blocksize);
	return eb;
}

struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
						   u64 bytenr, u32 blocksize)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;

	eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
				 bytenr, blocksize);
	return eb;
}

int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	return filemap_fdatawait_range(buf->pages[0]->mapping,
				       buf->start, buf->start + buf->len - 1);
}

struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
				      u32 blocksize, u64 parent_transid)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return NULL;

	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
	return buf;
}

void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      struct extent_buffer *buf)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (btrfs_header_generation(buf) ==
	    fs_info->running_transaction->transid) {
		btrfs_assert_tree_locked(buf);

		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
			__percpu_counter_add(&fs_info->dirty_metadata_bytes,
					     -buf->len,
					     fs_info->dirty_metadata_batch);
			/* ugh, clear_extent_buffer_dirty needs to lock the page */
			btrfs_set_lock_blocking(buf);
			clear_extent_buffer_dirty(buf);
		}
	}
}
static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
			 u32 stripesize, struct btrfs_root *root,
			 struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	root->node = NULL;
	root->commit_root = NULL;
	root->sectorsize = sectorsize;
	root->nodesize = nodesize;
	root->leafsize = leafsize;
	root->stripesize = stripesize;
	root->ref_cows = 0;
	root->track_dirty = 0;
	root->in_radix = 0;
	root->orphan_item_inserted = 0;
	root->orphan_cleanup_state = 0;

	root->objectid = objectid;
	root->last_trans = 0;
	root->highest_objectid = 0;
	root->name = NULL;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
	root->block_rsv = NULL;
	root->orphan_block_rsv = NULL;

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->logged_list[0]);
	INIT_LIST_HEAD(&root->logged_list[1]);
	spin_lock_init(&root->orphan_lock);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->log_extents_lock[0]);
	spin_lock_init(&root->log_extents_lock[1]);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	atomic_set(&root->orphan_inodes, 0);
	root->log_transid = 0;
	root->last_log_commit = 0;
	extent_io_tree_init(&root->dirty_log_pages,
			    fs_info->btree_inode->i_mapping);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	memset(&root->root_kobj, 0, sizeof(root->root_kobj));
	root->defrag_trans_start = fs_info->generation;
	init_completion(&root->kobj_unregister);
	root->defrag_running = 0;
	root->root_key.objectid = objectid;
	root->anon_dev = 0;

	spin_lock_init(&root->root_item_lock);
}

static int __must_check find_and_setup_root(struct btrfs_root *tree_root,
					    struct btrfs_fs_info *fs_info,
					    u64 objectid,
					    struct btrfs_root *root)
{
	int ret;
	u32 blocksize;
	u64 generation;

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, objectid);
	ret = btrfs_find_last_root(tree_root, objectid,
				   &root->root_item, &root->root_key);
	if (ret > 0)
		return -ENOENT;
	else if (ret < 0)
		return ret;

	generation = btrfs_root_generation(&root->root_item);
	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->commit_root = NULL;
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     blocksize, generation);
	if (!root->node || !btrfs_buffer_uptodate(root->node, generation, 0)) {
		free_extent_buffer(root->node);
		root->node = NULL;
		return -EIO;
	}
	root->commit_root = btrfs_root_node(root);
	return 0;
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);

	if (root)
		root->fs_info = fs_info;
	return root;
}
struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 objectid)
{
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret = 0;
	u64 bytenr;
	uuid_le uuid;

	root = btrfs_alloc_root(fs_info);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, objectid);
	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

	leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
				      0, objectid, NULL, 0, 0, 0);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail;
	}

	bytenr = leaf->start;
	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, objectid);
	root->node = leaf;

	write_extent_buffer(leaf, fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(leaf),
			    BTRFS_FSID_SIZE);
	write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(leaf),
			    BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	root->commit_root = btrfs_root_node(root);
	root->track_dirty = 1;

	root->root_item.flags = 0;
	root->root_item.byte_limit = 0;
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	uuid_le_gen(&uuid);
	memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
	root->root_item.drop_level = 0;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	btrfs_tree_unlock(leaf);
	return root;

fail:
	if (leaf) {
		btrfs_tree_unlock(leaf);
		free_extent_buffer(leaf);
	}
	kfree(root);
	return ERR_PTR(ret);
}

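/*
 * allocate a log tree rooted at a zeroed-out leaf owned by
 * BTRFS_TREE_LOG_OBJECTID.  This is shared by the per-subvolume log
 * trees and by the tree of log tree roots.
 */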
static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct extent_buffer *leaf;

	root = btrfs_alloc_root(fs_info);
	if (!root)
		return ERR_PTR(-ENOMEM);

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, BTRFS_TREE_LOG_OBJECTID);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
	/*
	 * log trees do not get reference counted because they go away
	 * before a real commit is actually done.  They do store pointers
	 * to file data extents, and those reference counts still get
	 * updated (along with back refs to the log tree).
	 */
	root->ref_cows = 0;

	leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
				      BTRFS_TREE_LOG_OBJECTID, NULL,
				      0, 0, 0);
	if (IS_ERR(leaf)) {
		kfree(root);
		return ERR_CAST(leaf);
	}

	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(leaf, leaf->start);
	btrfs_set_header_generation(leaf, trans->transid);
	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
	root->node = leaf;

	write_extent_buffer(root->node, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(root->node),
			    BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(root->node);
	btrfs_tree_unlock(root->node);
	return root;
}

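/*
 * create the tree that holds the roots of all the log trees and hang it
 * off of the fs_info.
 */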
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);
	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

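/*
 * create a log tree for @root and link it in.  The embedded inode item is
 * initialized as a small directory so the log root item is well formed.
 */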
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;

	log_root = alloc_log_tree(trans, root->fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	inode_item->generation = cpu_to_le64(1);
	inode_item->size = cpu_to_le64(3);
	inode_item->nlink = cpu_to_le32(1);
	inode_item->nbytes = cpu_to_le64(root->leafsize);
	inode_item->mode = cpu_to_le32(S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	root->log_transid = 0;
	root->last_log_commit = 0;
	return 0;
}

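/*
 * read a subvolume root directly from the tree of tree roots, without
 * consulting or filling the fs_roots radix tree.  An offset of (u64)-1 in
 * @location means "use the most recent root item for this objectid".
 */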
struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
					       struct btrfs_key *location)
{
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	struct btrfs_path *path;
	struct extent_buffer *l;
	u64 generation;
	u32 blocksize;
	int ret = 0;
	int slot;

	root = btrfs_alloc_root(fs_info);
	if (!root)
		return ERR_PTR(-ENOMEM);
	if (location->offset == (u64)-1) {
		ret = find_and_setup_root(tree_root, fs_info,
					  location->objectid, root);
		if (ret) {
			kfree(root);
			return ERR_PTR(ret);
		}
		goto out;
	}

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, location->objectid);

	path = btrfs_alloc_path();
	if (!path) {
		kfree(root);
		return ERR_PTR(-ENOMEM);
	}
	ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
	if (ret == 0) {
		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_read_root_item(l, slot, &root->root_item);
		memcpy(&root->root_key, location, sizeof(*location));
	}
	btrfs_free_path(path);
	if (ret) {
		kfree(root);
		if (ret > 0)
			ret = -ENOENT;
		return ERR_PTR(ret);
	}

	generation = btrfs_root_generation(&root->root_item);
	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     blocksize, generation);
	if (!root->node || !extent_buffer_uptodate(root->node)) {
		ret = (!root->node) ? -ENOMEM : -EIO;
		free_extent_buffer(root->node);
		kfree(root);
		return ERR_PTR(ret);
	}
	root->commit_root = btrfs_root_node(root);
	BUG_ON(!root->node); /* -ENOMEM */
out:
	if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
		root->ref_cows = 1;
		btrfs_check_and_init_root_item(&root->root_item);
	}

	return root;
}

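/*
 * look up a root by key.  The well known trees are handed back straight
 * from the fs_info; everything else is looked up in the fs_roots radix
 * tree and, on a miss, read from disk and inserted there.
 */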
struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
					      struct btrfs_key *location)
{
	struct btrfs_root *root;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;
	if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
		return fs_info->chunk_root;
	if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
		return fs_info->dev_root;
	if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
		return fs_info->csum_root;
	if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return fs_info->quota_root ? fs_info->quota_root :
					     ERR_PTR(-ENOENT);
again:
	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)location->objectid);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	if (root)
		return root;

	root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
	if (IS_ERR(root))
		return root;

	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
					GFP_NOFS);
	if (!root->free_ino_pinned || !root->free_ino_ctl) {
		ret = -ENOMEM;
		goto fail;
	}

	btrfs_init_free_ino_ctl(root);
	mutex_init(&root->fs_commit_mutex);
	spin_lock_init(&root->cache_lock);
	init_waitqueue_head(&root->cache_wait);

	ret = get_anon_bdev(&root->anon_dev);
	if (ret)
		goto fail;

	if (btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		root->orphan_item_inserted = 1;

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret)
		goto fail;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0)
		root->in_radix = 1;
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();
	if (ret) {
		if (ret == -EEXIST) {
			free_fs_root(root);
			goto again;
		}
		goto fail;
	}

	ret = btrfs_find_dead_roots(fs_info->tree_root,
				    root->root_key.objectid);
	WARN_ON(ret);
	return root;
fail:
	free_fs_root(root);
	return ERR_PTR(ret);
}

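/*
 * congestion callback for our backing_dev_info: report congestion if any
 * of the underlying devices is congested.
 */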
static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;
		bdi = blk_get_backing_dev_info(device->bdev);
		if (bdi && bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/*
 * If this fails, caller must call bdi_destroy() to get rid of the
 * bdi again.
 */
static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
	int err;

	bdi->capabilities = BDI_CAP_MAP_COPY;
	err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
	if (err)
		return err;

	bdi->ra_pages = default_backing_dev_info.ra_pages;
	bdi->congested_fn = btrfs_congested_fn;
	bdi->congested_data = info;
	return 0;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens.
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct end_io_wq *end_io_wq;
	struct btrfs_fs_info *fs_info;
	int error;

	end_io_wq = container_of(work, struct end_io_wq, work);
	bio = end_io_wq->bio;
	fs_info = end_io_wq->info;

	error = end_io_wq->error;
	bio->bi_private = end_io_wq->private;
	bio->bi_end_io = end_io_wq->end_io;
	kfree(end_io_wq);
	bio_endio(bio, error);
}

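/*
 * the cleaner kthread runs delayed iputs, cleans one deleted snapshot per
 * pass and kicks off inode defrag, whenever it can take the cleaner_mutex
 * without blocking.
 */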
static int cleaner_kthread(void *arg)
{
	struct btrfs_root *root = arg;

	do {
		int again = 0;

		if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
		    down_read_trylock(&root->fs_info->sb->s_umount)) {
			if (mutex_trylock(&root->fs_info->cleaner_mutex)) {
				btrfs_run_delayed_iputs(root);
				again = btrfs_clean_one_deleted_snapshot(root);
				mutex_unlock(&root->fs_info->cleaner_mutex);
			}
			btrfs_run_defrag_inodes(root->fs_info);
			up_read(&root->fs_info->sb->s_umount);
		}

		if (!try_to_freeze() && !again) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

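/*
 * the transaction kthread periodically wakes up and commits the running
 * transaction once it is old enough or blocked, then sleeps until the
 * next commit interval.
 */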
static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	u64 transid;
	unsigned long now;
	unsigned long delay;
	bool cannot_commit;

	do {
		cannot_commit = false;
		delay = HZ * 30;
		mutex_lock(&root->fs_info->transaction_kthread_mutex);

		spin_lock(&root->fs_info->trans_lock);
		cur = root->fs_info->running_transaction;
		if (!cur) {
			spin_unlock(&root->fs_info->trans_lock);
			goto sleep;
		}

		now = get_seconds();
		if (!cur->blocked &&
		    (now < cur->start_time || now - cur->start_time < 30)) {
			spin_unlock(&root->fs_info->trans_lock);
			delay = HZ * 5;
			goto sleep;
		}
		transid = cur->transid;
		spin_unlock(&root->fs_info->trans_lock);

		/* If the file system is aborted, this will always fail. */
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT)
				cannot_commit = true;
			goto sleep;
		}
		if (transid == trans->transid) {
			btrfs_commit_transaction(trans, root);
		} else {
			btrfs_end_transaction(trans, root);
		}
sleep:
		wake_up_process(root->fs_info->cleaner_kthread);
		mutex_unlock(&root->fs_info->transaction_kthread_mutex);

		if (!try_to_freeze()) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop() &&
			    (!btrfs_transaction_blocked(root->fs_info) ||
			     cannot_commit))
				schedule_timeout(delay);
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

/*
 * this will find the highest generation in the array of root backups.
 * The index of the newest slot is returned, or -1 if we can't find
 * anything.
 *
 * We check to make sure the array is valid by comparing the
 * generation of the latest root in the array with the generation
 * in the super block.  If they don't match we pitch it.
 */
static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
{
	u64 cur;
	int newest_index = -1;
	struct btrfs_root_backup *root_backup;
	int i;

	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
		root_backup = info->super_copy->super_roots + i;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = i;
	}

	/* check to see if we actually wrapped around */
	if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
		root_backup = info->super_copy->super_roots;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			newest_index = 0;
	}
	return newest_index;
}

/*
 * find the oldest backup so we know where to store new entries
 * in the backup array.  This will set the backup_root_index
 * field in the fs_info struct.
 */
static void find_oldest_super_backup(struct btrfs_fs_info *info,
				     u64 newest_gen)
{
	int newest_index = -1;

	newest_index = find_newest_super_backup(info, newest_gen);
	/* if there was garbage in there, just move along */
	if (newest_index == -1) {
		info->backup_root_index = 0;
	} else {
		info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
	}
}

/*
 * copy all the root pointers into the super backup array.
 * this will bump the backup pointer by one when it is
 * done.
 */
static void backup_super_roots(struct btrfs_fs_info *info)
{
	int next_backup;
	struct btrfs_root_backup *root_backup;
	int last_backup;

	next_backup = info->backup_root_index;
	last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
		BTRFS_NUM_BACKUP_ROOTS;

	/*
	 * just overwrite the last backup if we're at the same generation;
	 * this happens only at umount
	 */
	root_backup = info->super_for_commit->super_roots + last_backup;
	if (btrfs_backup_tree_root_gen(root_backup) ==
	    btrfs_header_generation(info->tree_root->node))
		next_backup = last_backup;

	root_backup = info->super_for_commit->super_roots + next_backup;

	/*
	 * make sure all of our padding and empty slots get zero filled
	 * regardless of which ones we use today
	 */
	memset(root_backup, 0, sizeof(*root_backup));

	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;

	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
	btrfs_set_backup_tree_root_gen(root_backup,
			btrfs_header_generation(info->tree_root->node));
	btrfs_set_backup_tree_root_level(root_backup,
			btrfs_header_level(info->tree_root->node));

	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
	btrfs_set_backup_chunk_root_gen(root_backup,
			btrfs_header_generation(info->chunk_root->node));
	btrfs_set_backup_chunk_root_level(root_backup,
			btrfs_header_level(info->chunk_root->node));

	btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
	btrfs_set_backup_extent_root_gen(root_backup,
			btrfs_header_generation(info->extent_root->node));
	btrfs_set_backup_extent_root_level(root_backup,
			btrfs_header_level(info->extent_root->node));

	/*
	 * we might commit during log recovery, which happens before we set
	 * the fs_root.  Make sure it is valid before we fill it in.
	 */
	if (info->fs_root && info->fs_root->node) {
		btrfs_set_backup_fs_root(root_backup,
					 info->fs_root->node->start);
		btrfs_set_backup_fs_root_gen(root_backup,
				btrfs_header_generation(info->fs_root->node));
		btrfs_set_backup_fs_root_level(root_backup,
				btrfs_header_level(info->fs_root->node));
	}

	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
	btrfs_set_backup_dev_root_gen(root_backup,
			btrfs_header_generation(info->dev_root->node));
	btrfs_set_backup_dev_root_level(root_backup,
			btrfs_header_level(info->dev_root->node));

	btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
	btrfs_set_backup_csum_root_gen(root_backup,
			btrfs_header_generation(info->csum_root->node));
	btrfs_set_backup_csum_root_level(root_backup,
			btrfs_header_level(info->csum_root->node));

	btrfs_set_backup_total_bytes(root_backup,
			btrfs_super_total_bytes(info->super_copy));
	btrfs_set_backup_bytes_used(root_backup,
			btrfs_super_bytes_used(info->super_copy));
	btrfs_set_backup_num_devices(root_backup,
			btrfs_super_num_devices(info->super_copy));

	/*
	 * if we don't copy this out to the super_copy, it won't get remembered
	 * for the next commit
	 */
	memcpy(&info->super_copy->super_roots,
	       &info->super_for_commit->super_roots,
	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
}

/*
 * this copies info out of the root backup array and back into
 * the in-memory super block.  It is meant to help iterate through
 * the array, so you send it the number of backups you've already
 * tried and the last backup index you used.
 *
 * this returns -1 when it has tried all the backups
 */
static noinline int next_root_backup(struct btrfs_fs_info *info,
				     struct btrfs_super_block *super,
				     int *num_backups_tried, int *backup_index)
{
	struct btrfs_root_backup *root_backup;
	int newest = *backup_index;

	if (*num_backups_tried == 0) {
		u64 gen = btrfs_super_generation(super);

		newest = find_newest_super_backup(info, gen);
		if (newest == -1)
			return -1;

		*backup_index = newest;
		*num_backups_tried = 1;
	} else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
		/* we've tried all the backups, all done */
		return -1;
	} else {
		/* jump to the next oldest backup */
		newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
			BTRFS_NUM_BACKUP_ROOTS;
		*backup_index = newest;
		*num_backups_tried += 1;
	}
	root_backup = super->super_roots + newest;

	btrfs_set_super_generation(super,
				   btrfs_backup_tree_root_gen(root_backup));
	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
	btrfs_set_super_root_level(super,
				   btrfs_backup_tree_root_level(root_backup));
	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));

	/*
	 * fixme: the total bytes and num_devices need to match, or we should
	 * force a fsck
	 */
	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
	return 0;
}

/* helper to cleanup workers */
static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
{
	btrfs_stop_workers(&fs_info->generic_worker);
	btrfs_stop_workers(&fs_info->fixup_workers);
	btrfs_stop_workers(&fs_info->delalloc_workers);
	btrfs_stop_workers(&fs_info->workers);
	btrfs_stop_workers(&fs_info->endio_workers);
	btrfs_stop_workers(&fs_info->endio_meta_workers);
	btrfs_stop_workers(&fs_info->endio_raid56_workers);
	btrfs_stop_workers(&fs_info->rmw_workers);
	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
	btrfs_stop_workers(&fs_info->endio_write_workers);
	btrfs_stop_workers(&fs_info->endio_freespace_worker);
	btrfs_stop_workers(&fs_info->submit_workers);
	btrfs_stop_workers(&fs_info->delayed_workers);
	btrfs_stop_workers(&fs_info->caching_workers);
	btrfs_stop_workers(&fs_info->readahead_workers);
	btrfs_stop_workers(&fs_info->flush_workers);
	btrfs_stop_workers(&fs_info->qgroup_rescan_workers);
}

/* helper to cleanup tree roots */
static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
{
	free_extent_buffer(info->tree_root->node);
	free_extent_buffer(info->tree_root->commit_root);
	free_extent_buffer(info->dev_root->node);
	free_extent_buffer(info->dev_root->commit_root);
	free_extent_buffer(info->extent_root->node);
	free_extent_buffer(info->extent_root->commit_root);
	free_extent_buffer(info->csum_root->node);
	free_extent_buffer(info->csum_root->commit_root);
	if (info->quota_root) {
		free_extent_buffer(info->quota_root->node);
		free_extent_buffer(info->quota_root->commit_root);
	}

	info->tree_root->node = NULL;
	info->tree_root->commit_root = NULL;
	info->dev_root->node = NULL;
	info->dev_root->commit_root = NULL;
	info->extent_root->node = NULL;
	info->extent_root->commit_root = NULL;
	info->csum_root->node = NULL;
	info->csum_root->commit_root = NULL;
	if (info->quota_root) {
		info->quota_root->node = NULL;
		info->quota_root->commit_root = NULL;
	}

	if (chunk_root) {
		free_extent_buffer(info->chunk_root->node);
		free_extent_buffer(info->chunk_root->commit_root);
		info->chunk_root->node = NULL;
		info->chunk_root->commit_root = NULL;
	}
}

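/*
 * drop all of the dead roots and then empty out the radix tree of fs
 * roots.  Used on unmount and in the open_ctree() error paths.
 */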
static void del_fs_roots(struct btrfs_fs_info *fs_info)
{
	int ret;
	struct btrfs_root *gang[8];
	int i;

	while (!list_empty(&fs_info->dead_roots)) {
		gang[0] = list_entry(fs_info->dead_roots.next,
				     struct btrfs_root, root_list);
		list_del(&gang[0]->root_list);

		if (gang[0]->in_radix) {
			btrfs_free_fs_root(fs_info, gang[0]);
		} else {
			free_extent_buffer(gang[0]->node);
			free_extent_buffer(gang[0]->commit_root);
			kfree(gang[0]);
		}
	}

	while (1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, 0,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++)
			btrfs_free_fs_root(fs_info, gang[i]);
	}
}

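/*
 * open_ctree does the work of reading in a filesystem: it reads and
 * validates the super block, brings up the chunk, extent, dev and csum
 * trees, starts the worker threads and kthreads, replays the log tree if
 * there is one, and finally reads the fs tree.  Anything set up before a
 * failure is torn down through the fail_* labels at the bottom.
 */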
int open_ctree(struct super_block *sb,
	       struct btrfs_fs_devices *fs_devices,
	       char *options)
{
	u32 sectorsize;
	u32 nodesize;
	u32 leafsize;
	u32 blocksize;
	u32 stripesize;
	u64 generation;
	u64 features;
	struct btrfs_key location;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
	struct btrfs_root *tree_root;
	struct btrfs_root *extent_root;
	struct btrfs_root *csum_root;
	struct btrfs_root *chunk_root;
	struct btrfs_root *dev_root;
	struct btrfs_root *quota_root;
	struct btrfs_root *log_tree_root;
	int ret;
	int err = -EINVAL;
	int num_backups_tried = 0;
	int backup_index = 0;

	tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
	extent_root = fs_info->extent_root = btrfs_alloc_root(fs_info);
	csum_root = fs_info->csum_root = btrfs_alloc_root(fs_info);
	chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
	dev_root = fs_info->dev_root = btrfs_alloc_root(fs_info);
	quota_root = fs_info->quota_root = btrfs_alloc_root(fs_info);

	if (!tree_root || !extent_root || !csum_root ||
	    !chunk_root || !dev_root || !quota_root) {
		err = -ENOMEM;
		goto fail;
	}

	ret = init_srcu_struct(&fs_info->subvol_srcu);
	if (ret) {
		err = ret;
		goto fail;
	}

	ret = setup_bdi(fs_info, &fs_info->bdi);
	if (ret) {
		err = ret;
		goto fail_srcu;
	}

	ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0);
	if (ret) {
		err = ret;
		goto fail_bdi;
	}
	fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
					(1 + ilog2(nr_cpu_ids));

	ret = percpu_counter_init(&fs_info->delalloc_bytes, 0);
	if (ret) {
		err = ret;
		goto fail_dirty_metadata_bytes;
	}

	fs_info->btree_inode = new_inode(sb);
	if (!fs_info->btree_inode) {
		err = -ENOMEM;
		goto fail_delalloc_bytes;
	}

	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);

	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
	INIT_LIST_HEAD(&fs_info->trans_list);
	INIT_LIST_HEAD(&fs_info->dead_roots);
	INIT_LIST_HEAD(&fs_info->delayed_iputs);
	INIT_LIST_HEAD(&fs_info->delalloc_inodes);
	INIT_LIST_HEAD(&fs_info->caching_block_groups);
	spin_lock_init(&fs_info->delalloc_lock);
	spin_lock_init(&fs_info->trans_lock);
	spin_lock_init(&fs_info->fs_roots_radix_lock);
	spin_lock_init(&fs_info->delayed_iput_lock);
	spin_lock_init(&fs_info->defrag_inodes_lock);
	spin_lock_init(&fs_info->free_chunk_lock);
	spin_lock_init(&fs_info->tree_mod_seq_lock);
	spin_lock_init(&fs_info->super_lock);
	rwlock_init(&fs_info->tree_mod_log_lock);
	mutex_init(&fs_info->reloc_mutex);
	seqlock_init(&fs_info->profiles_lock);

	init_completion(&fs_info->kobj_unregister);
	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
	INIT_LIST_HEAD(&fs_info->space_info);
	INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
	btrfs_mapping_init(&fs_info->mapping_tree);
	btrfs_init_block_rsv(&fs_info->global_block_rsv,
			     BTRFS_BLOCK_RSV_GLOBAL);
	btrfs_init_block_rsv(&fs_info->delalloc_block_rsv,
			     BTRFS_BLOCK_RSV_DELALLOC);
	btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
	btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
	btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
	btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
			     BTRFS_BLOCK_RSV_DELOPS);
	atomic_set(&fs_info->nr_async_submits, 0);
	atomic_set(&fs_info->async_delalloc_pages, 0);
	atomic_set(&fs_info->async_submit_draining, 0);
	atomic_set(&fs_info->nr_async_bios, 0);
	atomic_set(&fs_info->defrag_running, 0);
	atomic64_set(&fs_info->tree_mod_seq, 0);
	fs_info->sb = sb;
	fs_info->max_inline = 8192 * 1024;
	fs_info->metadata_ratio = 0;
	fs_info->defrag_inodes = RB_ROOT;
	fs_info->trans_no_join = 0;
	fs_info->free_chunk_space = 0;
	fs_info->tree_mod_log = RB_ROOT;

	/* readahead state */
	INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
	spin_lock_init(&fs_info->reada_lock);

	fs_info->thread_pool_size = min_t(unsigned long,
					  num_online_cpus() + 2, 8);

	INIT_LIST_HEAD(&fs_info->ordered_extents);
	spin_lock_init(&fs_info->ordered_extent_lock);
	fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
					GFP_NOFS);
	if (!fs_info->delayed_root) {
		err = -ENOMEM;
		goto fail_iput;
	}
	btrfs_init_delayed_root(fs_info->delayed_root);

	mutex_init(&fs_info->scrub_lock);
	atomic_set(&fs_info->scrubs_running, 0);
	atomic_set(&fs_info->scrub_pause_req, 0);
	atomic_set(&fs_info->scrubs_paused, 0);
	atomic_set(&fs_info->scrub_cancel_req, 0);
	init_waitqueue_head(&fs_info->scrub_pause_wait);
	init_rwsem(&fs_info->scrub_super_lock);
	fs_info->scrub_workers_refcnt = 0;
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	fs_info->check_integrity_print_mask = 0;
#endif

	spin_lock_init(&fs_info->balance_lock);
	mutex_init(&fs_info->balance_mutex);
	atomic_set(&fs_info->balance_running, 0);
	atomic_set(&fs_info->balance_pause_req, 0);
	atomic_set(&fs_info->balance_cancel_req, 0);
	fs_info->balance_ctl = NULL;
	init_waitqueue_head(&fs_info->balance_wait_q);

	sb->s_blocksize = 4096;
	sb->s_blocksize_bits = blksize_bits(4096);
	sb->s_bdi = &fs_info->bdi;

	fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
	set_nlink(fs_info->btree_inode, 1);
	/*
	 * we set the i_size on the btree inode to the max possible int.
	 * the real end of the address space is determined by all of
	 * the devices in the system
	 */
	fs_info->btree_inode->i_size = OFFSET_MAX;
	fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
	fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;

	RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
	extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
			    fs_info->btree_inode->i_mapping);
	BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
	extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);

	BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;

	BTRFS_I(fs_info->btree_inode)->root = tree_root;
	memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
	       sizeof(struct btrfs_key));
	set_bit(BTRFS_INODE_DUMMY,
		&BTRFS_I(fs_info->btree_inode)->runtime_flags);
	insert_inode_hash(fs_info->btree_inode);

	spin_lock_init(&fs_info->block_group_cache_lock);
	fs_info->block_group_cache_tree = RB_ROOT;
	fs_info->first_logical_byte = (u64)-1;

	extent_io_tree_init(&fs_info->freed_extents[0],
			    fs_info->btree_inode->i_mapping);
	extent_io_tree_init(&fs_info->freed_extents[1],
			    fs_info->btree_inode->i_mapping);
	fs_info->pinned_extents = &fs_info->freed_extents[0];
	fs_info->do_barriers = 1;

	mutex_init(&fs_info->ordered_operations_mutex);
	mutex_init(&fs_info->tree_log_mutex);
	mutex_init(&fs_info->chunk_mutex);
	mutex_init(&fs_info->transaction_kthread_mutex);
	mutex_init(&fs_info->cleaner_mutex);
	mutex_init(&fs_info->volume_mutex);
	init_rwsem(&fs_info->extent_commit_sem);
	init_rwsem(&fs_info->cleanup_work_sem);
	init_rwsem(&fs_info->subvol_sem);
	fs_info->dev_replace.lock_owner = 0;
	atomic_set(&fs_info->dev_replace.nesting_level, 0);
	mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
	mutex_init(&fs_info->dev_replace.lock_management_lock);
	mutex_init(&fs_info->dev_replace.lock);

	spin_lock_init(&fs_info->qgroup_lock);
	mutex_init(&fs_info->qgroup_ioctl_lock);
	fs_info->qgroup_tree = RB_ROOT;
	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
	fs_info->qgroup_seq = 1;
	fs_info->quota_enabled = 0;
	fs_info->pending_quota_state = 0;
	mutex_init(&fs_info->qgroup_rescan_lock);

	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);

	init_waitqueue_head(&fs_info->transaction_throttle);
	init_waitqueue_head(&fs_info->transaction_wait);
	init_waitqueue_head(&fs_info->transaction_blocked_wait);
	init_waitqueue_head(&fs_info->async_submit_wait);

	ret = btrfs_alloc_stripe_hash_table(fs_info);
	if (ret) {
		err = ret;
		goto fail_alloc;
	}

	__setup_root(4096, 4096, 4096, 4096, tree_root,
		     fs_info, BTRFS_ROOT_TREE_OBJECTID);

	invalidate_bdev(fs_devices->latest_bdev);

	/*
	 * Read super block and check the signature bytes only
	 */
	bh = btrfs_read_dev_super(fs_devices->latest_bdev);
	if (!bh) {
		err = -EINVAL;
		goto fail_alloc;
	}

	/*
	 * We want to check superblock checksum, the type is stored inside.
	 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
	 */
	if (btrfs_check_super_csum(bh->b_data)) {
		printk(KERN_ERR "btrfs: superblock checksum mismatch\n");
		err = -EINVAL;
		goto fail_alloc;
	}

	/*
	 * super_copy is zeroed at allocation time and we never touch the
	 * following bytes up to INFO_SIZE, the checksum is calculated from
	 * the whole block of INFO_SIZE
	 */
	memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
	memcpy(fs_info->super_for_commit, fs_info->super_copy,
	       sizeof(*fs_info->super_for_commit));
	brelse(bh);

	memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);

	ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
	if (ret) {
		printk(KERN_ERR "btrfs: superblock contains fatal errors\n");
		err = -EINVAL;
		goto fail_alloc;
	}

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		goto fail_alloc;

	/* check FS state, whether FS is broken. */
	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
		set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);

	/*
	 * run through our array of backup supers and setup
	 * our ring pointer to the oldest one
	 */
	generation = btrfs_super_generation(disk_super);
	find_oldest_super_backup(fs_info, generation);

	/*
	 * In the long term, we'll store the compression type in the super
	 * block, and it'll be used for per file compression control.
	 */
	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;

	ret = btrfs_parse_options(tree_root, options);
	if (ret) {
		err = ret;
		goto fail_alloc;
	}

	features = btrfs_super_incompat_flags(disk_super) &
		~BTRFS_FEATURE_INCOMPAT_SUPP;
	if (features) {
		printk(KERN_ERR "BTRFS: couldn't mount because of "
		       "unsupported optional features (%Lx).\n",
		       (unsigned long long)features);
		err = -EINVAL;
		goto fail_alloc;
	}

	if (btrfs_super_leafsize(disk_super) !=
	    btrfs_super_nodesize(disk_super)) {
		printk(KERN_ERR "BTRFS: couldn't mount because metadata "
		       "blocksizes don't match.  node %d leaf %d\n",
		       btrfs_super_nodesize(disk_super),
		       btrfs_super_leafsize(disk_super));
		err = -EINVAL;
		goto fail_alloc;
	}
	if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
		printk(KERN_ERR "BTRFS: couldn't mount because metadata "
		       "blocksize (%d) was too large\n",
		       btrfs_super_leafsize(disk_super));
		err = -EINVAL;
		goto fail_alloc;
	}

	features = btrfs_super_incompat_flags(disk_super);
	features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
	if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;

	if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
		printk(KERN_ERR "btrfs: has skinny extents\n");

	/*
	 * flag our filesystem as having big metadata blocks if
	 * they are bigger than the page size
	 */
	if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
		if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
			printk(KERN_INFO "btrfs flagging fs with big metadata feature\n");
		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
	}

	nodesize = btrfs_super_nodesize(disk_super);
	leafsize = btrfs_super_leafsize(disk_super);
	sectorsize = btrfs_super_sectorsize(disk_super);
	stripesize = btrfs_super_stripesize(disk_super);
	fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids));
	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));

	/*
	 * mixed block groups end up with duplicate but slightly offset
	 * extent buffers for the same range.  It leads to corruptions
	 */
	if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
	    (sectorsize != leafsize)) {
		printk(KERN_WARNING "btrfs: unequal leaf/node/sector sizes "
		       "are not allowed for mixed block groups on %s\n",
		       sb->s_id);
		goto fail_alloc;
	}

	/*
	 * Needn't use the lock because there is no other task which will
	 * update the flag.
	 */
	btrfs_set_super_incompat_flags(disk_super, features);

	features = btrfs_super_compat_ro_flags(disk_super) &
		~BTRFS_FEATURE_COMPAT_RO_SUPP;
	if (!(sb->s_flags & MS_RDONLY) && features) {
		printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
		       "unsupported option features (%Lx).\n",
		       (unsigned long long)features);
		err = -EINVAL;
		goto fail_alloc;
	}

	btrfs_init_workers(&fs_info->generic_worker,
			   "genwork", 1, NULL);

	btrfs_init_workers(&fs_info->workers, "worker",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);

	btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);

	btrfs_init_workers(&fs_info->flush_workers, "flush_delalloc",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);

	btrfs_init_workers(&fs_info->submit_workers, "submit",
			   min_t(u64, fs_devices->num_devices,
			   fs_info->thread_pool_size),
			   &fs_info->generic_worker);

	btrfs_init_workers(&fs_info->caching_workers, "cache",
			   2, &fs_info->generic_worker);

	/* a higher idle thresh on the submit workers makes it much more
	 * likely that bios will be sent down in a sane order to the
	 * devices
	 */
	fs_info->submit_workers.idle_thresh = 64;

	fs_info->workers.idle_thresh = 16;
	fs_info->workers.ordered = 1;

	fs_info->delalloc_workers.idle_thresh = 2;
	fs_info->delalloc_workers.ordered = 1;

	btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_workers, "endio",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_meta_write_workers,
			   "endio-meta-write", fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_raid56_workers,
			   "endio-raid56", fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->rmw_workers,
			   "rmw", fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
			   1, &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->readahead_workers, "readahead",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->qgroup_rescan_workers, "qgroup-rescan", 1,
			   &fs_info->generic_worker);

	/*
	 * endios are largely parallel and should have a very
	 * low idle thresh
	 */
	fs_info->endio_workers.idle_thresh = 4;
	fs_info->endio_meta_workers.idle_thresh = 4;
	fs_info->endio_raid56_workers.idle_thresh = 4;
	fs_info->rmw_workers.idle_thresh = 2;

	fs_info->endio_write_workers.idle_thresh = 2;
	fs_info->endio_meta_write_workers.idle_thresh = 2;
	fs_info->readahead_workers.idle_thresh = 2;

	/*
	 * btrfs_start_workers can really only fail because of ENOMEM so just
	 * return -ENOMEM if any of these fail.
	 */
	ret = btrfs_start_workers(&fs_info->workers);
	ret |= btrfs_start_workers(&fs_info->generic_worker);
	ret |= btrfs_start_workers(&fs_info->submit_workers);
	ret |= btrfs_start_workers(&fs_info->delalloc_workers);
	ret |= btrfs_start_workers(&fs_info->fixup_workers);
	ret |= btrfs_start_workers(&fs_info->endio_workers);
	ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
	ret |= btrfs_start_workers(&fs_info->rmw_workers);
	ret |= btrfs_start_workers(&fs_info->endio_raid56_workers);
	ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
	ret |= btrfs_start_workers(&fs_info->endio_write_workers);
	ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
	ret |= btrfs_start_workers(&fs_info->delayed_workers);
	ret |= btrfs_start_workers(&fs_info->caching_workers);
	ret |= btrfs_start_workers(&fs_info->readahead_workers);
	ret |= btrfs_start_workers(&fs_info->flush_workers);
	ret |= btrfs_start_workers(&fs_info->qgroup_rescan_workers);
	if (ret) {
		err = -ENOMEM;
		goto fail_sb_buffer;
	}

	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
				    4 * 1024 * 1024 / PAGE_CACHE_SIZE);

	tree_root->nodesize = nodesize;
	tree_root->leafsize = leafsize;
	tree_root->sectorsize = sectorsize;
	tree_root->stripesize = stripesize;

	sb->s_blocksize = sectorsize;
	sb->s_blocksize_bits = blksize_bits(sectorsize);

	if (disk_super->magic != cpu_to_le64(BTRFS_MAGIC)) {
		printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
		goto fail_sb_buffer;
	}

	if (sectorsize != PAGE_SIZE) {
		printk(KERN_WARNING "btrfs: Incompatible sector size(%lu) "
		       "found on %s\n", (unsigned long)sectorsize, sb->s_id);
		goto fail_sb_buffer;
	}

	mutex_lock(&fs_info->chunk_mutex);
	ret = btrfs_read_sys_array(tree_root);
	mutex_unlock(&fs_info->chunk_mutex);
	if (ret) {
		printk(KERN_WARNING "btrfs: failed to read the system "
		       "array on %s\n", sb->s_id);
		goto fail_sb_buffer;
	}

	blocksize = btrfs_level_size(tree_root,
				     btrfs_super_chunk_root_level(disk_super));
	generation = btrfs_super_chunk_root_generation(disk_super);

	__setup_root(nodesize, leafsize, sectorsize, stripesize,
		     chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);

	chunk_root->node = read_tree_block(chunk_root,
					   btrfs_super_chunk_root(disk_super),
					   blocksize, generation);
	if (!chunk_root->node ||
	    !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
		printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
		       sb->s_id);
		goto fail_tree_roots;
	}
	btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
	chunk_root->commit_root = btrfs_root_node(chunk_root);

	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
	   (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
	   BTRFS_UUID_SIZE);

	ret = btrfs_read_chunk_tree(chunk_root);
	if (ret) {
		printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
		       sb->s_id);
		goto fail_tree_roots;
	}

	/*
	 * keep the device that is marked to be the target device for the
	 * dev_replace procedure
	 */
	btrfs_close_extra_devices(fs_info, fs_devices, 0);

	if (!fs_devices->latest_bdev) {
		printk(KERN_CRIT "btrfs: failed to read devices on %s\n",
		       sb->s_id);
		goto fail_tree_roots;
	}

retry_root_backup:
	blocksize = btrfs_level_size(tree_root,
				     btrfs_super_root_level(disk_super));
	generation = btrfs_super_generation(disk_super);

	tree_root->node = read_tree_block(tree_root,
					  btrfs_super_root(disk_super),
					  blocksize, generation);
	if (!tree_root->node ||
	    !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
		printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
		       sb->s_id);
		goto recovery_tree_root;
	}

	btrfs_set_root_node(&tree_root->root_item, tree_root->node);
	tree_root->commit_root = btrfs_root_node(tree_root);

	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
	if (ret)
		goto recovery_tree_root;
	extent_root->track_dirty = 1;

	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_DEV_TREE_OBJECTID, dev_root);
	if (ret)
		goto recovery_tree_root;
	dev_root->track_dirty = 1;

	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_CSUM_TREE_OBJECTID, csum_root);
	if (ret)
		goto recovery_tree_root;
	csum_root->track_dirty = 1;

	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_QUOTA_TREE_OBJECTID, quota_root);
	if (ret) {
		kfree(quota_root);
		quota_root = fs_info->quota_root = NULL;
	} else {
		quota_root->track_dirty = 1;
		fs_info->quota_enabled = 1;
		fs_info->pending_quota_state = 1;
	}

	fs_info->generation = generation;
	fs_info->last_trans_committed = generation;

	ret = btrfs_recover_balance(fs_info);
	if (ret) {
		printk(KERN_WARNING "btrfs: failed to recover balance\n");
		goto fail_block_groups;
	}

	ret = btrfs_init_dev_stats(fs_info);
	if (ret) {
		printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
		       ret);
		goto fail_block_groups;
	}

	ret = btrfs_init_dev_replace(fs_info);
	if (ret) {
		pr_err("btrfs: failed to init dev_replace: %d\n", ret);
		goto fail_block_groups;
	}

	btrfs_close_extra_devices(fs_info, fs_devices, 1);

	ret = btrfs_init_space_info(fs_info);
	if (ret) {
		printk(KERN_ERR "Failed to initialize space info: %d\n", ret);
		goto fail_block_groups;
	}

	ret = btrfs_read_block_groups(extent_root);
	if (ret) {
		printk(KERN_ERR "Failed to read block groups: %d\n", ret);
		goto fail_block_groups;
	}
	fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
	if (fs_info->fs_devices->missing_devices >
	     fs_info->num_tolerated_disk_barrier_failures &&
	    !(sb->s_flags & MS_RDONLY)) {
		printk(KERN_WARNING
		       "Btrfs: too many missing devices, writeable mount is not allowed\n");
		goto fail_block_groups;
	}

	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
					       "btrfs-cleaner");
	if (IS_ERR(fs_info->cleaner_kthread))
		goto fail_block_groups;

	fs_info->transaction_kthread = kthread_run(transaction_kthread,
						   tree_root,
						   "btrfs-transaction");
	if (IS_ERR(fs_info->transaction_kthread))
		goto fail_cleaner;

	if (!btrfs_test_opt(tree_root, SSD) &&
	    !btrfs_test_opt(tree_root, NOSSD) &&
	    !fs_info->fs_devices->rotating) {
		printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
		       "mode\n");
		btrfs_set_opt(fs_info->mount_opt, SSD);
	}

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
		ret = btrfsic_mount(tree_root, fs_devices,
				    btrfs_test_opt(tree_root,
					CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
				    1 : 0,
				    fs_info->check_integrity_print_mask);
		if (ret)
			printk(KERN_WARNING "btrfs: failed to initialize"
			       " integrity check module %s\n", sb->s_id);
	}
#endif
	ret = btrfs_read_qgroup_config(fs_info);
	if (ret)
		goto fail_trans_kthread;

	/* do not make disk changes in broken FS */
	if (btrfs_super_log_root(disk_super) != 0) {
		u64 bytenr = btrfs_super_log_root(disk_super);

		if (fs_devices->rw_devices == 0) {
			printk(KERN_WARNING "Btrfs log replay required "
			       "on RO media\n");
			err = -EIO;
			goto fail_qgroup;
		}
		blocksize =
		     btrfs_level_size(tree_root,
				      btrfs_super_log_root_level(disk_super));

		log_tree_root = btrfs_alloc_root(fs_info);
		if (!log_tree_root) {
			err = -ENOMEM;
			goto fail_qgroup;
		}

		__setup_root(nodesize, leafsize, sectorsize, stripesize,
			     log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);

		log_tree_root->node = read_tree_block(tree_root, bytenr,
						      blocksize,
						      generation + 1);
		if (!log_tree_root->node ||
		    !extent_buffer_uptodate(log_tree_root->node)) {
			printk(KERN_ERR "btrfs: failed to read log tree\n");
			free_extent_buffer(log_tree_root->node);
			kfree(log_tree_root);
			goto fail_trans_kthread;
		}
		/* returns with log_tree_root freed on success */
		ret = btrfs_recover_log_trees(log_tree_root);
		if (ret) {
			btrfs_error(tree_root->fs_info, ret,
				    "Failed to recover log tree");
			free_extent_buffer(log_tree_root->node);
			kfree(log_tree_root);
			goto fail_trans_kthread;
		}

		if (sb->s_flags & MS_RDONLY) {
			ret = btrfs_commit_super(tree_root);
			if (ret)
				goto fail_trans_kthread;
		}
	}

	ret = btrfs_find_orphan_roots(tree_root);
	if (ret)
		goto fail_trans_kthread;

	if (!(sb->s_flags & MS_RDONLY)) {
		ret = btrfs_cleanup_fs_roots(fs_info);
		if (ret)
			goto fail_trans_kthread;

		ret = btrfs_recover_relocation(tree_root);
		if (ret < 0) {
			printk(KERN_WARNING
			       "btrfs: failed to recover relocation\n");
			err = -EINVAL;
			goto fail_qgroup;
		}
	}

	location.objectid = BTRFS_FS_TREE_OBJECTID;
	location.type = BTRFS_ROOT_ITEM_KEY;
	location.offset = (u64)-1;

	fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
	if (!fs_info->fs_root)
		goto fail_qgroup;
	if (IS_ERR(fs_info->fs_root)) {
		err = PTR_ERR(fs_info->fs_root);
		goto fail_qgroup;
	}

	if (sb->s_flags & MS_RDONLY)
		return 0;

	down_read(&fs_info->cleanup_work_sem);
	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
		up_read(&fs_info->cleanup_work_sem);
		close_ctree(tree_root);
		return ret;
	}
	up_read(&fs_info->cleanup_work_sem);

	ret = btrfs_resume_balance_async(fs_info);
	if (ret) {
		printk(KERN_WARNING "btrfs: failed to resume balance\n");
		close_ctree(tree_root);
		return ret;
	}

	ret = btrfs_resume_dev_replace_async(fs_info);
	if (ret) {
		pr_warn("btrfs: failed to resume dev_replace\n");
		close_ctree(tree_root);
		return ret;
	}

	return 0;

fail_qgroup:
	btrfs_free_qgroup_config(fs_info);
fail_trans_kthread:
	kthread_stop(fs_info->transaction_kthread);
	del_fs_roots(fs_info);
	btrfs_cleanup_transaction(fs_info->tree_root);
fail_cleaner:
	kthread_stop(fs_info->cleaner_kthread);

	/*
	 * make sure we're done with the btree inode before we stop our
	 * kthreads
	 */
	filemap_write_and_wait(fs_info->btree_inode->i_mapping);

fail_block_groups:
	btrfs_put_block_group_cache(fs_info);
	btrfs_free_block_groups(fs_info);

fail_tree_roots:
	free_root_pointers(fs_info, 1);
	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);

fail_sb_buffer:
	btrfs_stop_all_workers(fs_info);
fail_alloc:
fail_iput:
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

	iput(fs_info->btree_inode);
fail_delalloc_bytes:
	percpu_counter_destroy(&fs_info->delalloc_bytes);
fail_dirty_metadata_bytes:
	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
fail_bdi:
	bdi_destroy(&fs_info->bdi);
fail_srcu:
	cleanup_srcu_struct(&fs_info->subvol_srcu);
fail:
	btrfs_free_stripe_hash_table(fs_info);
	btrfs_close_devices(fs_info->fs_devices);
	return err;

recovery_tree_root:
	if (!btrfs_test_opt(tree_root, RECOVERY))
		goto fail_tree_roots;

	free_root_pointers(fs_info, 0);

	/* don't use the log in recovery mode, it won't be valid */
	btrfs_set_super_log_root(disk_super, 0);

	/* we can't trust the free space cache either */
	btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);

	ret = next_root_backup(fs_info, fs_info->super_copy,
			       &num_backups_tried, &backup_index);
	if (ret == -1)
		goto fail_block_groups;
	goto retry_root_backup;
}

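/*
 * endio handler for the synchronous super block writes done by
 * write_dev_supers() below; write errors are recorded in the per-device
 * stats instead of the generic buffer error bits.
 */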
static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		struct btrfs_device *device = (struct btrfs_device *)
			bh->b_private;

		printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to "
					  "I/O error on %s\n",
					  rcu_str_deref(device->name));
		/* note, we don't set_buffer_write_io_error because we have
		 * our own ways of dealing with the IO errors
		 */
		clear_buffer_uptodate(bh);
		btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

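/*
 * read the primary super block copy from @bdev and return the buffer head
 * holding it, or NULL if no valid super block was found.
 */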
struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
{
	struct buffer_head *bh;
	struct buffer_head *latest = NULL;
	struct btrfs_super_block *super;
	int i;
	u64 transid = 0;
	u64 bytenr;

	/* we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	for (i = 0; i < 1; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
			break;
		bh = __bread(bdev, bytenr / 4096, 4096);
		if (!bh)
			continue;

		super = (struct btrfs_super_block *)bh->b_data;
		if (btrfs_super_bytenr(super) != bytenr ||
		    super->magic != cpu_to_le64(BTRFS_MAGIC)) {
			brelse(bh);
			continue;
		}

		if (!latest || btrfs_super_generation(super) > transid) {
			brelse(latest);
			latest = bh;
			transid = btrfs_super_generation(super);
		} else {
			brelse(bh);
		}
	}
	return latest;
}

/*
 * this should be called twice, once with wait == 0 and
 * once with wait == 1. When wait == 0 is done, all the buffer heads
 * we write are pinned.
 *
 * They are released when wait == 1 is done.
 * max_mirrors must be the same for both runs, and it indicates how
 * many supers on this one device should be written.
 *
 * max_mirrors == 0 means to write them all.
 */
static int write_dev_supers(struct btrfs_device *device,
                            struct btrfs_super_block *sb,
                            int do_barriers, int wait, int max_mirrors)
{
        struct buffer_head *bh;
        int i;
        int ret;
        int errors = 0;
        u32 crc;
        u64 bytenr;

        if (max_mirrors == 0)
                max_mirrors = BTRFS_SUPER_MIRROR_MAX;

        for (i = 0; i < max_mirrors; i++) {
                bytenr = btrfs_sb_offset(i);
                if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
                        break;

                if (wait) {
                        bh = __find_get_block(device->bdev, bytenr / 4096,
                                              BTRFS_SUPER_INFO_SIZE);
                        if (!bh) {
                                errors++;
                                continue;
                        }
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                errors++;

                        /* drop our reference */
                        brelse(bh);

                        /* drop the reference from the wait == 0 run */
                        brelse(bh);
                        continue;
                } else {
                        btrfs_set_super_bytenr(sb, bytenr);

                        crc = ~(u32)0;
                        crc = btrfs_csum_data((char *)sb +
                                              BTRFS_CSUM_SIZE, crc,
                                              BTRFS_SUPER_INFO_SIZE -
                                              BTRFS_CSUM_SIZE);
                        btrfs_csum_final(crc, sb->csum);

                        /*
                         * one reference for us, and we leave it for the
                         * caller
                         */
                        bh = __getblk(device->bdev, bytenr / 4096,
                                      BTRFS_SUPER_INFO_SIZE);
                        if (!bh) {
                                printk(KERN_ERR "btrfs: couldn't get super "
                                       "buffer head for bytenr %Lu\n", bytenr);
                                errors++;
                                continue;
                        }

                        memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);

                        /* one reference for submit_bh */
                        get_bh(bh);

                        set_buffer_uptodate(bh);
                        lock_buffer(bh);
                        bh->b_end_io = btrfs_end_buffer_write_sync;
                        bh->b_private = device;
                }

                /*
                 * we fua the first super. The others we allow
                 * to go down lazy.
                 */
                ret = btrfsic_submit_bh(WRITE_FUA, bh);
                if (ret)
                        errors++;
        }
        return errors < i ? 0 : -1;
}

/*
 * endio for the write_dev_flush, this will wake anyone waiting
 * for the barrier when it is done
 */
static void btrfs_end_empty_barrier(struct bio *bio, int err)
{
        if (err) {
                if (err == -EOPNOTSUPP)
                        set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        }
        if (bio->bi_private)
                complete(bio->bi_private);
        bio_put(bio);
}

/*
 * trigger flushes for one of the devices.  If you pass wait == 0, the
 * flushes are sent down.  With wait == 1, it waits for the previous flush.
 *
 * any device where the flush fails with eopnotsupp is flagged as
 * not-barrier capable
 */
static int write_dev_flush(struct btrfs_device *device, int wait)
{
        struct bio *bio;
        int ret = 0;

        if (device->nobarriers)
                return 0;

        if (wait) {
                bio = device->flush_bio;
                if (!bio)
                        return 0;

                wait_for_completion(&device->flush_wait);

                if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
                        printk_in_rcu("btrfs: disabling barriers on dev %s\n",
                                      rcu_str_deref(device->name));
                        device->nobarriers = 1;
                } else if (!bio_flagged(bio, BIO_UPTODATE)) {
                        ret = -EIO;
                        btrfs_dev_stat_inc_and_print(device,
                                BTRFS_DEV_STAT_FLUSH_ERRS);
                }

                /* drop the reference from the wait == 0 run */
                bio_put(bio);
                device->flush_bio = NULL;

                return ret;
        }

        /*
         * one reference for us, and we leave it for the
         * caller
         */
        device->flush_bio = NULL;
        bio = bio_alloc(GFP_NOFS, 0);
        if (!bio)
                return -ENOMEM;

        bio->bi_end_io = btrfs_end_empty_barrier;
        bio->bi_bdev = device->bdev;
        init_completion(&device->flush_wait);
        bio->bi_private = &device->flush_wait;
        device->flush_bio = bio;

        bio_get(bio);
        btrfsic_submit_bio(WRITE_FLUSH, bio);

        return 0;
}

/*
 * send an empty flush down to each device in parallel,
 * then wait for them
 */
static int barrier_all_devices(struct btrfs_fs_info *info)
{
        struct list_head *head;
        struct btrfs_device *dev;
        int errors_send = 0;
        int errors_wait = 0;
        int ret;

        /* send down all the barriers */
        head = &info->fs_devices->devices;
        list_for_each_entry_rcu(dev, head, dev_list) {
                if (!dev->bdev) {
                        errors_send++;
                        continue;
                }
                if (!dev->in_fs_metadata || !dev->writeable)
                        continue;

                ret = write_dev_flush(dev, 0);
                if (ret)
                        errors_send++;
        }

        /* wait for all the barriers */
        list_for_each_entry_rcu(dev, head, dev_list) {
                if (!dev->bdev) {
                        errors_wait++;
                        continue;
                }
                if (!dev->in_fs_metadata || !dev->writeable)
                        continue;

                ret = write_dev_flush(dev, 1);
                if (ret)
                        errors_wait++;
        }
        if (errors_send > info->num_tolerated_disk_barrier_failures ||
            errors_wait > info->num_tolerated_disk_barrier_failures)
                return -EIO;
        return 0;
}

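/*
 * Look at the redundancy of every allocated block group profile and
 * work out how many devices may fail a barrier/flush before the super
 * blocks can no longer be trusted.  The callers store the result in
 * fs_info->num_tolerated_disk_barrier_failures, which is what
 * barrier_all_devices() above checks against.
 */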
int btrfs_calc_num_tolerated_disk_barrier_failures(
        struct btrfs_fs_info *fs_info)
{
        struct btrfs_ioctl_space_info space;
        struct btrfs_space_info *sinfo;
        u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
                       BTRFS_BLOCK_GROUP_SYSTEM,
                       BTRFS_BLOCK_GROUP_METADATA,
                       BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
        int num_types = 4;
        int i;
        int c;
        int num_tolerated_disk_barrier_failures =
                (int)fs_info->fs_devices->num_devices;

        for (i = 0; i < num_types; i++) {
                struct btrfs_space_info *tmp;

                sinfo = NULL;
                rcu_read_lock();
                list_for_each_entry_rcu(tmp, &fs_info->space_info, list) {
                        if (tmp->flags == types[i]) {
                                sinfo = tmp;
                                break;
                        }
                }
                rcu_read_unlock();

                if (!sinfo)
                        continue;

                down_read(&sinfo->groups_sem);
                for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
                        if (!list_empty(&sinfo->block_groups[c])) {
                                u64 flags;

                                btrfs_get_block_group_info(
                                        &sinfo->block_groups[c], &space);
                                if (space.total_bytes == 0 ||
                                    space.used_bytes == 0)
                                        continue;
                                flags = space.flags;
                                /*
                                 * return
                                 * 0: if dup, single or RAID0 is configured for
                                 *    any of metadata, system or data, else
                                 * 1: if RAID5 is configured, or if RAID1 or
                                 *    RAID10 is configured and only two mirrors
                                 *    are used, else
                                 * 2: if RAID6 is configured, else
                                 * num_mirrors - 1: if RAID1 or RAID10 is
                                 *                  configured and more than
                                 *                  2 mirrors are used.
                                 */
                                if (num_tolerated_disk_barrier_failures > 0 &&
                                    ((flags & (BTRFS_BLOCK_GROUP_DUP |
                                               BTRFS_BLOCK_GROUP_RAID0)) ||
                                     ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
                                      == 0)))
                                        num_tolerated_disk_barrier_failures = 0;
                                else if (num_tolerated_disk_barrier_failures > 1) {
                                        if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
                                            BTRFS_BLOCK_GROUP_RAID5 |
                                            BTRFS_BLOCK_GROUP_RAID10)) {
                                                num_tolerated_disk_barrier_failures = 1;
                                        } else if (flags &
                                                   BTRFS_BLOCK_GROUP_RAID6) {
                                                num_tolerated_disk_barrier_failures = 2;
                                        }
                                }
                        }
                }
                up_read(&sinfo->groups_sem);
        }

        return num_tolerated_disk_barrier_failures;
}

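/*
 * Write the current super block to every writeable device that is part
 * of the FS metadata, in two passes: pass one submits the buffers (and
 * optionally sends flush barriers down first), pass two waits for them.
 * Up to max_errors (num_devices - 1) per-device failures are tolerated.
 */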
static int write_all_supers(struct btrfs_root *root, int max_mirrors)
{
        struct list_head *head;
        struct btrfs_device *dev;
        struct btrfs_super_block *sb;
        struct btrfs_dev_item *dev_item;
        int ret;
        int do_barriers;
        int max_errors;
        int total_errors = 0;
        u64 flags;

        max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
        do_barriers = !btrfs_test_opt(root, NOBARRIER);
        backup_super_roots(root->fs_info);

        sb = root->fs_info->super_for_commit;
        dev_item = &sb->dev_item;

        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
        head = &root->fs_info->fs_devices->devices;

        if (do_barriers) {
                ret = barrier_all_devices(root->fs_info);
                if (ret) {
                        mutex_unlock(
                                &root->fs_info->fs_devices->device_list_mutex);
                        btrfs_error(root->fs_info, ret,
                                    "errors while submitting device barriers.");
                        return ret;
                }
        }

        list_for_each_entry_rcu(dev, head, dev_list) {
                if (!dev->bdev) {
                        total_errors++;
                        continue;
                }
                if (!dev->in_fs_metadata || !dev->writeable)
                        continue;

                btrfs_set_stack_device_generation(dev_item, 0);
                btrfs_set_stack_device_type(dev_item, dev->type);
                btrfs_set_stack_device_id(dev_item, dev->devid);
                btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
                btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
                btrfs_set_stack_device_io_align(dev_item, dev->io_align);
                btrfs_set_stack_device_io_width(dev_item, dev->io_width);
                btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
                memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
                memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);

                flags = btrfs_super_flags(sb);
                btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);

                ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
                if (ret)
                        total_errors++;
        }
        if (total_errors > max_errors) {
                printk(KERN_ERR "btrfs: %d errors while writing supers\n",
                       total_errors);

                /* This shouldn't happen. FUA is masked off if unsupported */
                BUG();
        }

        total_errors = 0;
        list_for_each_entry_rcu(dev, head, dev_list) {
                if (!dev->bdev)
                        continue;
                if (!dev->in_fs_metadata || !dev->writeable)
                        continue;

                ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
                if (ret)
                        total_errors++;
        }
        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
        if (total_errors > max_errors) {
                btrfs_error(root->fs_info, -EIO,
                            "%d errors while writing supers", total_errors);
                return -EIO;
        }
        return 0;
}

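/*
 * Thin wrapper around write_all_supers(); @trans is unused here,
 * presumably kept so the prototype matches the other commit-time
 * helpers.
 */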
int write_ctree_super(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root, int max_mirrors)
{
        return write_all_supers(root, max_mirrors);
}

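/*
 * Remove @root from the fs_roots radix tree and free it.  When the
 * filesystem is in an error state the log trees were never torn down
 * by a commit, so they are freed here as well.
 */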
void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
        spin_lock(&fs_info->fs_roots_radix_lock);
        radix_tree_delete(&fs_info->fs_roots_radix,
                          (unsigned long)root->root_key.objectid);
        spin_unlock(&fs_info->fs_roots_radix_lock);

        if (btrfs_root_refs(&root->root_item) == 0)
                synchronize_srcu(&fs_info->subvol_srcu);

        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
                btrfs_free_log(NULL, root);
                btrfs_free_log_root_tree(NULL, fs_info);
        }

        __btrfs_remove_free_space_cache(root->free_ino_pinned);
        __btrfs_remove_free_space_cache(root->free_ino_ctl);
        free_fs_root(root);
}

static void free_fs_root(struct btrfs_root *root)
{
        iput(root->cache_inode);
        WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
        if (root->anon_dev)
                free_anon_bdev(root->anon_dev);
        free_extent_buffer(root->node);
        free_extent_buffer(root->commit_root);
        kfree(root->free_ino_ctl);
        kfree(root->free_ino_pinned);
        kfree(root->name);
        kfree(root);
}

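/*
 * Run orphan cleanup on every fs root currently cached in the radix
 * tree, in batches of ARRAY_SIZE(gang).  Returns the first error from
 * btrfs_orphan_cleanup(), or 0 once the tree is exhausted.
 */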
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
{
        u64 root_objectid = 0;
        struct btrfs_root *gang[8];
        int i;
        int ret;

        while (1) {
                ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
                                             (void **)gang, root_objectid,
                                             ARRAY_SIZE(gang));
                if (!ret)
                        break;

                root_objectid = gang[ret - 1]->root_key.objectid + 1;
                for (i = 0; i < ret; i++) {
                        int err;

                        root_objectid = gang[i]->root_key.objectid;
                        err = btrfs_orphan_cleanup(gang[i]);
                        if (err)
                                return err;
                }
                root_objectid++;
        }
        return 0;
}

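/*
 * Commit everything for a clean unmount: flush delayed iputs, wait for
 * the cleaner to finish, commit twice (the second run drops the
 * original snapshot, see below), then write out the super blocks.
 */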
int btrfs_commit_super(struct btrfs_root *root)
{
        struct btrfs_trans_handle *trans;
        int ret;

        mutex_lock(&root->fs_info->cleaner_mutex);
        btrfs_run_delayed_iputs(root);
        mutex_unlock(&root->fs_info->cleaner_mutex);
        wake_up_process(root->fs_info->cleaner_kthread);

        /* wait until ongoing cleanup work done */
        down_write(&root->fs_info->cleanup_work_sem);
        up_write(&root->fs_info->cleanup_work_sem);

        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans))
                return PTR_ERR(trans);
        ret = btrfs_commit_transaction(trans, root);
        if (ret)
                return ret;

        /* run commit again to drop the original snapshot */
        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans))
                return PTR_ERR(trans);
        ret = btrfs_commit_transaction(trans, root);
        if (ret)
                return ret;

        ret = btrfs_write_and_wait_transaction(NULL, root);
        if (ret) {
                btrfs_error(root->fs_info, ret,
                            "Failed to sync btree inode to disk.");
                return ret;
        }

        ret = write_ctree_super(NULL, root, 0);
        return ret;
}

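/*
 * The unmount counterpart of open_ctree(): pause balance and
 * dev-replace so they can resume on the next mount, cancel scrub,
 * commit (or error-commit) the last transaction, stop the worker
 * kthreads and release everything the mount set up.
 */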
int close_ctree(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        int ret;

        fs_info->closing = 1;
        smp_mb();

        /* pause restriper - we want to resume on mount */
        btrfs_pause_balance(fs_info);

        btrfs_dev_replace_suspend_for_unmount(fs_info);

        btrfs_scrub_cancel(fs_info);

        /* wait for any defraggers to finish */
        wait_event(fs_info->transaction_wait,
                   (atomic_read(&fs_info->defrag_running) == 0));

        /* clear out the rbtree of defraggable inodes */
        btrfs_cleanup_defrag_inodes(fs_info);

        if (!(fs_info->sb->s_flags & MS_RDONLY)) {
                ret = btrfs_commit_super(root);
                if (ret)
                        printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
        }

        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
                btrfs_error_commit_super(root);

        btrfs_put_block_group_cache(fs_info);

        kthread_stop(fs_info->transaction_kthread);
        kthread_stop(fs_info->cleaner_kthread);

        fs_info->closing = 2;
        smp_mb();

        btrfs_free_qgroup_config(root->fs_info);

        if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
                printk(KERN_INFO "btrfs: at unmount delalloc count %lld\n",
                       percpu_counter_sum(&fs_info->delalloc_bytes));
        }

        free_root_pointers(fs_info, 1);

        btrfs_free_block_groups(fs_info);

        del_fs_roots(fs_info);

        iput(fs_info->btree_inode);

        btrfs_stop_all_workers(fs_info);

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
        if (btrfs_test_opt(root, CHECK_INTEGRITY))
                btrfsic_unmount(root, fs_info->fs_devices);
#endif

        btrfs_close_devices(fs_info->fs_devices);
        btrfs_mapping_tree_free(&fs_info->mapping_tree);

        percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
        percpu_counter_destroy(&fs_info->delalloc_bytes);
        bdi_destroy(&fs_info->bdi);
        cleanup_srcu_struct(&fs_info->subvol_srcu);

        btrfs_free_stripe_hash_table(fs_info);

        return 0;
}

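/*
 * Returns nonzero when @buf is uptodate and its generation matches
 * @parent_transid, 0 when it does not, and -EAGAIN when @atomic is set
 * and answering would require blocking.
 */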
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
                          int atomic)
{
        int ret;
        struct inode *btree_inode = buf->pages[0]->mapping->host;

        ret = extent_buffer_uptodate(buf);
        if (!ret)
                return ret;

        ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
                                    parent_transid, atomic);
        if (ret == -EAGAIN)
                return ret;
        return !ret;
}

int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
{
        return set_extent_buffer_uptodate(buf);
}

void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
        struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
        u64 transid = btrfs_header_generation(buf);
        int was_dirty;

        btrfs_assert_tree_locked(buf);
        if (transid != root->fs_info->generation)
                WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
                     "found %llu running %llu\n",
                     (unsigned long long)buf->start,
                     (unsigned long long)transid,
                     (unsigned long long)root->fs_info->generation);
        was_dirty = set_extent_buffer_dirty(buf);
        if (!was_dirty)
                __percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
                                     buf->len,
                                     root->fs_info->dirty_metadata_batch);
}

static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
                                        int flush_delayed)
{
        /*
         * looks as though older kernels can get into trouble with
         * this code, they end up stuck in balance_dirty_pages forever
         */
        int ret;

        if (current->flags & PF_MEMALLOC)
                return;

        if (flush_delayed)
                btrfs_balance_delayed_items(root);

        ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
                                     BTRFS_DIRTY_METADATA_THRESH);
        if (ret > 0) {
                balance_dirty_pages_ratelimited(
                                   root->fs_info->btree_inode->i_mapping);
        }
}

void btrfs_btree_balance_dirty(struct btrfs_root *root)
{
        __btrfs_btree_balance_dirty(root, 1);
}

void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
{
        __btrfs_btree_balance_dirty(root, 0);
}

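/*
 * Illustrative call pattern (not a fixed API contract): code paths
 * that dirty a lot of metadata typically do
 *
 *      btrfs_mark_buffer_dirty(buf);
 *      ...
 *      btrfs_btree_balance_dirty(root);
 *
 * so that they throttle themselves once the dirty metadata counter
 * crosses BTRFS_DIRTY_METADATA_THRESH.
 */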
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
        struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
        return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
}

static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
                                   int read_only)
{
        /*
         * Placeholder for checks
         */
        return 0;
}

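/*
 * Used instead of btrfs_commit_super() when the filesystem is already
 * in an error state: drain the cleaner, then tear the open
 * transactions down instead of committing them.
 */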
static void btrfs_error_commit_super(struct btrfs_root *root)
{
        mutex_lock(&root->fs_info->cleaner_mutex);
        btrfs_run_delayed_iputs(root);
        mutex_unlock(&root->fs_info->cleaner_mutex);

        down_write(&root->fs_info->cleanup_work_sem);
        up_write(&root->fs_info->cleanup_work_sem);

        /* cleanup FS via transaction */
        btrfs_cleanup_transaction(root);
}

static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
                                             struct btrfs_root *root)
{
        struct btrfs_inode *btrfs_inode;
        struct list_head splice;

        INIT_LIST_HEAD(&splice);

        mutex_lock(&root->fs_info->ordered_operations_mutex);
        spin_lock(&root->fs_info->ordered_extent_lock);

        list_splice_init(&t->ordered_operations, &splice);
        while (!list_empty(&splice)) {
                btrfs_inode = list_entry(splice.next, struct btrfs_inode,
                                         ordered_operations);

                list_del_init(&btrfs_inode->ordered_operations);

                btrfs_invalidate_inodes(btrfs_inode->root);
        }

        spin_unlock(&root->fs_info->ordered_extent_lock);
        mutex_unlock(&root->fs_info->ordered_operations_mutex);
}

static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
{
        struct btrfs_ordered_extent *ordered;

        spin_lock(&root->fs_info->ordered_extent_lock);
        /*
         * This will just short circuit the ordered completion stuff which will
         * make sure the ordered extent gets properly cleaned up.
         */
        list_for_each_entry(ordered, &root->fs_info->ordered_extents,
                            root_extent_list)
                set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
        spin_unlock(&root->fs_info->ordered_extent_lock);
}

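/*
 * Throw away every delayed ref still queued on @trans.  Heads whose
 * mutex is held are waited for; heads with a pending reserved insert
 * have their extent pinned first so it is reclaimed later by
 * btrfs_destroy_pinned_extent().
 */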
int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                               struct btrfs_root *root)
{
        struct rb_node *node;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_node *ref;
        int ret = 0;

        delayed_refs = &trans->delayed_refs;

        spin_lock(&delayed_refs->lock);
        if (delayed_refs->num_entries == 0) {
                spin_unlock(&delayed_refs->lock);
                printk(KERN_INFO "delayed_refs has NO entry\n");
                return ret;
        }

        while ((node = rb_first(&delayed_refs->root)) != NULL) {
                struct btrfs_delayed_ref_head *head = NULL;

                ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
                atomic_set(&ref->refs, 1);
                if (btrfs_delayed_ref_is_head(ref)) {

                        head = btrfs_delayed_node_to_head(ref);
                        if (!mutex_trylock(&head->mutex)) {
                                atomic_inc(&ref->refs);
                                spin_unlock(&delayed_refs->lock);

                                /* Need to wait for the delayed ref to run */
                                mutex_lock(&head->mutex);
                                mutex_unlock(&head->mutex);
                                btrfs_put_delayed_ref(ref);

                                spin_lock(&delayed_refs->lock);
                                continue;
                        }

                        if (head->must_insert_reserved)
                                btrfs_pin_extent(root, ref->bytenr,
                                                 ref->num_bytes, 1);
                        btrfs_free_delayed_extent_op(head->extent_op);
                        delayed_refs->num_heads--;
                        if (list_empty(&head->cluster))
                                delayed_refs->num_heads_ready--;
                        list_del_init(&head->cluster);
                }

                ref->in_tree = 0;
                rb_erase(&ref->rb_node, &delayed_refs->root);
                delayed_refs->num_entries--;
                if (head)
                        mutex_unlock(&head->mutex);
                spin_unlock(&delayed_refs->lock);
                btrfs_put_delayed_ref(ref);

                cond_resched();
                spin_lock(&delayed_refs->lock);
        }

        spin_unlock(&delayed_refs->lock);

        return ret;
}

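/*
 * Fail every snapshot creation still pending on transaction @t with
 * -ECANCELED so the waiting creators can bail out.
 */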
static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t)
{
        struct btrfs_pending_snapshot *snapshot;
        struct list_head splice;

        INIT_LIST_HEAD(&splice);

        list_splice_init(&t->pending_snapshots, &splice);

        while (!list_empty(&splice)) {
                snapshot = list_entry(splice.next,
                                      struct btrfs_pending_snapshot,
                                      list);
                snapshot->error = -ECANCELED;
                list_del_init(&snapshot->list);
        }
}

static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
{
        struct btrfs_inode *btrfs_inode;
        struct list_head splice;

        INIT_LIST_HEAD(&splice);

        spin_lock(&root->fs_info->delalloc_lock);
        list_splice_init(&root->fs_info->delalloc_inodes, &splice);

        while (!list_empty(&splice)) {
                btrfs_inode = list_entry(splice.next, struct btrfs_inode,
                                         delalloc_inodes);

                list_del_init(&btrfs_inode->delalloc_inodes);
                clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
                          &btrfs_inode->runtime_flags);

                btrfs_invalidate_inodes(btrfs_inode->root);
        }

        spin_unlock(&root->fs_info->delalloc_lock);
}

static int btrfs_destroy_marked_extents(struct btrfs_root *root,
                                        struct extent_io_tree *dirty_pages,
                                        int mark)
{
        int ret;
        struct extent_buffer *eb;
        u64 start = 0;
        u64 end;

        while (1) {
                ret = find_first_extent_bit(dirty_pages, start, &start, &end,
                                            mark, NULL);
                if (ret)
                        break;

                clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
                while (start <= end) {
                        eb = btrfs_find_tree_block(root, start,
                                                   root->leafsize);
                        /* advance before the NULL check, eb may not exist */
                        start += root->leafsize;
                        if (!eb)
                                continue;
                        wait_on_extent_buffer_writeback(eb);

                        if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
                                               &eb->bflags))
                                clear_extent_buffer_dirty(eb);
                        free_extent_buffer_stale(eb);
                }
        }

        return ret;
}

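/*
 * Error-path version of extent unpinning: clear (and optionally
 * discard) everything left in both freed_extents trees, then hand the
 * ranges back to the free space accounting.
 */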
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
                                       struct extent_io_tree *pinned_extents)
{
        struct extent_io_tree *unpin;
        u64 start;
        u64 end;
        int ret;
        bool loop = true;

        unpin = pinned_extents;
again:
        while (1) {
                ret = find_first_extent_bit(unpin, 0, &start, &end,
                                            EXTENT_DIRTY, NULL);
                if (ret)
                        break;

                /* opt_discard */
                if (btrfs_test_opt(root, DISCARD))
                        ret = btrfs_error_discard_extent(root, start,
                                                         end + 1 - start,
                                                         NULL);

                clear_extent_dirty(unpin, start, end, GFP_NOFS);
                btrfs_error_unpin_extent_range(root, start, end);
                cond_resched();
        }

        if (loop) {
                if (unpin == &root->fs_info->freed_extents[0])
                        unpin = &root->fs_info->freed_extents[1];
                else
                        unpin = &root->fs_info->freed_extents[0];
                loop = false;
                goto again;
        }

        return 0;
}

void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
                                   struct btrfs_root *root)
{
        btrfs_destroy_delayed_refs(cur_trans, root);
        btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
                                cur_trans->dirty_pages.dirty_bytes);

        /* FIXME: cleanup wait for commit */
        cur_trans->in_commit = 1;
        cur_trans->blocked = 1;
        wake_up(&root->fs_info->transaction_blocked_wait);

        btrfs_evict_pending_snapshots(cur_trans);

        cur_trans->blocked = 0;
        wake_up(&root->fs_info->transaction_wait);

        cur_trans->commit_done = 1;
        wake_up(&cur_trans->commit_wait);

        btrfs_destroy_delayed_inodes(root);
        btrfs_assert_delayed_root_empty(root);

        btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
                                     EXTENT_DIRTY);
        btrfs_destroy_pinned_extent(root,
                                    root->fs_info->pinned_extents);

        /*
        memset(cur_trans, 0, sizeof(*cur_trans));
        kmem_cache_free(btrfs_transaction_cachep, cur_trans);
        */
}

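/*
 * Abort-path teardown of every transaction still on trans_list: each
 * one is marked as committed while its waiters are woken, then its
 * remaining state (ordered extents, delayed refs/inodes, delalloc,
 * dirty and pinned extents) is destroyed and the struct freed.
 */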
static int btrfs_cleanup_transaction(struct btrfs_root *root)
{
        struct btrfs_transaction *t;
        LIST_HEAD(list);

        mutex_lock(&root->fs_info->transaction_kthread_mutex);

        spin_lock(&root->fs_info->trans_lock);
        list_splice_init(&root->fs_info->trans_list, &list);
        root->fs_info->trans_no_join = 1;
        spin_unlock(&root->fs_info->trans_lock);

        while (!list_empty(&list)) {
                t = list_entry(list.next, struct btrfs_transaction, list);

                btrfs_destroy_ordered_operations(t, root);

                btrfs_destroy_ordered_extents(root);

                btrfs_destroy_delayed_refs(t, root);

                /* FIXME: cleanup wait for commit */
                t->in_commit = 1;
                t->blocked = 1;
                smp_mb();
                if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
                        wake_up(&root->fs_info->transaction_blocked_wait);

                btrfs_evict_pending_snapshots(t);

                t->blocked = 0;
                smp_mb();
                if (waitqueue_active(&root->fs_info->transaction_wait))
                        wake_up(&root->fs_info->transaction_wait);

                t->commit_done = 1;
                smp_mb();
                if (waitqueue_active(&t->commit_wait))
                        wake_up(&t->commit_wait);

                btrfs_destroy_delayed_inodes(root);
                btrfs_assert_delayed_root_empty(root);

                btrfs_destroy_delalloc_inodes(root);

                spin_lock(&root->fs_info->trans_lock);
                root->fs_info->running_transaction = NULL;
                spin_unlock(&root->fs_info->trans_lock);

                btrfs_destroy_marked_extents(root, &t->dirty_pages,
                                             EXTENT_DIRTY);

                btrfs_destroy_pinned_extent(root,
                                            root->fs_info->pinned_extents);

                atomic_set(&t->use_count, 0);
                list_del_init(&t->list);
                memset(t, 0, sizeof(*t));
                kmem_cache_free(btrfs_transaction_cachep, t);
        }

        spin_lock(&root->fs_info->trans_lock);
        root->fs_info->trans_no_join = 0;
        spin_unlock(&root->fs_info->trans_lock);
        mutex_unlock(&root->fs_info->transaction_kthread_mutex);

        return 0;
}

static struct extent_io_ops btree_extent_io_ops = {
        .readpage_end_io_hook = btree_readpage_end_io_hook,
        .readpage_io_failed_hook = btree_io_failed_hook,
        .submit_bio_hook = btree_submit_bio_hook,
        /* note we're sharing with inode.c for the merge bio hook */
        .merge_bio_hook = btrfs_merge_bio_hook,
};